Dataset fields:

    field            type      lengths / values
    ---------------  --------  -----------------------
    query            string    9 to 9.05k characters
    document         string    10 to 222k characters
    metadata         dict      -
    negatives        sequence  30 items per row
    negative_scores  sequence  30 items per row
    document_score   string    4 to 10 characters
    document_rank    string    2 distinct values
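
A minimal usage sketch with the Hugging Face datasets library, assuming the data is published on the Hub; the repository id below is a placeholder, not the actual dataset name.

    from datasets import load_dataset

    # placeholder repository id -- substitute the real dataset name
    ds = load_dataset("your-org/code-query-triplets", split="train")

    row = ds[0]
    print(row["query"])                  # natural-language description of the code
    print(row["document"])               # the matching (positive) code snippet
    print(len(row["negatives"]))         # 30 mined negative snippets
    print(row["metadata"]["objective"])  # training objectives this row supports

Example rows follow, in the field order listed above.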
If there are sequence numbers in the IPv6 ACL, remove them
def _rm_ipv6_acl_sequence_numbers(self):
    for acl in self.get_children('startswith', 'ipv6 access-list '):
        for entry in acl.children:
            if entry.text.startswith('sequence'):
                entry.text = ' '.join(entry.text.split()[2:])
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_acl(self, **kwargs):\n # Validate required and accepted parameters\n params_validator.validate_params_slx_ver17s_apply_acl(**kwargs)\n\n # Parse params\n acl_name = self.ip.parse_acl_name(**kwargs)\n callback = kwargs.pop('callback', self._callback)\n acl = self._get_acl_info(acl_name, get_seqs=False)\n address_type = acl['protocol']\n\n kwargs['address_type'] = address_type\n # Parse params\n user_data = self._parse_params_for_apply_or_remove_acl(**kwargs)\n\n self.validate_interfaces(callback, user_data)\n\n result = {}\n for intf in user_data['interface_list']:\n user_data['intf'] = intf\n t = jinja2.Template(acl_template.acl_remove)\n config = t.render(**user_data)\n config = ' '.join(config.split())\n try:\n callback(config)\n result[intf] = True\n except Exception as e:\n if '<bad-element>access-group</bad-element>' in str(e):\n result[intf] = None\n else:\n raise\n return result", "def _FixIPv6Address(self, netblocks):\n new_list = []\n length = len(netblocks)\n if length > 0:\n number_ipv6 = 0\n for netblock in netblocks:\n if netblock.version == 4:\n new_list.append(netblock)\n elif netblock.version == 6:\n number_ipv6 += 1\n if number_ipv6 == length:\n return True, new_list\n return False, new_list", "def remove_sequence(self):\n self.sequence_fragment_list = []", "def _add_acl_sequence_numbers(self):\n\n ipv4_acl_sw = 'ip access-list'\n # ipv6_acl_sw = ('ipv6 access-list')\n if self.host.os in ['ios']:\n acl_line_sw = ('permit', 'deny')\n else:\n acl_line_sw = ('permit', 'deny', 'remark')\n for child in self.children:\n if child.text.startswith(ipv4_acl_sw):\n sn = 10\n for sub_child in child.children:\n if sub_child.text.startswith(acl_line_sw):\n sub_child.text = \"{} {}\".format(sn, sub_child.text)\n sn += 10\n\n return self", "def fours_removed(seq):\n length = len(seq) - 4\n new_seq = seq[4:length:2]\n return new_seq", "def delete_acl(self, sg):\n self.security_group_driver.delete_acl(sg)", "def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))", "def remove_4s_every_other_in_between(seq):\n seq_copy = seq [4:-4:2]\n return seq_copy", "def remove_ipv6(self, id_equip, id_ipv6):\n\n if not is_valid_int_param(id_equip):\n raise InvalidParameterError(\n u'The identifier of equipment is invalid or was not informed.')\n\n if not is_valid_int_param(id_ipv6):\n raise InvalidParameterError(\n u'The identifier of ip is invalid or was not informed.')\n\n url = 'ipv6/' + str(id_ipv6) + '/equipment/' + \\\n str(id_equip) + '/remove/'\n\n code, xml = self.submit(None, 'DELETE', url)\n\n return self.response(code, xml)", "def clear_ipv4_addresses(self, net_interface):\n ip_info = self.get_ipv4_addresses(net_interface)\n\n for address, _ in ip_info:\n self.remove_ipv4_address(net_interface, address)", "def delIfMatchedAddr(ipv4Addresses_, fIpv4Addresses_):\n s1 = netaddr.IPSet(ipv4Addresses_)\n l2 = []\n for i in fIpv4Addresses_[:]:\n m = re.search(r'(.*) \\.\\.\\. 
(.*)', i)\n if not m:\n l2.append(i)\n else:\n l2 += netaddr.IPSet(netaddr.iter_iprange(m.group(1), m.group(2)))\n s2 = netaddr.IPSet(l2)\n return map(str, list(s1 - s2))", "def removeDuplicates(seq):\n\n pass", "def remove_ats(self):\n\t\tfor key in self.keys():\n\t\t\tif key[:1] == '@':\n\t\t\t\ttry: del self[key]\n\t\t\t\texcept: pass", "def FilterIPv4InIPv6FormatAddrs(addrs):\n filtered = []\n for addr in addrs:\n ipaddr = ipaddress.ip_interface(addr).ip\n if isinstance(ipaddr, ipaddress.IPv6Address):\n ipv6 = ipaddress.IPv6Address(ipaddr)\n # Check if it's an IPv4-mapped or 6to4 address.\n if ipv6.ipv4_mapped is not None or ipv6.sixtofour is not None:\n continue\n # Check if it's an IPv4-compatible address.\n if ipv6.packed.hex(\n )[:24] == '000000000000000000000000' and not ipv6.is_unspecified:\n continue\n filtered += [addr]\n return filtered", "def remove_pgident_mapping(self, user):\n pass", "def remove_guff(seqs):\n new_seqs = {}\n stop_codons = [\"TGA\", \"TAA\", \"TAG\"]\n for key, value in seqs.items():\n new_seq = \"\"\n for i in range(len(value)-2):\n if value[i:i+3] == \"ATG\":\n break\n\n for j in range(i, len(value)-2, 3):\n if value[j:j+3] in stop_codons:\n new_seqs[key] = value[i:j+3]\n break\n\n return new_seqs", "def purge_redundancy(scaff_list):\n for scaff in list(scaff_list):\n if len(scaff) < 4:\n scaff_list.remove(scaff)\n\n to_delete = [\"deleted\"] #place-marker for deleted scaffolds\n \n for n in range(0,(len(scaff_list)-1)):\n\n if scaff_list[n] != to_delete: \n n_core = scaff_list[n][1:-1]\n for m in range((n+1),len(scaff_list)):\n if scaff_list[m] != to_delete:\n m_core = scaff_list[m][1:-1]\n if list_in_list(m_core, scaff_list[n]):\n scaff_list[m] = to_delete\n elif list_in_list(n_core, scaff_list[m]):\n scaff_list[n] = to_delete\n \n if \"dummy\" in m_core[0]:\n if list_in_list([m_core[1]], scaff_list[n]) or list_in_list([m_core[2]], scaff_list[n]):\n scaff_list[m] = to_delete\n elif \"dummy\" in n_core[0]:\n if list_in_list([n_core[1]], scaff_list[m]) or list_in_list([n_core[2]], scaff_list[m]):\n scaff_list[n] = to_delete\n \n while to_delete in scaff_list:\n scaff_list.remove(to_delete)\n \n return scaff_list", "def remove_many_descriptors(self, uuids):", "def remove_aids(infr, aids):\n infr.print('remove_aids len(aids)={}'.format(len(aids)), level=3)\n\n # Determine which edges are going to be removed\n remove_edges = nxu.edges_outgoing(infr.graph, aids)\n\n old_groups = list(infr.positive_components())\n\n # Remove from tertiary bookkeeping structures\n remove_idxs = list(ut.take(ut.make_index_lookup(infr.aids), aids))\n ut.delete_items_by_index(infr.orig_name_labels, remove_idxs)\n ut.delete_items_by_index(infr.aids, remove_idxs)\n infr.aids_set = set(infr.aids)\n\n # Remove from secondary bookkeeping structures\n ut.delete_dict_keys(infr.external_feedback, remove_edges)\n ut.delete_dict_keys(infr.internal_feedback, remove_edges)\n\n # Remove from core bookkeeping structures\n infr.graph.remove_nodes_from(aids)\n for graph in infr.review_graphs.values():\n graph.remove_nodes_from(aids)\n\n infr.queue.delete_items(remove_edges)\n\n # TODO: should refactor to preform a dyanmic step, but in this case is\n # less work to use a bazooka to shoot a fly.\n infr.apply_nondynamic_update()\n\n # I'm unsure if relabeling is necessary\n infr.relabel_using_reviews()\n\n new_groups = list(infr.positive_components())\n\n # logger.info('old_groups = {!r}'.format(old_groups))\n # logger.info('new_groups = {!r}'.format(new_groups))\n delta = 
ut.grouping_delta(old_groups, new_groups)\n splits = delta['splits']\n\n n_old = len(splits['old'])\n n_new = len(list(ut.flatten(splits['new'])))\n infr.print(\n 'removing {} aids split {} old PCCs into {} new PCCs'.format(\n len(aids), n_old, n_new\n )\n )\n\n return splits\n # logger.info(ub.repr2(delta, nl=2))", "def remove_descriptor(self, uuid):", "def remove_dead_entries(self, packet):\n for route in self.forwarding_table:\n for dead_entry in packet[MESG]:\n sameSource = route[SRCE] == packet[SRCE]\n sameDest = route[DEST] == packet[DEST]\n if sameSource and sameDest and dead_entry[NTWK] == route[NTWK] and dead_entry[NMSK] == route[NMSK]:\n self.forwarding_table.remove(route)\n self.revoked.append(route)\n break", "def mask_tokens(self, sequence):\n n_tokens = len(sequence)\n n_masked_tokens = int(self.masking_proportion*n_tokens/100)\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n while len(set(indexes))!=n_masked_tokens:\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n sequence = np.array(sequence)\n sequence[indexes] = 4\n return list(sequence)", "def delete_port_acl(self, port, acl):\n raise NotImplementedError # pragma: no cover", "def remove_padding(paddedMsg, block_size): \n try:\n if not valid_padding(paddedMsg, block_size):\n raise ValueError\n except ValueError:\n print(f\"{ paddedMsg } has invalid PKCS#7 padding.\")\n return\n \n last_byte = paddedMsg[-1]\n unpadded = paddedMsg[:-last_byte]\n print(f\"Padding removed successfully...\")\n print(f\"Before padding removal: { paddedMsg }\")\n print(f\"After padding removal: { unpadded }\")", "def short_whitelist(whitelist):\n for x in [\"guid-4\", \"guid-5\"]:\n whitelist.remove(x)\n return whitelist", "def test_removeMgmtObjCSIfromAT(self) -> None:\n\t\tat = findXPath(TestRemote_Annc.bat, 'm2m:bat/at').copy()\n\t\tat.pop(0) # remove REMOTECSEID\n\t\tdct = \t{ 'm2m:bat' : {\n\t\t\t\t\t'at' : at \t\t\t# with REMOTECSEID removed\n\t\t\t\t}}\n\t\tr, rsc = UPDATE(batURL, ORIGINATOR, dct)\n\t\tself.assertEqual(rsc, RC.updated)\n\t\tself.assertEqual(len(findXPath(r, 'm2m:bat/at')), 0)\n\n\t\tr, rsc = RETRIEVE(f'{REMOTEURL}/~{TestRemote_Annc.remoteBatRI}', ORIGINATOR)\n\t\tself.assertEqual(rsc, RC.notFound)", "def _clean(self):\n limit = datetime.now() - timedelta(seconds=self._timeout)\n \n for uid in [uid for uid, timestamp in self._reservedUID.iteritems()\n if timestamp < limit]:\n del self._reservedUID[uid]", "def upgradeFixInvitations():\n from base import get_group_database, get_user_database\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n for user in user_db.root.values():\n for g in group_db.users_groups(user):\n g.remove_invitation(user)\n group_db.invitations.remove_all_user_invitations(user, g)", "def removeAll(self, addr: ghidra.program.model.address.Address) -> None:\n ...", "def delete_acl_rule(self, sgr):\n self.security_group_driver.delete_acl_rule(sgr)" ]
[ "0.5408441", "0.5385359", "0.52064013", "0.5154123", "0.51238096", "0.5115195", "0.5109617", "0.5068291", "0.5053385", "0.5036614", "0.49867594", "0.4969584", "0.49682197", "0.4940877", "0.49407664", "0.49260843", "0.49187565", "0.48895997", "0.48849586", "0.4824361", "0.48172918", "0.47985795", "0.47385702", "0.47365797", "0.4733479", "0.47264966", "0.4710033", "0.46931696", "0.4692117", "0.4690044" ]
document_score: 0.8416185
document_rank: 0
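
The objective metadata marks each row for triplet-style training over (query, document, negatives). A minimal sketch of expanding the row loaded above into (anchor, positive, negative) triplets; the helper is illustrative and not part of any library.

    def row_to_triplets(row):
        # one triplet per mined negative: (anchor, positive, negative)
        anchor = row["query"]
        positive = row["document"]
        return [(anchor, positive, negative) for negative in row["negatives"]]

    triplets = row_to_triplets(row)  # 30 triplets for this row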
Add a copy of the ancestry of parent_to_add to self and return the deepest child which is equivalent to parent_to_add
def add_ancestor_copy_of(self, parent_to_add):
    base = self
    for parent in parent_to_add.lineage():
        if parent.root is not parent:
            base = base.add_shallow_copy_of(parent)
    return base
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_parent(self, parent, *args, **kwargs):\n return parent.add_child(self, **kwargs)", "def AddParent(self, parent=None):\n if isinstance(parent, Tree):\n parent.AddNode(self)", "def add_parent(self, node):\n # Ensure that node is a BaseNode\n if not isinstance(node, BaseNode):\n raise TypeError(\n 'add_parent() is expecting a Job or Dagman instance.'\n ' Got an object of type {}'.format(type(node)))\n\n # Don't bother continuing if node is already in the parents list\n if self._hasparent(node):\n return self\n\n # Add node to existing parents\n self.parents.append(node)\n self.logger.debug(\n 'Added {} as a parent for {}'.format(node.name, self.name))\n\n # Add self instance as a child to the new parent node\n node.add_child(self)\n\n return self", "def add_parent(self, child, parent):\r\n setp = self._parents.setdefault(child, set())\r\n setp.add(parent)", "def find_parent(self):\n parent = self._parent\n if parent:\n return parent\n elif not self.is_root:\n psobj = self.get_sobj().GetFather()\n parent = self.__class__(self._std, self._bld, psobj.GetID())\n self._parent = parent\n return parent", "def add_parent(self, parent, add_child=True):\n if parent not in self._parents:\n self._parents.append(parent)\n\n if add_child:\n parent.add_child(self, add_parent=False)\n\n return True\n\n return False", "def addParent(self, parentNode):\n if (type(parentNode)==list or isIterable(parentNode)):\n for p in parentNode:\n if (self.isAncestorOf(p)):\n raise CycleException, \"%s is a decendent of %s, cannot add as parent.\" % (parentNode, self)\n if (p not in self.parents()):\n p._children.append(self)\n self._parents.append(p)\n else:\n if (self.isAncestorOf(parentNode)):\n raise CycleException, \"%s is a decendent of %s, cannot add as parent.\" % (parentNode, self)\n if (parentNode not in self.parents()):\n parentNode._children.append(self)\n self._parents.append(parentNode)", "def parent(self):\n other = self\n while True:\n for rev in other._hgmo['parents']:\n parent = Push(rev)\n if parent.id != self.id:\n return parent\n other = parent", "def set_parent(self, parent):\n if self not in parent.children:\n parent.children.append(self)\n self.parent = parent", "def add_parent(self, node):\n self.parents.append(node)\n self.parent_depencencies_left += 1", "def get_parent(self):\n return BinaryNode.or_none(self.parent)", "def get_parent(self):\n if not self._parent:\n self._parent = yield self.parent_resource.get(self.parent_id)\n\n raise Return(self._parent)", "def add_deep_copy_of(self, child_to_add, merged=False):\n\n new_child = self.add_shallow_copy_of(child_to_add, merged=merged)\n for child in child_to_add.children:\n new_child.add_deep_copy_of(child, merged=merged)\n\n return new_child", "def new_child(self, parent, *args, **kwargs):\n child = self.new_element(*args, **kwargs)\n parent.append(child)\n return child", "def getParent(self):\n\n return self._father", "def addChild(self, node):\n if IElement.providedBy(node):\n node.parent = self\n self.children.append(node)\n return node", "def add_node(self, title='', parent=None, by_title=False):\n\n sel_stmt = []\n conn = self.engine.connect()\n\n # cover cases where id is sent as int, str or row object\n parent_id = parent\n try:\n parent_id = parent_id.id\n except AttributeError:\n pass\n\n if by_title:\n parent_id = self.get_first_id(parent)\n if not parent_id:\n raise Exception('Parent node does not exist.')\n\n if parent_id is not None:\n # check parent exists\n if not self.node_exists(parent_id):\n raise Exception('Parent node 
does not exist.')\n\n # store new node\n new_node_pk = conn.execute(self.nodes.insert(), {'title': title}).inserted_primary_key[0]\n\n # add new paths for all the ancestors of the parent node\n sel_stmt.append(\n select(\n [self.paths.c.ancestor, bindparam('d1', new_node_pk), self.paths.c.depth + 1]\n ).where(\n self.paths.c.descendant == parent_id\n )\n )\n else:\n # add new node\n new_node_pk = conn.execute(self.nodes.insert(), {'title': title}).inserted_primary_key[0]\n\n # add path to self\n sel_stmt.append(\n select(\n [bindparam('a2', new_node_pk), bindparam('d2', new_node_pk), bindparam('l2', 0)]\n )\n )\n\n # add paths\n conn.execute(self.paths.insert().from_select(['ancestor', 'descendant', 'depth'], union_all(*sel_stmt)))\n\n return new_node_pk", "def add_child(self, node):\n if self is node:\n parent_id = \"\"\n _nodeid=\"N_\"+str(0)\n else:\n if not issubclass(node.__class__, Node):\n raise TypeError(\"{}.add_child: arg «node»=«{}», type {} not valid.\".format(self.__class__.__name__, node, type(node)))\n self.childs.append(node)\n node.parent = self\n parent_id = self.TV.selection()[0]\n _nodeid=\"N_\"+str(self.node_count)\n # parent = self.rootnode.get_node_by_id(parent_id)\n # if parent is None:\n # return None\n\n # print(\"self.TV.insert node._nodeid\", node._nodeid)\n # print(\"self.TV.insert node.data\", node.data)\n \n self.TV.insert(parent_id, 'end', _nodeid, text=node.name)\n\n # parent_id = self.TreeView.selection()[0]\n # node_name = askstring(\"New Child\", prompt=\"Enter the node name\", initialvalue=\"\")\n # if not node_name:\n # node_name = \"no-name-node\"\n # # self.TV.insert(item, 'end', 'LC_'+str(self.TVleafref), \n # # text='Load case '+str(self.TVleafref))\n # #self.node_count += 1\n \n # self.TreeView.insert(parent_id, 'end', self._nodeid, text=self.name)\n\n return node", "def add(self, node):\n parent_id = node.get_parent_id()\n \n if parent_id is None:\n return 0\n\n if parent_id == 'root':\n self._root.add_child(node)\n self._count += 1\n return 1\n\n # get parent node if it exists\n parent_node = self.search(parent_id)\n \n if parent_node:\n parent_node.add_child(node)\n self._count += 1\n return 1\n else:\n # parent node doesn't exist yet\n return -1", "def add(self, value, parent=None):\n if self.value is None:\n self.value = value\n self.parent = parent\n return\n if value < self.value:\n if self.left:\n self.left.add(value, parent=self)\n else:\n self.left = __class__(value, parent=self) \n else:\n if self.right:\n self.right.add(value, parent=self)\n else:\n self.right = __class__(value, parent=self)", "def get_parent(self) :\n return self.parent", "def get_parent(self):\n return self._parent", "def _highest_parent_(self):\n if self._parent_ is None:\n return self\n return self._parent_._highest_parent_", "def get_parent(self):\n return self.parent", "def get_parent(self):\n return self.parent", "def get_parent(self):\n return self.parent", "def parent(self):\n return self if self.is_root else self.__parent", "def addChild(self, child):\n #assert child not in self.children\n #if child not in self.children:\n child.parents.append(self)\n self.children.append(child)", "def get_parent(self):\n return self.__parent", "def get_parent(self):\n return self.__parent" ]
[ "0.7143506", "0.6888295", "0.6326358", "0.61969703", "0.59369355", "0.5930564", "0.5896978", "0.5784762", "0.5730257", "0.56396216", "0.5617881", "0.5575947", "0.5527584", "0.5464083", "0.5445128", "0.5442927", "0.543735", "0.54366136", "0.540554", "0.5379422", "0.5377961", "0.5359744", "0.5338458", "0.53234106", "0.53234106", "0.53234106", "0.53125787", "0.5271888", "0.5258161", "0.5258161" ]
document_score: 0.7688856
document_rank: 0
Sets self.order integer on all children
def set_order_weight(self):
    for child in self.all_children():
        for rule in self.options['ordering']:
            if child.lineage_test(rule):
                child.order_weight = rule['order']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_ordering(self):\n self._descendants = sorted(self.unordered_descendants(), key=lambda node: node.ord)\n for (new_ord, node) in enumerate(self._descendants, 1):\n node.ord = new_ord", "def reorder_children(self, increasing=True):\n self.traverse_order_children(self.root, increasing)", "def set_order(self, order):\n self.order = order", "def set_order(self, order):\n self.order = order", "def _sort_tree(self):\n \n self._children = sorted(self._children, key=lambda x : x.id_num)\n for c in self.children:\n if hasattr(c, '_sort_tree'):\n c._sort_tree()\n \n return", "def order(self, order):\n self._order = order", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "def order(self, order):\n\n self._order = order", "def order(self, order=0):\n # type: (int) -> Entity\n self.type_def['order'] = order\n\n return self", "def update_order():", "def update_order():", "def setOrder(self, verbose = 1):\n\n self.order = np.arange(self.atoms.shape[0])\n if verbose > 0:\n string = \"Updated the saved order\"\n ut.infoPrint(string)", "def set_task_order(self, order):\n for task in self.tasks:\n task.order = order", "def order(self):\n raise NotImplementedError()", "def setOrder(self, order):\n\t\tself.orderInData = order", "def order_vertices(self):\r\n \r\n ordered = False\r\n while ordered == False:\r\n for i in range(len(self.vertices)):\r\n ordered = True\r\n for parent in self.vertices[i].parents:\r\n if parent>i:\r\n ordered = False\r\n self.swap_vertices(i, parent)", "def order(orderedObjects):\n for idx, obj in enumerate(orderedObjects):\n obj.position = idx\n obj.save()", "def SetOrder(self, order):\n if self.__order != order:\n self.__order = order\n self.Modified()", "def Order(self) -> int:", "def setLatticeOrder(self):\n\t\taccNodes = self.getNodes()\n\t\telemInLine = {}\n\t\tfor i in range(len(accNodes)):\n\t\t\telem = accNodes[i]\t\t\t\n\t\t\telemname = elem.getName()\n\t\t\tif(elemInLine.has_key(elemname)):\n\t\t\t\telemInLine[elemname] += 1\n\t\t\telse:\telemInLine[elemname] = 1\n\t\t\tnode = self.getNodes()[i]\n\t\t\tnode.setParam(\"TPName\",node.getName()+\"_\"+str(elemInLine[elemname]))\n\t\t\t#node.setParam(\"sequence\",i+1)\n\t\t\t#print \"debug node\",node.getName(),node.getParamsDict()", "def __init__(self, order=0, shape=None):\n self.child = None\n self.shape = shape\n self.order = order\n self.index = 0", "def setOrder(self, *args):\n return _libsbml.CompartmentGlyph_setOrder(self, *args)", "def inorder(self):\n if len(self.children) == 0:\n return ['%s' % self.val]\n params = (' , ').join([' <N> '] * len(self.children))\n inorder = ['%s ( %s ) ' % (self.val, params)]\n for child in self.children:\n inorder += child.inorder()\n\n return inorder", "def normalize_page_order(self,pages):\n for index,page in enumerate(pages):\n page.order = index + 1\n page.save()", "def __init__(self):\n self._order_list = []", "def SortChildren(self, item):\r\n\r\n if not self._attr_set:\r\n setattr(self._main_win, \"OnCompareItems\", self.OnCompareItems)\r\n self._attr_set = True\r\n \r\n self._main_win.SortChildren(item)", "def reorder(self, new_order):\n #TODO doesn't work probably CRA 3/2019\n for field in [\"atoms\", \"xyz\"]:\n self.__dict__[field] = self.__dict__[field][list(new_order)]\n self.atoms = [self.atoms[i] for i in new_order]", "def order ( self ) :\n return self.__order", "def sort_nodes(self):\n nodes = self._chain.root_node.ordered_subnodes_hierarchy()\n self._chain.nodes = nodes" ]
[ "0.7573093", "0.69861233", "0.6946041", "0.6946041", "0.6795527", "0.67951113", "0.6672119", "0.6672119", "0.6672119", "0.65924275", "0.6464976", "0.6464976", "0.6452091", "0.641608", "0.63908696", "0.638663", "0.6239694", "0.62139374", "0.61743754", "0.6174167", "0.61634743", "0.6122527", "0.6119088", "0.6110062", "0.610935", "0.6090148", "0.60283864", "0.60148746", "0.599788", "0.5961194" ]
document_score: 0.7651721
document_rank: 0
Adds the sectional exiting text as a child
def add_sectional_exiting(self):
    # TODO why do we need to delete the sub_child and then recreate it?
    for child in self.all_children():
        for rule in self.options['sectional_exiting']:
            if child.lineage_test(rule):
                if rule['exit_text'] in child:
                    child.del_child_by_text(rule['exit_text'])
                new_child = child.add_child(rule['exit_text'])
                new_child.order_weight = 999
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def section(self, label):\n return self.text(label, bold=True)", "def soft_break(self, el, text):\n\n if el.name == 'p' and el.namespace and el.namespace == self.namespaces[\"text\"]:\n text.append('\\n')", "def make_bottom_text( self ):\n return None", "def add_section(self, text: str) -> None:\n\n tag = r'''\\newpage\n \\section{%s}''' % (text)\n self.doc = self.doc + tag", "def create_caption(section, superscript, text):\n section.append('\\n')\n\n # Superscript\n section.append(bold(pylatex.NoEscape(r'{\\footnotesize \\textsuperscript {' + superscript + '}}')))\n\n # Text\n section.append(italic(pylatex.NoEscape(r'{\\footnotesize {' + text + '}}')))", "def hed(self, part, text):\n n = Node(node_type=Node.APPENDIX, label=[part, self.appendix_letter],\n title=text)\n self.m_stack.push_last((0, n))\n self.paragraph_counter = 0\n self.depth = 0", "def addContent(text):", "def add_subsection(self, text: str) -> None:\n\n tag = r'\\subsection{%s}' % (text)\n self.doc = self.doc + tag", "def get_text(element):\n if element.tag.split('}')[-1] == 'h3':\n return \"\\n\" # New section (double newline)\n return re.sub(\"\\s+\", \" \", ((element.text or '') + ''.join(map(get_text, element)) + (element.tail or '')))", "def start_underline(self):\n pass", "def addContent(self, text):\n text = _coercedUnicode(text)\n c = self.children\n if len(c) > 0 and isinstance(c[-1], unicode):\n c[-1] = c[-1] + text\n else:\n c.append(text)\n return c[-1]", "def add_text(parent, text, transform='', text_height=12, color='#000000'):\n text_style = {'font-size': '%dpx' % text_height, 'font-style': 'normal', 'font-weight': 'normal',\n 'fill': color, 'font-family': 'Bitstream Vera Sans,sans-serif',\n 'text-anchor': 'middle', 'text-align': 'center'}\n\n text_attribs = {\n inkex.addNS('label', 'inkscape'): 'Annotation',\n 'style': simplestyle.formatStyle(text_style)\n }\n if transform != \"translate(0,0)\":\n text_attribs['transform'] = transform\n text_node = inkex.etree.SubElement(parent, inkex.addNS('text', 'svg'), text_attribs)\n text_node.text = text", "def _create_text(self):\n assert len(self.state) > 0\n tmp = \"\"\n for tag in self.state:\n if \"<span\" in tag or \"<div\" in tag:\n continue\n if len(tag) > self._max_len:\n tmp += self.__split_seq(tag) + \"\\n\" + \"\\n\"\n else:\n tmp += tag + \"\\n\" + \"\\n\"\n\n self.text = copy.copy(tmp)", "def section_underline_overindented_and_contentless(): # noqa: D416", "def render(self, value, context=None):\n return self.child_blocks['section_break'].render(value)", "def addSection(self, name, fontstyle=None):\n if self.isFirstSection:\n self.isFirstSection = False\n else:\n self.menu.addSeparator() # menu.add(JSeparator()) ???\n\n label = JLabel(name)\n label.setLocation(4, 4)\n if fontstyle is not None:\n label.font = fontstyle\n self.applyStyle(label)\n self.menu.add(label)", "def save_text(self):\n content = self.get_content()\n if content != '':\n self.text.append((content, self.context, self.ancestor))", "def end_paragraph(self):\n raise NotImplementedError", "def Add_Text( self, th ):\r\n self.text_handle = th", "def add_text_block(self, title, row, column, row_span = 1, column_span = 1, padx = 1, pady = 0, initial_text = ''):\n\n id = 'Widget{}'.format(len(self.widgets.keys()))\n new_text_block = widgets.ScrollTextBlock(id, title, self.grid, row, column, row_span, column_span, padx, pady, initial_text)\n self.widgets[id] = new_text_block\n if self.selected_widget is None:\n self.set_selected_widget(id)\n return new_text_block", "def 
section_header(text):\n\n print \"---- %s ----\" % text", "def add_subsection(self):\n self.add_child() # lint-amnesty, pylint: disable=no-member", "def add_heading(self, level, text):\n heading_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/heading{level}.html')\n heading_output = heading_template.render(text=text)\n self.contents.append(heading_output)", "def _append_strong(cls, text, element, container):\n text = replace_whitespaces(text)\n run = container.add_run(text=text)\n run.bold = True\n if element.getparent().tag == 'em':\n run.italic = True\n return container", "def heading(self):\n return self.wrap(\"-------\")", "def add_subsubsection(self, text: str) -> None:\n\n tag = r'\\subsubsection{%s}' % (text)\n self.doc = self.doc + tag", "def newTextChild(self, parent, name, content):\n if parent is None: parent__o = None\n else: parent__o = parent._o\n ret = libxml2mod.xmlNewTextChild(parent__o, self._o, name, content)\n if ret is None:raise treeError('xmlNewTextChild() failed')\n __tmp = xmlNode(_obj=ret)\n return __tmp", "def set_intro_text_2(self, text, color):\n text_y_position = (self.objects['titleimage'].b() + self.border.bottom()) / 2\n\ttext_y_position = text_y_position - 30\n self.objects['introtext2'] = LCARSText((self.width/2, text_y_position),\n text,\n 36,\n TextAlign.XALIGN_CENTRE, color, Colours.BG, True)", "def section_underline_overindented(): # noqa: D416", "def addIndents(self, prevLevel, nextLevel):\n for num in range(self.level - prevLevel):\n self.textLines[0] = u'<div>%s' % self.textLines[0]\n for num in range(self.level - nextLevel):\n self.textLines[-1] = u'%s</div>' % self.textLines[-1]\n return self.level" ]
[ "0.60722476", "0.5907909", "0.589407", "0.58685046", "0.5809762", "0.5808015", "0.572188", "0.5656439", "0.56449705", "0.56381184", "0.56246924", "0.56188685", "0.5572785", "0.55693084", "0.5529585", "0.55236554", "0.55192935", "0.551879", "0.5510168", "0.54871815", "0.5467052", "0.5456512", "0.53862524", "0.53794414", "0.5375512", "0.537383", "0.53666186", "0.53484154", "0.5329088", "0.5307355" ]
document_score: 0.67059284
document_rank: 0
unpack the provided binary data stream into this class's fields
def unpack(self, stream):
    if not isinstance(stream, StatefulByteStream):
        stream = StatefulByteStream(stream)

    # map unpacked values to the fields that they correspond to
    for field in self.__fields:
        fmt = self.BYTE_ORDER + field.format
        slice_size = field.size * field.count
        if isinstance(field, StructObject):
            field.val = field.unpack(stream.slice(slice_size))
        elif field.sizeof is not None:
            self._unpack_sizeof(field, stream)
        else:
            field.val = struct.unpack(fmt, stream.slice(slice_size))

        # if we unpacked something that was not a collection or a byte
        # string, remove the single element from its containing tuple
        if field.count == 1 and field.typ is not bytes and isinstance(field.val, tuple):
            field.val = field.val[0]

    # return the current instance from unpack to facilitate chaining
    # :class:`StructObject` instances
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unpack(self, headerBytes):\n pass", "def deserialize(self, data):", "def parse_from_bytes(self, raw_buffer):\n\n try:\n (cpu_svn,\n self.misc_select,\n _,\n attributes,\n mr_enclave,\n _,\n mr_signer,\n _,\n self.isv_prod_id,\n self.isv_svn,\n _,\n report_data) = \\\n struct.unpack(self._format, raw_buffer)\n\n # Further parse embedded structures\n self.cpu_svn.parse_from_bytes(cpu_svn)\n self.attributes.parse_from_bytes(attributes)\n self.mr_enclave.parse_from_bytes(mr_enclave)\n self.mr_signer.parse_from_bytes(mr_signer)\n self.report_data.parse_from_bytes(report_data)\n except struct.error as se:\n raise ValueError('Unable to parse: {}'.format(se))", "def extract(self, byte_stream: BytesIO):\n if self.big_endian_ints is None or self.big_endian_floats is None:\n raise Exception(\"Endianness not set before parsing\")\n\n # header extraction\n header: Header = self.header(byte_stream)\n\n # data extraction\n parsed_data: Dict[Any, Any] = {}\n try:\n parsed_data = self.parse_data(byte_stream, header)\n except Exception as e:\n LOGGER.exception(\"Error parsing data\") # Automatically grabs and prints exception info\n\n parsed_data[DataEntryIds.TIME] = header.timestamp\n\n self.big_endian_ints = None\n self.big_endian_floats = None\n return parsed_data", "def deserialize(self, blob):\n pass", "def unpack(stream, **kwargs):\n data = stream.read()\n return unpackb(data, **kwargs)", "def unpack (self, buffer):\n\t\timport struct\n\t\tvalues = struct.unpack (self.struct, buffer)\n\t\tj = 0\n\t\tfor i in self.structref:\n\t\t\tself.value[i[self.NAME]] = values[j]\n\t\t\tj = j + 1", "def load(datastream):", "def _unpack(self, headerBytes):\n xtraH = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n\n self.qubit_id = xtraH[0]\n self.remote_app_id = xtraH[1]\n self.remote_node = xtraH[2]\n self.datetime = xtraH[3]\n self.remote_port = xtraH[4]\n self.outcome = xtraH[5]", "def deserialize(self, instream):\n\n raise Exception(\"Not implemented!\"+self.__class__)", "def _unpack(self, headerBytes):\n xtraH = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n\n self.qubit_id = xtraH[0]\n self.remote_app_id = xtraH[1]\n self.remote_node = xtraH[2]\n self.cmdLength = xtraH[3]\n self.remote_port = xtraH[4]\n self.step = xtraH[5]", "def _unpack(self, headerBytes):\n cqcH = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n self.version = cqcH[0]\n self.tp = cqcH[1]\n self.app_id = cqcH[2]\n self.length = cqcH[3]", "def unpack(self, data):\n ptr = 0\n try:\n ptr, self.transaction_id = bbclib_binary.get_bigint(ptr, data)\n self.idlen_conf[\"transaction_id\"] = len(self.transaction_id)\n ptr, num = bbclib_binary.get_n_byte_int(ptr, 2, data)\n if num == 1:\n ptr, self.asset_id = bbclib_binary.get_bigint(ptr, data)\n self.idlen_conf[\"asset_id\"] = len(self.asset_id)\n else:\n self.asset_id = None\n except:\n return False\n return True", "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 12\n (_x.hlive, _x.hstate, _x.hfinished, _x.pressure, _x.c1, _x.c2, _x.c3, _x.c4, _x.c5, _x.c6, _x.c7, _x.c8,) = _struct_12B.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, reader: serialization.BinaryReader) -> None:\n self.hash_start = reader.read_serializable(types.UInt256)\n self.count = reader.read_int16()", "def decode(raw_bytes, *, serialization=None, subtypes=tuple()):\n raise NotImplementedError", "def deserialize(self, reader: serialization.BinaryReader) -> None:\n 
super(MerkleBlockPayload, self).deserialize(reader)\n self.content_count = reader.read_var_int()\n self.hashes = reader.read_serializable_list(types.UInt256)\n self.flags = reader.read_var_bytes()", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 8\n (self.i,) = _struct_d.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _unpack(self, headerBytes) -> None:\n unpacked = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n\n self.first_operand = unpacked[0]\n self.operator = unpacked[1]\n self.type_of_second_operand = unpacked[2]\n self.second_operand = unpacked[3]\n self.length = unpacked[4]", "def _unpack(self, headerBytes):\n if self._cqc_version < 2:\n header = struct.unpack(self.PACKAGING_FORMAT_V1, headerBytes)\n self.remote_app_id = header[0]\n self.remote_node = header[1]\n self.remote_port = header[2]\n else:\n header = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n self.remote_app_id = header[0]\n self.remote_port = header[1]\n self.remote_node = header[2]", "def unpack(self, s):\n\n raise NotImplementedError()", "def _read_record(self, stream):\n header = stream.read(4)\n if len(header) < 4:\n return None\n size, rec_type = struct.unpack('>HH', header)\n data_type = (rec_type & 0x00ff)\n rec_type = rec_type // 256\n data = None\n if size > 4:\n if data_type == 0x01:\n data = numpy.array(\n struct.unpack('>{0}H'.format((size - 4) // 2),\n stream.read(size - 4)),\n dtype='uint')\n elif data_type == 0x02:\n data = numpy.array(\n struct.unpack('>{0}h'.format((size - 4) // 2),\n stream.read(size - 4)),\n dtype='int')\n elif data_type == 0x03:\n data = numpy.array(\n struct.unpack('>{0}l'.format((size - 4) // 4),\n stream.read(size - 4)),\n dtype='int')\n elif data_type == 0x05:\n data = numpy.array([\n _eight_byte_real_to_float(stream.read(8))\n for _ in range((size - 4) // 8)\n ])\n else:\n data = stream.read(size - 4)\n if str is not bytes:\n if data[-1] == 0:\n data = data[:-1].decode('ascii')\n else:\n data = data.decode('ascii')\n elif data[-1] == '\\0':\n data = data[:-1]\n return [rec_type, data]", "def _unpack(self, headerBytes):\n header = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n self.step = header[0]", "def unpack_from(self, data, is_hexen): \n \n raise Exception('Undefined unpack_from in MapObject child.')", "def _unpack(self, headerBytes):\n header = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n self.datetime = header[0]", "def unpack(self, packedData):\n raise NotImplementedError, 'Method need to be overriden'", "def deserialize(self, rawBinaryData):\n\n PlayerMessage.deserialize(self, rawBinaryData)\n self.player.skinVarient = self._readByte(rawBinaryData, self._currentPos)\n self._currentPos += self.byteFormatLen \n self.player.hair = self._readByte(rawBinaryData, self._currentPos)\n self._currentPos += self.byteFormatLen\n self.player.isMale = True if self.player.skinVarient < 4 else False\n self.player.name = self._readString(rawBinaryData[self._currentPos:])\n self._currentPos += len(self.player.name)\n self.player.hairDye = self._readByte(rawBinaryData, self._currentPos)\n self._currentPos += self.byteFormatLen\n self.player.hideVisuals = self._readByte(rawBinaryData, self._currentPos)\n self._currentPos += self.byteFormatLen\n self.player.hideVisuals2 = self._readByte(rawBinaryData, self._currentPos)\n self._currentPos += self.byteFormatLen\n self.player.hideMisc = self._readByte(rawBinaryData, self._currentPos)\n self._currentPos += 
self.byteFormatLen\n self.player.hairColor = self._readColor24(\n rawBinaryData, self._currentPos)\n self._currentPos += self.color24FormatLen\n self.player.skinColor = self._readColor24(\n rawBinaryData, self._currentPos)\n self._currentPos += self.color24FormatLen\n self.player.eyeColor = self._readColor24(\n rawBinaryData, self._currentPos)\n self._currentPos += self.color24FormatLen\n self.player.shirtColor = self._readColor24(\n rawBinaryData, self._currentPos)\n self._currentPos += self.color24FormatLen\n self.player.underShirtColor = self._readColor24(\n rawBinaryData, self._currentPos)\n self._currentPos += self.color24FormatLen \n self.player.pantsColor = self._readColor24(\n rawBinaryData, self._currentPos)\n self._currentPos += self.color24FormatLen\n self.player.shoeColor = self._readColor24(\n rawBinaryData, self._currentPos)\n self._currentPos += self.color24FormatLen\n self.player.difficulty = self._readByte(\n rawBinaryData, self._currentPos)\n self._currentPos += self.byteFormatLen\n return self", "def _unpack(self, headerBytes) -> None:\n unpacked = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n\n self.ref_id = unpacked[0]", "def from_bytes(self, ???):", "def deserialize(self, byte: bytes):\n pass" ]
[ "0.6962862", "0.67571527", "0.6735019", "0.66133356", "0.6599829", "0.6524207", "0.6514203", "0.6513174", "0.6495409", "0.64911854", "0.64876413", "0.6414714", "0.6409808", "0.63841546", "0.6362015", "0.63400155", "0.6332114", "0.6315264", "0.63131934", "0.6259287", "0.62559617", "0.6249398", "0.62475926", "0.6244976", "0.6243105", "0.6233978", "0.62227654", "0.61753464", "0.6160705", "0.61564475" ]
document_score: 0.686265
document_rank: 1
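
negative_scores and document_score appear to be similarity scores from the same retriever, and document_rank the rank of the positive among the scored candidates (the row above has rank 1, i.e. one negative scored higher than the positive). Under that assumption, a hedged sketch of keeping only the hardest negatives by score margin:

    def hard_negatives(row, margin=0.05):
        # keep negatives scoring within `margin` of the positive's score;
        # assumes the score fields are comparable, which is not documented here
        threshold = float(row["document_score"]) - margin
        return [
            (neg, float(score))
            for neg, score in zip(row["negatives"], row["negative_scores"])
            if float(score) >= threshold
        ]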
append_check_sum is used for exporting Team Manager/Meet Manager compliant files.
def append_check_sum(input_str, file_format=None):
    # check if inputStr is a string
    if not isinstance(input_str, unicode):
        raise CheckSumExportException("input_str parameter is not a string")

    # check if fileFormat is specified and if it is, if it is a string
    if file_format is None:
        if len(input_str) == HY3FileLength:
            file_format = "HY3"
        elif len(input_str) == CL2FileLength:
            file_format = "CL2"
        else:
            raise CheckSumExportException("inputStr is not the correct " +
                                          "length for either CL2 or HY3")
    elif not isinstance(file_format, unicode):
        raise CheckSumExportException("fileFormat parameter is not a string")
    else:
        if input_str.upper() == "HY3":
            file_format = "HY3"
        elif input_str.upper() == "CL2":
            file_format = "CL2"
        else:
            raise CheckSumExportException("fileFormat parameter is \
neither HY3 nor CL2")

    if file_format == "HY3":
        evens = input_str[::2]
        evens_sum = sequence_sum(evens)
        odds = input_str[1::2]
        odds_sum = sequence_sum(odds)
        total = evens_sum + 2*odds_sum
        final = unicode(int(floor(total/HY3Scale) + HY3Offset))
        return input_str + final[-1] + final[-2]
    else:  # CL2
        total = sequence_sum(input_str)
        final = unicode(int(floor(total/CL2Scale) + CL2Offset))
        suffix = ""
        if input_str.startswith("DO"):
            suffix += "NN"
        else:
            suffix += " N"
        return input_str + suffix + final[-1] + final[-2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_sum(self) -> str:\n pass", "def checksum(self, check: bool):\r\n self._checksum = check\r\n self.checksum_hist.append(check)", "def add_check_sums(hdu_list: fits.HDUList):\n for hdu in hdu_list:\n hdu.verify(\"fix\")\n hdu.add_checksum()\n hdu.header.insert(\"CHECKSUM\", BLANK_CARD)\n hdu.header.insert(\"CHECKSUM\", (\" \", \"DATA INTEGRITY\"))\n hdu.add_checksum()\n\n return None", "def test_append_filename(self, home_is_temp):\n data = np.arange(6)\n testfile = self.temp(\"test_append_1.fits\")\n\n # Test case 1: creation of file\n fits.append(testfile, data=data, checksum=True)\n\n # Test case 2: append to existing file, with verify=True\n # Also test that additional keyword can be passed to fitsopen\n fits.append(testfile, data=data * 2, checksum=True, ignore_blank=True)\n\n # Test case 3: append to existing file, with verify=False\n fits.append(testfile, data=data * 3, checksum=True, verify=False)\n\n with fits.open(testfile, checksum=True) as hdu1:\n np.testing.assert_array_equal(hdu1[0].data, data)\n np.testing.assert_array_equal(hdu1[1].data, data * 2)\n np.testing.assert_array_equal(hdu1[2].data, data * 3)", "def checkSumHelper(arg, dirname, fnames):\n val = 0\n files = [name for name in fnames if os.path.splitext(name)[1] in EXTENSIONS]\n for file in files:\n absFile = os.path.join(dirname,file)\n try:\n stats = os.stat(absFile)\n except OSError,e:\n # This is to skip over temporary files or files\n # nosy doesn't have permission to access\n # print \"Nosy: skipping file %s with error %s\"%(absFile,e)\n continue\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n arg.append(val)\n return", "def calculate_md5_checksum_for_files(\n md5sum_cache_dir: Path, update: bool = False, skip_provider_dependencies_check: bool = False\n) -> tuple[list[str], list[str]]:\n not_modified_files = []\n modified_files = []\n if not skip_provider_dependencies_check:\n modified_provider_yaml_files = []\n for file in ALL_PROVIDER_YAML_FILES:\n # Only check provider yaml files once and save the result immediately.\n # If we need to regenerate the dependencies and they are not modified then\n # all is fine and we can save checksums for the new files\n if check_md5_sum_for_file(file, md5sum_cache_dir, True):\n modified_provider_yaml_files.append(file)\n if modified_provider_yaml_files:\n get_console().print(\n \"[info]Attempting to generate provider dependencies. 
\"\n \"Provider yaml files changed since last check:[/]\"\n )\n get_console().print(\n [os.fspath(file.relative_to(AIRFLOW_SOURCES_ROOT)) for file in modified_provider_yaml_files]\n )\n # Regenerate provider_dependencies.json\n run_command(\n [\n sys.executable,\n os.fspath(\n AIRFLOW_SOURCES_ROOT\n / \"scripts\"\n / \"ci\"\n / \"pre_commit\"\n / \"pre_commit_update_providers_dependencies.py\"\n ),\n ],\n cwd=AIRFLOW_SOURCES_ROOT,\n )\n for file in FILES_FOR_REBUILD_CHECK:\n is_modified = check_md5_sum_for_file(file, md5sum_cache_dir, update)\n if is_modified:\n modified_files.append(file)\n else:\n not_modified_files.append(file)\n return modified_files, not_modified_files", "def writing_sum_sold(file_name):\n result = str(reports.sum_sold(file_name))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")", "def end_sumup(self, sumup_name, line):\n if self.sum_status[sumup_name]:\n thekey = self.sum_status[sumup_name]\n self.sumups[thekey] += [line]\n self.sum_status[sumup_name] = 0\n return True\n return False", "def appendStatsInFile(components):\n append_file_iteration_index = open(ProjectConfigFile.OUTPUT_STATISTICAL_FILE_NAME,'a')\n # print \"Components %s\" % (components)\n for comp in components[:-1]:\n append_file_iteration_index.write(\"%s,\"%(comp))\n # append_file_iteration_index.write(\"%s,\" % (ProjectConfigFile.RISK_ELIMINATION))\n append_file_iteration_index.write(\"%s\\n\" % (components[-1]))\n append_file_iteration_index.close()", "def append_checksum(file_path):\n\n cksum = calc_file_crc32(file_path)\n f = open(file_path, 'ab')\n f.write(struct.pack('<I', cksum))\n f.close()", "def set_pkg_chk_sum(self, doc, chk_sum):\n self.assert_package_exists()\n if not self.package_chk_sum_set:\n self.package_chk_sum_set = True\n doc.package.check_sum = checksum_from_sha1(chk_sum)\n return True\n else:\n raise CardinalityError('Package::CheckSum')", "def set_chksum(self, doc, chksum):\n doc.ext_document_references[-1].check_sum = checksum_from_sha1(\n chksum)", "def test_006_log_append(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = consts.TEST_FILENAME + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n # pylint: disable = protected-access\n __log_test = __test._ChessStorage__log_append(__dir_game_logfile, __test_data)\n # pylint: enable = protected-access\n self.assertEqual(__log_test, consts.ERROR_CODES[\"SUCCESSFULL\"])", "def test_save_fg_append(tfg):\n\n file_name = 'test_fooofgroup_append'\n\n save_fg(tfg, file_name, TEST_DATA_PATH, True, save_results=True)\n save_fg(tfg, file_name, TEST_DATA_PATH, True, save_results=True)\n\n assert os.path.exists(os.path.join(TEST_DATA_PATH, file_name + '.json'))", "def GenerateOldFilesCksum(self):\r\n\t\tif len(self.file_list) > 0:\r\n\t\t\tfor file in self.file_list:\r\n\t\t\t\tstatus, output = commands.getstatusoutput('cksum ' + file)\r\n\t\t\t\tif status == 0:\r\n\t\t\t\t\tpos = output.find(temp)\r\n\t\t\t\t\tif pos != -1:\r\n\t\t\t\t\t\toutput = output[:pos - 1]\r\n\t\t\t\t\tself.old_file_cksum[file] = output\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.old_file_cksum[file] = None\r\n\t\telse:\r\n\t\t\tprint \"无文件存在!\".decode(\"utf-8\")", "def _append_to_user_display(self, append_check):\n if append_check == \"\":\n pass\n else:\n 
user_display.append(append_check)", "def test_append_with_header(self):\n testfile = self.temp(\"test_append_1.fits\")\n with fits.open(self.data(\"test0.fits\")) as hdus:\n for hdu in hdus:\n fits.append(testfile, hdu.data, hdu.header, checksum=True)\n\n with fits.open(testfile, checksum=True) as hdus:\n assert len(hdus) == 5", "def add_check(self, check):\n assert isinstance(check, CheckItem)\n self._check_list.append(check)", "def test_local_md5sum(self):\n cwl_local_path = os.path.abspath('testdata/md5sum.cwl')\n workflow_attachment_path = os.path.abspath('testdata/dockstore-tool-md5sum.cwl')\n output_filepath, _ = run_cwl_md5sum(cwl_input='file://' + cwl_local_path,\n workflow_attachment='file://' + workflow_attachment_path)\n\n self.assertTrue(check_for_file(output_filepath), 'Output file was not found: ' + str(output_filepath))", "def test_merge_sum(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n self.assertEqual(ars.merge(ars2, 'LastUpdate', 'parsed'), 1262637)", "def GenerateNewFilesCksum(self):\r\n\t\tif len(self.file_list) > 0:\r\n\t\t\tfor file in self.file_list:\r\n\t\t\t\tstatus, output = commands.getstatusoutput('cksum ' + file)\r\n\t\t\t\tif status == 0:\r\n\t\t\t\t\tpos = output.find(temp)\r\n\t\t\t\t\tif pos != -1:\r\n\t\t\t\t\t\toutput = output[:pos - 1]\r\n\t\t\t\t\tself.new_file_cksum[file] = output\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.new_file_cksum[file] = None\r\n\t\telse:\r\n\t\t\tprint \"无文件存在!\".decode(\"utf-8\")", "def add_checks(self, checks: Iterable[Check]):\n self.checks.extend(checks)\n return self", "def md5sum_check_if_build_is_needed(md5sum_cache_dir: Path, skip_provider_dependencies_check: bool) -> bool:\n build_needed = False\n modified_files, not_modified_files = calculate_md5_checksum_for_files(\n md5sum_cache_dir, update=False, skip_provider_dependencies_check=skip_provider_dependencies_check\n )\n if modified_files:\n get_console().print(\n f\"[warning]The following important files are modified in {AIRFLOW_SOURCES_ROOT} \"\n f\"since last time image was built: [/]\\n\\n\"\n )\n for file in modified_files:\n get_console().print(f\" * [info]{file}[/]\")\n get_console().print(\"\\n[warning]Likely CI image needs rebuild[/]\\n\")\n build_needed = True\n else:\n get_console().print(\n \"[info]Docker image build is not needed for CI build as no important files are changed! 
\"\n \"You can add --force-build to force it[/]\"\n )\n return build_needed", "def chk_append_in_out(target_word, word_to_add, data):\r\n # insert syntax to write to temp file\r\n import shutil\r\n\r\n filename = filepath1(data)\r\n dict_tmpfile = filepath1(data + ' tmp')\r\n\r\n with open(filename, 'r',\r\n encoding='utf8') as dict_file:\r\n dict_reader = csv.reader(dict_file, delimiter='\\t',\r\n quoting=csv.QUOTE_MINIMAL)\r\n data = [l for l in dict_reader]\r\n\r\n for line in data:\r\n if type(line) == list and len(line) > 1:\r\n for word in line[1:]:\r\n if word == word_to_add:\r\n break\r\n\r\n else:\r\n line.append(word_to_add)\r\n else:\r\n line.extend(target_word, word_to_add)\r\n \r\n with open(dict_tmpfile, 'w', encoding='utf8') as dict_tmpfile:\r\n dict_writer = csv.writer(dict_tmpfile, delimiter='\\t',\r\n quoting=csv.QUOTE_MINIMAL)\r\n for line in data:\r\n dict_writer.writerow(line)\r\n\r\n shutil.move(dict_tmpfile, filename)", "def set_file_chksum(self, doc, chksum):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_chksum_set:\n self.file_chksum_set = True\n self.file(doc).chk_sum = checksum_from_sha1(chksum)\n return True\n else:\n raise CardinalityError('File::CheckSum')\n else:\n raise OrderError('File::CheckSum')", "def addCheck(self, check: Check):\n # TODO Support Multiple checks\n self.checks.append(check)\n self._VerificationRunBuilder.addCheck(check._Check)\n return self", "def GenerateOldOutputCksum(self):\r\n\t\tfor temp in self.output_name.split('|'):\r\n\t\t\ttemp = self.output_path + temp\r\n\t\t\tstatus, output = commands.getstatusoutput('cksum ' + temp)\r\n\t\t\tif status == 0:\t\r\n\t\t\t\tpos = output.find(temp)\r\n\t\t\t\tif pos != -1:\r\n\t\t\t\t\toutput = output[:pos - 1]\t\t\t\r\n\t\t\t\tself.old_output_cksum[temp] = output\r\n\t\t\telse:\r\n\t\t\t\tself.old_output_cksum[temp] = None", "def on_check_append_messages_toggled(self, checkBox):\n\t\tself.logView.set_append_messages(checkBox.get_active())\n\t\tself.emit('append-messages-changed')", "def add_entries(entries_list):\r\n #| - add_entries\r\n sum_tot = 0.\r\n for entry in entries_list:\r\n if entry is None:\r\n summand = 0.\r\n else:\r\n summand = entry\r\n sum_tot += summand\r\n\r\n return(sum_tot)\r\n #__|\r", "def verify_results(outdir_path, original_array_path, R, O, file_format, addition, split_merge=False):\n\n if file_format == \"HDF5\":\n file_manager = HDF5_manager()\n else:\n print(\"File format not supported yet. 
Aborting...\")\n sys.exit(1)\n\n partition = get_blocks_shape(R, O)\n orig_arr_data = file_manager.read_all(original_array_path)\n all_true = True\n\n if split_merge:\n result_arrpath = os.path.join(outdir_path, \"0_0_0.hdf5\")\n return file_manager.check_split_merge(original_array_path, result_arrpath)\n\n for i in range(partition[0]):\n for j in range(partition[1]):\n for k in range(partition[2]):\n outfilepath = os.path.join(outdir_path, str(i) + \"_\" + str(j) + \"_\" + str(k) + \".hdf5\")\n data_stored = file_manager.read_all(outfilepath)\n ground_truth = orig_arr_data[i*O[0]:(i+1)*O[0],j*O[1]:(j+1)*O[1],k*O[2]:(k+1)*O[2]]\n \n if addition:\n ground_truth = ground_truth +1\n\n try:\n assert np.allclose(data_stored, ground_truth, rtol=1e-02)\n # print(f\"Good output file {outfilepath}\")\n except:\n print(f\"Error: bad rechunking {outfilepath}\")\n print(f\"Slices from ground truth {i*O[0]}:{(i+1)*O[0]}, {j*O[1]}:{(j+1)*O[1]}, {k*O[2]}:{(k+1)*O[2]}\")\n print(\"data_stored\", data_stored)\n print(\"ground_truth\", ground_truth)\n all_true = False # do not return here to see all failures\n\n file_manager.close_infiles() # close all files\n return all_true" ]
[ "0.57053113", "0.56518656", "0.5623373", "0.5575254", "0.55170244", "0.5495961", "0.52891517", "0.5253438", "0.5153764", "0.512485", "0.50724775", "0.5054209", "0.50178397", "0.50017375", "0.49846122", "0.49659887", "0.4963159", "0.49346223", "0.49120694", "0.49071398", "0.4857082", "0.48495257", "0.4840836", "0.48381472", "0.4833496", "0.48298845", "0.48130697", "0.4811439", "0.4792151", "0.4782593" ]
document_score: 0.64782333
document_rank: 0
Adds enableAutopublishing field to ATCT types
def makeATCTTypesAutoPublishAware():
    makeTypesAutoPublishAware(atct_types)
    print "---------- PATCH: ADDED enableAutopublishing field TO ATCT TYPES ----------"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _enableautopublishing(obj, **kwargs):\n\n from collective.autopublishing.behavior import IAutoPublishing\n\n if IAutoPublishing.providedBy(obj):\n return getattr(obj, \"enableAutopublishing\", True)\n\n return False", "def makeTypesAutoPublishAware(types):\n for t in types:\n t.schema.addField(enableAutopublishingField.copy())\n t.schema.moveField('enableAutopublishing', after='expirationDate')\n generateMethods(t, t.schema.fields())", "def for_publishing_only(self):\n self.token['type'] = 'publish'\n\n return self", "def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})", "def enable(self):\n self._enabled = True", "def setAllowAnnotations(self,value):\n self.PDFreactorConfiguration.in1[\"allowAnnotations\"] = value", "def setAllowInsert(self, value, **kwa):\n\n if type(value) == BooleanType:\n self.fgField.allow_insert = value\n else:\n self.fgField.allow_insert = value == '1'", "def enable(self):\n self.enabled = True", "def enable(self):\n self.enabled = True", "def enabled(self):\n raise NotImplementedError", "def __init__(__self__, *,\n enable_alts: Optional[pulumi.Input[bool]] = None):\n if enable_alts is not None:\n pulumi.set(__self__, \"enable_alts\", enable_alts)", "def enable(self) -> None:", "def enable(self, *args, **kwargs):\n pass", "def auto(self, auto):\n self._auto = auto", "def setAutomaticMode(self, enabling: bool) -> None:\n ...", "def enable(self):\n raise NotImplementedError", "def enable(self):\n pass", "async def enable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(True)", "def enable(self, is_en=True):\n self._raise_not_implemented()", "def enable_alts(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_alts\")", "def __enable__(self) -> None:\n pass", "def enable(self):", "def _enable(self):\n self.debug_log(\"Enabling...\")\n self._register_handlers()", "def allow_automatic(self, allow_automatic):\n\n self._allow_automatic = allow_automatic", "def _registerPublisher(self, callerId, topic, topicType, callerApi):\n if topic not in self.FilterPublishedTopic:\n self.__docWriter.addPub(callerId, topic, topicType)", "async def enable(self, ctx):\n await self.config.guild(ctx.guild).auto.set(True)\n await ctx.send(_(\"Automatic voicechannel creation enabled.\"))", "def Enabled(self) -> bool:", "def wasm_reference_types(self, enable):\n\n if not isinstance(enable, bool):\n raise TypeError('expected a bool')\n dll.wasmtime_config_wasm_reference_types_set(self.__ptr__, enable)", "def _force_on(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'on')", "def enabled(self, enabled):\n \n self._enabled = enabled" ]
[ "0.6962681", "0.6759251", "0.5387826", "0.50007504", "0.49590155", "0.49114823", "0.49056712", "0.48902673", "0.48902673", "0.48852003", "0.48625377", "0.48427656", "0.48221064", "0.4786588", "0.47820434", "0.47744322", "0.47438973", "0.47431564", "0.4741863", "0.4723688", "0.47109795", "0.47006038", "0.46936226", "0.46830586", "0.46726495", "0.46684122", "0.46655485", "0.46602854", "0.46599308", "0.46597645" ]
document_score: 0.85065866
document_rank: 0
Requests the module description and available output channels. Input ADDR is the module location (integer 1 through 6).
def get_module_info(self, c, ADDR):
    if self.device_detected == True:
        resp = yield subprocess.check_output("cacli DESC "+str(ADDR))
    else:
        resp = "Device not connected."
        print "Device not connected. "
        # Eventually make this actually throw an error instead of printing something
    returnValue(resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_info(node):\n\n if \"cpu\" in node and \"total_mbufs\" in node[\"cpu\"]:\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n if total_mbufs != 0:\n print(\"Total Number of Buffers: {}\".format(total_mbufs))\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n linkup_devs = vpp.get_link_up_devices()\n if len(linkup_devs):\n print(\"\\nDevices with link up (can not be used with VPP):\")\n vpp.show_vpp_devices(linkup_devs, show_header=False)\n # for dev in linkup_devs:\n # print (\" \" + dev)\n kernel_devs = vpp.get_kernel_devices()\n if len(kernel_devs):\n print(\"\\nDevices bound to kernel drivers:\")\n vpp.show_vpp_devices(kernel_devs, show_header=False)\n else:\n print(\"\\nNo devices bound to kernel drivers\")\n\n dpdk_devs = vpp.get_dpdk_devices()\n if len(dpdk_devs):\n print(\"\\nDevices bound to DPDK drivers:\")\n vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices bound to DPDK drivers\")\n\n other_devs = vpp.get_other_devices()\n if len(other_devs):\n print(\"\\nDevices not bound to Kernel or DPDK drivers:\")\n vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices not bound to Kernel or DPDK drivers\")\n\n vpputl = VPPUtil()\n interfaces = vpputl.get_hardware(node)\n if interfaces == {}:\n return\n\n print(\"\\nDevices in use by VPP:\")\n\n if len(interfaces.items()) < 2:\n print(\"None\")\n return\n\n print(\n \"{:30} {:4} {:4} {:7} {:4} {:7}\".format(\n \"Name\", \"Numa\", \"RXQs\", \"RXDescs\", \"TXQs\", \"TXDescs\"\n )\n )\n for intf in sorted(interfaces.items()):\n name = intf[0]\n value = intf[1]\n if name == \"local0\":\n continue\n numa = rx_qs = rx_ds = tx_qs = tx_ds = \"\"\n if \"numa\" in value:\n numa = int(value[\"numa\"])\n if \"rx queues\" in value:\n rx_qs = int(value[\"rx queues\"])\n if \"rx descs\" in value:\n rx_ds = int(value[\"rx descs\"])\n if \"tx queues\" in value:\n tx_qs = int(value[\"tx queues\"])\n if \"tx descs\" in value:\n tx_ds = int(value[\"tx descs\"])\n\n print(\n \"{:30} {:>4} {:>4} {:>7} {:>4} {:>7}\".format(\n name, numa, rx_qs, rx_ds, tx_qs, tx_ds\n )\n )", "def get_module_info(self):\n self._log_msg_start(\"Poll basic module info\")\n return self._ubx.poll(\"NAV-SVINFO\")", "def probe_ports( self, ):\r\n ports = self.com_driver.list_available()\r\n self.gui.print_info_string( \"\" )\r\n self.gui.print_info_string( \"Reported Ports from driver:\" )\r\n self.gui.print_info_string( \"\" )\r\n if len( ports ) == 0:\r\n self.gui.print_info_string( \"None \\n\" )\r\n else:\r\n for i_port in ports:\r\n self.gui.print_info_string( i_port[0] )\r\n #self.gui.print_info_string( \"\\n\" )\r\n\r\n self.close_driver()\r\n\r\n self.gui.print_info_string( \"\\nProbe Ports from parameters:\\n\" )\r\n ports = self.com_driver.probe_available( self.parameters.port_list )\r\n ix_line = 0 # what is this ??\r\n for i_port in ports:\r\n ix_line += 1\r\n self.gui.print_info_string( str( i_port ) )\r\n if ix_line == 10:\r\n ix_line = 0\r\n self.gui.print_info_string( \"\\n\" )\r\n #logger.log( fll, a_str )\r\n\r\n return", "def get_module_list(self, c):\r\n if self.device_detected == True:\r\n resp = yield subprocess.check_output(\"cacli modlist\")\r\n else:\r\n resp = \"Device not connected.\"\r\n print \"Device not connected. 
\"\r\n #Eventually make this actually throw an error instead of printing something\r\n returnValue(resp)", "def present_module_info():\n writer()\n print_heading(\"Module Info\")\n writer(f\"GWT Version: {GWT_VERSION}\")\n writer(f\"Content-Type: {CONTENT_TYPE}\")\n writer(f\"X-GWT-Module-Base: {BASE_URL}\")\n writer(f\"X-GWT-Permutation: {GWT_PERMUTATION}\")\n if RPC_MODE:\n writer(f\"RPC Version: {RPC_VERSION}\")\n writer(f\"RPC Flags: {RPC_FLAGS}\")\n writer()", "def request_switch_desc(self):\n parser = self.datapath.ofproto_parser\n req = parser.OFPDescStatsRequest(self.datapath, 0)\n self.logger.debug(\"Sending description request to dpid=%s\",\n self.datapath.id)\n self.datapath.send_msg(req)", "def getOutputPortsInfo(self):\n return [(gport.parentItem().module, gport.port, gport.controller.get_connections_from(gport.controller.current_pipeline, [gport.parentItem().module.id], gport.port.name), (gport.parentItem().boundingRect().right()-gport.parentItem().boundingRect().left())/2) for gport in self.pipelineView.getSelectedOutputPorts()]", "def port_nic():", "async def get_module_data(request):\n hw = hw_from_req(request)\n requested_serial = request.match_info['serial']\n res = None\n\n for module in hw.attached_modules:\n is_serial_match = module.device_info.get('serial') == requested_serial\n if is_serial_match and hasattr(module, 'live_data'):\n res = module.live_data\n\n if res:\n return web.json_response(res, status=200)\n else:\n return web.json_response({\"message\": \"Module not found\"}, status=404)", "def ports(self):\r\n # check connected to chip\r\n if not self._core.is_connected():\r\n raise ka_exceptions.NotConnectedError()\r\n\r\n # check symbols are loaded\r\n self._core.sym.assert_have_symbols()\r\n\r\n # get the symbols we need\r\n READ_OFFSET_ADDR = self._core.sym.varfind('$cbuffer.read_port_offset_addr')\r\n WRITE_OFFSET_ADDR = self._core.sym.varfind('$cbuffer.write_port_offset_addr')\r\n READ_LIMIT_ADDR = self._core.sym.varfind('$cbuffer.read_port_limit_addr')\r\n WRITE_LIMIT_ADDR = self._core.sym.varfind('$cbuffer.write_port_limit_addr')\r\n READ_BUFFER_SIZE = self._core.sym.varfind('$cbuffer.read_port_buffer_size')\r\n WRITE_BUFFER_SIZE = self._core.sym.varfind('$cbuffer.write_port_buffer_size')\r\n\r\n def read_dm(addr):\r\n return self._core.dm[addr]\r\n\r\n # get the read and write offset\r\n read_offset_addr = self._read_var_with_size_check(READ_OFFSET_ADDR.addr, READ_OFFSET_ADDR.size_in_addressable_units)\r\n write_offset_addr = self._read_var_with_size_check(WRITE_OFFSET_ADDR.addr, WRITE_OFFSET_ADDR.size_in_addressable_units)\r\n read_offset = map(read_dm, read_offset_addr)\r\n write_offset = map(read_dm, write_offset_addr)\r\n\r\n # get the read and write limit\r\n read_limit_addr = self._read_var_with_size_check(READ_LIMIT_ADDR.addr, READ_LIMIT_ADDR.size_in_addressable_units)\r\n write_limit_addr = self._read_var_with_size_check(WRITE_LIMIT_ADDR.addr, WRITE_LIMIT_ADDR.size_in_addressable_units)\r\n read_limit = map(read_dm, read_limit_addr)\r\n write_limit = map(read_dm, write_limit_addr)\r\n\r\n # get the port size\r\n read_size = self._read_var_with_size_check(READ_BUFFER_SIZE.addr, READ_BUFFER_SIZE.size_in_addressable_units)\r\n write_size = self._read_var_with_size_check(WRITE_BUFFER_SIZE.addr, WRITE_BUFFER_SIZE.size_in_addressable_units)\r\n # calculate size mask (size-1) for non-zero sizes\r\n read_mask = map(lambda s: s - (s>0), read_size)\r\n write_mask = map(lambda s: s - (s>0), write_size)\r\n\r\n # calculate data/space in port\r\n read_data 
= map(lambda l,o,m: (l - o) & m, read_limit, read_offset, read_mask)\r\n write_space = map(lambda l,o,m: (l - o) & m - 1, write_limit, write_offset, write_mask)\r\n\r\n # read port configs\r\n READ_CONF_BASE = self._core.sym.constfind('$READ_PORT0_CONFIG').value\r\n WRITE_CONF_BASE = self._core.sym.constfind('$WRITE_PORT0_CONFIG').value\r\n read_conf = self._read_var_with_size_check(READ_CONF_BASE, READ_OFFSET_ADDR.size_in_addressable_units)\r\n write_conf = self._read_var_with_size_check(WRITE_CONF_BASE, WRITE_OFFSET_ADDR.size_in_addressable_units)\r\n\r\n # extract data size (in octets) from config\r\n read_data_size = map(lambda c: (c & 0x3) + 1, read_conf)\r\n write_space_size = map(lambda c: (c & 0x3) + 1, write_conf)\r\n\r\n # decode configs into strings\r\n read_conf_str = map(lambda c,s: (\"8\" if s==1 else (\"16\" if s==2 else (\"24\" if s==3 else \"??\"))) + \"-bit, \" \\\r\n + (\"Big Endian\" if (c & 0x4) else \"Little Endian\") + \", \" \\\r\n + (\"No Sign Ext\" if (c & 0x8) else \"Sign Ext\" ), \\\r\n read_conf, read_data_size)\r\n write_conf_str = map(lambda c,s: (\"8\" if s==1 else (\"16\" if s==2 else (\"24\" if s==3 else \"??\"))) + \"-bit, \" \\\r\n + (\"Big Endian\" if (c & 0x4) else \"Little Endian\") + \", \" \\\r\n + (\"Saturate\" if (c & 0x8) else \"No Saturate\"), \\\r\n write_conf, write_space_size)\r\n\r\n # print information\r\n print \"Read ports:\\n Port Status Offset Address Size(Bytes) Data Config\"\r\n for i in range(len(read_offset_addr)):\r\n if read_offset_addr[i]:\r\n print \" %2i Enabled %6i (0x%04X) %5i (0x%04X) %5i %s\" % \\\r\n (i, read_offset_addr[i], read_offset_addr[i], read_size[i], read_size[i], read_data[i]/read_data_size[i], read_conf_str[i])\r\n else:\r\n print \" %2i Disabled\" % i\r\n\r\n print \"Write ports:\\n Port Status Offset Address Size(Bytes) Space Config\"\r\n for i in range(len(write_offset_addr)):\r\n if write_offset_addr[i]:\r\n print \" %2i Enabled %6i (0x%04X) %5i (0x%04X) %5i %s\" % \\\r\n (i, write_offset_addr[i], write_offset_addr[i], write_size[i], write_size[i], write_space[i]/write_space_size[i], write_conf_str[i])\r\n else:\r\n print \" %2i Disabled\" % i", "def module_info():\n pass", "def get_port_list(self):\r\n self.ports = Manager().dict()\r\n self.value = Manager().dict()\r\n self.sensors = dict()\r\n for p in self.device.ports['input']:\r\n if p.enabled:\r\n self.ports[p.number] = p\r\n self.value[p.number] = 'Connexion à la carte'\r\n self.sensors[p.number] = Sensor.get(p._type)", "def module_config_info():\n\n print(\"consumes\", CONSUMES)\n module_config_template()", "def get_software_info():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><system><software><info></info></software></system></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def main():\r\n with manager.connect(host=HOST, port=PORT, username=USER,\r\n password=PASS, hostkey_verify=False,\r\n device_params={'name': 'huawei'},\r\n allow_agent=False, look_for_keys=False) as m:\r\n\r\n interface_filter = \"\"\"\r\n <filter>\r\n <interfaces xmlns=\"urn:huawei:yang:huawei-ietf-interfaces-deviations-ATN-980B\">\r\n <interface></interface>\r\n </interfaces>\r\n </filter>\r\n \"\"\"\r\n\r\n interfaces = m.get_config('running', interface_filter)\r\n # print YANG module\r\n print('***Here is the YANG Module***')\r\n #data = m.get_schema('ietf-interfaces')\r\n print(interfaces.xml)", "def list_ports(state):\n\tstate.report()", "async def module_command_module(self, ctx, parsed):\n mcs = ModuleCmdStatus\n results = []\n subcmd = 
parsed.subcmd\n if subcmd.endswith(\"load\"): # load, reload\n mtype = parsed.args[\"mtype\"]\n if parsed.args[\"mtype\"] == \"protocol\" and subcmd == \"reload\":\n await ctx.reply_command_result(parsed, \"Reloading protocol modules is not yet implemented.\")\n return\n for mod_id in parsed.args[\"module\"]:\n try:\n module = await getattr(self, f\"{subcmd}_{mtype}\")(mod_id)\n except NoSuchModule:\n status = mcs.NO_SUCH_MOD\n except (ModuleLoadError, ModuleRegisterError) as ex:\n status = getattr(mcs, f\"{subcmd.upper()}_FAIL\")\n self.logger.exception(ex)\n except ModuleAlreadyLoaded:\n status = mcs.ALREADY_LOADED\n except ModuleNotLoaded:\n status = mcs.NOT_YET_LOADED\n else:\n status = getattr(mcs, f\"{subcmd.upper()}_OK\")\n results.append(ModuleCmdResult(mod_id, status, mtype))\n elif subcmd == \"list\":\n status = mcs.QUERY\n for category in parsed.args[\"category\"]:\n if parsed.args[\"loaded\"]:\n pool = (mod.identifier for mod in getattr(self, f\"get_loaded_{category}s\")())\n else:\n pool = getattr(self, f\"get_available_{category}s\")()\n results.extend(ModuleCmdResult(mod, status, category) for mod in pool)\n elif subcmd == \"info\":\n status = mcs.QUERY\n mtype = parsed.args[\"mtype\"]\n for mod_id in parsed.args[\"module\"]:\n info = {}\n try:\n module = getattr(self, f\"_{mtype}s\")[mod_id]\n except KeyError:\n if getattr(self, f\"{mtype}_available\")(mod_id):\n status = mcs.NOT_YET_LOADED\n else:\n status = mcs.NO_SUCH_MOD\n else:\n for attr in (\"name\", \"description\", \"author\", \"version\", \"license\"):\n info[attr] = getattr(module, attr)\n results.append(ModuleCmdResult(mod_id, status, mtype, info))\n await ctx.core_command_module(parsed, results)", "def get_info():\n\n temp_modules = []\n\n\n print(\"Welcome to the module creator.\")\n\n while True:\n \n module_name = input(\"What is the name of the module? (Enter 'q' to quit) \")\n \n if module_name == 'q':\n break\n \n module_type = input(f\"What type of module is this (module types can be found in ansible documentation): \")\n\n parameters = []\n while True:\n parameter = input(\"What are the parameter of this module? 
(Enter 'q' to quit) \")\n if parameter == 'q':\n make_module(module_name, module_type, parameters)\n break\n else:\n parameters.append(parameter)", "def modules():", "async def module10(\n pypck_client: PchkConnectionManager,\n) -> AsyncGenerator[ModuleConnection, None]:\n lcn_addr = LcnAddr(0, 10, False)\n module = pypck_client.get_module_conn(lcn_addr)\n yield module\n await module.cancel_requests()", "def moduleInfo(*args, definition: bool=True, listModules: bool=True, moduleName: AnyStr=\"\",\n path: bool=True, version: bool=True, **kwargs)->List[AnyStr]:\n pass", "def portconfig():\r\n print('''\\n%s at %s acting as user %s\r\n\\nPort Configuration Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - Get current port configuration\r\n 2 - Get current port status\r\n 3 - Get current port counters\r\n 4 - Get SFP status\r\n 5 - Change Port Configuration\r\n 6 - Shut Down or Activate Port\r\n 7 - Reset Port Counters\r\n 8 - Back\r\n 9 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n portconfig()\r\n execute = {1: PACKETMASTER.port_config,\r\n 2: PACKETMASTER.port_info,\r\n 3: PACKETMASTER.port_statistics,\r\n 4: PACKETMASTER.sfp_info,\r\n 5: PACKETMASTER.set_port_config_guided,\r\n 6: PACKETMASTER.port_on_off_guided,\r\n 7: PACKETMASTER.reset_port_counters,\r\n 8: hardwareconfig,\r\n 9: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n portconfig()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid selection.\")\r\n portconfig()", "def port_desc_stats_reply_handler(self, ev):\n msg = ev.msg\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n config_dict = {ofproto.OFPPC_PORT_DOWN: \"Down\",\n ofproto.OFPPC_NO_RECV: \"No Recv\",\n ofproto.OFPPC_NO_FWD: \"No Farward\",\n ofproto.OFPPC_NO_PACKET_IN: \"No Packet-in\"}\n\n state_dict = {ofproto.OFPPS_LINK_DOWN: \"Down\",\n ofproto.OFPPS_BLOCKED: \"Blocked\",\n ofproto.OFPPS_LIVE: \"Live\"}\n\n ports = []\n for p in ev.msg.body:\n ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '\n 'state=0x%08x curr=0x%08x advertised=0x%08x '\n 'supported=0x%08x peer=0x%08x curr_speed=%d '\n 'max_speed=%d' %\n (p.port_no, p.hw_addr,\n p.name, p.config,\n p.state, p.curr, p.advertised,\n p.supported, p.peer, p.curr_speed,\n p.max_speed))\n\n if p.config in config_dict:\n config = config_dict[p.config]\n else:\n config = \"up\"\n\n if p.state in state_dict:\n state = state_dict[p.state]\n else:\n state = \"up\"\n port_feature = (config, state, p.curr_speed)\n self.port_features[dpid][p.port_no] = port_feature", "def get_actuator_info(self, c, ADDR, CH):\r\n if self.device_detected == True:\r\n resp = yield subprocess.check_output(\"cacli INFO \"+str(ADDR) + \" \" + str(CH))\r\n type = self.find_between(resp,\"TYPE :\",\"\\r\\n\")\r\n tag = self.find_between(resp,\"TAG :\",\"\\r\\n\")\r\n info = [type, tag]\r\n else:\r\n resp = \"Device not connected.\"\r\n info = [resp, resp]\r\n #Eventually make this actually throw an error instead of printing something\r\n returnValue(info)", "def __init__(self, \n address: int = 97, \n moduletype: str = 'DO', \n name: str = 'Atlas_DO_sensor', \n bus: int = 1) -> None:\n # The .initialise method is called in AtlasI2C __init__ \n # to initialise the sensors.\n super().__init__(address=address, moduletype=moduletype, name=name, bus=bus)\n\n for param in 
self._PARAMS: # Ensures that all measurement parameters are enabled.\n self.query(f'O,{param},1')\n time.sleep(2)", "def _print_port_mod_config_mask(variable, name):\n\n print('PortMod %s: ' % name, end='')\n printed = False\n variable = _parse_phy_curr(variable)\n for i in variable:\n print(red(dissector.get_phy_config(i)), end='')\n printed = True\n else:\n _dont_print_0(printed)\n print()", "def request_configure(self, req, beams, channels, int_time, blank_phases):\n\n message = (\"nbeams={}, nchannels={}, integration_time={},\"\n \" nblank_phases={}\").format(beams, channels,\n int_time, blank_phases)\n log.info(\"Configuring FITS interface server with params: {}\".format(\n message))\n self.nbeams = beams\n self.nchannels = channels\n self.integration_time = int_time\n self.nblank_phases = blank_phases\n self._fw_connection_manager.drop_connection()\n self._stop_capture()\n self._configured = True\n return (\"ok\",)", "def getInputPortsInfo(self):\n return [(gport.parentItem().module, gport.port, gport.controller.get_connections_to(gport.controller.current_pipeline, [gport.parentItem().module.id], gport.port.name), (gport.parentItem().boundingRect().right()-gport.parentItem().boundingRect().left())/2) for gport in self.pipelineView.getSelectedInputPorts()]", "def COM(cmd,data): #Status: WIP\r\n #Desc CMD Target Address\r\n if cmd == 'U': #get update U\r\n parseLocal(data)\r\n if cmd == 'T':\r\n setETM(data)\r\n# rpc(addr,getLocals,addr, lTime, lSped, lLoca, lStat)\r\n elif cmd == 'M': #merge M\r\n setETM(data)\r\n merge()\r\n elif cmd == 'E': #help E multicasted\r\n setStatus(data)\r\n emergency()\r\n elif cmd == 'D':\r\n getDest()", "def list_ports():\n print '\\nHere is the list of available ports on this machine:'\n # lp.comports returns a list of (port, description, hardware ID) tuples\n iterator = sorted(lp.comports())\n for port, desc, hwid in iterator:\n print port\n exit()" ]
[ "0.5255414", "0.5238491", "0.5188195", "0.5180859", "0.5165055", "0.51609516", "0.5111383", "0.5096917", "0.5089235", "0.5071356", "0.5061544", "0.5058985", "0.5021037", "0.501727", "0.5011895", "0.49662185", "0.49413484", "0.49329716", "0.489246", "0.48664945", "0.4837501", "0.48059002", "0.47996813", "0.4794238", "0.4789724", "0.47858328", "0.47650376", "0.47567916", "0.47501436", "0.47465718" ]
0.624494
0
Requests information about user-defined Tags (name) or set actuator Types. Input ADDR is the module location (integer 1 through 6). Input CH is the module channel (integer 1 through 3). Returns an array of strings: the first element is the Type, the second element is the Tag.
def get_actuator_info(self, c, ADDR, CH): if self.device_detected == True: resp = yield subprocess.check_output("cacli INFO "+str(ADDR) + " " + str(CH)) type = self.find_between(resp,"TYPE :","\r\n") tag = self.find_between(resp,"TAG :","\r\n") info = [type, tag] else: resp = "Device not connected." info = [resp, resp] #Eventually make this actually throw an error instead of printing something returnValue(info)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_tags(reader_ip, event_type):\n # Open socket using reader IP address\n cmd = rapid.Command(reader_ip)\n cmd.open()\n print \"Connection to %s opened\" % (reader_ip)\n\n # Reader Login\n cmd.execute(\"reader.login\", (\"admin\", \"readeradmin\"))\n rc = cmd.execute(\"reader.who_am_i\", ())\n print \"Logged in as: %s \" % rc\n\n # Open an event channel and get id\n id = cmd.getEventChannel(event_callback)\n print \"Event Channel ID %s created\" % id\n\n # Register for event_type\n cmd.execute(\"reader.events.register\", (id, event_type))\n print \"Registered for %s on Ch. %s\" % (event_type, id)\n\n # start tag read in active mode\n cmd.set(\"setup.operating_mode\", \"active\")\n print \"Mode: Active\"\n\n # stdout redirection for creating tag list\n stdout = sys.stdout #backup original stdout to console\n sys.stdout = open(\"tag_list.log\", \"w\")\n\n # wait for some tag reads\n time.sleep(1)\n\n # stop tag read in standby mode\n cmd.set(\"setup.operating_mode\", \"standby\")\n sys.stdout.close() # close log file\n sys.stdout = stdout # revert to console output\n print \"Mode: Standby\"\n print \"./tag_list.log generated\"\n\n # Unregister for event_type\n cmd.execute(\"reader.events.unregister\", (id, event_type))\n print \"Unregistered for %s on Ch. %s\" % (event_type, id)\n\n # Close the command connection and event channel\n cmd.close()\n print \"Connection Closed\"", "def get_tags_of_type(self, sentence: str, tag_type: PosType):\n spacy_tags = SpacyWrapper.TAGS[tag_type]\n doc = self.model(sentence)\n # Token and Tag\n tags = [str(token) for token in doc if token.pos_ in spacy_tags]\n return tags", "def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)", "def getTag(self, inputs, tag):\n result = {}\n for into in inputs:\n for i in into:\n if i in self.sim.agents:\n agentTags = self.sim.agents[i].access[\"tags\"]\n if tag in agentTags:\n result[i] = agentTags[tag]\n return result", "def read_chip_info(self):\n return [self.read_chip_type(), self.read_chip_revision()]", "def identifyChip(chipType):\n with open('../illumina_files/illumina_dict.pickle', \"rb\") as f:\n chipDict = pickle.load(f)\n\n values = chipDict[chipType]\n\n print('BPM: ' + values[0] + '\\n')\n print('EGT: ' + values[1] + '\\n')\n print('CSV: ' + values[2] + '\\n')\n\n return values[0], values[1], values[2]", "def list(self):\n return list(\n filter(\n lambda x: x.get('type') == 'tagit', # pragma: no cover\n self._post(\n request=ApiActions.LIST.value,\n uri=ApiUri.ACTIONS.value,\n ).get('actions')\n )\n )", "def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")", "def list_tags() -> Optional[Dict[str, Target]]:\n if hasattr(_ffi_api, \"TargetTagListTags\"):\n return _ffi_api.TargetTagListTags()\n return None", "def sensor_types():\n sensors = FetchandStore.get_data(\"https://tie.digitraffic.fi/api/v1/data/weather-data\")\n sensornames = [sensor[\"name\"] for sensor in sensors ]\n sensornames = list(set(sensornames))\n for index, sensorname in enumerate(sorted(sensornames)):\n 
print(index, sensorname)", "def info_equipment_reactors_get():\n equipment = _equipment_by_group(438) # 438 == Mobile Reactor\n return equipment, 200", "def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def _find_type(trial: dict) -> list:\n tag = [trial['study_type'], trial['overall_status']]\n if 'phase' in trial:\n tag.append(trial['phase'])\n del trial['phase']\n if 'last_known_status' in trial and trial['last_known_status'] != trial['overall_status']:\n tag.append(trial['last_known_status'])\n if 'last_known_status' in trial:\n del trial['last_known_status']\n del trial['study_type']\n if 'keyword' in trial:\n if isinstance(trial['keyword'], list):\n tag.extend(trial['keyword'])\n else:\n tag.append(trial['keyword'])\n del trial['keyword']\n if 'intervention' in trial:\n if isinstance(trial['intervention'], dict):\n tag.append(trial['intervention']['intervention_type'])\n else:\n tag.extend([intervention['intervention_type'] for intervention in trial['intervention']])\n # it contains more information then type\n # del trial['intervention']\n if 'biospec_retention' in trial:\n tag.append(trial['biospec_retention'])\n del trial['biospec_retention']\n return tag", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get_tag_info(xint,conn):\n\n get_tags = ('SELECT DISTINCT fip2.value '\n 'FROM interaction i, feature_interaction fi, feature_interactionprop fip, '\n 'feature f, cvterm cvt, feature_interactionprop fip2, cvterm cvt2 '\n 'WHERE f.feature_id = fi.feature_id AND fi.interaction_id = i.interaction_id '\n 'AND fi.feature_interaction_id = fip.feature_interaction_id '\n 'AND fip.type_id = cvt.cvterm_id AND cvt.name = \\'participating feature\\' '\n 'AND fi.feature_interaction_id = fip2.feature_interaction_id AND fip2.type_id = cvt2.cvterm_id '\n 'AND cvt2.name = \\'comment\\' AND f.uniquename = %s AND i.uniquename = %s')\n tags = connect(get_tags,xint,conn)\n return(tags)", "def _tags(self):\n retval = []\n for of in self.tagnames:\n retval.append([of, self.get_datatype(of), self.get(of)])\n return retval", "def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def get_actor_tags(request):\n\n if request.method == \"POST\" and request.is_ajax():\n tag_type = request.POST.get('type', None)\n if not tag_type:\n return HttpResponse(json.dumps({'success': False,\n 'message': 'Need a tag type.'}),\n content_type=\"application/json\")\n result = get_actor_tags_by_type(tag_type)\n return HttpResponse(json.dumps(result),\n content_type=\"application/json\")\n else:\n error = \"Expected AJAX POST\"\n return render_to_response(\"error.html\",\n {\"error\" : error },\n RequestContext(request))", "def get_all(self, event_type):\r\n get_trait_name = storage.models.Trait.get_name_by_type\r\n return [TraitDescription(name=t['name'],\r\n type=get_trait_name(t['data_type']))\r\n for t in pecan.request.storage_conn\r\n .get_trait_types(event_type)]", "def getTagList(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBehaviorManager\")\n return self.proxy.getTagList()", "def ListTags(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise 
NotImplementedError('Method not implemented!')", "def getDeviceInfo():\n url = \"https://api.roblox.com/reference/deviceinfo\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "async def incidentTypes(self, includeHidden: bool = False) -> Iterable[str]:", "def getTags(number=None):", "def typedAntennaNames() :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n namelist.append( i.typedAntennaName )\n return namelist", "def retrieve_required_tags_list(resource_type: str) -> list:\n dynamodb_client = boto3.client('dynamodb')\n try : \n get_required_tags_response = dynamodb_client.get_item(\n TableName = \"required-tags-table\",\n Key = {\"resource_type\": {'S': resource_type}},\n ProjectionExpression = \"tags\",\n )\n required_tags = [ key['S'] for key in get_required_tags_response['Item']['tags']['L'] ] \n except :\n get_required_tags_response = dynamodb_client.get_item(\n TableName = \"required-tags-table\",\n Key = {\"resource_type\": {'S': \"Default\"}},\n ProjectionExpression = \"tags\",\n )\n required_tags = [ key['S'] for key in get_required_tags_response['Item']['tags']['L'] ] \n\n return required_tags", "def intf_TAGQUERY(E):\n if not inc.TXT_or_LST_of_TXTs(E.The,1):\n print(\"Input Error: tag?\")\n print(intf_TAGQUERY.__doc__)\n return # Without doing much of anything.\n mytags= E.The.StackPop().val\n if type(mytags)==type(list()):\n #mytags= map(lambda x:x.val, mytags) # Should now be a list of TXTs.\n mytags= [x.val for x in mytags] # Should now be a list of TXTs.\n else:\n mytags= [ mytags ] # Also a (1 item) list of ints.\n qualifying_ents= list()\n for myeid in MMEL.El.keys():\n alltagshere= True # Assume they're here until one is not found.\n for mytag in mytags:\n #print(\"Searching entity #%d for tag ''%s''\" % (myeid,mytag))\n if not MMEL.El[myeid].has_tag(mytag):\n alltagshere= False\n break\n if alltagshere:\n qualifying_ents.append( objectifier.StackOB_VAL(myeid) )\n E.The.StackPush( objectifier.StackOB_LST(qualifying_ents) )", "def get_antivirus_info():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><anti-virus><upgrade><info></info></upgrade></anti-virus></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def test_plc_read_val(plc_ip, tag_name):\n\n plc = ClxDriver()\n if plc.open(plc_ip):\n tagg = plc.read_tag(tag_name)\n plc.close()\n return (tagg)\n \n else:\n print(\"Unable to open\", plc_ip)" ]
[ "0.55414027", "0.5046226", "0.50079", "0.49143046", "0.49027643", "0.48714507", "0.48003745", "0.46709114", "0.4625631", "0.46155882", "0.4607507", "0.46046448", "0.45815864", "0.45771107", "0.45771107", "0.45745647", "0.45738086", "0.4564302", "0.45355794", "0.45329583", "0.45264006", "0.44754612", "0.446605", "0.44555813", "0.4446383", "0.4443503", "0.44224444", "0.44200405", "0.44196197", "0.44090524" ]
0.5941915
0
Moves the specified actuator with the specified parameters. ADDR and CH specify the module address (1 through 6) and channel (1 through 3). TYPE specifies the cryo actuator model. TEMP is the nearest integer temperature (0 through 300). DIR determines CW (1) vs CCW (0) stack rotation. FREQ is the integer frequency of operation, input in Hertz. REL is the piezo step size parameter, input as a percentage (0-100%). STEPS is the number of actuation steps; the range is 0 to 50000, where 0 is used for infinite movement. TORQUE corresponds to an optional torque factor between 1 and 30. Larger values can be useful for unsticking the JPE.
def move(self, c, ADDR, CH, TYPE, TEMP, DIR, FREQ, REL, STEPS, TORQUE = None): if self.device_detected == True: #Add input checks if TORQUE == None: resp = yield subprocess.check_output("cacli MOV "+str(ADDR) + " " + str(CH) + " " + TYPE + " " + str(TEMP) + " " + str(DIR) + " " + str(FREQ) + " " + str(REL) + " " + str(STEPS)) else: resp = yield subprocess.check_output("cacli MOV "+str(ADDR) + " " + str(CH) + " " + TYPE + " " + str(TEMP) + " " + str(DIR) + " " + str(FREQ) + " " + str(REL) + " " + str(STEPS) + " " + str(TORQUE)) else: resp = "Device not connected." print "Device not connected. " #Eventually make this actually throw an error instead of printing something returnValue(resp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_xyz(self,c, ADDR, TEMP, FREQ, REL, XYZ, TORQUE = None):\r\n try:\r\n VEC = np.dot(self.T1,XYZ)\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n \r\n #have each cycle take ~1 second\r\n cycle_size = int(FREQ/2)\r\n \r\n if VEC[0] > 0:\r\n dir_chn_1 = 1\r\n else:\r\n dir_chn_1 = 0\r\n\r\n if VEC[1] > 0:\r\n dir_chn_2 = 1\r\n else:\r\n dir_chn_2 = 0\r\n \r\n if VEC[2] > 0:\r\n dir_chn_3 = 1\r\n else:\r\n dir_chn_3 = 0\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n\r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, VEC_cycle[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, remainder[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')\r\n except Exception as inst:\r\n print inst", "def move_x(self,c, ADDR, TEMP, FREQ, REL, X, TORQUE = None):\r\n \r\n VEC = np.dot(self.T1,[X,0,0])\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n print 'Knob 2 should always need to move 0 for this. 
If it is not showing 0, then something went werd'\r\n #have each cycle take ~1 second\r\n cycle_size = int(FREQ/2)\r\n \r\n #TODO, just implement these cycles into the move XYZ general command\r\n #Direction should just be positive is 1, negative is 0\r\n if VEC[0] > 0:\r\n dir_chn_1 = 1\r\n dir_chn_3 = 0\r\n else:\r\n dir_chn_1 = 0\r\n dir_chn_3 = 1\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n \r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')", "def move_y(self,c, ADDR, TEMP, FREQ, REL, Y, TORQUE = None):\r\n \r\n VEC = np.dot(self.T1,[0,Y,0])\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n \r\n #Have each cycle take ~1.5 seconds\r\n cycle_size = int(FREQ/2)\r\n \r\n #Determine the direction\r\n if VEC[0] >0:\r\n dir_chn_1 = 1\r\n dir_chn_2 = 0\r\n dir_chn_3 = 1\r\n else:\r\n dir_chn_1 = 0\r\n dir_chn_2 = 1\r\n dir_chn_3 = 0\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n\r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, VEC_cycle[1], TORQUE)\r\n yield 
self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, remainder[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')", "def move_z(self,c, ADDR, TEMP, FREQ, REL, Z, TORQUE = None):\r\n \r\n #Calculate steps in knobs 1 2 and 3\r\n VEC = np.dot(self.T1,[0.0,0.0,Z])\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n \r\n #Have each cycle take ~1.5 seconds\r\n cycle_size = float(FREQ/2)\r\n \r\n #Determine the direction\r\n if VEC[0] >0:\r\n dir_chn_1 = 1\r\n dir_chn_2 = 1\r\n dir_chn_3 = 1\r\n else:\r\n dir_chn_1 = 0\r\n dir_chn_2 = 0\r\n dir_chn_3 = 0\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n \r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, VEC_cycle[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, remainder[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')", "def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)", "def move(self, step):\n\n status = self.read()\n Logger.getLogger().debug(\"Status in move method: %s\", status)\n # while the motors are moving we don't want to start 
another movement\n if status > CurtainsStatus.OPEN or self.motor.value:\n return\n\n self.target = step\n\n # deciding the movement direction\n if self.steps() < self.target:\n self.__open__()\n elif self.steps() > self.target:\n self.__close__()", "def forward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(param * .3048)\n\t\telse:\n\t\t\tself.linear_move(riu.default_dist * .3048)", "async def _move_via_actuator_vals(self, act_val_pairs: ActuatorVals, velocity=60):\n coros = []\n\n head_args = {a.name: v for a, v in act_val_pairs.items() if a.is_head}\n if head_args:\n coros.append(self.move_head(**head_args, velocity=velocity))\n\n arm_args = {a.name.split('_')[0]: v for a, v in act_val_pairs.items() if not a.is_head}\n if arm_args:\n settings = (ArmSettings(side, val, velocity) for side, val in arm_args.items())\n coros.append(self._move_arms_via_settings(*settings))\n\n if coros:\n return await asyncio.gather(*coros)", "def move(self, direction):\n\n self.direction = direction\n self.logger.debug('current direction: ' + direction)\n\n #remember axis name that instrument thinks in\n if 'Z' in self.current_axis:\n axis_string = 'ZPiezoStepper'\n else:\n if self.direction == 'left' or self.direction == 'right':\n axis_string = 'XPiezoStepper'\n else:\n axis_string = 'YPiezoStepper'\n\n if self.current_move == 'move absolute':\n #combine the spinbox and unit combobox user input to a pint quantity\n self.logger.info('moving to an absolute position')\n distance = self.gui.doubleSpinBox_distance.value()\n unit = self.gui.comboBox_unit.currentText()\n\n self.logger.debug('axis: ' + axis_string)\n local_distance = ur(str(distance) + unit)\n self.logger.debug('to position: ' + str(local_distance))\n\n self.moving_thread = WorkThread(self.anc350_instrument.move_to,axis_string, local_distance)\n self.moving_thread.start()\n\n elif self.current_move == 'move relative':\n # combine the spinbox and unit combobox user input to a pint quantity\n # add minussign to communicate correct direction to instrument\n self.logger.info('moving relative')\n distance = self.gui.doubleSpinBox_distance.value()\n unit = self.gui.comboBox_unit.currentText()\n self.logger.debug('axis:' + axis_string)\n self.logger.debug('direction: '+ direction)\n\n if self.direction == 'right' or self.direction == 'up':\n local_distance = ur(str(distance) + unit)\n self.logger.debug(str(local_distance))\n elif self.direction == 'left' or self.direction == 'down':\n local_distance = ur(str(-1 * distance) + unit)\n self.logger.debug(str(local_distance))\n\n self.moving_thread = WorkThread(self.anc350_instrument.move_relative,axis_string, local_distance)\n self.moving_thread.start()\n\n elif self.current_move == 'continuous' or self.current_move == 'step':\n # convert direction buttons clicked to direction integers that instrument wants\n # than move for 1s continuously, since the stop button doesnt work yet\n if self.direction == 'left':\n if 'Z' in self.current_axis:\n direction_int = 0 # correct direction, corresponds to labels closer and away\n else:\n direction_int = 1\n elif self.direction == 'right':\n if 'Z' in self.current_axis:\n direction_int = 1 # correct direction, corresponds to labels closer and away\n else:\n direction_int = 0\n elif self.direction == 'up':\n direction_int = 0\n elif self.direction == 'down':\n direction_int = 1\n\n if self.current_move == 'continuous':\n self.logger.info('moving continuously')\n self.moving_thread = WorkThread(self.anc350_instrument.move_continuous, axis_string, direction_int)\n 
self.moving_thread.start()\n\n elif self.current_move == 'step':\n self.logger.info('making a step')\n self.anc350_instrument.given_step(axis_string, direction_int, 1)", "def move():\n # step 1 of task analysis: get data\n data = get_data('MovementData/Walking_02.txt')\n # step 2: get the initial orientation of the sensor\n sensor_orientation = get_init_orientation_sensor(data.acc[0])\n # step 3: get the vector of the right horizontal semi-circular canal's on-direction\n rhscc_init_on_dir = get_init_on_dir_rh_scc(15)\n # preparation for step 4: align the angular velocity sensor data with the global coordinate system\n angular_velocities_aligned_globally = align_sensor_data_globally(data.omega, sensor_orientation)\n # step 4: calculate the stimulation of the cupula\n stimuli = get_scc_stimulation(angular_velocities_aligned_globally, rhscc_init_on_dir)\n # step 5: get the transfer function of the scc with the dynamics provided in the lecture\n scc_trans_fun = get_scc_transfer_fun(0.01, 5)\n # step 6: get the cupular deflection\n max_cupular_deflection = calculate_max_cupular_deflection(scc_trans_fun, stimuli, data.rate)\n # preparation for step 7: align the acceleration sensor data with the global coordinate system\n accelerations_aligned_globally = align_sensor_data_globally(data.acc, sensor_orientation)\n # step 8: calculate the maxmimum left- and rightwards stimulation of the otolithic organ\n max_left_right_stimuli = calculate_otolithic_max_stimuli(accelerations_aligned_globally, 1)\n # step 9: calculate the head orientation\n head_orientations = calculate_head_orientation(angular_velocities_aligned_globally, data.rate)\n\n return max_cupular_deflection, max_left_right_stimuli, head_orientations", "def move(self):\n movement = self.recognize_color()\n if movement is not None:\n if rand.random() <= self.proba_color:\n movement()\n else:\n r = rand.random()\n if r <= self.proba[0]:\n self.turn_left()\n elif r <= self.proba[0] + self.proba[1]:\n self.move_forward()\n else:\n self.turn_right()\n self.set_color()", "def move(self, pos, relative=False, wait=0, update=False,\n check_limits=True, check_start=3, check_end=True,\n check_problems=True, dial=False, elog=False, silent=False):\n # Check input\n if not self._usable_number(pos):\n errmsg = \"Recieved invalid pos {0} for motor {1} (pv {2})... 
aborting.\"\n logprint(errmsg.format(pos, self.name, self.pvname), print_screen=True)\n return False\n\n # Apply relative and dial\n here = self.wm()\n if dial and update:\n dial_offset = self.get_par(\"offset\")\n if relative:\n pos += here\n elif dial:\n pos += dial_offset\n if not self.within_limits(pos, pypslog=True):\n return False\n\n # Log move intention\n logmsg = \"moving {0} (pv {1}) to {2}, previous position: {3}\"\n logprint(logmsg.format(self.name, self.pvname, pos, here))\n \n if update and not silent:\n txt = \"Initial position: {}\"\n if dial:\n print txt.format(self.wm_string_dial()) \n else:\n print txt.format(self.wm_string())\n\n # Set up dmov monitor to look for transition 1 -> 0 if applicable\n if check_start:\n self._monitor_move_start(here)\n\n # The important part\n self._move(pos)\n readback = self.get_pvobj(\"readback\")\n\n # Check that we started: wait on dmov 1 -> 0 monitor if hasn't happened\n # If dmov is not available, wait for rbv to move outside of mres\n if check_start:\n if self._usable_number(check_start):\n did_start = self._wait_move_start(check_start)\n else:\n did_start = self._wait_move_start()\n if not did_start:\n self.stop()\n logmsg = \"motor {0} (pv {1}) failed to start\"\n logprint(logmsg.format(self.name, self.pvname), print_screen=True)\n return False\n\n # Watch for problems\n if check_problems:\n self._add_wait_cb(self.check_stall)\n\n # We have to wait if elog\n if elog and not (wait or update):\n wait = True\n\n # We're done if we aren't waiting\n if not (wait or update):\n return True\n\n # Interpret wait timeout\n wait_timeout = -1\n if wait:\n if self._usable_number(wait):\n wait_timeout = wait\n\n # Wait/interrupt block\n if wait or update:\n if update:\n if dial:\n display_offset = dial_offset\n else:\n display_offset = 0\n show_pos = self._update_cb(wait_timeout, display_offset)\n else:\n show_pos = lambda e=None: None\n with CallbackContext(readback, show_pos):\n try:\n if wait_timeout <= 0:\n motion_time = self.estimatedTimeForMotion(abs(here-pos))\n if motion_time is None:\n wait_ok = self.wait(60)\n else:\n wait_ok = self.wait(max(motion_time * 2.0, 60))\n else:\n wait_ok = self.wait(timeout=wait_timeout)\n except KeyboardInterrupt:\n print \"\\rCtrl+c pressed, stopping motor.\"\n return self._move_cleanup(False, elog, here, pos)\n except Exception: # Handle other exceptions cleanly before raise\n self._move_cleanup(False, elog, here, pos)\n show_pos()\n traceback.print_exc()\n raise\n show_pos()\n if not wait_ok:\n return self._move_cleanup(False, elog, here, pos)\n\n # Check that we made it\n if check_end and not self.at_pos(pos):\n logmsg = \"Motor {0} (pv {1}) reached {2} instead of desired pos {3}\"\n logprint(logmsg.format(self.name, self.pvname, self.wm(), pos),\n print_screen=True)\n return self._move_cleanup(False, elog, here, pos)\n\n # If everything went ok, return True\n return self._move_cleanup(True, elog, here, pos)", "def servo_set_speed_limit(ch, accel):\n\n # Check to make sure speed is in range\n speed = max(accel, accel_limit_min)\n speed = min(accel, accel_limit_max)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_accel, ch, accel)", "def do_move(self, rel=True):\n cmd = self.MGMSG_MOT_MOVE_ABSOLUTE\n if rel:\n cmd = self.MGMSG_MOT_MOVE_RELATIVE\n self.__send_short(cmd, self.__chan, 0x00)", "def _moveSteps(self, direction, steps: int, speed: int, is_blocking=False):\n print(\"Move command: ({}, {}, {}, {})\".format(direction, speed, steps, is_blocking))\n if direction in 
Direction.FORWARD.value:\n self.drive.on_for_rotations(SpeedPercent(speed), SpeedPercent(speed), steps, block=is_blocking)\n\n if direction in Direction.BACKWARD.value:\n self.drive.on_for_rotations(SpeedPercent(-speed), SpeedPercent(-speed), steps, block=is_blocking)\n\n if direction in Direction.LEFT.value:\n self._turn(direction, speed)\n self.drive.on_for_rotations(SpeedPercent(speed), SpeedPercent(speed), steps, block=is_blocking)\n offset = -1\n self.index = self.new_index(self.index, offset)\n self.pointing = self.direction[self.index]\n\n if direction in Direction.RIGHT.value:\n self._turn(direction, speed)\n self.drive.on_for_rotations(SpeedPercent(speed), SpeedPercent(speed), steps, block=is_blocking)\n offset = 1\n self.index = self.new_index(self.index, offset)\n self.pointing = self.direction[self.index]\n\n if direction in Direction.STOP.value:\n self.drive.off()\n self.patrol_mode = False\n self.enemy_not_detected = False\n print(\"STOP!! patrol mode = {} y enemy not detected = {}\".format(self.patrol_mode, self.enemy_not_detected))\n\n if direction in Direction.PAUSE.value:\n self.drive.off()\n print(\"Pause to kill the enemy\")", "def do_steps(self, motornum, val):\n #print \"Moving in steps...\"\n steps = abs(val)\n if val < 0:\n direction = 1\n else:\n direction = 2\n mag = steps\n\n self.takesteps(mag=mag, direction=direction, motornum=motornum)\n self.do_azangle()\n self.do_altangle()", "def move(self, head, steps):\n self.turn(head)\n if self.direction == 0:\n self.x += int(steps)\n if self.direction == 1:\n self.y += int(steps)\n if self.direction == 2:\n self.x -= int(steps)\n if self.direction == 3:\n self.y -= int(steps)", "def set_action(self, node_uuid, index, data):\n params = {}\n if data == \"forward\":\n try:\n m = self.values['num'].get_data_index(index=index)\n if m is not None:\n self._bus.i2c_acquire()\n try:\n self._bus.pca9685_manager.getMotor(m).run(Adafruit_MotorHAT.FORWARD)\n finally:\n self._bus.i2c_release()\n except Exception:\n logger.exception('[%s] - Exception when running forward')\n elif data == \"backward\":\n try:\n m = self.values['num'].get_data_index(index=index)\n if m is not None:\n self._bus.i2c_acquire()\n try:\n self._bus.pca9685_manager.getMotor(m).run(Adafruit_MotorHAT.BACKWARD)\n finally:\n self._bus.i2c_release()\n except Exception:\n logger.exception('[%s] - Exception when running backward')\n elif data == \"release\":\n m = self.values['num'].get_data_index(index=index)\n if m is not None:\n try:\n self._bus.i2c_acquire()\n try:\n self._bus.pca9685_manager.getMotor(m).run(Adafruit_MotorHAT.RELEASE)\n finally:\n self._bus.i2c_release()\n except Exception:\n logger.exception('[%s] - Exception when releasing one motor %s', self.__class__.__name__, m)", "def move(contr):\n \n # Get the currently active camera to adapt control method\n scene = logic.getCurrentScene()\n active_camera = scene.active_camera\n \n # get the object this script is attached to\n human = contr.owner\n\n # if the human is external, do nothing\n if human.get('External_Robot_Tag') or human['disable_keyboard_control']:\n return\n \n # get the suffix of the human to reference the right objects\n suffix = human.name[-4:] if human.name[-4] == \".\" else \"\"\n\n # set the movement speed\n speed = human['Speed']\n\n # Get sensor named Mouse\n keyboard = contr.sensors['All_Keys']\n\n # Default movement speed\n move_speed = [0.0, 0.0, 0.0]\n rotation_speed = [0.0, 0.0, 0.0]\n\n human_camera = \"Human_Camera\" + suffix\n\n if human['move_cameraFP'] and 
active_camera.name != human_camera:\n return\n\n keylist = keyboard.events\n for key in keylist:\n # key[0] == events.keycode, key[1] = status\n if key[1] == logic.KX_INPUT_ACTIVE:\n if human['Manipulate']:\n if key[0] == FORWARDS:\n move_speed[0] = speed\n elif key[0] == BACKWARDS:\n move_speed[0] = -speed\n elif key[0] == TURN_LEFT:\n rotation_speed[2] = speed\n elif key[0] == TURN_RIGHT:\n rotation_speed[2] = -speed\n elif key[0] == RIGHT:\n if active_camera.name == human_camera:\n move_speed[1] = -speed\n else:\n rotation_speed[2] = -speed\n elif key[0] == LEFT:\n if active_camera.name == human_camera:\n move_speed[1] = speed\n else:\n rotation_speed[2] = speed\n else:\n if key[0] in (FORWARDS, BACKWARDS, LEFT, RIGHT):\n move_speed[0] = speed\n if active_camera.name != human_camera and key[0] == BACKWARDS:\n move_speed[0] = -speed\n\n # The second parameter of 'applyMovement' determines\n # a movement with respect to the object's local\n # coordinate system\n human.applyMovement( move_speed, True )\n human.applyRotation( rotation_speed, True )\n\n \"\"\"\n if key[0] == events.UPARROWKEY:\n move_speed[0] = speed\n elif key[0] == events.DOWNARROWKEY:\n move_speed[0] = -speed\n elif key[0] == events.LEFTARROWKEY:\n rotation_speed[2] = speed\n elif key[0] == events.RIGHTARROWKEY:\n rotation_speed[2] = -speed\n elif key[0] == events.AKEY:\n move_speed[2] = speed\n elif key[0] == events.EKEY:\n move_speed[2] = -speed\n \"\"\"\n\n elif key[1] == logic.KX_INPUT_JUST_ACTIVATED:\n # Other actions activated with the keyboard\n # Reset camera to center\n if key[0] == events.NKEY and keyboard.positive:\n reset_view(contr)\n # Switch between look and manipulate\n elif key[0] == events.XKEY:\n toggle_manipulate(contr)", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def movement(self, action):\r\n\r\n #if its moving horizontally only can move vertically in the next move\r\n if self.velocities[1] == 0:\r\n if action == 0 :\r\n self.velocities[0] = 0\r\n self.velocities[1] = -1\r\n if action == 1 :\r\n self.velocities[0] = 0\r\n self.velocities[1] = 1\r\n\r\n #if its moving vertically only can move horizontally in the next move\r\n if self.velocities[0] == 0:\r\n if action == 2 :\r\n self.velocities[0] = -1\r\n self.velocities[1] = 0\r\n if action == 3 :\r\n self.velocities[0] = 1\r\n self.velocities[1] = 0\r\n \r\n self.displacement()", "def motor_update(self, vmot):\n\n # Weighted shift, with shifted GS activity spread across neighbouring\n # units proportional to their distance to the \"hypothetical\" shifted\n # activity of each unit.\n # Has natural tendency to dissolve peak activity (increase population\n # entropy), but free from rounding errors.\n\n vx, vy = vmot\n\n # Get lower amount of shift along axes.\n xshf = int(np.floor(vx / self.xres))\n yshf = int(np.floor(vy / self.yres))\n\n # Get weights of shifts.\n wx = 1 - (vx - xshf * self.xres) / self.xres\n wy = 1 - (vy - yshf * self.yres) / self.yres\n\n # Get four shift configurations (lower or upper along each axis).\n shfts = [(xshf+ix, yshf+iy, abs(ix-wx) * abs(iy-wy))\n for ix in [0, 1] for iy in [0, 1]]\n\n # Get weighted 
sum of four shifted GS activity matrices.\n # This step increases entropy quite a bit, especially if shifted\n # activity has to be split equally among inheriting units (i.e. falls\n # to the center of the grid).\n # If activity shifting is circular, entropy is relatively stable,\n # otherwise it drops massively if activity falls off of grid!\n self.P = sum([w * utils.shift(self.P, xshf, yshf, self.circular, 0)\n for xshf, yshf, w in shfts if w != 0])\n\n # Introduce minimum activity to allow all units to have non-zero\n # probability and be able to receiving HC feedback.\n # This step increases entropy, but only very slightly.\n self.P[self.P < self.min_activ] = self.min_activ\n self.P = self.P / self.P.sum()\n\n self.pass_through_lateral_conn()", "def push_movement(self, position: Tuple[int, int], rel: Tuple[int, int], buttons: Tuple[int, int, int]):\n raise NotImplementedError", "def turn(dir, speed, runtime):\n\trightMotor.run_timed(duty_cycle_sp=-dir*speed, time_sp=runtime)\n\tleftMotor.run_timed(duty_cycle_sp=dir*speed, time_sp=runtime)", "def move(self, axis, dist):\n t = self.moveTime\n N = self.moveSamples\n # read initial position for all channels\n texts = [getattr(self, ax + \"Label\").text()\n for ax in self.activeChannels]\n initPos = [re.findall(r\"[-+]?\\d*\\.\\d+|[-+]?\\d+\", t)[0] for t in texts]\n initPos = np.array(initPos, dtype=float)[:, np.newaxis]\n fullPos = np.repeat(initPos, self.nSamples, axis=1)\n\n # make position ramp for moving axis\n ramp = makeRamp(0, dist, self.nSamples)\n fullPos[self.activeChannels.index(axis)] += ramp\n\n# factors = np.array([convFactors['x'], convFactors['y'],\n# convFactors['z']])[:, np.newaxis]\n# fullSignal = fullPos/factors\n toc = ptime.time()\n for i in range(self.nSamples):\n# self.aotask.write(fullSignal, auto_start=True)\n# time.sleep(t / N)\n borrar = 1+1\n\n print(\"se mueve en\", np.round(ptime.time() - toc, 3), \"segs\")\n\n # update position text\n newPos = fullPos[self.activeChannels.index(axis)][-1]\n# newText = \"<strong>\" + axis + \" = {0:.2f} µm</strong>\".format(newPos)\n newText = \"{}\".format(newPos)\n getattr(self, axis + \"Label\").setText(newText)\n self.paramChanged()", "def move(self, x=None, y=None, z=None, delay=MOVEMENT_DELAY_TIME, delay_factor=MOVEMENT_DELAY_MULTIPLIER):\n if not self.p.online():\n raise RuntimeError(\"Cannot move - printer is not connected.\")\n \n self.p.move_coord(x=x, y=y, z=z)\n # Will sleep for a factor of the amount of distance we need to travel\n dx = 0 if not x else x\n dy = 0 if not y else y\n dz = 0 if not z else z\n distance = (dx**2 + dy**2 + dz**2) ** 0.5\n time.sleep(delay + distance * delay_factor)", "def setMovement(self, movement, isSpecial = False, canControl = True):\n\n vel = self.ode_body.getLinearVel()\n for i in range(len(self.direction)):\n vel[i] = self.direction[i] * movement\n\n self.ode_body.setLinearVel(vel)\n\n self.moveVal = self.direction\n self.moveSpecial = isSpecial\n self.isMove = [False, False]\n self.direction = [self.moveVal[0], self.moveVal[1]]\n\n if not canControl:\n self.knockback()\n self.moveLock(None, 9999)\n self.isKnockback = True\n \n # Play Sound\n if movement > 10:\n self.sfx['lunge'].play()", "def _set_action(self, action):\n\n rospy.logdebug(\"Start Set Action ==>\"+str(action))\n # We convert the actions to speed movements to send to the parent class of Parrot\n linear_speed_vector = Vector3()\n angular_speed = 0.0\n\n if action == 0: # FORWARDS\n linear_speed_vector.x = self.linear_forward_speed\n self.last_action = \"FORWARDS\"\n 
elif action == 1: # BACKWARDS\n linear_speed_vector.x = -1*self.linear_forward_speed\n self.last_action = \"BACKWARDS\"\n elif action == 2: # STRAFE_LEFT\n linear_speed_vector.y = self.linear_forward_speed\n self.last_action = \"STRAFE_LEFT\"\n elif action == 3: # STRAFE_RIGHT\n linear_speed_vector.y = -1*self.linear_forward_speed\n self.last_action = \"STRAFE_RIGHT\"\n elif action == 4: # UP\n linear_speed_vector.z = self.linear_forward_speed\n self.last_action = \"UP\"\n elif action == 5: # DOWN\n linear_speed_vector.z = -1*self.linear_forward_speed\n self.last_action = \"DOWN\"\n\n # We tell drone the linear and angular speed to set to execute\n self.move_base(linear_speed_vector,\n angular_speed,\n epsilon=0.05,\n update_rate=10)\n\n rospy.logdebug(\"END Set Action ==>\"+str(action))", "def move(self, direction, proportional=False):\n servo = pygame.mixer.Sound(file='resources/Servo_Motor.wav')\n servo.set_volume(0.2)\n # validity of direction\n if not proportional:\n assert direction != 0 and -1 <= direction <= 1, \"No valid movement\"\n # p: eigene position, d * (vorne/hinten): positionsänderung\n self.pos = [p + (d * direction)\n for p, d in zip(self.pos, DIRECTIONS[self.rotation])]\n if self.speakers:\n self.speakers.play(servo)\n new_turn = \"{0}\".format(self.pos)\n self._call_gamelog_callbacks(new_turn)" ]
[ "0.57304", "0.56972367", "0.5554035", "0.54021627", "0.51400924", "0.49471164", "0.4805451", "0.47309983", "0.47292915", "0.46970686", "0.45975408", "0.45794663", "0.45610094", "0.4551254", "0.4550327", "0.45158133", "0.45089412", "0.45004815", "0.44989535", "0.44668517", "0.44668517", "0.4465641", "0.4452107", "0.4445455", "0.43982324", "0.43847358", "0.43797198", "0.43792492", "0.43700722", "0.43309134" ]
0.67335975
0
Centers the piezos specified by ADDR in order to keep track of position. This will run the piezos through their full movement range. Make sure this is only called with no sensitive sample present that could be destroyed.
def center(self,c, ADDR):
    #FIGURE OUT HOW TO DO THIS
    #Actually pretty sure this is impossible to do from software
    returnValue('Success!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def center_on_spawn(self):\n self.center_on(*self.world.metadata['playerStart'])", "def do_polar_alignment_test(self, *arg):\n if self.ready is False:\n return\n\n start_time = current_time(flatten=True)\n\n base_dir = '{}/images/drift_align/{}'.format(\n os.getenv('PANDIR'), start_time)\n plot_fn = '{}/{}_center_overlay.jpg'.format(base_dir, start_time)\n\n mount = self.pocs.observatory.mount\n\n print_info(\"Moving to home position\")\n self.pocs.say(\"Moving to home position\")\n mount.slew_to_home()\n\n # Polar Rotation\n pole_fn = polar_rotation(self.pocs, base_dir=base_dir)\n pole_fn = pole_fn.replace('.cr2', '.fits')\n\n # Mount Rotation\n rotate_fn = mount_rotation(self.pocs, base_dir=base_dir)\n rotate_fn = rotate_fn.replace('.cr2', '.fits')\n\n print_info(\"Moving back to home\")\n self.pocs.say(\"Moving back to home\")\n mount.slew_to_home()\n\n print_info(\"Solving celestial pole image\")\n self.pocs.say(\"Solving celestial pole image\")\n try:\n pole_center = polar_alignment_utils.analyze_polar_rotation(pole_fn)\n except error.SolveError:\n print_warning(\"Unable to solve pole image.\")\n print_warning(\"Will proceeed with rotation image but analysis not possible\")\n pole_center = None\n else:\n pole_center = (float(pole_center[0]), float(pole_center[1]))\n\n print_info(\"Starting analysis of rotation image\")\n self.pocs.say(\"Starting analysis of rotation image\")\n try:\n rotate_center = polar_alignment_utils.analyze_ra_rotation(rotate_fn)\n except Exception:\n print_warning(\"Unable to process rotation image\")\n rotate_center = None\n\n if pole_center is not None and rotate_center is not None:\n print_info(\"Plotting centers\")\n self.pocs.say(\"Plotting centers\")\n\n print_info(\"Pole: {} {}\".format(pole_center, pole_fn))\n self.pocs.say(\"Pole : {:0.2f} x {:0.2f}\".format(\n pole_center[0], pole_center[1]))\n\n print_info(\"Rotate: {} {}\".format(rotate_center, rotate_fn))\n self.pocs.say(\"Rotate: {:0.2f} x {:0.2f}\".format(\n rotate_center[0], rotate_center[1]))\n\n d_x = pole_center[0] - rotate_center[0]\n d_y = pole_center[1] - rotate_center[1]\n\n self.pocs.say(\"d_x: {:0.2f}\".format(d_x))\n self.pocs.say(\"d_y: {:0.2f}\".format(d_y))\n\n fig = polar_alignment_utils.plot_center(\n pole_fn, rotate_fn, pole_center, rotate_center)\n\n print_info(\"Plot image: {}\".format(plot_fn))\n fig.tight_layout()\n fig.savefig(plot_fn)\n\n try:\n os.unlink('/var/panoptes/images/latest.jpg')\n except Exception:\n pass\n try:\n os.symlink(plot_fn, '/var/panoptes/images/latest.jpg')\n except Exception:\n print_warning(\"Can't link latest image\")\n\n with open('/var/panoptes/images/drift_align/center.txt'.format(base_dir), 'a') as f:\n f.write('{}.{},{},{},{},{},{}\\n'.format(start_time, pole_center[0], pole_center[\n 1], rotate_center[0], rotate_center[1], d_x, d_y))\n\n print_info(\"Done with polar alignment test\")\n self.pocs.say(\"Done with polar alignment test\")", "def return_to_center(): #ignore this for now, use move_to_position_(0,0)\n current_pos = '\\xAA\\xBB\\xCC\\xDD'\n #run command until back to center (0,0)\n while True: #change the byte locations\n current_pos = to_center()\n print(current_pos)\n time.sleep(0.2) #check timing\n if((current_pos[1] == 0) and (current_pos[1] == 0)):\n break\n print('At center')", "def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()", "def setCenter(self, center):\n p = center - self.center\n for i in range(len(self.points)):\n self.points[i] += p", "def centerCircOnSeg(self, circles, 
segments, relSize=0.18):\n for circ in circles:\n circ.moved = False\n for seg in segments:\n for circ in circles: \n d = seg.distanceTo(circ.center)\n #debug( ' ', seg.projectPoint(circ.center))\n if d < circ.radius*relSize and not circ.moved :\n circ.center = seg.projectPoint(circ.center)\n circ.moved = True", "def recenter(self, point=(0, 0)):\n self.center = Point(*point)", "def center(self):\n if self.pos != 0.0:\n self.pos = 0.0", "def _move_receptor_to_grid_center(self):\n lower_receptor_corner = np.array([self._crd[:,i].min() for i in range(3)], dtype=float)\n upper_receptor_corner = np.array([self._crd[:,i].max() for i in range(3)], dtype=float)\n \n receptor_box_center = (upper_receptor_corner + lower_receptor_corner) / 2.\n grid_center = (self._origin_crd + self._uper_most_corner_crd) / 2.\n displacement = grid_center - receptor_box_center\n\n print(\"Receptor is translated by \", displacement)\n\n for atom_ind in range(len(self._crd)):\n self._crd[atom_ind] += displacement\n return None", "def center_ship(self):\r\n self.center = self.screen_rect.centerx", "def center_ship(self):\r\n self.center = self.screen_rect.centerx", "def set_center(self, center=0.0):\n\n self.centerFromImage = center\n\n tempCenter = [0] * 2\n\n tempCenter[0] = center[0] - 305 # Xcoord offset\n tempCenter[1] = 313 - center[1] # Ycoord offset\n\n self.centerFromRobot = tempCenter", "def _set_center(self):\n sl_center = np.array(\n [self.sl_list[k].mean_pos for k in range(self.nb_sl)]\n )\n sl_nb_pts = np.array(\n [self.sl_list[k].nb_points for k in range(self.nb_sl)]\n )\n sl_wcenter = [sl_center[k] * sl_nb_pts[k] for k in range(self.nb_sl)]\n self.center = np.sum(sl_wcenter, axis=0) / np.sum(sl_nb_pts)", "def _center(pos, shift):\n x = np.concatenate((pos[0], pos[0] + shift[0]))\n y = np.concatenate((pos[1], pos[1] + shift[1]))\n return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2", "def updatePos(self):\n self.setPos(self.centerX-self.boundingRect().width()/2.0,\n self.centerY-self.boundingRect().height()/2.0)", "def cells_center(self,refresh=False,mode='first3'):\n if refresh is True:\n to_update=slice(None)\n elif refresh is not False:\n to_update=refresh\n else:\n to_update = np.isnan(self.cells['_center'][:,0])\n\n if np.sum(to_update) > 0:\n if mode=='first3':\n p1,p2,p3 = [self.nodes['x'][self.cells['nodes'][to_update,i]] for i in [0,1,2]]\n self.cells['_center'][to_update] = circumcenter(p1,p2,p3)\n elif mode=='sequential':\n for c in np.arange(self.Ncells())[to_update]:\n points=self.nodes['x'][self.cell_to_nodes(c)]\n self.cells['_center'][c] = poly_circumcenter(points)\n \n return self.cells['_center']", "def center_ship(self):\n self.center = self.screen_rect.centerx", "def center_ship(self):\n self.center = self.screen_rect.centerx", "def center_ship(self):\n self.center = self.screen_rect.centerx", "def center(self):\n cp = self.dat.flowsheet.getCenter()\n self.centerOn(cp[0], cp[1])", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = 
(1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def test_phan_center(self):\n known_phan_center = Point(256, 256)\n self.quart.analyze()\n self.assertAlmostEqual(\n self.quart.hu_module.phan_center.x, known_phan_center.x, delta=0.7\n )\n self.assertAlmostEqual(\n self.quart.hu_module.phan_center.y, known_phan_center.y, delta=0.7\n )", "def setCenter(self, p):\n self.__center = p", "def autocenter(self):\n if self.contour_exists:\n corr_r, corr_c = find_center_mass(self.largest_cont)\n self.new_cent(\n (self.r - self.radius + corr_r, self.c - self.radius + corr_c)\n )", "def estimate_center(self, center_accuracy):\n estimated_center = numpy.random.rand(3) * center_accuracy + self._x\n estimated_center[2] = 0.0\n return estimated_center", "def position_center(self, x, y):\n self.x = x\n self.y = y\n self.pos[0] = x - self.pos[2]/2\n self.pos[1] = y - self.pos[3]/2", "def setCenter(self, np):\n p = self.getCenter()\n v = Vector.createFromTwoPoints(p, np)\n for i in range(len(self.points)):\n self.points[i] = v(self.points[i])", "def Run_pCenter(p): \n \n start_time = time.time()\n \n distMatrix = computeDistanceMatrix()\n #print distMatrix\n \n SDmin, locations = SolveModel(p, distMatrix)\n \n total_time = time.time()-start_time\n #SDmin = m.objVal\n \n displaySolution(locations, p, SDmin, total_time)", "def set_center(self, center):\n self._center = center\n self._reset_slot_bounds()", "def center(self, destination):\n self.move(destination=destination, origin=self.center)" ]
[ "0.5961", "0.57463413", "0.56795865", "0.5533931", "0.54940224", "0.5443845", "0.5440208", "0.543193", "0.541925", "0.53592706", "0.53592706", "0.53461343", "0.53208303", "0.5320681", "0.5311021", "0.5308331", "0.52896976", "0.52896976", "0.52896976", "0.52491367", "0.5239588", "0.52353084", "0.5218753", "0.51937", "0.51838106", "0.5179815", "0.5166808", "0.51630086", "0.5150722", "0.51450074" ]
0.62783
0
Request CADM to move the sample according to the arbitrary vector XYZ. XYZ should be a 3 element list with the number of steps to be taken in the x, y, and z directions respectively. Integers are not necessary because the xyz coordinates need to be transformed into other coordinates first, after which they will be rounded. Output is not yet implemented; it will return the true number of steps taken in the x, y, and z directions (not necessarily equal), and the number of steps taken in radial directions.
def move_xyz(self,c, ADDR, TEMP, FREQ, REL, XYZ, TORQUE = None):
    try:
        VEC = np.dot(self.T1,XYZ)
        VEC = self.adjustForWeight(VEC)
        VEC = [round(x) for x in VEC]
        print VEC

        #have each cycle take ~1 second
        cycle_size = int(FREQ/2)

        if VEC[0] > 0:
            dir_chn_1 = 1
        else:
            dir_chn_1 = 0

        if VEC[1] > 0:
            dir_chn_2 = 1
        else:
            dir_chn_2 = 0

        if VEC[2] > 0:
            dir_chn_3 = 1
        else:
            dir_chn_3 = 0

        #Find the largest number of steps that need to be taken
        max = np.max(np.abs(VEC))
        #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)
        num_cycles = floor(max / cycle_size)
        #Determine the amount to move each cycle in each channel
        VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]
        remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]

        print "Taking " + str(VEC) + " steps in channel 1, 2 and 3 respectively."
        print "This will be done over " + str(num_cycles) + " cycles of " + str(VEC_cycle) + " steps."
        print "And a final cycle with the remainder of " + str(remainder) + " steps."

        VEC_cycle = np.abs(VEC_cycle)
        remainder = np.abs(remainder)

        for i in range (0,int(num_cycles)):
            if VEC_cycle[0] > 0:
                yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)
                yield self.pause_while_moving(c,ADDR)
            if VEC_cycle[1] > 0:
                yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, VEC_cycle[1], TORQUE)
                yield self.pause_while_moving(c,ADDR)
            if VEC_cycle[2] > 0:
                yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)
                yield self.pause_while_moving(c,ADDR)

        tot_remain = 0
        for rem in remainder:
            tot_remain = tot_remain + rem

        if tot_remain != 0:
            if remainder[0] > 0:
                yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)
                yield self.pause_while_moving(c,ADDR)
            if remainder[1] > 0:
                yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, remainder[1], TORQUE)
                yield self.pause_while_moving(c,ADDR)
            if remainder[2] > 0:
                yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)
                yield self.pause_while_moving(c,ADDR)

        returnValue('Success!')
    except Exception as inst:
        print inst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_z(self,c, ADDR, TEMP, FREQ, REL, Z, TORQUE = None):\r\n \r\n #Calculate steps in knobs 1 2 and 3\r\n VEC = np.dot(self.T1,[0.0,0.0,Z])\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n \r\n #Have each cycle take ~1.5 seconds\r\n cycle_size = float(FREQ/2)\r\n \r\n #Determine the direction\r\n if VEC[0] >0:\r\n dir_chn_1 = 1\r\n dir_chn_2 = 1\r\n dir_chn_3 = 1\r\n else:\r\n dir_chn_1 = 0\r\n dir_chn_2 = 0\r\n dir_chn_3 = 0\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n \r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, VEC_cycle[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, remainder[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')", "def move_x(self,c, ADDR, TEMP, FREQ, REL, X, TORQUE = None):\r\n \r\n VEC = np.dot(self.T1,[X,0,0])\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n print 'Knob 2 should always need to move 0 for this. 
If it is not showing 0, then something went werd'\r\n #have each cycle take ~1 second\r\n cycle_size = int(FREQ/2)\r\n \r\n #TODO, just implement these cycles into the move XYZ general command\r\n #Direction should just be positive is 1, negative is 0\r\n if VEC[0] > 0:\r\n dir_chn_1 = 1\r\n dir_chn_3 = 0\r\n else:\r\n dir_chn_1 = 0\r\n dir_chn_3 = 1\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n \r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_sphere_sampler.move(std_gcmc_sphere_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_sphere_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_sphere_sampler.n_accepted <= n_moves\n assert len(std_gcmc_sphere_sampler.Ns) == n_moves\n assert len(std_gcmc_sphere_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_sphere_sampler.energy, Quantity)\n assert std_gcmc_sphere_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def test_move(self):\n neq_gcmc_sphere_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_sphere_sampler.move(neq_gcmc_sphere_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_sphere_sampler.n_moves == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_accepted <= 1\n assert len(neq_gcmc_sphere_sampler.Ns) == 1\n assert len(neq_gcmc_sphere_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_sphere_sampler.velocities, Quantity)\n assert neq_gcmc_sphere_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_sphere_sampler.insert_works) + len(neq_gcmc_sphere_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_left_sphere <= 1\n assert 0 <= 
neq_gcmc_sphere_sampler.n_explosions <= 1\n\n return None", "def test_deletionMove(self):\n # Prep for a move\n # Read in positions\n neq_gcmc_sphere_sampler.context = neq_gcmc_sphere_simulation.context\n state = neq_gcmc_sphere_sampler.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)\n neq_gcmc_sphere_sampler.positions = deepcopy(state.getPositions(asNumpy=True))\n neq_gcmc_sphere_sampler.velocities = deepcopy(state.getVelocities(asNumpy=True))\n\n # Update GCMC region based on current state\n neq_gcmc_sphere_sampler.updateGCMCSphere(state)\n\n # Set to NCMC integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(1)\n\n # Just run one move to make sure it doesn't crash\n neq_gcmc_sphere_sampler.deletionMove()\n\n # Reset the compound integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(0)\n\n return None", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_system_sampler.move(std_gcmc_system_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_system_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_system_sampler.n_accepted <= n_moves\n assert len(std_gcmc_system_sampler.Ns) == n_moves\n assert len(std_gcmc_system_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_system_sampler.energy, Quantity)\n assert std_gcmc_system_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def test_move(self):\n neq_gcmc_system_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_system_sampler.move(neq_gcmc_system_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_system_sampler.n_moves == 1\n assert 0 <= neq_gcmc_system_sampler.n_accepted <= 1\n assert len(neq_gcmc_system_sampler.Ns) == 1\n assert len(neq_gcmc_system_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_system_sampler.velocities, Quantity)\n assert neq_gcmc_system_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_system_sampler.insert_works) + len(neq_gcmc_system_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_system_sampler.n_explosions <= 1\n\n return None", "def test_insertionMove(self):\n # Prep for a move\n # Read in positions\n neq_gcmc_sphere_sampler.context = neq_gcmc_sphere_simulation.context\n state = neq_gcmc_sphere_sampler.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)\n neq_gcmc_sphere_sampler.positions = deepcopy(state.getPositions(asNumpy=True))\n neq_gcmc_sphere_sampler.velocities = deepcopy(state.getVelocities(asNumpy=True))\n\n # Update GCMC region based on current state\n neq_gcmc_sphere_sampler.updateGCMCSphere(state)\n\n # Set to NCMC integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(1)\n\n # Just run one move to make sure it doesn't crash\n neq_gcmc_sphere_sampler.insertionMove()\n\n # Reset the compound integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(0)\n\n return None", "def forward(self, input_xyz, input_dir):\n\n xyz_ = input_xyz\n for i in range(self.D_1):\n if i in self.skips:\n xyz_ = torch.cat([input_xyz, xyz_], -1)\n xyz_ = getattr(self, f\"xyz_encoding_{i + 1}\")(xyz_)\n\n 
mid_input=torch.cat([xyz_,input_dir],dim=-1)\n\n for i in range(self.D_2):\n if i==0:\n xyz_=getattr(self, f\"direction_encoding_{i + 1}\")(mid_input)\n else:\n xyz_ = getattr(self, f\"direction_encoding_{i + 1}\")(xyz_)\n\n out=self.visibility(xyz_)\n\n return out", "def test_deletionMove(self):\n # Prep for a move\n # Read in positions\n neq_gcmc_system_sampler.context = neq_gcmc_system_simulation.context\n state = neq_gcmc_system_sampler.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)\n neq_gcmc_system_sampler.positions = deepcopy(state.getPositions(asNumpy=True))\n neq_gcmc_system_sampler.velocities = deepcopy(state.getVelocities(asNumpy=True))\n\n # Set to NCMC integrator\n neq_gcmc_system_sampler.compound_integrator.setCurrentIntegrator(1)\n\n # Just run one move to make sure it doesn't crash\n neq_gcmc_system_sampler.deletionMove()\n\n # Reset the compound integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(0)\n\n return None", "def move(axis, value):\n #print(\"moving\", axis, value)\n if axis in sample:\n sample[axis] = value\n elif axis == \"detectorMaskMap\":\n # Detector mask is \"0\", \"2\", \"4\", \"6\", \"8\" or \"10\"\n value = str(int(value/2)*2)\n candor.move(**{axis: value})\n elif axis == \"Q_z\":\n # Update slits to maintain constant footprint whem moving Qz\n F = sample['sample_width']\n sample_angle = candor['sampleAngleMotor']\n candor.move(**{axis: value})\n L2S = abs(candor.PRE_SAMPLE_SLIT_Z)\n L12 = abs(candor.SOURCE_APERTURE_Z - candor.PRE_SAMPLE_SLIT_Z)\n S2 = F*np.sin(np.radians(sample_angle))/(1+(1+R12)*L2S/L12)\n S1 = S2 * R12\n candor.move(slitAperture1=S1, slitAperture2=S2)\n else:\n # TODO: check that qx is capturing diffuse beam\n candor.move(**{axis: value})", "def move_step(self, dvec):\n if not ((dvec.x * dvec.y) == 0):\n print(\"vector cannot contain both x and y componenets\")\n return 0\n step = round(dvec*(1/abs(dvec)))\n new_cloc = self.get_cloc() + step\n self.order_hist.append(new_cloc)\n self.unorder_hist.add(new_cloc)\n return step*-1", "def move(self, c, ADDR, CH, TYPE, TEMP, DIR, FREQ, REL, STEPS, TORQUE = None):\r\n if self.device_detected == True:\r\n #Add input checks\r\n if TORQUE == None:\r\n resp = yield subprocess.check_output(\"cacli MOV \"+str(ADDR) + \" \" + str(CH)\r\n + \" \" + TYPE + \" \" + str(TEMP) + \" \" + str(DIR) + \" \" + str(FREQ) + \" \" +\r\n str(REL) + \" \" + str(STEPS))\r\n else:\r\n resp = yield subprocess.check_output(\"cacli MOV \"+str(ADDR) + \" \" + str(CH)\r\n + \" \" + TYPE + \" \" + str(TEMP) + \" \" + str(DIR) + \" \" + str(FREQ) + \" \" +\r\n str(REL) + \" \" + str(STEPS) + \" \" + str(TORQUE))\r\n else:\r\n resp = \"Device not connected.\"\r\n print \"Device not connected. 
\"\r\n #Eventually make this actually throw an error instead of printing something\r\n returnValue(resp)", "def test_insertionMove(self):\n # Prep for a move\n # Read in positions\n neq_gcmc_system_sampler.context = neq_gcmc_system_simulation.context\n state = neq_gcmc_system_sampler.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)\n neq_gcmc_system_sampler.positions = deepcopy(state.getPositions(asNumpy=True))\n neq_gcmc_system_sampler.velocities = deepcopy(state.getVelocities(asNumpy=True))\n\n # Set to NCMC integrator\n neq_gcmc_system_sampler.compound_integrator.setCurrentIntegrator(1)\n\n # Just run one move to make sure it doesn't crash\n neq_gcmc_system_sampler.insertionMove()\n\n # Reset the compound integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(0)\n\n return None", "def test_calc_move(self):\n t = AioBaseTurtle()\n t.speed(speed=5)\n steps, delta = t._calc_move(Vec2D(0, 100))\n self.assertEqual(steps, 20)\n self.assertAlmostEqual(delta[0], 0.0)\n self.assertAlmostEqual(delta[1], 5.0)", "def next_move(self, sample_todo, measurements):\n dist = .4\n dist_inc = .3\n\n if self.sample == True:\n action = 'sample '\n self.sample = False\n return action\n\n # raise NotImplementedError\n if len(self.todo) == 0:\n self.todo = sample_todo\n # print self.todo\n\n if self.todo != sample_todo:\n # print \"sample found\", self.todo, sample_todo\n # print \"found\" \n if self.last_todo != sample_todo:\n # print \"found new site\"\n self.robot_found = False\n\n if self.movements == 3:\n self.distance += dist_inc\n steering = .71\n self.movements = 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n elif self.movements == 0: # first movement\n self.distance = dist\n steering = .71\n self.movements += 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n else:\n steering = 0\n self.movements += 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n\n if self.robot_found == True:\n steering = measurements[self.site_id]['bearing']\n distance = measurements[self.site_id]['distance']\n \n # print distance \n # exit()\n if (distance > self.max_dist):\n distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n # print \"going to found site\", steering, distance\n self.distance = distance\n else:\n for m in measurements:\n # print m\n if measurements[m]['type'] == 'site':\n self.robot_found = True\n self.site_id = m\n steering = measurements[m]['bearing']\n distance = measurements[m]['distance']\n\n if (distance > self.max_dist):\n distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n\n self.distance = distance\n break\n\n if (self.distance > self.max_dist):\n self.distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n\n self.last_todo = sample_todo\n self.bearing = truncate_angle(self.bearing + float (steering))\n self.x = self.x + (self.distance * math.cos(self.bearing))\n self.y = self.y + (self.distance * math.sin(self.bearing))\n \n action = 'move ' + str(steering) + ' ' + str(self.distance)\n \n self.sample = True\n return action\n \n\n if self.explore == True:\n if self.movements == 7:\n self.distance += dist_inc\n steering = 
1.570963\n self.movements = 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n elif self.movements == 0: # first movement\n self.distance = dist\n steering = 1.570963\n self.movements += 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n else:\n steering = 0\n self.movements += 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n # print measurements\n \n if self.site_id == 0:\n for m in measurements:\n if measurements[m]['type'] == 'site':\n self.site_id = m\n # print \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@site\",m\n steering = measurements[m]['bearing']\n distance = measurements[m]['distance']\n # print steering\n # print distance \n # exit()\n if (distance > self.max_dist):\n distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n\n self.distance = distance\n break\n else:\n steering = measurements[self.site_id]['bearing']\n distance = measurements[self.site_id]['distance']\n # print steering\n # print distance \n # exit()\n if (distance > self.max_dist):\n distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n\n self.distance = distance\n\n self.bearing = truncate_angle(self.bearing + float(steering))\n self.x = self.x + (self.distance * math.cos(self.bearing))\n self.y = self.y + (self.distance * math.sin(self.bearing))\n\n # print \"ice rover x,y\", self.x, self.y\n action = 'move ' + str(steering) + ' ' + str(self.distance)\n # print \"movements \", self.movements\n # print \"bearing is \", self.bearing\n # print \"action is\", action\n return action", "def move_y(self,c, ADDR, TEMP, FREQ, REL, Y, TORQUE = None):\r\n \r\n VEC = np.dot(self.T1,[0,Y,0])\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n \r\n #Have each cycle take ~1.5 seconds\r\n cycle_size = int(FREQ/2)\r\n \r\n #Determine the direction\r\n if VEC[0] >0:\r\n dir_chn_1 = 1\r\n dir_chn_2 = 0\r\n dir_chn_3 = 1\r\n else:\r\n dir_chn_1 = 0\r\n dir_chn_2 = 1\r\n dir_chn_3 = 0\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n\r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, VEC_cycle[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield 
self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, remainder[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')", "def next_sample(self, z):\n xp = self.A @ self.x\n Pp = self.A @ self.P @ self.A.T + self.Q\n\n self.K = Pp @ self.H.T * inv(self.H @ Pp @ self.H.T + self.R)\n\n self.x = xp + self.K @ (np.array([[z]] - self.H @ xp))\n self.P = Pp - self.K @ self.H @ Pp\n\n self.pos = self.x[0]\n self.vel = self.x[1]\n return self.pos, self.vel", "def next_sample(self, z):\n xp = self.A @ self.x\n Pp = self.A @ self.P @ self.A.T + self.Q\n\n self.K = Pp @ self.H.T * inv(self.H @ Pp @ self.H.T + self.R)\n\n self.x = xp + self.K @ (np.array([[z]] - self.H @ xp))\n self.P = Pp - self.K @ self.H @ Pp\n\n self.pos = self.x[0]\n self.vel = self.x[1]\n return self.pos, self.vel", "def moveit_cartesian_path(start_pos, start_quat,\n delta_xyz, moveit_group,\n eef_step, jump_threshold=0.0):\n start_pos = np.array(start_pos).flatten()\n\n delta_xyz = np.array(delta_xyz).flatten()\n end_pos = start_pos + delta_xyz\n moveit_waypoints = []\n wpose = moveit_group.get_current_pose().pose\n wpose.position.x = start_pos[0]\n wpose.position.y = start_pos[1]\n wpose.position.z = start_pos[2]\n wpose.orientation.x = start_quat[0]\n wpose.orientation.y = start_quat[1]\n wpose.orientation.z = start_quat[2]\n wpose.orientation.w = start_quat[3]\n moveit_waypoints.append(copy.deepcopy(wpose))\n\n wpose.position.x = end_pos[0]\n wpose.position.y = end_pos[1]\n wpose.position.z = end_pos[2]\n wpose.orientation.x = start_quat[0]\n wpose.orientation.y = start_quat[1]\n wpose.orientation.z = start_quat[2]\n wpose.orientation.w = start_quat[3]\n moveit_waypoints.append(copy.deepcopy(wpose))\n\n (plan, fraction) = moveit_group.compute_cartesian_path(\n moveit_waypoints, # waypoints to follow\n eef_step, # eef_step\n jump_threshold) # jump_threshold\n return plan", "def calc_acc_frame(velocity, step_size, frame, vel_start_frame):\n #The offset required due to the velocities starting a vel_start_frame\n acc_offset = frame - vel_start_frame + 1\n if ((acc_offset) < step_size):\n raise IndexError(\"Acceleration cannot be calculated for this frame\")\n else:\n try:\n acc = (velocity[acc_offset - 1] - velocity[acc_offset - 1 - step_size]) / step_size\n return acc\n #return round(acc,2)\n except IndexError:\n print(\"Frame or step_size out of bounds\")", "def run(self):\n for direction in self.directions:\n rotation = direction[0]\n steps = direction[1]\n\n self.make_rotation(rotation)\n hq_found = self.travel(steps)\n\n if hq_found:\n return (abs(self.new_loc[0] + self.new_loc[1]))", "def move():\n # step 1 of task analysis: get data\n data = get_data('MovementData/Walking_02.txt')\n # step 2: get the initial orientation of the sensor\n sensor_orientation = get_init_orientation_sensor(data.acc[0])\n # step 3: get the vector of the right horizontal semi-circular canal's on-direction\n rhscc_init_on_dir = get_init_on_dir_rh_scc(15)\n # preparation for step 4: align the angular velocity sensor data 
with the global coordinate system\n angular_velocities_aligned_globally = align_sensor_data_globally(data.omega, sensor_orientation)\n # step 4: calculate the stimulation of the cupula\n stimuli = get_scc_stimulation(angular_velocities_aligned_globally, rhscc_init_on_dir)\n # step 5: get the transfer function of the scc with the dynamics provided in the lecture\n scc_trans_fun = get_scc_transfer_fun(0.01, 5)\n # step 6: get the cupular deflection\n max_cupular_deflection = calculate_max_cupular_deflection(scc_trans_fun, stimuli, data.rate)\n # preparation for step 7: align the acceleration sensor data with the global coordinate system\n accelerations_aligned_globally = align_sensor_data_globally(data.acc, sensor_orientation)\n # step 8: calculate the maxmimum left- and rightwards stimulation of the otolithic organ\n max_left_right_stimuli = calculate_otolithic_max_stimuli(accelerations_aligned_globally, 1)\n # step 9: calculate the head orientation\n head_orientations = calculate_head_orientation(angular_velocities_aligned_globally, data.rate)\n\n return max_cupular_deflection, max_left_right_stimuli, head_orientations", "def test03_sample_ray_differential(variants_vec_spectral, origin, direction):\n near_clip = 1.0\n camera = create_camera(origin, direction, near_clip=near_clip)\n\n time = 0.5\n wav_sample = [0.5, 0.33, 0.1]\n pos_sample = [[0.2, 0.1, 0.2], [0.6, 0.9, 0.2]]\n\n ray, spec_weight = camera.sample_ray_differential(time, wav_sample, pos_sample, 0)\n\n # Importance sample wavelength and weight\n wav, spec = mi.sample_rgb_spectrum(mi.sample_shifted(wav_sample))\n\n assert dr.allclose(ray.wavelengths, wav)\n assert dr.allclose(spec_weight, spec)\n assert dr.allclose(ray.time, time)\n\n inv_z = dr.rcp((camera.world_transform().inverse() @ ray.d).z)\n o = mi.Point3f(origin) + near_clip * inv_z * mi.Vector3f(ray.d)\n assert dr.allclose(ray.o, o, atol=1e-4)\n\n\n # Check that the derivatives are orthogonal\n assert dr.allclose(dr.dot(ray.d_x - ray.d, ray.d_y - ray.d), 0, atol=1e-7)\n\n # Check that a [0.5, 0.5] position_sample generates a ray\n # that points in the camera direction\n ray_center, _ = camera.sample_ray_differential(0, 0, [0.5, 0.5], 0)\n assert dr.allclose(ray_center.d, direction, atol=1e-7)\n\n # Check correctness of the ray derivatives\n\n # Deltas in screen space\n dx = 1.0 / camera.film().crop_size().x\n dy = 1.0 / camera.film().crop_size().y\n\n # Sample the rays by offsetting the position_sample with the deltas\n ray_dx, _ = camera.sample_ray_differential(0, 0, [0.5 + dx, 0.5], 0)\n ray_dy, _ = camera.sample_ray_differential(0, 0, [0.5, 0.5 + dy], 0)\n\n assert dr.allclose(ray_dx.d, ray_center.d_x)\n assert dr.allclose(ray_dy.d, ray_center.d_y)", "def distanceXYZ(self, samples=None, dXYZ_factors=None):\n data = self.get_data(traces=['psdXYZ', 'positionXYZ'], samples=samples)\n psdXYZ = data[:, 0:3]\n positionXYZ = data[:, 3:6]\n calibration = self.calibration\n\n distXYZ = distanceXYZ(positionXYZ, psdXYZ=psdXYZ,\n calibration=calibration,\n dXYZ_factors=dXYZ_factors)\n return distXYZ", "def linear_move(self, initial_position, final_position):\n if any(initial_position - final_position):\n # The desired position is not the actual position (would make a 'divide by zero' error otherwise)\n\n # Compute directional vector\n dir_vector = final_position - initial_position\n\n # Divide directional vector as a series of vector of norm 10um\n step_vector = 15 * dir_vector/np.linalg.norm(dir_vector)\n\n # Number of sub-directional vector to make\n nb_step = 
np.linalg.norm(dir_vector) / 15.\n\n # Moving the arm\n for step in range(1, int(nb_step)+1):\n intermediate_position = step * self.inv_mat * step_vector\n self.arm.absolute_move_group(self.inv_mat*initial_position + intermediate_position, [0, 1, 2])\n time.sleep(0.1)\n\n # make final move to desired position\n self.arm.absolute_move_group(self.inv_mat*final_position, [0, 1, 2])\n pass", "def drag(self, points, steps=None, durations=None):\n if durations is None:\n durations = [0.1] * (len(points) - 1)\n\n if steps is None:\n steps = [10] * (len(points) - 1)\n\n if len(points) < 2:\n raise EnvironmentError(\"Need at least 2 points\")\n\n if len(durations) + 1 != len(points):\n raise EnvironmentError(\n \"Durations size should be one less than points size.\")\n\n if len(steps) + 1 != len(points):\n raise EnvironmentError(\"Steps size should be one less than points size.\")\n\n self.android_device_driver.drag_start(points[0].x, points[0].y)\n time.sleep(0.5)\n for i in range(len(points) - 1):\n startx = points[i].x\n starty = points[i].y\n endx = points[i + 1].x\n endy = points[i + 1].y\n disx = (endx - startx) / steps[i]\n disy = (endy - starty) / steps[i]\n for j in range(steps[i]):\n self.android_device_driver.drag_move(startx + disx * j,\n starty + disy * j)\n time.sleep(durations[i])\n\n self.android_device_driver.drag_stop(points[-1].x, points[-1].y)", "def distance(acceleration_linX, acceleration_linY, acceleration_linZ, deltat):\n acceleration_linX = abs(acceleration_linX)\n if -0.05 <= acceleration_linX <= 0.05:\n acceleration_linX = 0.0\n velocityX = integrations.getVelocityX(acceleration_linX, deltat)\n else:\n velocityX = integrations.getVelocityX(acceleration_linX, deltat)\n\n positionX, distanceX = integrations.getPositionX(\n velocityX, acceleration_linX, deltat)\n return positionX, distanceX # , positionY, positionZ", "def test_squared_moving_integration_args():\n from sleepecg._heartbeat_detection import _squared_moving_integration\n x = np.array([0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0])\n window_length = 10\n\n _squared_moving_integration(x, window_length)\n _squared_moving_integration(x=x, window_length=window_length)\n _squared_moving_integration(x, window_length=window_length)\n _squared_moving_integration(window_length=window_length, x=x)", "def distance(XYZ1=np.array([0, 0, 0], dtype='float32'),\n XYZ2=np.array([1, 1, 1], dtype='float32')):\n a=XYZ2-XYZ1\n b=a**2\n c=b.sum()\n return np.sqrt(c)" ]
[ "0.54861337", "0.53565013", "0.53368104", "0.52942014", "0.5061911", "0.5029198", "0.49740016", "0.49231988", "0.49029607", "0.48911953", "0.48539475", "0.4834735", "0.47666973", "0.47465584", "0.47099647", "0.46815482", "0.46700305", "0.46635395", "0.46635395", "0.46608484", "0.46347573", "0.4594618", "0.45917863", "0.45753914", "0.4573569", "0.45628405", "0.45623675", "0.4552984", "0.45518774", "0.45469072" ]
0.6438862
0
Request CADM to move the sample according to the arbitrary vector XYZ. XYZ should be a 3 element list with the number of steps to be taken in the x, y, and z directions respectively. Integers are not necessary because the xyz coordinates need to be transformed into other coordinates first, after which they will be rounded. Output is not yet implemented; it will return the true number of steps taken in the x, y, and z directions (not necessarily equal), and the number of steps taken in radial directions.
def move_z(self,c, ADDR, TEMP, FREQ, REL, Z, TORQUE = None):
    #Calculate steps in knobs 1 2 and 3
    VEC = np.dot(self.T1,[0.0,0.0,Z])
    VEC = self.adjustForWeight(VEC)
    VEC = [round(x) for x in VEC]
    print VEC

    #Have each cycle take ~1.5 seconds
    cycle_size = float(FREQ/2)

    #Determine the direction
    if VEC[0] >0:
        dir_chn_1 = 1
        dir_chn_2 = 1
        dir_chn_3 = 1
    else:
        dir_chn_1 = 0
        dir_chn_2 = 0
        dir_chn_3 = 0

    #Find the largest number of steps that need to be taken
    max = np.max(np.abs(VEC))
    #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)
    num_cycles = floor(max / cycle_size)
    #Determine the amount to move each cycle in each channel
    VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]
    remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]

    print "Taking " + str(VEC) + " steps in channel 1, 2 and 3 respectively."
    print "This will be done over " + str(num_cycles) + " cycles of " + str(VEC_cycle) + " steps."
    print "And a final cycle with the remainder of " + str(remainder) + " steps."

    VEC_cycle = np.abs(VEC_cycle)
    remainder = np.abs(remainder)

    for i in range (0,int(num_cycles)):
        if VEC_cycle[0] > 0:
            yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)
            yield self.pause_while_moving(c,ADDR)
        if VEC_cycle[1] > 0:
            yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, VEC_cycle[1], TORQUE)
            yield self.pause_while_moving(c,ADDR)
        if VEC_cycle[2] > 0:
            yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)
            yield self.pause_while_moving(c,ADDR)

    tot_remain = 0
    for rem in remainder:
        tot_remain = tot_remain + rem

    if tot_remain != 0:
        if remainder[0] > 0:
            yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)
            yield self.pause_while_moving(c,ADDR)
        if remainder[1] > 0:
            yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, remainder[1], TORQUE)
            yield self.pause_while_moving(c,ADDR)
        if remainder[2] > 0:
            yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)
            yield self.pause_while_moving(c,ADDR)

    returnValue('Success!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_xyz(self,c, ADDR, TEMP, FREQ, REL, XYZ, TORQUE = None):\r\n try:\r\n VEC = np.dot(self.T1,XYZ)\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n \r\n #have each cycle take ~1 second\r\n cycle_size = int(FREQ/2)\r\n \r\n if VEC[0] > 0:\r\n dir_chn_1 = 1\r\n else:\r\n dir_chn_1 = 0\r\n\r\n if VEC[1] > 0:\r\n dir_chn_2 = 1\r\n else:\r\n dir_chn_2 = 0\r\n \r\n if VEC[2] > 0:\r\n dir_chn_3 = 1\r\n else:\r\n dir_chn_3 = 0\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n\r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, VEC_cycle[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, remainder[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')\r\n except Exception as inst:\r\n print inst", "def move_x(self,c, ADDR, TEMP, FREQ, REL, X, TORQUE = None):\r\n \r\n VEC = np.dot(self.T1,[X,0,0])\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n print 'Knob 2 should always need to move 0 for this. 
If it is not showing 0, then something went werd'\r\n #have each cycle take ~1 second\r\n cycle_size = int(FREQ/2)\r\n \r\n #TODO, just implement these cycles into the move XYZ general command\r\n #Direction should just be positive is 1, negative is 0\r\n if VEC[0] > 0:\r\n dir_chn_1 = 1\r\n dir_chn_3 = 0\r\n else:\r\n dir_chn_1 = 0\r\n dir_chn_3 = 1\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n \r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_sphere_sampler.move(std_gcmc_sphere_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_sphere_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_sphere_sampler.n_accepted <= n_moves\n assert len(std_gcmc_sphere_sampler.Ns) == n_moves\n assert len(std_gcmc_sphere_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_sphere_sampler.energy, Quantity)\n assert std_gcmc_sphere_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def test_move(self):\n neq_gcmc_sphere_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_sphere_sampler.move(neq_gcmc_sphere_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_sphere_sampler.n_moves == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_accepted <= 1\n assert len(neq_gcmc_sphere_sampler.Ns) == 1\n assert len(neq_gcmc_sphere_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_sphere_sampler.velocities, Quantity)\n assert neq_gcmc_sphere_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_sphere_sampler.insert_works) + len(neq_gcmc_sphere_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_left_sphere <= 1\n assert 0 <= 
neq_gcmc_sphere_sampler.n_explosions <= 1\n\n return None", "def test_deletionMove(self):\n # Prep for a move\n # Read in positions\n neq_gcmc_sphere_sampler.context = neq_gcmc_sphere_simulation.context\n state = neq_gcmc_sphere_sampler.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)\n neq_gcmc_sphere_sampler.positions = deepcopy(state.getPositions(asNumpy=True))\n neq_gcmc_sphere_sampler.velocities = deepcopy(state.getVelocities(asNumpy=True))\n\n # Update GCMC region based on current state\n neq_gcmc_sphere_sampler.updateGCMCSphere(state)\n\n # Set to NCMC integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(1)\n\n # Just run one move to make sure it doesn't crash\n neq_gcmc_sphere_sampler.deletionMove()\n\n # Reset the compound integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(0)\n\n return None", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_system_sampler.move(std_gcmc_system_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_system_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_system_sampler.n_accepted <= n_moves\n assert len(std_gcmc_system_sampler.Ns) == n_moves\n assert len(std_gcmc_system_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_system_sampler.energy, Quantity)\n assert std_gcmc_system_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def test_move(self):\n neq_gcmc_system_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_system_sampler.move(neq_gcmc_system_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_system_sampler.n_moves == 1\n assert 0 <= neq_gcmc_system_sampler.n_accepted <= 1\n assert len(neq_gcmc_system_sampler.Ns) == 1\n assert len(neq_gcmc_system_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_system_sampler.velocities, Quantity)\n assert neq_gcmc_system_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_system_sampler.insert_works) + len(neq_gcmc_system_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_system_sampler.n_explosions <= 1\n\n return None", "def test_insertionMove(self):\n # Prep for a move\n # Read in positions\n neq_gcmc_sphere_sampler.context = neq_gcmc_sphere_simulation.context\n state = neq_gcmc_sphere_sampler.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)\n neq_gcmc_sphere_sampler.positions = deepcopy(state.getPositions(asNumpy=True))\n neq_gcmc_sphere_sampler.velocities = deepcopy(state.getVelocities(asNumpy=True))\n\n # Update GCMC region based on current state\n neq_gcmc_sphere_sampler.updateGCMCSphere(state)\n\n # Set to NCMC integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(1)\n\n # Just run one move to make sure it doesn't crash\n neq_gcmc_sphere_sampler.insertionMove()\n\n # Reset the compound integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(0)\n\n return None", "def forward(self, input_xyz, input_dir):\n\n xyz_ = input_xyz\n for i in range(self.D_1):\n if i in self.skips:\n xyz_ = torch.cat([input_xyz, xyz_], -1)\n xyz_ = getattr(self, f\"xyz_encoding_{i + 1}\")(xyz_)\n\n 
mid_input=torch.cat([xyz_,input_dir],dim=-1)\n\n for i in range(self.D_2):\n if i==0:\n xyz_=getattr(self, f\"direction_encoding_{i + 1}\")(mid_input)\n else:\n xyz_ = getattr(self, f\"direction_encoding_{i + 1}\")(xyz_)\n\n out=self.visibility(xyz_)\n\n return out", "def test_deletionMove(self):\n # Prep for a move\n # Read in positions\n neq_gcmc_system_sampler.context = neq_gcmc_system_simulation.context\n state = neq_gcmc_system_sampler.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)\n neq_gcmc_system_sampler.positions = deepcopy(state.getPositions(asNumpy=True))\n neq_gcmc_system_sampler.velocities = deepcopy(state.getVelocities(asNumpy=True))\n\n # Set to NCMC integrator\n neq_gcmc_system_sampler.compound_integrator.setCurrentIntegrator(1)\n\n # Just run one move to make sure it doesn't crash\n neq_gcmc_system_sampler.deletionMove()\n\n # Reset the compound integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(0)\n\n return None", "def move(axis, value):\n #print(\"moving\", axis, value)\n if axis in sample:\n sample[axis] = value\n elif axis == \"detectorMaskMap\":\n # Detector mask is \"0\", \"2\", \"4\", \"6\", \"8\" or \"10\"\n value = str(int(value/2)*2)\n candor.move(**{axis: value})\n elif axis == \"Q_z\":\n # Update slits to maintain constant footprint whem moving Qz\n F = sample['sample_width']\n sample_angle = candor['sampleAngleMotor']\n candor.move(**{axis: value})\n L2S = abs(candor.PRE_SAMPLE_SLIT_Z)\n L12 = abs(candor.SOURCE_APERTURE_Z - candor.PRE_SAMPLE_SLIT_Z)\n S2 = F*np.sin(np.radians(sample_angle))/(1+(1+R12)*L2S/L12)\n S1 = S2 * R12\n candor.move(slitAperture1=S1, slitAperture2=S2)\n else:\n # TODO: check that qx is capturing diffuse beam\n candor.move(**{axis: value})", "def move_step(self, dvec):\n if not ((dvec.x * dvec.y) == 0):\n print(\"vector cannot contain both x and y componenets\")\n return 0\n step = round(dvec*(1/abs(dvec)))\n new_cloc = self.get_cloc() + step\n self.order_hist.append(new_cloc)\n self.unorder_hist.add(new_cloc)\n return step*-1", "def move(self, c, ADDR, CH, TYPE, TEMP, DIR, FREQ, REL, STEPS, TORQUE = None):\r\n if self.device_detected == True:\r\n #Add input checks\r\n if TORQUE == None:\r\n resp = yield subprocess.check_output(\"cacli MOV \"+str(ADDR) + \" \" + str(CH)\r\n + \" \" + TYPE + \" \" + str(TEMP) + \" \" + str(DIR) + \" \" + str(FREQ) + \" \" +\r\n str(REL) + \" \" + str(STEPS))\r\n else:\r\n resp = yield subprocess.check_output(\"cacli MOV \"+str(ADDR) + \" \" + str(CH)\r\n + \" \" + TYPE + \" \" + str(TEMP) + \" \" + str(DIR) + \" \" + str(FREQ) + \" \" +\r\n str(REL) + \" \" + str(STEPS) + \" \" + str(TORQUE))\r\n else:\r\n resp = \"Device not connected.\"\r\n print \"Device not connected. 
\"\r\n #Eventually make this actually throw an error instead of printing something\r\n returnValue(resp)", "def test_insertionMove(self):\n # Prep for a move\n # Read in positions\n neq_gcmc_system_sampler.context = neq_gcmc_system_simulation.context\n state = neq_gcmc_system_sampler.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)\n neq_gcmc_system_sampler.positions = deepcopy(state.getPositions(asNumpy=True))\n neq_gcmc_system_sampler.velocities = deepcopy(state.getVelocities(asNumpy=True))\n\n # Set to NCMC integrator\n neq_gcmc_system_sampler.compound_integrator.setCurrentIntegrator(1)\n\n # Just run one move to make sure it doesn't crash\n neq_gcmc_system_sampler.insertionMove()\n\n # Reset the compound integrator\n neq_gcmc_sphere_sampler.compound_integrator.setCurrentIntegrator(0)\n\n return None", "def test_calc_move(self):\n t = AioBaseTurtle()\n t.speed(speed=5)\n steps, delta = t._calc_move(Vec2D(0, 100))\n self.assertEqual(steps, 20)\n self.assertAlmostEqual(delta[0], 0.0)\n self.assertAlmostEqual(delta[1], 5.0)", "def next_move(self, sample_todo, measurements):\n dist = .4\n dist_inc = .3\n\n if self.sample == True:\n action = 'sample '\n self.sample = False\n return action\n\n # raise NotImplementedError\n if len(self.todo) == 0:\n self.todo = sample_todo\n # print self.todo\n\n if self.todo != sample_todo:\n # print \"sample found\", self.todo, sample_todo\n # print \"found\" \n if self.last_todo != sample_todo:\n # print \"found new site\"\n self.robot_found = False\n\n if self.movements == 3:\n self.distance += dist_inc\n steering = .71\n self.movements = 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n elif self.movements == 0: # first movement\n self.distance = dist\n steering = .71\n self.movements += 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n else:\n steering = 0\n self.movements += 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n\n if self.robot_found == True:\n steering = measurements[self.site_id]['bearing']\n distance = measurements[self.site_id]['distance']\n \n # print distance \n # exit()\n if (distance > self.max_dist):\n distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n # print \"going to found site\", steering, distance\n self.distance = distance\n else:\n for m in measurements:\n # print m\n if measurements[m]['type'] == 'site':\n self.robot_found = True\n self.site_id = m\n steering = measurements[m]['bearing']\n distance = measurements[m]['distance']\n\n if (distance > self.max_dist):\n distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n\n self.distance = distance\n break\n\n if (self.distance > self.max_dist):\n self.distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n\n self.last_todo = sample_todo\n self.bearing = truncate_angle(self.bearing + float (steering))\n self.x = self.x + (self.distance * math.cos(self.bearing))\n self.y = self.y + (self.distance * math.sin(self.bearing))\n \n action = 'move ' + str(steering) + ' ' + str(self.distance)\n \n self.sample = True\n return action\n \n\n if self.explore == True:\n if self.movements == 7:\n self.distance += dist_inc\n steering = 
1.570963\n self.movements = 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n elif self.movements == 0: # first movement\n self.distance = dist\n steering = 1.570963\n self.movements += 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n else:\n steering = 0\n self.movements += 1\n # action = 'move ' + str(steering) + ' ' + str(self.distance)\n self.sample = True\n # print measurements\n \n if self.site_id == 0:\n for m in measurements:\n if measurements[m]['type'] == 'site':\n self.site_id = m\n # print \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@site\",m\n steering = measurements[m]['bearing']\n distance = measurements[m]['distance']\n # print steering\n # print distance \n # exit()\n if (distance > self.max_dist):\n distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n\n self.distance = distance\n break\n else:\n steering = measurements[self.site_id]['bearing']\n distance = measurements[self.site_id]['distance']\n # print steering\n # print distance \n # exit()\n if (distance > self.max_dist):\n distance = self.max_dist\n\n if (steering > self.max_steer):\n steering = self.max_steer\n\n if (steering < (-self.max_steer)):\n steering = -self.max_steer\n\n self.distance = distance\n\n self.bearing = truncate_angle(self.bearing + float(steering))\n self.x = self.x + (self.distance * math.cos(self.bearing))\n self.y = self.y + (self.distance * math.sin(self.bearing))\n\n # print \"ice rover x,y\", self.x, self.y\n action = 'move ' + str(steering) + ' ' + str(self.distance)\n # print \"movements \", self.movements\n # print \"bearing is \", self.bearing\n # print \"action is\", action\n return action", "def move_y(self,c, ADDR, TEMP, FREQ, REL, Y, TORQUE = None):\r\n \r\n VEC = np.dot(self.T1,[0,Y,0])\r\n VEC = self.adjustForWeight(VEC)\r\n VEC = [round(x) for x in VEC]\r\n print VEC\r\n \r\n #Have each cycle take ~1.5 seconds\r\n cycle_size = int(FREQ/2)\r\n \r\n #Determine the direction\r\n if VEC[0] >0:\r\n dir_chn_1 = 1\r\n dir_chn_2 = 0\r\n dir_chn_3 = 1\r\n else:\r\n dir_chn_1 = 0\r\n dir_chn_2 = 1\r\n dir_chn_3 = 0\r\n \r\n #Find the largest number of steps that need to be taken\r\n max = np.max(np.abs(VEC))\r\n #Determine the number of cycles based on the max number of step taken in a cycle (cycle_size)\r\n num_cycles = floor(max / cycle_size)\r\n #Determine the amount to move each cycle in each channel \r\n VEC_cycle = [int(x) for x in np.multiply(VEC, cycle_size / max)]\r\n remainder = [int(x) for x in np.subtract(VEC, np.multiply(VEC_cycle, num_cycles))]\r\n \r\n print \"Taking \" + str(VEC) + \" steps in channel 1, 2 and 3 respectively.\"\r\n print \"This will be done over \" + str(num_cycles) + \" cycles of \" + str(VEC_cycle) + \" steps.\"\r\n print \"And a final cycle with the remainder of \" + str(remainder) + \" steps.\"\r\n\r\n VEC_cycle = np.abs(VEC_cycle)\r\n remainder = np.abs(remainder)\r\n \r\n for i in range (0,int(num_cycles)):\r\n if VEC_cycle[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, VEC_cycle[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, VEC_cycle[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if VEC_cycle[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, VEC_cycle[2], TORQUE)\r\n yield 
self.pause_while_moving(c,ADDR)\r\n \r\n tot_remain = 0\r\n for rem in remainder:\r\n tot_remain = tot_remain + rem\r\n \r\n if tot_remain != 0:\r\n if remainder[0] > 0:\r\n yield self.move(c, ADDR, 1, 'CA1801', TEMP, dir_chn_1, FREQ, REL, remainder[0], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[1] > 0:\r\n yield self.move(c, ADDR, 2, 'CA1801', TEMP, dir_chn_2, FREQ, REL, remainder[1], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n if remainder[2] > 0:\r\n yield self.move(c, ADDR, 3, 'CA1801', TEMP, dir_chn_3, FREQ, REL, remainder[2], TORQUE)\r\n yield self.pause_while_moving(c,ADDR)\r\n \r\n returnValue('Success!')", "def next_sample(self, z):\n xp = self.A @ self.x\n Pp = self.A @ self.P @ self.A.T + self.Q\n\n self.K = Pp @ self.H.T * inv(self.H @ Pp @ self.H.T + self.R)\n\n self.x = xp + self.K @ (np.array([[z]] - self.H @ xp))\n self.P = Pp - self.K @ self.H @ Pp\n\n self.pos = self.x[0]\n self.vel = self.x[1]\n return self.pos, self.vel", "def next_sample(self, z):\n xp = self.A @ self.x\n Pp = self.A @ self.P @ self.A.T + self.Q\n\n self.K = Pp @ self.H.T * inv(self.H @ Pp @ self.H.T + self.R)\n\n self.x = xp + self.K @ (np.array([[z]] - self.H @ xp))\n self.P = Pp - self.K @ self.H @ Pp\n\n self.pos = self.x[0]\n self.vel = self.x[1]\n return self.pos, self.vel", "def moveit_cartesian_path(start_pos, start_quat,\n delta_xyz, moveit_group,\n eef_step, jump_threshold=0.0):\n start_pos = np.array(start_pos).flatten()\n\n delta_xyz = np.array(delta_xyz).flatten()\n end_pos = start_pos + delta_xyz\n moveit_waypoints = []\n wpose = moveit_group.get_current_pose().pose\n wpose.position.x = start_pos[0]\n wpose.position.y = start_pos[1]\n wpose.position.z = start_pos[2]\n wpose.orientation.x = start_quat[0]\n wpose.orientation.y = start_quat[1]\n wpose.orientation.z = start_quat[2]\n wpose.orientation.w = start_quat[3]\n moveit_waypoints.append(copy.deepcopy(wpose))\n\n wpose.position.x = end_pos[0]\n wpose.position.y = end_pos[1]\n wpose.position.z = end_pos[2]\n wpose.orientation.x = start_quat[0]\n wpose.orientation.y = start_quat[1]\n wpose.orientation.z = start_quat[2]\n wpose.orientation.w = start_quat[3]\n moveit_waypoints.append(copy.deepcopy(wpose))\n\n (plan, fraction) = moveit_group.compute_cartesian_path(\n moveit_waypoints, # waypoints to follow\n eef_step, # eef_step\n jump_threshold) # jump_threshold\n return plan", "def calc_acc_frame(velocity, step_size, frame, vel_start_frame):\n #The offset required due to the velocities starting a vel_start_frame\n acc_offset = frame - vel_start_frame + 1\n if ((acc_offset) < step_size):\n raise IndexError(\"Acceleration cannot be calculated for this frame\")\n else:\n try:\n acc = (velocity[acc_offset - 1] - velocity[acc_offset - 1 - step_size]) / step_size\n return acc\n #return round(acc,2)\n except IndexError:\n print(\"Frame or step_size out of bounds\")", "def run(self):\n for direction in self.directions:\n rotation = direction[0]\n steps = direction[1]\n\n self.make_rotation(rotation)\n hq_found = self.travel(steps)\n\n if hq_found:\n return (abs(self.new_loc[0] + self.new_loc[1]))", "def move():\n # step 1 of task analysis: get data\n data = get_data('MovementData/Walking_02.txt')\n # step 2: get the initial orientation of the sensor\n sensor_orientation = get_init_orientation_sensor(data.acc[0])\n # step 3: get the vector of the right horizontal semi-circular canal's on-direction\n rhscc_init_on_dir = get_init_on_dir_rh_scc(15)\n # preparation for step 4: align the angular velocity sensor data 
with the global coordinate system\n angular_velocities_aligned_globally = align_sensor_data_globally(data.omega, sensor_orientation)\n # step 4: calculate the stimulation of the cupula\n stimuli = get_scc_stimulation(angular_velocities_aligned_globally, rhscc_init_on_dir)\n # step 5: get the transfer function of the scc with the dynamics provided in the lecture\n scc_trans_fun = get_scc_transfer_fun(0.01, 5)\n # step 6: get the cupular deflection\n max_cupular_deflection = calculate_max_cupular_deflection(scc_trans_fun, stimuli, data.rate)\n # preparation for step 7: align the acceleration sensor data with the global coordinate system\n accelerations_aligned_globally = align_sensor_data_globally(data.acc, sensor_orientation)\n # step 8: calculate the maxmimum left- and rightwards stimulation of the otolithic organ\n max_left_right_stimuli = calculate_otolithic_max_stimuli(accelerations_aligned_globally, 1)\n # step 9: calculate the head orientation\n head_orientations = calculate_head_orientation(angular_velocities_aligned_globally, data.rate)\n\n return max_cupular_deflection, max_left_right_stimuli, head_orientations", "def test03_sample_ray_differential(variants_vec_spectral, origin, direction):\n near_clip = 1.0\n camera = create_camera(origin, direction, near_clip=near_clip)\n\n time = 0.5\n wav_sample = [0.5, 0.33, 0.1]\n pos_sample = [[0.2, 0.1, 0.2], [0.6, 0.9, 0.2]]\n\n ray, spec_weight = camera.sample_ray_differential(time, wav_sample, pos_sample, 0)\n\n # Importance sample wavelength and weight\n wav, spec = mi.sample_rgb_spectrum(mi.sample_shifted(wav_sample))\n\n assert dr.allclose(ray.wavelengths, wav)\n assert dr.allclose(spec_weight, spec)\n assert dr.allclose(ray.time, time)\n\n inv_z = dr.rcp((camera.world_transform().inverse() @ ray.d).z)\n o = mi.Point3f(origin) + near_clip * inv_z * mi.Vector3f(ray.d)\n assert dr.allclose(ray.o, o, atol=1e-4)\n\n\n # Check that the derivatives are orthogonal\n assert dr.allclose(dr.dot(ray.d_x - ray.d, ray.d_y - ray.d), 0, atol=1e-7)\n\n # Check that a [0.5, 0.5] position_sample generates a ray\n # that points in the camera direction\n ray_center, _ = camera.sample_ray_differential(0, 0, [0.5, 0.5], 0)\n assert dr.allclose(ray_center.d, direction, atol=1e-7)\n\n # Check correctness of the ray derivatives\n\n # Deltas in screen space\n dx = 1.0 / camera.film().crop_size().x\n dy = 1.0 / camera.film().crop_size().y\n\n # Sample the rays by offsetting the position_sample with the deltas\n ray_dx, _ = camera.sample_ray_differential(0, 0, [0.5 + dx, 0.5], 0)\n ray_dy, _ = camera.sample_ray_differential(0, 0, [0.5, 0.5 + dy], 0)\n\n assert dr.allclose(ray_dx.d, ray_center.d_x)\n assert dr.allclose(ray_dy.d, ray_center.d_y)", "def distanceXYZ(self, samples=None, dXYZ_factors=None):\n data = self.get_data(traces=['psdXYZ', 'positionXYZ'], samples=samples)\n psdXYZ = data[:, 0:3]\n positionXYZ = data[:, 3:6]\n calibration = self.calibration\n\n distXYZ = distanceXYZ(positionXYZ, psdXYZ=psdXYZ,\n calibration=calibration,\n dXYZ_factors=dXYZ_factors)\n return distXYZ", "def drag(self, points, steps=None, durations=None):\n if durations is None:\n durations = [0.1] * (len(points) - 1)\n\n if steps is None:\n steps = [10] * (len(points) - 1)\n\n if len(points) < 2:\n raise EnvironmentError(\"Need at least 2 points\")\n\n if len(durations) + 1 != len(points):\n raise EnvironmentError(\n \"Durations size should be one less than points size.\")\n\n if len(steps) + 1 != len(points):\n raise EnvironmentError(\"Steps size should be one less than points 
size.\")\n\n self.android_device_driver.drag_start(points[0].x, points[0].y)\n time.sleep(0.5)\n for i in range(len(points) - 1):\n startx = points[i].x\n starty = points[i].y\n endx = points[i + 1].x\n endy = points[i + 1].y\n disx = (endx - startx) / steps[i]\n disy = (endy - starty) / steps[i]\n for j in range(steps[i]):\n self.android_device_driver.drag_move(startx + disx * j,\n starty + disy * j)\n time.sleep(durations[i])\n\n self.android_device_driver.drag_stop(points[-1].x, points[-1].y)", "def linear_move(self, initial_position, final_position):\n if any(initial_position - final_position):\n # The desired position is not the actual position (would make a 'divide by zero' error otherwise)\n\n # Compute directional vector\n dir_vector = final_position - initial_position\n\n # Divide directional vector as a series of vector of norm 10um\n step_vector = 15 * dir_vector/np.linalg.norm(dir_vector)\n\n # Number of sub-directional vector to make\n nb_step = np.linalg.norm(dir_vector) / 15.\n\n # Moving the arm\n for step in range(1, int(nb_step)+1):\n intermediate_position = step * self.inv_mat * step_vector\n self.arm.absolute_move_group(self.inv_mat*initial_position + intermediate_position, [0, 1, 2])\n time.sleep(0.1)\n\n # make final move to desired position\n self.arm.absolute_move_group(self.inv_mat*final_position, [0, 1, 2])\n pass", "def test_squared_moving_integration_args():\n from sleepecg._heartbeat_detection import _squared_moving_integration\n x = np.array([0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0])\n window_length = 10\n\n _squared_moving_integration(x, window_length)\n _squared_moving_integration(x=x, window_length=window_length)\n _squared_moving_integration(x, window_length=window_length)\n _squared_moving_integration(window_length=window_length, x=x)", "def distance(acceleration_linX, acceleration_linY, acceleration_linZ, deltat):\n acceleration_linX = abs(acceleration_linX)\n if -0.05 <= acceleration_linX <= 0.05:\n acceleration_linX = 0.0\n velocityX = integrations.getVelocityX(acceleration_linX, deltat)\n else:\n velocityX = integrations.getVelocityX(acceleration_linX, deltat)\n\n positionX, distanceX = integrations.getPositionX(\n velocityX, acceleration_linX, deltat)\n return positionX, distanceX # , positionY, positionZ", "def distance(XYZ1=np.array([0, 0, 0], dtype='float32'),\n XYZ2=np.array([1, 1, 1], dtype='float32')):\n a=XYZ2-XYZ1\n b=a**2\n c=b.sum()\n return np.sqrt(c)" ]
[ "0.6440305", "0.5357005", "0.53352326", "0.52927643", "0.50602823", "0.50276756", "0.4972673", "0.49217618", "0.49024922", "0.48894984", "0.48508203", "0.48323593", "0.476565", "0.47450444", "0.4708342", "0.4680033", "0.4670589", "0.46641427", "0.46641427", "0.4659771", "0.46344978", "0.45941725", "0.45903632", "0.45763955", "0.457282", "0.4561861", "0.4560182", "0.45528102", "0.45518264", "0.45477924" ]
0.54873776
1
Verbalise punctuation in 'textFilePath'.
def verbalisePunctuation(self):
    for i, strText in enumerate(self.sentencesList):
        #For all punctuation marks
        for regex, value in list(TextRepresentation.PUNCTUATION.items()):
            strText = re.sub(regex, value, strText)
        self.sentencesList[i] = strText
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeOwnPunctuation(self):\n\t\tself.textFile = self.removePunctuation(self.open(self.filePath)).split()", "def clean_text(txt):\n\n for symbol in \"\"\".,'?!()/-:;\"\"\":\n txt = txt.replace(symbol, '')\n txt = txt.lower()\n txt = txt.split()\n return txt", "def cleanupText(path):\n \n text_cleaned = ''\n try:\n f = open(path)\n raw = f.read().lower()\n text = raw\n text_cleaned = text.translate(None, punctuation + digits)\n # print \"\\n Word count before:\" + str(len(text_translated.split())) + \"\\n\"\n # for stop in stop_word:\n # text_translated = text_translated.replace(stop,'')\n # print \"\\n Word count after:\" + str(len(text_translated.split())) + \"\\n\"\n text_cleaned = ' '.join([word for word in text_cleaned.split(' ') if (word and len(word) > 1)])\n \n finally:\n f.close()\n return text_cleaned", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def remove_punctuation(text):\n bad_characters = [\".\", \",\", \";\", \"!\", \"?\", \":\", \"(\", \")\", \"-\", \"/\", \"*\",\n \"' \", \" '\", '\"', \"&\"]\n for bad_character in bad_characters:\n text = text.replace(bad_character, \"\")\n return text.lower()", "def _do_smart_punctuation(self, text):\r\n if \"'\" in text: # guard for perf\r\n text = self._do_smart_contractions(text)\r\n text = self._opening_single_quote_re.sub(\"&#8216;\", text)\r\n text = self._closing_single_quote_re.sub(\"&#8217;\", text)\r\n\r\n if '\"' in text: # guard for perf\r\n text = self._opening_double_quote_re.sub(\"&#8220;\", text)\r\n text = self._closing_double_quote_re.sub(\"&#8221;\", text)\r\n\r\n text = text.replace(\"---\", \"&#8212;\")\r\n text = text.replace(\"--\", \"&#8211;\")\r\n text = text.replace(\"...\", \"&#8230;\")\r\n text = text.replace(\" . . . \", \"&#8230;\")\r\n text = text.replace(\". . .\", \"&#8230;\")\r\n return text", "def remove_punct(self,text):", "def cleanup(text):\n with open(text, 'r') as uncleaned_text:\n no_chapters = re.sub('[A-Z]{3,}', ' ', uncleaned_text.read())\n remove_periods = re.sub('(\\s\\.){4,}', '', no_chapters)\n new_text = re.sub('\\*', '', remove_periods)\n return new_text", "def clean_text(text):\n lowercase = tf.strings.lower(text)\n stripped_html = tf.strings.regex_replace(lowercase, \"<br />\", \" \")\n cleaned_punctuation = tf.strings.regex_replace(\n stripped_html, \"[%s]\" % re.escape(string.punctuation), \"\"\n )\n return cleaned_punctuation", "def remove_punctuation(text):\n import string\n PUNCT_TO_REMOVE = '''!()-[]{};:'\"\\,<>/?@#$%^&*_~'''\n no_punct = \"\"\n for char in text:\n if char not in PUNCT_TO_REMOVE:\n no_punct = no_punct + char\n else:\n no_punct = no_punct + ' '\n return(no_punct)", "def words_from_text(file_with_text):\n import string\n\n text = open(file_with_text, 'r')\n\n words = []\n amount_of_words = 0\n number_different_words = 0\n\n for line in text:\n line = line.replace('-',' ')\n for word in line.split():\n word = word.strip(string.punctuation + string.whitespace)\n word = word.lower()\n if word not in words:\n number_different_words +=1\n words.append(word)\n amount_of_words += 1\n\n\n return (\" This book has a total of %s words. 
It has %s different words !\") % (amount_of_words, number_different_words)", "def text_prepare(text):\r\n\r\n replace_by_space_re = re.compile('[/(){}\\[\\]\\|@,;]')\r\n good_symbols_re = re.compile('[^0-9a-z #+_]')\r\n stopwords_set = set(stopwords.words('english'))\r\n\r\n text = text.lower()\r\n text = replace_by_space_re.sub(' ', text)\r\n text = good_symbols_re.sub('', text)\r\n text = ' '.join([x for x in text.split() if x and x not in stopwords_set])\r\n\r\n return text.strip()", "def remove_punctuation(text):\n return re.sub('[,.?\";:\\-!@#$%^&*()]', '', text)", "def removePunctuation(self, text=None):\n\t\t# Loop through all the punctuation in self.stop_puncs\n\t\tfor punctuation in self.stop_puncs:\n\n\t\t\t# Replace punctuation with leading and trailing spaces\n\t\t\ttext = text.replace(\" \" + punctuation, \" \")\n\t\t\ttext = text.replace(punctuation + \" \", \" \")\n\n\t\t\t# Replace punctuation within the first and last 5 characters of the text\n\t\t\ttext = text[:5].replace(punctuation, \"\") + text[5:]\n\t\t\ttext = text[:-5] + text[-5:].replace(punctuation, \"\")\n\n\t\t\t# Otherwise, remove the punctuation if not in list specified\n\t\t\tif punctuation not in [\".\", \",\", \"-\", \"--\"]:\n\t\t\t\ttext = text.replace(punctuation, \"\")\n\n\t\treturn text", "def normalize_text(text):\n punctuation = '!\"#$%&\\'()*+,./:;<=>?@[\\\\]^_`{|}~'\n return text.lower().strip().translate(None, punctuation)", "def processText(text):\n\n no_punc = [word for word in text.split() if word.isalpha()] # and word not in stopwords.words('english')]\n #removes non-letter characters and only includes words not included in stopwords\n no_punc = \" \".join(no_punc) \n clean_words = nltk.word_tokenize(no_punc) #splits the punctuation marks from the real words\n return clean_words", "def remove_punctuation(text: str) -> str:\n return \"\".join(\n itertools.filterfalse(lambda x: unicodedata.category(x).startswith(\"P\"), text)\n )", "def spacing_punctuation(text):\n for punc in all_punct:\n if punc in text:\n text = text.replace(punc, f' {punc} ')\n return text", "def spacing_punctuation(text):\n for punc in all_punct:\n if punc in text:\n text = text.replace(punc, f' {punc} ')\n return text", "def test_drop_punctuation():\n assert TextCleaner().transform([[\"'test!?\"]])[\"corpus\"][0] == \"test\"", "def stripPunctuation(text):\n exclude = set(string.punctuation)\n clean_text = ''.join(ch for ch in text if ch not in exclude)\n clean_text = clean_text.replace('\\n',' ') # Let's account for newline characters also \n return clean_text.encode('ascii','ignore')", "def process_raw_phrases(file_path):", "def clean(text):\n\n lower_proper = src.utils.nlp.lower_with_proper(text)\n lemmas = src.utils.nlp.lemmatize(lower_proper)\n cleaned = src.utils.nlp.clean_stopwords_punctuation(lemmas)\n return cleaned", "def is_punctuation(text):\n return not (text.lower() in AVRO_VOWELS or\n text.lower() in AVRO_CONSONANTS)", "def remove_punctuation(text):\n text = re.sub(r'[^\\w\\s]', ' ', text)\n return text", "def clean_text(txt):\n s = ''\n for c in txt:\n if c != '.' and c != ',' and c != '!' 
and c != '?':\n s += c\n s = s.lower().split()\n return s", "def parse_text(self, text):\r\n MAXLEN = 100\r\n sentences = []\r\n punct = [\",\",\":\",\";\",\".\",\"–\",\"?\",\"!\",\"(\",\")\"] # Interpunctuation marks\r\n text = text.replace(\"\\r\", \" \").replace(\"\\t\", \" \") # Remove CR and tabs\r\n words = text.split(\" \") if len(text) > MAXLEN else []\r\n sentence = \"\" if len(text) > MAXLEN else text\r\n\r\n # Preprocess list for silence markers\r\n if conf.SilenceMarker in text:\r\n words_new = []\r\n if not words and sentence: # Was too short to be cut initially\r\n words = text.split(\" \")\r\n sentence = \"\"\r\n for w in filter(None, words):\r\n if conf.SilenceMarker not in w.lower():\r\n words_new.append(w)\r\n else:\r\n text_chunks = w.lower().split(conf.SilenceMarker)\r\n for i, part in enumerate(text_chunks):\r\n if part:\r\n words_new.append(part)\r\n if i < len(text_chunks) - 1:\r\n words_new.append(conf.SilenceMarker)\r\n else:\r\n if words_new and conf.SilenceMarker in words_new[-1]:\r\n words_new[-1] += conf.SilenceMarker\r\n else:\r\n words_new.append(conf.SilenceMarker)\r\n words = words_new\r\n\r\n for w in words:\r\n if conf.SilenceMarker in w:\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n sentences.append(w)\r\n sentence = \"\"\r\n elif w[-1] in punct or w[0] in punct: # Encountered punctuation\r\n if w[-1] in punct and (len(sentence) + len(w) + 1 < MAXLEN):\r\n # Word ends with punct and sentence can still be added to\r\n sentences.append(sentence.strip() + \" \" + w.strip())\r\n sentence = \"\" # Save sentence and word, start new sentence\r\n elif w[0] in punct and w[-1] not in punct:\r\n # Word starts with punctuation, like '('\r\n sentences.append(sentence.strip()) # Save current sentence\r\n sentence = w # Start a new sentence with punct and word\r\n else: # word ends with punct and sentence already long enough\r\n sentences.extend([sentence.strip(), w.strip()])\r\n sentence = \"\" \r\n else:\r\n if (len(sentence) + len(w) + 1 < MAXLEN): # Sentence still\r\n sentence += \" \" + w # short enough\r\n else: # Sentence too long\r\n sentences.append(sentence.strip())\r\n sentence = w # Start a new sentence with the word\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n return sentences", "def clean_text(txt):\n\n cleaned_txt = ''\n for character in txt:\n if character not in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVQXWY ': #punctuation\n character = ''\n cleaned_txt += character\n elif character == character.upper(): #uppercase\n character = character.lower()\n cleaned_txt += character\n else:\n cleaned_txt += character\n return cleaned_txt", "def remove_noise(text):\n text1 = re.sub(\"[\\t\\r\\s]\", \" \",text)\n text1 = \" \" + text1\n text2 = re.sub(r\"([ \" + string.punctuation + \"]+)[^a-zA-Z ]+\", \"\\g<1> \", text1)\n return text2", "def remove_punctuation(text):\n return text.translate(None, string.punctuation)" ]
[ "0.7281496", "0.65980756", "0.6584256", "0.654501", "0.6430311", "0.63022417", "0.62523586", "0.6225026", "0.6210525", "0.6191587", "0.6176901", "0.6176755", "0.6172276", "0.61582357", "0.61551625", "0.6148912", "0.61315274", "0.60980105", "0.60980105", "0.60933495", "0.60932773", "0.60869926", "0.607676", "0.6069475", "0.60654074", "0.60646963", "0.6063039", "0.6055284", "0.6035943", "0.6017882" ]
0.71744287
1
Return the type of the underlying document. Raise an exception when unknown.
def _getDocumentType(self):
    fileName, fileExtension = os.path.splitext(self.sourceFileName)
    documentType = None
    for knownType in list(TextRepresentation.KNOWNTYPES.keys()):
        if knownType == fileExtension[1:]:
            documentType = knownType
    if documentType is None:
        raise Exception("Unknown document type: %s" % fileExtension[1:])
    return documentType
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_doc_type(self):\n if not self.is_documented():\n return DocType.none\n if self._rawdoc.is_local():\n return DocType.internal\n return self._rawdoc.get_visibility()", "def document_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"document_type\")", "def DocumentType(self, default=[None]):\n return self.data.get('metadata', {}).get('document_type', default)", "def DocumentType(self, default='article'):\n return self.data.get('document_type', [{}])", "def get_type(self):\n\n return ebooklib.ITEM_DOCUMENT", "def article_type(self):\n abbrv_doctype = self.root.xpath(\".//@docsubtype\").extract_first()\n article_type = DOCTYPE_MAPPING.get(abbrv_doctype)\n return article_type", "def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))", "def getTypeCode(self):\n return _libsbml.SBMLDocument_getTypeCode(self)", "def content_type(self):\n return self.guess_content_type(self.store_key)", "def doc_type(self):\n from corehq.form_processor.backends.sql.dbaccessors import state_to_doc_type\n if self.is_deleted:\n return 'XFormInstance' + DELETED_SUFFIX\n return state_to_doc_type.get(self.state, 'XFormInstance')", "def get_internal_type(self):\n return self._internal_type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type" ]
[ "0.76666296", "0.7492897", "0.7062189", "0.67980164", "0.66945153", "0.66379994", "0.6350302", "0.6309663", "0.6273911", "0.6244093", "0.6189533", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449", "0.6183449" ]
0.7964641
0
Returns the globstrings for getting M1 and M2 files out of the subdirectory; doesn't contain the descent into the subdir anymore. `subdirglob` is a globstring into a subdir relative to the basedir.
def get_glob_strings(subdirglob):
    dirname = path.dirname(subdirglob)
    basename = path.basename(subdirglob)
    assert ((("_M1_" in subdirglob) or ("_M2_" in subdirglob)) or ("_S_" in subdirglob)), \
        ("_M1_ or _M2_ not in subdirglob, cant differentiate between M1 and M2, aborting."
         f"glob: {subdirglob}")
    if ("*" not in subdirglob) and ("_S_" not in basename):
        newbasename = basename.replace("_M2_", "_M1_"), basename.replace("_M1_", "_M2_")
        return path.join(dirname, newbasename[0]), path.join(dirname, newbasename[1])
    elif ("_M1_" or "_M2_") in basename:
        newbasename = basename.replace("_M2_", "_M1_"), basename.replace("_M1_", "_M2_")
        return path.join(dirname, newbasename[0]), path.join(dirname, newbasename[1])
    elif "_S_" in basename:
        return basename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def glob(glob_pattern: str, directoryname: str) -> List[str]:\n matches = []\n for root, dirnames, filenames in os.walk(directoryname):\n for filename in fnmatch.filter(filenames, glob_pattern):\n absolute_filepath = os.path.join(root, filename)\n matches.append(absolute_filepath)\n return matches", "def recursive_glob(rootdir='.', suffix=''):\n return [os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames if filename.endswith(suffix)]", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n return [\n os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames\n if filename.endswith(suffix)\n ]", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n return [\n os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames\n if filename.endswith(suffix)\n ]", "def recursive_glob(self, rootdir='.', suffix=''):\n return [os.path.join(rootdir, filename)\n for filename in sorted(os.listdir(rootdir)) if filename.endswith(suffix)]", "def recursive_glob(rootdir=\".\", suffix=\".tif\"):\r\n return [\r\n os.path.join(looproot,filename)\r\n for looproot, _, filenames in os.walk(rootdir)\r\n for filename in filenames\r\n if filename.endswith(suffix)\r\n ]", "def recursive_glob(stem, file_pattern):\n\n if sys.version_info >= (3, 5):\n return glob(stem + \"/**/\" + file_pattern, recursive=True)\n else:\n # gh-316: this will avoid invalid unicode comparisons in Python 2.x\n if stem == str(\"*\"):\n stem = \".\"\n matches = []\n for root, dirnames, filenames in os.walk(stem):\n for filename in fnmatch.filter(filenames, file_pattern):\n matches.append(path_join_robust(root, filename))\n return matches", "def recursive_glob(path):\n if \"*\" not in path:\n # Glob isn't needed.\n return [path]\n elif \"**\" not in path:\n # Recursive glob isn't needed.\n return path_utils.glob(path)\n else:\n return path_utils.glob(path, recursive=True)", "def rooted_glob(root, glob):\r\n return remove_root(root, sorted(glob2.glob('{root}/{glob}'.format(root=root, glob=glob))))", "def glob1(self, dirname, pattern):\n names = self.listdir(dirname)\n if pattern[0] != '.':\n names = filter(lambda x: x[0] != '.',names)\n return fnmatch.filter(names, pattern)", "def mkglob(fullpaths: list, trim=False) -> str:\n string_list = []\n glob = None\n for fname in fullpaths:\n if trim:\n fname = re.sub(r\"^.*/(.*)$\", r\"\\1\", fname)\n # fname = re.sub(r\"^(.*)\\.fits?(\\.fz)*$\", r\"\\1\", fname)\n fname = re.sub(r\"^([^\\.]*)\\..*$\", r\"\\1\", fname) # trim suffix\n string_list.append(fname)\n logging.debug(\"string_list[]={}\".format(string_list))\n if len(string_list) == 1:\n glob = string_list[0]\n elif len(string_list) > 1:\n # title is longest common substring array\n # joined with *'s to look like a glob pattern\n ss_arr = []\n get_lcs_array(string_list, ss_arr, 0, \"\", 2)\n if ss_arr:\n glob = \"{}\".format(\"*\".join(ss_arr))\n if not re.match(ss_arr[0], string_list[0]):\n glob = \"*{}\".format(glob)\n if not re.search(r\"{}$\".format(ss_arr[-1]), string_list[0]):\n glob = \"{}*\".format(glob)\n return glob", "def _files_in_subdir(self, subdir, pattern, regex):\n all_files = glob(join(subdir, (pattern or '**')), recursive=True)\n all_files = [fp for fp in all_files if isfile(fp)]\n\n if pattern and regex:\n raise ValueError(\"Specify pattern OR regex, not both!\")\n elif pattern:\n files = [fn for fn in glob(join(subdir, pattern), recursive=True)]\n elif regex:\n files = [fn 
for fn in all_files if re.search(regex, fn)]\n else:\n files = all_files\n\n return sorted(files)", "def glob2re(glob: str) -> str: # pylint: disable=too-many-branches\n index = 0\n size = len(glob)\n results: List[str] = []\n\n while index < size:\n char = glob[index]\n index += 1\n\n if char == \"*\":\n if index < size and glob[index] == \"*\":\n index += 1\n if results and results[-1] == \"/\" and index < size and glob[index] == \"/\":\n results.append(\"(.*/)?\")\n index += 1\n else:\n results.append(\".*\")\n else:\n results.append(\"[^/]*\")\n\n elif char == \"?\":\n results.append(\"[^/]\")\n\n elif char == \"[\":\n end_index = index\n while end_index < size and glob[end_index] != \"]\":\n end_index += 1\n\n if end_index >= size:\n results.append(\"\\\\[\")\n\n else:\n characters = glob[index:end_index].replace(\"\\\\\", \"\\\\\\\\\")\n index = end_index + 1\n\n results.append(\"[\")\n\n if characters[0] == \"!\":\n results.append(\"^/\")\n characters = characters[1:]\n elif characters[0] == \"^\":\n results.append(\"\\\\\")\n\n results.append(characters)\n results.append(\"]\")\n\n elif char in \"{}/\":\n results.append(char)\n\n else:\n results.append(re.escape(char))\n\n return \"\".join(results)", "def to_path_globs(self, glob_match_error_behavior: GlobMatchErrorBehavior) -> PathGlobs:\n return self._generate_path_globs((*self.includes, *self.ignores), glob_match_error_behavior)", "def glob(self, glob_expr: str) -> Iterator[NicePath]:\n for path in self._root.glob(glob_expr):\n relative_path = path.relative_to(self._root)\n if not self._match_include(relative_path):\n continue\n if self._match_exclude(relative_path):\n continue\n\n yield NicePath(path)", "def globs(cls, *globspecs, **kw):\r\n root = kw.pop('root', os.curdir)\r\n def relative_glob(globspec):\r\n for fn in glob.glob(os.path.join(root, globspec)):\r\n yield os.path.relpath(fn, root)\r\n def combine(files, globspec):\r\n return files ^ set(relative_glob(globspec))\r\n return cls(lambda: reduce(combine, globspecs, set()))", "def _glob_files(directories, extensions):\n pwd = Path(__file__).resolve().parent\n open3d_root_dir = pwd.parent\n\n file_paths = []\n for directory in directories:\n directory = open3d_root_dir / directory\n for extension in extensions:\n extension_regex = \"*.\" + extension\n file_paths.extend(directory.rglob(extension_regex))\n file_paths = [str(file_path) for file_path in file_paths]\n file_paths = sorted(list(set(file_paths)))\n return file_paths", "def glob(self):\n self._deduplicate()\n result = []\n for entry in self._entries:\n pp = entry.posix_path()\n if GLOBBABLE_REGEX.search(pp):\n try:\n globs = glob.glob(entry.posix_path())\n result += globs\n except re.error:\n result.append(pp)\n else:\n result.append(pp)\n self._entries = [Path(g) for g in result]\n self._clean = False\n self._current = 0", "def recursive_glob(self, rootdir='.', suffix=''):\n\n valid_image_files = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_path = os.path.join(looproot, filename)\n label_path = image_path.replace(\"images\", \"labels\").replace(\"bmp\", \"txt\")\n if os.path.isfile(label_path):\n valid_image_files.append(image_path)\n\n return valid_image_files", "def GetInputFilenameGlob(pattern):\n if not indir:\n return glob.glob(fname)\n files = []\n for dirname in indir:\n pathname = os.path.join(dirname, pattern)\n files += glob.glob(pathname)\n return sorted(files)", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths 
= []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_paths.append(os.path.join(looproot, filename))\n return image_paths", "def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_paths.append(os.path.join(looproot, filename))\n return image_paths", "def zglobs(cls, *globspecs, **kw):\r\n root = kw.pop('root', os.curdir)\r\n patterns = [(os.path.basename(spec).startswith('*'),\r\n re.compile(fnmatch_translate_extended(spec))) for spec in globspecs]\r\n\r\n def matcher(path):\r\n for no_hidden, pattern in patterns:\r\n # Ignore hidden files when globbing wildcards.\r\n if not (no_hidden and os.path.basename(path).startswith('.')):\r\n if pattern.match(path):\r\n return True\r\n return False\r\n\r\n return cls(lambda: set(cls._do_rglob(matcher, allow_dirs=True, root=root, **kw)))", "def rglobs(cls, *globspecs, **kw):\r\n root = kw.pop('root', os.curdir)\r\n\r\n def matcher(path):\r\n for globspec in globspecs:\r\n # Ignore hidden files when globbing wildcards.\r\n if not (globspec.startswith('*') and os.path.basename(path).startswith('.')):\r\n if fnmatch.fnmatch(path, globspec):\r\n return True\r\n return False\r\n\r\n return cls(lambda: set(cls._do_rglob(matcher, allow_dirs=False, root=root, **kw)))", "def expand_globpaths(globpaths, cwd=None):\n with cd(cwd):\n paths = sum((recursive_glob(p) for p in globpaths), [])\n return expand_paths(paths, cwd)", "def to_path_globs(self, glob_match_error_behavior: GlobMatchErrorBehavior) -> PathGlobs:\n return self._generate_path_globs(\n (*self.file_includes, *self.dir_includes, *self.ignores), glob_match_error_behavior\n )", "def glob_files(root_dir, includes=None, excludes=None, gcdtignore=None):\n # docu here: https://docs.python.org/3/library/pathlib.html\n if not includes:\n includes = ['**']\n else:\n # we need to iterate multiple times (iterator safeguard)\n includes = list(includes)\n\n if excludes:\n # we need to iterate multiple times (iterator safeguard)\n excludes = list(excludes)\n\n if gcdtignore:\n spec = pathspec.PathSpec.from_lines('gitwildmatch', gcdtignore)\n log.debug('gcdtignore patterns: %s', gcdtignore)\n\n while includes:\n pattern = includes.pop(0)\n # for compatibility with std. 
python Lib/glop.py:\n # >>>If recursive is true, the pattern '**' will match any files and\n # zero or more directories and subdirectories.<<<\n if pattern.endswith('**'):\n pattern += '/*'\n matches = list(Path(root_dir).glob(pattern))\n\n for m in matches:\n if m.is_dir():\n continue\n\n # some discussion on how to convert a pattern into regex:\n # http://stackoverflow.com/questions/27726545/python-glob-but-against-a-list-of-strings-rather-than-the-filesystem\n pp = PurePath(m)\n\n # check if m is contained in remaining include patterns\n # (last one wins)\n if includes and any(map(lambda p: pp.match(p), includes)):\n continue\n\n # check if m is contained in exclude pattern\n if excludes and any(map(lambda p: pp.match(p), excludes)):\n continue\n\n # check if m is contained in gcdtignore\n if gcdtignore and spec.match_file(str(m)):\n log.debug('Skipped file \\'%s\\' due to gcdtignore pattern',\n str(m.relative_to(root_dir)))\n continue\n\n yield (str(m), str(m.relative_to(root_dir)))", "def get_list_of_file_paths_in_dir(directory, substrings=None, subfolders=False, regex=False):\n\n if subfolders == True:\n # Look through all subfolders too\n list_subdir = get_list_of_subdir_in_dir(directory)\n for this_subdir in list_subdir:\n yield from get_list_of_file_paths_in_dir(\n directory=this_subdir, \n substrings=substrings, \n subfolders=False, # Already considered\n regex=regex\n )\n\n # ⭐️ Get the file paths and names\n list_of_file_paths = glob.glob(os.path.join(directory,'*'))\n\n if (regex == True) & isinstance(substrings, list):\n raise Exception('If you want to match regex, the substrings should be of str type instead of list')\n if (regex == True) & isinstance(substrings, str):\n for f in list_of_file_paths:\n if re.search(substrings, os.path.basename(f)):\n yield f\n if (regex == False) & (substrings != None):\n # Make sure the substring is of list-type\n if isinstance(substrings, str):\n substrings = list(substrings.split(' '))\n for f in list_of_file_paths:\n if all(this_substring.upper() in f.upper() for this_substring in substrings):\n yield f\n if (regex == False) & (substrings == None):\n for f in list_of_file_paths:\n yield f", "def list_rpaths(rootdir, subdir, suffixes=None, prefixes=None):\n\n flist = []\n suffixes_len = len(suffixes) if suffixes is not None else 0\n prefixes_len = len(prefixes) if prefixes is not None else 0\n with change_cwd(os.path.join(rootdir, subdir)):\n for root, dirs, files in os.walk('.'):\n for fname in files:\n rpath = os.path.normpath(os.path.join(root, fname))\n # Exclude files ending with one of the suffixes.\n if suffixes_len:\n if (len(list(itertools.takewhile(lambda x: not x or\n not rpath.endswith(x), suffixes))) !=\n suffixes_len):\n continue\n # Exclude files starting with one of the prefixes.\n if prefixes_len:\n if (len(list(itertools.takewhile(lambda x: not x or\n not rpath.startswith(x), prefixes))) !=\n prefixes_len):\n continue\n flist.append(os.path.join(subdir, rpath))\n return flist", "def subdir_findall(dir, subdir):\n strip_n = len(dir.split('/'))\n path = '/'.join((dir, subdir))\n return ['/'.join(s.split('/')[strip_n:]) for s in setuptools.findall(path)]" ]
[ "0.5916455", "0.5887615", "0.58773214", "0.58773214", "0.57660085", "0.57416576", "0.5612709", "0.5590412", "0.5584732", "0.55376524", "0.5522868", "0.5417981", "0.54049164", "0.53778815", "0.53727657", "0.53702134", "0.5357846", "0.53505063", "0.5350162", "0.5341075", "0.5240297", "0.5240297", "0.5237227", "0.52215236", "0.52013886", "0.5198291", "0.5146661", "0.5066137", "0.5044232", "0.50247246" ]
0.7825845
0
Splits the filenames into those from Telescope M1 and M2
def split_filenames(filenames):
    fm1 = [fname for fname in filenames if "_M1_" in fname]
    fm2 = [fname for fname in filenames if "_M2_" in fname]
    return fm1, fm2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_into_telescopes(fnames):\n if isinstance(fnames, str):\n fnames = glob_and_check(fnames)\n fnames1 = [fname for fname in fnames if \"_M1_\" in fname]\n fnames2 = [fname for fname in fnames if \"_M2_\" in fname]\n return fnames1, fnames2", "def splitFilename(filename):\n\n if filename[-4:] == '.rpm':\n filename = filename[:-4]\n \n archIndex = filename.rfind('.')\n arch = filename[archIndex+1:]\n\n relIndex = filename[:archIndex].rfind('-')\n rel = filename[relIndex+1:archIndex]\n\n verIndex = filename[:relIndex].rfind('-')\n ver = filename[verIndex+1:relIndex]\n\n epochIndex = filename.find(':')\n if epochIndex == -1:\n epoch = ''\n else:\n epoch = filename[:epochIndex]\n \n name = filename[epochIndex + 1:verIndex]\n return name, ver, rel, epoch, arch", "def split3 (filename):\n directory, basename = os.path.split (filename)\n basename, extension = os.path.splitext (basename)\n return directory, basename, extension", "def split(self):\n if(self.back == 'y'):\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1\n backNames = [self.file_path + str(num) + 'b' for num in range(len(files))]\n for num,file in enumerate(files):\n open(backNames[num],'w').write(file)\n else:\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1", "def filenameSplit (p):\n\tfrom os.path import split as splitPath, splitdrive, splitext\n\t\n\tsplt = splitPath (p)\n\tdisk,dir_ = splitdrive(splt[0])\n\ttry:\n\t\tif disk[1] != \":\":\n\t\t\traise IndexError\n\texcept IndexError:\n\t\tdisk,dir_ = \"\", splt[0]\n\tname,ext = splitext(splt[1])\n\treturn disk,dir_,name,ext", "def split_name(filename):\n # *********** My filename are in the format ./CaAl2Si2O8_T3_nvt_a12.5.outcar.msd.dat\n # ******* so I can split their name with _ and take the compound and T from their name\n filename = filename.strip('./')\n temperature = filename.split('_')[1]\n acell = filename.split('.outcar')[0].split('_')[3].strip('a')\n return temperature, acell", "def split(self, f):\n x = os.path.split(f)\n subjectid = os.path.split(x[-2])[-1]\n imagefile = x[-1]\n return (subjectid, imagefile)", "def _splitzipext(self, filename):\n\n if self._iszip(filename):\n return os.path.splitext(filename)\n else:\n return filename, None", "def split_file_name(file, dataset_type='ycb'):\n dirname, filename = osp.split(file)\n filename_without_ext, ext = osp.splitext(filename)\n\n if dataset_type == 'ObjectNet3D':\n category_name = dirname.split(\"/\")[-2]\n idx = dirname.split(\"/\")[-1]\n else: # ycb\n category_name = dirname.split(\"/\")[-1]\n idx = None\n return dirname, filename, category_name, idx", "def filename_split(path):\n\tdirectory = os.path.dirname(path)\n\tfilename, extension = os.path.splitext(os.path.basename(path))\n\treturn directory, filename, extension", "def split_name(filename):\n # *********** My filename are in the format ./CaAl2Si2O8_T3_nvt_a12.5.\n # ******* so I can split their name with _ and take the compound and T from their name\n filename = filename.strip('./')\n temperature = str(int(float(filename.split('_')[1].strip('T'))*1000))\n acell = filename.split('_')[3].split('.outcar')[0].strip('a')\n return temperature, acell", "def file_splitter(filename):\n filename_pieces = 
filename.split(delimiter)\n\n # Remove the last file piece and split file extension\n new_values = filename_pieces[-1].split('.')\n filename_pieces.pop(-1)\n for value in new_values:\n filename_pieces.append(value)\n\n return filename_pieces", "def splitTransform(self):\n\t\t#path_merge = \"transform\"\n\t\t#path_train = \"transform/data/\"\n\t\t#path_label = \"transform/label/\"\n\t\tpath_merge = \"train/merge\"\n\t\tpath_train = \"train/image\"\n\t\tpath_label = \"train/label\"\n\t\ttrain_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n\t\tfor imgname in train_imgs:\n\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\timg = cv2.imread(imgname)\n\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\timg_label = img[:,:,0]\n\t\t\tcv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n\t\t\tcv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)", "def tokenize_files(paths, source_root, destination_root):\n for p, path in enumerate(paths):\n # CHANGE AND CREATE PATHS\n if not os.path.exists(destination_root):\n os.makedirs(destination_root)\n new_path = path.replace(source_root, destination_root) \n repertoire = '/'.join(new_path.split('/')[:-1])\n title = new_path.split('/')[-1]\n # CREATE THE NEW ARBORESCENCE\n directory = repertoire.split('/')\n for d in range(1, len(directory)):\n directory[d] = '/'.join([directory[d-1], directory[d]])\n if not os.path.exists(directory[d]):\n os.makedirs(directory[d])\n # WRITE TOKENIZED FILES\n with open('{}/{}'.format(repertoire, title), 'w') as tokenized_txt:\n\n tokenized_txt.write(\"\\n\".join(txt_to_sentences(path)))\n tokenized_txt.close()\n print('{}/{}'.format(repertoire, title))", "def get_files(self):\n def _get_files_by_names(files, name_set, postfix):\n ret = []\n for f in files: \n name = osp.basename(f).split(\"_%s\" % postfix)[0]\n if name in name_set:\n ret.append(f)\n return ret\n\n frame1_files = sorted(glob.glob(osp.join(self.root, 'images', \"*_pre_disaster*\")))\n frame2_files = sorted(glob.glob(osp.join(self.root, \"images\", \"*_post_disaster*\")))\n label_files = sorted(glob.glob(osp.join(self.root, \"masks\", \"*_change*\")))\n assert len(frame1_files) == len(frame2_files) == len(label_files), \\\n \"%d, %d, %d\" % (len(frame1_files), len(frame2_files), len(label_files))\n\n file_names = [osp.basename(f).split(\"_pre\")[0] for f in frame1_files]\n file_names = sorted(list(set(file_names)))\n if self.isTrain:\n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[0]\n else: \n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[1]\n self.frame1_files = _get_files_by_names(frame1_files, name_set, 'pre')\n self.frame2_files = _get_files_by_names(frame2_files, name_set, 'post')\n self.label_files = _get_files_by_names(label_files, name_set, 'change')", "def get_test_train_files_split() -> str:\n pos_files_list = [os.path.join(POS_DIR_PATH, file_name) for file_name in os.listdir(POS_DIR_PATH)]\n neg_files_list = [os.path.join(NEG_DIR_PATH, file_name) for file_name in os.listdir(NEG_DIR_PATH)]\n\n test_files_list = []\n random_files_index = random.sample(range(len(pos_files_list)), NUM_POS_TEST_FILES)\n test_pos_files_list = [pos_files_list[i] for i in random_files_index] # pos test files list\n train_pos_file_list = [pos_files_list[i] for i in range(len(pos_files_list)) if i not in random_files_index] # pos train file list\n\n random_files_index = random.sample(range(len(neg_files_list)), NUM_NEG_TEST_FILES)\n test_neg_files_list = 
[neg_files_list[i] for i in random_files_index]\n train_neg_file_list = [neg_files_list[i] for i in range(len(neg_files_list)) if i not in random_files_index] # pos train file list\n\n test_files_list = test_pos_files_list + test_neg_files_list\n train_files_list = train_pos_file_list + train_neg_file_list\n\n random.shuffle(test_files_list)\n random.shuffle(train_files_list)\n\n return test_files_list, train_files_list", "def split_filename(path):\n filename = os.path.basename(path)\n name, extension = os.path.splitext(filename)\n region = name.split('.')[0]\n\n return region, name, extension", "def processed_file_names(self):\n # For 'trainval', we use files from 'train' and 'val' to save\n # memory\n if self.stage == 'trainval' and self.val_mixed_in_train:\n return [\n osp.join('train', self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n if self.stage == 'trainval':\n return [\n osp.join(s, self.pre_transform_hash, f'{w}.h5')\n for s in ('train', 'val')\n for w in self.all_cloud_ids[s]]\n return [\n osp.join(self.stage, self.pre_transform_hash, f'{w}.h5')\n for w in self.cloud_ids]", "def splitext( filename ):\n index = filename.find('.')\n if index == 0:\n index = 1+filename[1:].find('.')\n if index == -1:\n return filename, ''\n return filename[:index], filename[index:]\n return os.path.splitext(filename)", "def get_files():\n\n img_dir = '../ADE20K_2016_07_26/full_data/images/validation/'\n sem_dir = '../ADE20K_2016_07_26/full_data/annotations/validation/'\n ins_dir = '../ADE20K_2016_07_26/full_data/annotations_instance/validation/'\n\n img_files = os.listdir(img_dir)\n sem_files = os.listdir(sem_dir)\n ins_files = os.listdir(ins_dir)\n \n img_files = [ os.path.join(img_dir,item) for item in img_files ]\n sem_files = [ os.path.join(sem_dir,item) for item in sem_files ]\n ins_files = [ os.path.join(ins_dir,item) for item in ins_files ]\n \n img_files.sort()\n sem_files.sort()\n ins_files.sort()\n \n return img_files, sem_files, ins_files", "def file_parts(file_path):\n\n base_path, tail = os.path.split(file_path)\n name, ext = os.path.splitext(tail)\n\n return base_path, name, ext", "def splitext_no_dot(filename):\n name, ext = os.path.splitext(filename)\n ext.strip('.')\n return name, ext.strip('.')", "def split_ext(filepath):\n\t(fn, ext) = os.path.splitext(filepath)\n\tif ext=='.gz':\n\t\t(fn, ext) = os.path.splitext(fn)\n\t\text += '.gz'\n\treturn (fn, ext)", "def parse_tmp_to_final_filepath_map_file(lines):\r\n infiles_lists = []\r\n out_filepaths = []\r\n for line in lines:\r\n fields = line.split()\r\n infiles_lists.append(fields[:-1])\r\n out_filepaths.append(fields[-1])\r\n return infiles_lists, out_filepaths", "def parse_filenames(filenames):\n \n for fn in filenames:\n dirname, basename = path.split(fn)\n subject_visit = basename[:7]\n visit = basename[5:7]\n yield dirname, basename, subject_visit, visit", "def split_file_name(file_path):\n file_name = os.path.splitext(file_path)[0]\n file_name = os.path.split(file_name)[1]\n\n return file_name", "def clean_file(filesnames_list, file_type): # so now not needed.\r\n global files_list\r\n files_list = []\r\n global ft_list\r\n ft_list = []\r\n for line in filesnames_list:\r\n s, fileType = line.split('.') # split off file_type here\r\n print(s)\r\n files_list.append(s)\r\n ft_list.append(fileType)\r\n print(files_list)\r\n return (files_list)", "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = 
self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\tsavedir = path_train + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)\n\t\t\tsavedir = path_label + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)", "def separate(input_paths):\n sep_paths = {\".csv\": [], \".xls\": [], \".xlsx\": [], \"del\": [], \"out\": []}\n for path in input_paths:\n file_name, file_ext = os.path.splitext(os.path.basename(path))\n sep_paths[file_ext].append(path)\n return sep_paths", "def splitFn(fn):\n\n (dir, bn) = op.split(fn)\n\n fidx = bn.find(opts.separator)\n if fidx != -1:\n # found separator, add as an alt repn\n base = bn[ :fidx ]\n (repn, ext) = splitext(bn[ fidx + len(opts.separator): ])\n\n else:\n # didn't find separator, split using extension\n (base, ext) = splitext(bn)\n repn = ''\n return (dir, base, repn, ext)" ]
[ "0.6869177", "0.6430804", "0.6268593", "0.6030786", "0.60156405", "0.6013226", "0.59532857", "0.5944488", "0.5916908", "0.5900731", "0.5892232", "0.58632034", "0.5850609", "0.57814455", "0.5769506", "0.57669324", "0.5765677", "0.57340425", "0.5681897", "0.56788385", "0.56778765", "0.56776416", "0.56330985", "0.5614889", "0.5611501", "0.5608167", "0.5596464", "0.55953395", "0.5593687", "0.55930734" ]
0.76343876
0
Returns the complete directory of the globstr when given basedir and the globstr relative to basedir.
def get_dir_from_glob(basedir, globstr): directory = path.abspath(basedir) + path.dirname(globstr) directory = path.join(directory, "") return directory
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getBaseFolder(globalsDict=None):\n globalsDictHere = globalsDict or globals()\n baseFolder = \"\"\n if globalsDictHere['__name__'] == \"__main__\":\n baseFolder = os.path.split(sys.argv[0])[0]\n # print(('baseFolder from argv: %s'% baseFolder))\n elif globalsDictHere['__file__']:\n baseFolder = os.path.split(globalsDictHere['__file__'])[0]\n # print(('baseFolder from __file__: %s'% baseFolder))\n if not baseFolder or baseFolder == '.':\n baseFolder = os.getcwd()\n # print(('baseFolder was empty, take wd: %s'% baseFolder))\n return baseFolder", "def normalize_base_dir(base_dir: Optional[str]) -> str:\n if base_dir is None:\n base_dir = os.path.abspath(\"\")\n elif not is_absolute_path(base_dir):\n base_dir = os.path.abspath(base_dir)\n while base_dir != '/' and base_dir.endswith('/'):\n base_dir = base_dir[:-1]\n return base_dir", "def _get_basedir(datadir, target_genome):\n genome_dir = os.path.join(datadir, \"genomes\")\n for dirname in glob.glob(os.path.join(genome_dir, \"*\", \"*\")):\n if dirname.endswith(\"/%s\" % target_genome):\n return dirname", "def becv_dir(*arg):\n return _path.join(BASE_DIR, *arg)", "def find_base_dir(dirname):\n def check_dirname(path, subdirname):\n if os.path.isdir(os.path.join(path, subdirname)):\n try:\n int(subdirname)\n return True\n except ValueError:\n return False\n else:\n return False\n\n path = os.path.abspath(os.path.join(os.curdir, dirname))\n if os.path.isdir(path):\n if len(os.listdir(path)) > 0:\n dirs = [int(i) for i in os.listdir(path) if check_dirname(path, i)]\n found_path = \"{}/{:03d}\".format(path, max(dirs))\n else:\n found_path = path\n else:\n found_path = os.curdir\n return found_path", "def base_dir(path=None, base=None, max_levels=100):\n path = path or _get_caller_path()\n path, children = _repo_dir_and_children(path, max_levels=max_levels)\n if path and base:\n # Explicit base\n return op.join(path, base)\n elif path and children:\n if children[0] in ['data', 'models', 'reports', 'src']:\n # The repo_dir IS the data science dir, so just return the repo_dir\n return path\n else:\n # Implicit base\n return op.join(path, children[0])\n else:\n # Not found\n return None", "def basepath(*args):\n return join(dirname(__file__), '../../', *args)", "def setBasedir(self, basedir):\n if not (isinstance(basedir, list) or isinstance(basedir, tuple)):\n basedir = [basedir]\n new_base = []\n for b in basedir:\n b = os.path.abspath(b)\n if b[0] != '\\\\':\n b = b.replace('\\\\', '/')\n b = b[0].lower() + b[1:]\n if b not in new_base:\n new_base.append(b)\n self._basedir = new_base\n self.update()", "def set_base_dir(dirname=None, sel_flag=False, out_dir=None):\n # def check_dirname(path, subdirname):\n # if os.path.isdir(os.path.join(path, subdirname)):\n # try:\n # int(subdirname)\n # return True\n # except ValueError:\n # return False\n # else:\n # return False\n\n if out_dir == None and dirname != None:\n path = os.path.abspath(os.path.join(os.curdir, dirname))\n elif out_dir != None and dirname == None:\n path = os.path.abspath(out_dir)\n elif out_dir == None and dirname == None:\n path = os.path.abspath(os.curdir)\n else:\n path = os.path.join(os.path.abspath(out_dir), dirname)\n\n if os.path.isdir(path):\n dirs = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))]\n dirnums = [int(d) for d in dirs if d.isdigit()]\n if len(dirnums) > 0:\n n_top = max(dirnums)\n if sel_flag:\n new_path = \"{}/{:03d}\".format(path, n_top)\n else:\n new_path = \"{}/{:03d}\".format(path, n_top + 1)\n else:\n new_path = 
\"{}/{:03d}\".format(path, 1)\n else:\n new_path = \"{}/001\".format(path)\n\n return new_path", "def get_dir(base, *args):\n if not os.path.isdir(base):\n tf.logging.fatal(\"%s path does not exist\", base)\n raise FileNotFoundError(\"{} does not exist\".format(base))\n\n path = os.path.join(base, *args)\n os.makedirs(path, exist_ok=True)\n\n return path", "def base_dir(context):\n return '{}'.format(os.getcwd())", "def relpath(targpath: str, basepath: str='') -> str:\n pass", "def test_base_dir(self):\n old_base_dir = self.path_translator.BASE_REAL_DIR\n self.path_translator.BASE_REAL_DIR = \"/tmp/study\"\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1\".format(self.search.instance))]\n self.assertEqual(result, expected)\n self.path_translator.BASE_REAL_DIR = old_base_dir", "def get_base_dir(config: Mapping[str, Any]) -> str:\n return normalize_base_dir(config.get(\"base_dir\"))", "def relpath(path, dir = '.'):\n\n\tapath = os.path.abspath(os.path.normpath(path))\n\tadir = os.path.abspath(os.path.normpath(dir))\n\tif apath == adir:\n\t\treturn(os.curdir)\n\n\tcommonpath = os.path.commonprefix((apath, adir))\n\tuniqpath = adir[len(commonpath):]\n\n\tif len(uniqpath) == 0:\n\t\treturn(path)\n\n\tif uniqpath.startswith(os.sep):\n\t\tuniqpath = uniqpath[1:]\n\tlenuniq = len(uniqpath.split(os.sep))\n\treturn os.path.normpath(os.path.join(os.sep.join(lenuniq * [os.pardir]), apath[len(commonpath):]))", "def base_dir(self, value):\n pass", "def subOfPresetFilesDir(subdir: str) -> str:\n return os.path.join(os.getcwd(), _basePresetFilesDir, subdir)", "def get_sub_dir_for_saving(base_dir):\n num_sub_dirs = sum(os.path.isdir(os.path.join(base_dir, el))\n for el in os.listdir(base_dir))\n\n sub_dir_to_save_to_name = str(num_sub_dirs)\n sub_dir_to_save_to_name = sub_dir_to_save_to_name.zfill(4)\n\n sub_dir_to_save_to = os.path.join(base_dir, sub_dir_to_save_to_name)\n os.mkdir(sub_dir_to_save_to)\n\n return sub_dir_to_save_to", "def relDir(self, cwd=None, root=None):\n return os.path.dirname(self.relName(cwd, root)) or \".\"", "def _get_org_base_dir(self, org_id):\n return self._get_persistent_mpe_dir().joinpath(org_id)", "def relative_base(base):\n return as_base(base).lstrip('/')", "def get_directory(self, subdir=None):\n path = settings.SUBMISSION_DIR / str(self.assignment.id) / str(self.id)\n if subdir:\n path = path / subdir\n\n return path", "def getIRODSdir(self, inirodsdir, subname):\n tokens = inirodsdir.split('/')\n irodsdir=inirodsdir if tokens[-1]==subname else '/'.join([inirodsdir,subname])\n return irodsdir", "def get_all_dirs(dirpath, base_dir=None):\n\tif not base_dir:\n\t\tpost = os.path.normpath(dirpath)\n\telif base_dir in dirpath:\n\t\t(pre, post) = dirpath.split(os.path.normpath(base_dir))\n\t\tpost = os.path.normpath(post)\n\telse:\n\t\treturn\n\tdirs = []\n\t(head, tail) = os.path.split(post)\n\twhile tail:\n\t\tdirs.append(tail)\n\t\t(head, tail) = os.path.split(head)\n\tdirs.reverse()\n\treturn dirs", "def relpath(target, base_path):\r\n\r\n base_path = normcase(abspath(normpath(base_path)))\r\n target = normcase(abspath(normpath(target)))\r\n\r\n if base_path == target:\r\n return '.'\r\n\r\n # On the windows platform the target may be on a different drive.\r\n if splitdrive(base_path)[0] != splitdrive(target)[0]:\r\n return None\r\n\r\n 
common_path_len = len(commonpath(base_path, target))\r\n\r\n # If there's no common prefix decrease common_path_len should be less by 1\r\n base_drv, base_dir = splitdrive(base_path)\r\n if common_path_len == len(base_drv) + 1:\r\n common_path_len -= 1\r\n\r\n # if base_path is root directory - no directories up\r\n if base_dir == os.sep:\r\n dirs_up = 0\r\n else:\r\n dirs_up = base_path[common_path_len:].count(os.sep)\r\n\r\n ret = os.sep.join([os.pardir] * dirs_up)\r\n if len(target) > common_path_len:\r\n ret = path_join(ret, target[common_path_len + 1:])\r\n\r\n return ret", "def get_default_download_dir(self, *subdirs):\r\n # Look up value for key \"path\" in the config\r\n path = self.get_config_value(self.CONFIG_NAME_PATH)\r\n\r\n # If not set in config, default to present working directory\r\n if path is None:\r\n return os.getcwd()\r\n\r\n return os.path.join(path, *subdirs)", "def get_parent_directory(src: str) -> str:\n return src[: src.rfind(os.path.sep)]", "def relpath(target, base=os.curdir):\r\n\r\n if not os.path.exists(target):\r\n raise OSError, 'Target does not exist: '+target\r\n\r\n if not os.path.isdir(base):\r\n raise OSError, 'Base is not a directory or does not exist: '+base\r\n\r\n base_list = (os.path.abspath(base)).split(os.sep)\r\n target_list = (os.path.abspath(target)).split(os.sep)\r\n\r\n # On the windows platform the target may be on a completely\r\n # different drive from the base.\r\n if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:\r\n raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()\r\n\r\n # Starting from the filepath root, work out how much of the\r\n # filepath is shared by base and target.\r\n for i in range(min(len(base_list), len(target_list))):\r\n if base_list[i] <> target_list[i]: break\r\n else:\r\n # If we broke out of the loop, i is pointing to the first\r\n # differing path elements. If we didn't break out of the\r\n # loop, i is pointing to identical path elements.\r\n # Increment i so that in all cases it points to the first\r\n # differing path elements.\r\n i+=1\r\n\r\n rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]\r\n if rel_list:\r\n return os.path.join(*rel_list)\r\n else:\r\n return \"\"", "def bestrelpath(self, dest):\n try:\n if self == dest:\n return os.curdir\n base = self.common(dest)\n if not base: # can be the case on windows\n return str(dest)\n self2base = self.relto(base)\n reldest = dest.relto(base)\n if self2base:\n n = self2base.count(self.sep) + 1\n else:\n n = 0\n lst = [os.pardir] * n\n if reldest:\n lst.append(reldest)\n target = dest.sep.join(lst)\n return target\n except AttributeError:\n return str(dest)", "def relative_path(base, target):\r\n common, base_tail, target_tail = split_common(base, target)\r\n #print \"common:\", common\r\n #print \"base_tail:\", base_tail\r\n #print \"target_tail:\", target_tail\r\n r = len(base_tail) * [os.pardir] + target_tail\r\n if r:\r\n return os.path.join(*r)\r\n else:\r\n return os.curdir" ]
[ "0.629505", "0.6257941", "0.6153911", "0.61254823", "0.6059204", "0.5946701", "0.5944693", "0.59154135", "0.585148", "0.58466715", "0.5784248", "0.5737764", "0.57186085", "0.5708487", "0.5701268", "0.5697349", "0.5693046", "0.5662574", "0.5574513", "0.54947746", "0.5486882", "0.5443199", "0.5438802", "0.5435914", "0.54313606", "0.54102933", "0.5409143", "0.54083526", "0.54069966", "0.53786826" ]
0.86459345
0
extracts the mergecols from the _S_ root (superstarglob) files and merges the energies
def merge_wrapper(processdir, basedir, starglob, superstarglob, calibrootglob, njobs=2, invert=False): for glob in [starglob, superstarglob, calibrootglob]: assert path.dirname(glob), \ f"Glob : {glob} should be/contain a subdirectory" superstarGlobNew = get_glob_strings(superstarglob) calibrootGlob1, calibrootGlob2 = get_glob_strings(calibrootglob) superstardir = get_dir_from_glob(processdir, superstarglob) calibdir = get_dir_from_glob(basedir, calibrootglob) starglob = processdir + starglob # ssmcolfnames = converter(superstardir, # globstr1=superstarGlobNew, # globstr2=superstarGlobNew, # njobs=42, # mergecolsonly=True) # yecho("SuperStarfiles done.") # tofiltercalibglob = converter(processdir, # globstr1=calibrootGlob1, # globstr2=calibrootGlob2, # njobs=42, # mergecolsonly=False) # yecho("Extracting done.") tofiltercalibglob = "./csv/*.csv" ssmcolfnames = glob_and_check("./superstar/mergecols/*.csv") yecho("Removing events.") if njobs > 1: splitcalib = split_by_dates(tofiltercalibglob) splitstar = split_by_dates(starglob) splitss = split_by_dates(ssmcolfnames) # needs filename output assert len(splitcalib) == len(splitstar) == len(splitss), "only works the first time when no calibfiles got moved, for everything else this needs a new function with more logic" Parallel(n_jobs=njobs)\ (delayed(single_remove_events)(calibglob, starglob, ssglob, njobs, invert) for calibglob, starglob, ssglob in zip(splitcalib, splitstar, splitss)) # filteredFiles = [f for arr in filteredFiles for f in arr] else: check_telescope_files(rootdir=None, globstr1=ssmcolfnames, globstr2=calibmcolfnames, replacer=("_Y_", "_I_")) remover = EventRemover(tofiltercalibglob=tofiltercalibglob, starglob=starglob, superstarmcolglob=ssmcolfnames) remover.remove_events() filteredFiles = remover.outfilenames yecho("Removed events that get thrown out during image cleaning and superstar processing and wrote the merged runs to:") yecho(f"{path.basename(filteredFiles[0])}") # return filteredFiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def datamerge_run(filenames, outdir, roc_cols):\n \n tbldict = collect2dict(filenames, outdir)\n tbldict = cogtest_manipulation(tbldict, roc_cols)\n \n #count number of tps\n tbldict['cogtests'] = count_instances(tbldict['cogtests'], 'codeb', 'NP_NoTps')\n tbldict['aseg_change'] = count_instances(tbldict['aseg_change'], 'codea', 'MRI_NoTps')\n tbldict['pibparams'] = count_instances(tbldict['pibparams'], 'codea', 'PIB_NoTps')\n \n new_tbldict = {}\n for key, tbl in tbldict.iteritems():\n tpcol = [s for s in tbl.columns if ('_Tp' in s)]\n if tpcol:\n tpcol = tpcol[0]\n tblflat, tblflatnm = flatten(tbl, tpcol, key, [1, '1'])\n new_tbldict[tblflatnm] = tblflat\n tbldict.update(new_tbldict)\n \n #make sure each table contains SubjID and BAC# fields\n for key, tbl in tbldict.iteritems():\n tbl = addcodes(tbl, tbldict['codetranslator'])\n tbldict[key] = tbl\n \n #merge tables\n tblstojoin = ['cogtests_flat','pibparams_flat','aseg_change_flat','fdg_metaroi_flat','subjinfo']\n joincol = ['codea','codeb']\n subjtbl = mergelots(tbldict, tblstojoin, joincol)\n \n #merge tables\n tblstojoin = ['cogtests','subjinfo','pibparams_flat','aseg_change_flat','fdg_metaroi_flat']\n joincol = ['codea','codeb']\n NPtbl = mergelots(tbldict, tblstojoin, joincol)\n \n cf.save_xls_and_pkl(subjtbl, 'subjtbl', outdir)\n cf.save_xls_and_pkl(NPtbl, 'NPtbl', outdir)\n \n return tbldict, NPtbl, subjtbl", "def get_overlaps(file_name):\r\n\r\n place = {}\r\n size = {}\r\n sap = {}\r\n overlapping = []\r\n active_list = []\r\n max_width = 0\r\n\r\n with open(file_name + \".scl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if line.split()[0] == \"Sitespacing\":\r\n sitespacing = line.split()[2]\r\n if line.split()[0] == \"SubrowOrigin\":\r\n starting_x = line.split()[2]\r\n ending_x = int(starting_x) + int(sitespacing) * int(line.split()[5])\r\n if ending_x > max_width:\r\n max_width = ending_x\r\n\r\n divider = max_width // 10\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if len(line.split()) == 3:\r\n size[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] in size:\r\n place[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n sap_num = int(line.split()[1]) // divider\r\n if sap_num not in sap.keys():\r\n sap[sap_num] = []\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]), int(line.split()[2]),\r\n \"start\"])\r\n\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]),\r\n int(line.split()[2]) + int(size[line.split()[0]][1]), \"end\"])\r\n\r\n for lista in sap.values():\r\n lista.sort(key=lambda x: x[3])\r\n lista.sort(key=lambda x: x[4], reverse=True)\r\n for element in lista:\r\n if element[4] == \"start\":\r\n if len(active_list) == 0:\r\n active_list.append(element[0])\r\n else:\r\n for node in active_list:\r\n if int(place[node][0]) < int(place[element[0]][0]) + int(size[element[0]][0]) \\\r\n and int(place[node][0]) + int(size[node][0]) > int(place[element[0]][0]) \\\r\n and int(place[node][1]) < int(place[element[0]][1]) + int(size[element[0]][1]) \\\r\n and int(place[node][1]) + int(size[node][1]) > 
int(place[element[0]][1]):\r\n overlap = (node, element[0])\r\n overlapping.append(overlap)\r\n active_list.append(element[0])\r\n else:\r\n active_list.remove(element[0])\r\n return overlapping", "def _merge_subruns(self, fnames, run=None, getoldname=False, mergecolsonly=False):\n fnames = self._handle_fnames_for_merging(fnames, run)\n if not fnames:\n if getoldname is True:\n return [], \"\"\n else:\n return []\n\n if \"mergecols\" in fnames[0]:\n if len(fnames) == 1:\n rundf = pd.read_csv(fnames[0], index_col=False)\n else:\n rundf = pd.DataFrame(np.vstack([pd.read_csv(f, index_col=False).values for f in fnames]))\n else:\n # depends on the fact the merge cols extract in globRoot2csv.C sets headers, if that is changed set header=None here too\n if len(fnames) == 1:\n rundf = pd.read_csv(fnames[0], header=None, index_col=False)\n else:\n rundf = pd.DataFrame(np.vstack([pd.read_csv(f, header=None, index_col=False).values for f in fnames]))\n\n colnames = pd.read_csv(fnames[0]).columns\n rundf.columns = colnames\n if getoldname is True:\n oldname = self.get_oldname(fnames[0])\n return rundf, oldname\n else:\n return rundf", "def merge_physdfs2(files):\n\n temp_df = pd.read_csv(files[0], index_col=False)\n columns = temp_df.columns.tolist()\n merged_df = pd.DataFrame([], columns=columns)\n\n for file in files:\n df = pd.read_csv(file, index_col=False)\n\n # add 'rat_data' column to the merged df\n root_name = file.split('/')[-1]\n df = df.assign(raw_data=root_name)\n\n # add 'exp_label' column to the merged df\n cell_num = ''.join(re.findall(\"cell\\d{2}\", file))\n exp = file.split('_')[1]\n exp = ''.join(re.findall(\"[a-zA-Z]+\", exp))\n\n df = df.assign(exp_label=exp)\n df = df.assign(cell_num=cell_num)\n\n merged_df = pd.concat([merged_df, df], sort=True, ignore_index=True)\n\n return merged_df", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def merge_physdfs(files, mode='basic'):\n\ttemp_df = pd.read_csv(files[0], index_col=False)\n\tcolumns = temp_df.columns.tolist()\n\tmerged_df = pd.DataFrame([], columns=columns)\n\n\tind = 1\n\ttot = len(files)\n\tfor file in files:\n\t\tprint(\"Merging (%d/%d): %s\" % (ind, tot, file))\n\t\tind = ind + 1\n\n\t\tdf = pd.read_csv(file, index_col=False)\n\n\t\t# add 'rat_data' column to the merged df\n\t\troot_name = file.split('/')[-1]\n\t\tdf = df.assign(raw_data=root_name)\n\n\t\t# add 'exp_label' column to the merged df\n\t\tif mode=='basic':\n\t\t\texp = re.findall(r'[a-zA-Z]{3}\\d{1}', file)\n\t\t\tdf = df.assign(exp_label=exp[0][:-1])\n\n\t\tif mode=='general':\n\t\t if 'cohort' in root_name:\n\t\t df = df.assign(exp_label=root_name[0:8])\n\t\t else:\n\t\t m = root_name.find('_') + 1\n\t\t n = root_name.find('_', m)\n\t\t df = df.assign(exp_label=root_name[m:n])\n\n\t\tif mode=='mengdi':\n\t\t\tm = root_name.find('_') + 1\n\t\t\tm = root_name.find('_', m) + 1\n\t\t\tn = 
root_name.find('-', m)\n\t\t\tdf = df.assign(exp_label=root_name[m:n])\n\n\t\tif mode=='stiffness':\n\t\t\tm = root_name.find('-') + 1\n\t\t\tm = root_name.find('-', m) + 1\n\t\t\tn = root_name.find('_') + 1\n\t\t\tn = root_name.find('_', n)\n\t\t\tdf = df.assign(exp_label=root_name[m:n])\n\n\t\tmerged_df = pd.concat([merged_df, df], sort=True, ignore_index=True)\n\n\treturn merged_df", "def merge(): #Status: WIP\r\n pass", "def write_merged_file(self):\n \n #out_name = os.getcwd() + '/FAST_INDEX_merged_' + [ x for x in self.datasets[ list(self.datasets_keys)[0]].split('/') if '.nc' in x ] [0] \n \n \"\"\" Loading the econding of variables created from the harvester script \"\"\"\n encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n \n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n \n logging.info('Writing the observations_tables to the netCDF output via xarray to_netcdf() ')\n #obs_tab = self.MergedObs[ ['date_time' , 'latitude', 'longitude' , 'observation_value' , 'observed_variable' , 'source_id' , 'observation_id', 'z_coordinate' ] ] # including only some columns \n obs_tab = self.MergedObs # including only some columns \n obs_tab = self.add_cdm_missing_columns(obs_tab) \n \n \"\"\" \n # Old using xarray\n obs_tab = obs_tab.to_xarray() \n for v in obs_tab.variables:\n if v == \"index\" or v == \"hdrlen\" or 'string' in v:\n continue\n obs_tab[v].attrs['external_table'] = self.attributes['observations_table'][v]['external_table']\n obs_tab[v].attrs['description'] = self.attributes['observations_table'][v]['description']\n \"\"\"\n\n for k in obs_tab.columns:\n print('Writing the observation table using h5py new method for the variable: ' , k )\n df = obs_tab[ [k] ] # making a 1 column dataframe \n write_dict_h5(out_name, df, k, encodings['observations_table'], var_selection=[], mode='a', attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')})\n \n #obs_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='w' , group = 'observations_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the header_table to the netCDF output via xarray ')\n head_tab = self.MergedHead.to_xarray()\n for v in head_tab.variables: \n if v == \"index\" or v == \"hdrlen\" or v == \"string80\":\n continue\n head_tab[v].attrs['external_table'] = self.attributes['header_table'][v]['external_table']\n head_tab[v].attrs['description'] = self.attributes['header_table'][v]['description']\n \n head_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = 'header_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the station_configuration and source_configurations tables to the netCDF output via xarray ') \n for k in self.data.keys():\n if k == 'cdm_tables':\n continue \n group_name = k + '_station_configuration'\n sc = self.data[k]['station_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n group_name = k + '_source_configuration'\n sc = self.data[k]['source_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n \"\"\" To be fixed ! 
\"\"\"\n #group_name = k + '_source_configuration'\n #sc = self.data[k]['source_configuration'][:1].to_xarray()\n #sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name ) \n \n logging.info('Writing the merged record indices to the netCDF output ') \n di = self.MergedRecordIndex\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n \n logging.info('Writing the merged feedback to the netCDF output ') \n group_name = 'era5fb' \n di = self.MergedFeedback\n di = di.to_xarray()\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n logging.info('Writing the standard cdm tables to the netCDF output ') \n for t in self.data['cdm_tables'].keys(): \n d = self.data['cdm_tables'][t]\n d.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = t )\n \n logging.info('*** Done writing the output netCDF file ')", "def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. \"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container dictionary. 
At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates \"\"\"\n 
#dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if ( k == 
'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0", "def process_merging(lvcfs, ltoolnames, list_tool_precedence_order, dico_map_tool_acronym, lossless, merge_vcf_outfilename, l_contigs_ref_genome_fasta_dict, cmdline):\n\n\toutputFilename = merge_vcf_outfilename\n\ttuple_objs = ()\n\tl_snames = []\n\tl_contigs = []\n\n\n\tListFieldsToProcessForOurFORMATColumn = [\"GT\", \"DP\", \"AR\", \"AD\"] ## HARDCODED;\n\n\t##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\t## SECTION CHECKING PRECEDENCE ORDER if necessary\n\t##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\tlog.info(\"is list_tool_precedence empty? 
\".format(str(list_tool_precedence_order)))\n\tif list_tool_precedence_order is not None:\n\t\t'''here we sort and reassigned ltoolnames and lvcfs based on list_tool_precedence_order ; names of the \n\t\ttools have to match 100%\n\t\t'''\n\t\tif len(list_tool_precedence_order) != len(ltoolnames):\n\t\t\texit(\"ERROR: Tool Names in list precedence do not match 100% names in list toolnames ; check your \"\n\t\t\t \"input\\n\" + \"sorted_list_tool_precedence -> \" + str(sorted(list_tool_precedence_order)) +\n\t\t\t \"\\nsorted_list_tool_names ------> \"\n\t\t\t + str(sorted(ltoolnames)))\n\t\t## REORDERING the list of PRECEDENCE of the TOOLs\n\t\tindices = []\n\t\tfor toolname in list_tool_precedence_order:\n\t\t\tindices.append(ltoolnames.index(toolname))\n\t\t## we reallocate/reorder the vcfs files the same order of the list_tool_precedence_order\n\t\tlvcfs = [lvcfs[i] for i in indices]\n\t\tltoolnames = list_tool_precedence_order; ## we re-assigned the list\n\t\tlog.info(str(type(list_tool_precedence_order)))\n\t\tlog.info(\"Re-Ordering the Toolnames and the list of VCFs based on the given precedence list: {} \".format(\n\t\t\tlist_tool_precedence_order))\n\n\t##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\t## SECTION STARTING PROCESSING FIELDS\n\t##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n\tvcfMerger_Format_Fields_Specific = [\n\t\t'##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">',\n\t\t'##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Read depth at locus in Sample\">',\n\t\t'##FORMAT=<ID=AD,Number=.,Type=Integer,Description=\"Allelic depths for the ref and alt alleles in the order listed from chosen prevalent tool\">',\n\t\t'##FORMAT=<ID=AR,Number=1,Type=Float,Description=\"Allele frequency of ALT allele from chosen prevalent tool\">'\n\t]\n\n\tTN_FLAGS = []\n\tfor tool in ltoolnames:\n\t\tTN_FLAG = str(''.join([\n\t\t\t'##INFO=<ID=' + tool + ',Number=0,Type=Flag,Description=\"Toolname Flag means that position got '\n\t\t\t 'called by this tool\">']))\n\t\tTN_FLAGS.append(TN_FLAG)\n\tAdditional_FLAGS = [\n\t\t'##INFO=<ID=CC,Number=1,Type=Integer,Description=\"CALLERS_COUNT,Number of tools calling this variant event '\n\t\t'out of a total of ' + str(len(ltoolnames)) + ' tools\">',\n\t\t''.join(['##INFO=<ID=TPCE,Number=1,Type=String,Description=\"Tool that got precedence for called position; '\n\t\t 'user gave the following order for tool precedence: ', ', '.join([str(t) for t in\n\t\t ltoolnames]),\n\t\t '\">']),\n\t\t'##INFO=<ID=VTYPE,Number=1,Type=String,Description=\"Type of Variant (snv, ins, del)\">'\n\t]\n\n\tvcfMerger_Info_Fields_Specific = TN_FLAGS + Additional_FLAGS\n\n\t# the trick is here for the Tool Precedence!!! The user has given us an ordered list of\n\t# vcfs and toolnames in order of precedence or a specific PRECEDENCE order was given via --precedence\n\t# and we sort the vcf and add them to the tuple accordingly\n\tfor i in range(len(lvcfs)):\n\t\to = vcfToDict.vcfToDict(lvcfs[i], ltoolnames[i]) ## here we map the toolname and the vcf associated\n\t\ttuple_objs = tuple_objs + (o,) ## we add instances of object vcfToDict to the tuple ; order FIFO is\n\t\t# equivalent to the order of precedence\n\t\tl_snames.append(o.samplenames) ## we add tuples of samplenames to the list l_snames as a list of tuples\n\t\tl_contigs.append(sorted(o.contigs))\n\n\t# performing checks before processing data further\n\tdvm.compareTuples(l_snames,\n\t \"SampleNames\") ## we cannot skip that one. 
If not matching, then modify vcf to get samples in\n\t# correct columns or with the same names across ALL the vcf files ;\n\tlog.info(\"list of vcf-within-captured Sample Names:\")\n\tlog.info(set(l_snames))\n\tlog.info(\"Number of sample in set: {}\".format(len(set(l_snames))))\n\n\t## UNCOMMENT NEXT LINE TO PUT THE CONTIGS CHECK BACK ON\n#########\tdvm.compareTuples(l_contigs, \"CONTIGS\") ## we may add an option to skip that check ; even though we do not know\n\t# what could be the consequences of having different contigs ; we cannot think any so far.\n\n\t\"\"\"\n\t### we check here the presence of the expected MANDATORY fields in the FORMAT columns ;\n\t### Unfortunately as we do not read the entire VCFs file and therefore we do not have the object VCF created yet,\n\t### we cannot use the cyvcf2 API to check if an ID is defined in the VCF header or not, or in the variant or not;\n\t### So for now, we rely on our own vcf header capture as string; we therefore check the string;\n\t### BUT: this does not mean that the ID fields are necessary present in each variant;\n\t### If we want to check that presence, we will have to read the vcf files entirely see below \"tuple_dicts = () loop\" ;\n\t### and check every variant.\n\t### Or, second option, we will check while we merge and raise ERROR and either stop merging or skip that variant, or put NULL value for that field ;\n\t### for example: if AR does not exist, we set AR=.\n\t\"\"\"\n\n\tcheck_fields_definitions_in_header = True\n\tif check_fields_definitions_in_header:\n\t\tfor flagid in ListFieldsToProcessForOurFORMATColumn:\n\t\t\tlog.info(\"common flag to be processed in FORMAT: {}\".format(flagid))\n\t\t\tfor tpo in tuple_objs:\n\t\t\t\t'''Check if flag we want to put in the format field have been defined in the VCF header'''\n\t\t\t\tres_search = search(\"\".join([\"ID=\", flagid]), tpo.headers)\n\t\t\t\tif res_search is None:\n\t\t\t\t\texit(\n\t\t\t\t\t\t\"Id Flag \" + flagid + \" not Defined in header of vcf file \" + tpo.fvcf + \".\\nPlease bring the VCF up to specs before running this merging tool. Use a wrapper specific to your tool which has already been created by the Author of the current tool. 
Aborting!\")\n\n\n\t# we process the files entirely after all the checks have PASSED successfully\n\t# we may make parallel this step But If we do, we lose the precedence order in the tuple_dicts variable and\n\t# this defies the purpose of that script\n\ttuple_dicts = ()\n\tfor tpo in tuple_objs:\n\t\ttuple_dicts = tuple_dicts + (tpo.dictOfLoci(tpo.readVCF()),)\n\n\t# we merge the Loci from all the VCFs [Key + Value, where Key is defined as CHROM_POS_REF_ALT as assigned in the function \"dictOfLoci\" of class vcfToDict ]\n\t\tdd = defaultdict(list)\n\n\tlog.debug(\"-\" * 41);\n\tlog.debug(str(type(tuple_dicts)))\n\n\tfor d in tuple_dicts:\n\t\tfor key, value in d.items():\n\t\t\ttry:\n\t\t\t\tdd[key].append(value)\n\t\t\texcept KeyError: ## I do not see why we should have an error here because we just list the Keys\n\t\t\t\t# from d dicts we created ; I put it probably because it happened?\n\t\t\t\tlog.warning(\"KEY ERROR Detected - Skipping this values ; It should not have happened; please \"\n\t\t\t\t \"report that to the Author\")\n\t# NOTE: in the loop above, to get your .attrib, just change append(value) to append(value.attrib)\n\t# You may then want to make a normal dict out of the defaultdict so you have normal dict behavior for non-existent keys etc: dd = dict(dd)\n\n\t# 1) first we managed the Headers from all the tools\n\tlog.info(\"processing headers of all the vcf files ...\")\n\tlist_lines_header = dvm.create_new_header_for_merged_vcf(tuple_objs,\n\t cmdline,\n\t vcfMerger_Format_Fields_Specific,\n\t vcfMerger_Info_Fields_Specific,\n\t dico_map_tool_acronym,\n\t l_contigs_ref_genome_fasta_dict\n\t )\n\t# 2) we add the modified header lines to the output merger file\n\tlog.info(\"adding the header to the out vcf file ...\")\n\tdvm.add_new_header_to_merged_file(outputFilename, list_lines_header, tuple_objs[0].header_chrom_line + \"\\n\")\n\n\t# 3) we process all the variants\n\tlog.info(\"looping over variant calls, merging and writing back to file ... \")\n\n\ttry:\n\n\t\tof = open(outputFilename, 'a') # we open the output file with merged information here\n\t\t# sort dico by keys before iterating over it ... 
## normally the Keys are not sorted because we deal with a dictionary which do not keep the order\n\n\t\t# dd = OrderedDict(sorted(dd.items()))\n\t\t# if flag_natsorted : ## if necessary, and requested by users later, we will activate the sorting of teh variants themselves by contigs order as in fastadict file\n\t\t# \tsorted_keys = natsorted(dd.keys())\n\t\t# else:\n\t\t## in this next line, the variants are sorted in the same order the contigs are in the HEADER (the line above aka \"sorted_keys = natsorted(dd.keys())\" sorts the key in natural order that can be different from contgis order in header.\n\t\tsorted_keys = dvm.output_list_variant_sorted_by_contigs_as_same_order_as_in_fastdict_file(dd, l_contigs_ref_genome_fasta_dict)\n\t\t# dd.keys --> they are the KEYS that are represented by the PATTERN --> CHROM_POS_REF_ALT\n\t\t# dd.values --> represents the calls and their information from each tool having call the variant at position CHROM_POS\n\t\t# (the number of list in values may go from 1 to len(lvcfs); where len(lvcfs) represents the total number\n\t\t# of inputs vcfs and therefore ALL the tools would have called that variant )\n\t\t# wtv stands for Winning Tool Variant ; It always is the first one, as the tools have been sorted by\n\t\t# precedence given by the user\n\t\t# 3a) get the total number variants to process in order to calculate on the fly the value for the counter\n\t\t# steps\n\t\ttot_variants_count = len(dd)\n\t\ttotnum_samples = len(list(set(l_snames))[0]) ## get the number of sample detected within the VCF ; We already check if same number of samples within each vcf so no need here; But we deal with tuples of strings so we need to extract the unique tuple from the set; because we expect only ONE tuple\n\t\tlog.info(\"Expected number of Samples in each VCF: \"+str(totnum_samples))\n\t\tlog.info(\"Set of sample(s) found:: \" + str(set(l_snames)))\n\t\tlog.info(\"Total Count of Variants to be merged (aka union of variant here): \" + str(tot_variants_count))\n\n\n\t\tcounter = 0\n\t\t# step is ~10% of tot_variants and round to the nearest nth value\n\t\tstep = int(round(tot_variants_count / 10, -(len(str(round(tot_variants_count / 10))) - 1)))\n\t\tfor K in [k for k in sorted_keys]: # sub is list__list__o.ovcf_variant ;\n\t\t\tcounter += 1;\n\t\t\tif step > 1 and counter % step == 0:\n\t\t\t\tlog.info(\"processed {} variants ...\".format(counter))\n\t\t\trebuilt_variant = dvm.rebuiltVariantLine(dd[K],\n\t\t\t dico_map_tool_acronym,\n\t\t\t lossless,\n\t\t\t ListFieldsToProcessForOurFORMATColumn,\n\t\t\t totnum_samples); ## dd[K} represent a List of Variants (LV)\n\t\t\tof.write(rebuilt_variant + linesep)\n\t\tlog.info(\"total processed variants: {}\".format(counter))\n\n\n\texcept IOError as e:\n\t\tlog.info(\"Error I/O({0}): {1}\".format(e.errno, e.strerror))\n\t\tof.close()\n\telse:\n\t\tof.close()", "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\t# print(path)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\t# print(len(train_imgs))\n\t\t\t# break\n\t\t\tfor imgname in train_imgs:\n\t\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\t\timg = cv2.imread(imgname)\n\t\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\t\timg_label = 
img[:,:,0]\n\t\t\t\tcv2.imwrite(path_train+\"/\"+midname+\".\"+self.img_type,img_train)\n\t\t\t\tcv2.imwrite(path_label+\"/\"+midname+\".\"+self.img_type,img_label)", "def merge_all_claims_norm_dicts_for_docs(): \n# docs_norm_scores_dicts_path = base_path+\"\\\\docs_norm_scores_dicts\"\n docs_norm_scores_dicts_path = linux_base_path+\"/docs_norm_scores_dicts\"\n# all_claims_norms_scores_merged_dict = base_path +\"\\\\all_claims_norms_scores_merged_dict\"\n all_claims_norms_scores_merged_dict = linux_base_path +\"/all_claims_norms_scores_merged_dict\"\n for alpha in range(0,11,1):\n for beta in range(0,10,1):\n docs_scores_all_claims = {}\n for filename in os.listdir(docs_norm_scores_dicts_path):\n (alpha_f,beta_f)=turn_to_float([alpha,beta])\n if \"_alpha_\"+str(alpha_f)+\"_\" in filename and \"_beta_\"+str(beta_f)+\"_\" in filename:\n curr_dict = read_pickle(docs_norm_scores_dicts_path+\"/\"+filename)\n docs_scores_all_claims = dict(docs_scores_all_claims.items() + curr_dict.items()) #merge dicts\n save_pickle(all_claims_norms_scores_merged_dict+\"/docs_norm_scores_all_claims_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f),docs_scores_all_claims)", "def _mergeDiffrsp(self):\n # --------------------------------------------------------------------------------------------- #\n # Create a file listing all the FT1 files\n ft1Files = np.array(glob.glob(os.path.join(self.workpath, '*_Chk_*.fits')))\n tmpFT1 = os.path.join(self.workpath, 'ft1.lis')\n chkNb = np.array([int(os.path.basename(ff).split('_')[-1][:-5]) for ff in ft1Files])\n sortInd = np.argsort(chkNb)\n wfil = open( tmpFT1, 'w')\n for f in ft1Files[sortInd]:\n wfil.write(f + '\\n')\n wfil.close()\n\n # --------------------------------------------------------------------------------------------- #\n # Merging everything\n self.outselect = os.path.join(self.workpath, 'FT1_Diffuse'+self.suffix+'.fits')\n self._gtSelect(data=tmpFT1)\n os.remove(tmpFT1)\n\n return", "def merge_summaries_old(root_dir,output_file=None):\n #\n sumfiles = glob.glob(f\"{root_dir}/**/*smry.txt\",recursive=True)\n nsums = len(sumfiles)\n print (f\"Found {nsums} summary files in {root_dir}\")\n #\n with tempfile.NamedTemporaryFile(mode='w') as fp:\n for i in range(nsums):\n sumfile = sumfiles[i]\n iobs = os.path.basename(sumfile)[0:10]\n with open(sumfile,'r') as sfile:\n fp.write(sfile.read())\n #\n # now read as pandas dataframe\n #\n colnames = [\"rev\",\"obsid\",\"expid\",\"mode\",\"filt\",\"tstart\",\"tend\",\"texpo\",\\\n \"mvcratio\", # (a rough measure of the ratio of counts in the MnKa versus continuum)\n \"qboxt0\",\"qboxt1\",\"qboxt2\",\"qboxt3\", # x 4 (electronics quadrant box temperatures)\n \"ndisclin_mean0\",\"ndisclin_mean1\",\"ndisclin_mean2\",\"ndisclin_mean3\", #x 4\n \"mipsel0\",\"mipsel1\",\"mipsel2\",\"mipsel3\", #x 4 (parameter for on-board MIP rejection algorithm)\n \"maxmip0\",\"maxmip1\",\"maxmip2\",\"maxmip3\", #x 4 (parameter for on-board MIP rejection algorithm)\n \"ndisclin_med0\",\"ndisclin_med1\",\"ndisclin_med2\",\"ndisclin_med3\", #median x 4\n \"ndisclin_std0\",\"ndisclin_std1\",\"ndisclin_std2\",\"ndisclin_std3\"] #, stddev x 4\n #\n df = pd.read_csv(fp.name,delimiter='\\s+',header=None,skip_blank_lines=True,names=colnames)\n #\n # now calculate the time_delta, the difference in years from observation start and 2000-01-01\n #\n stime = [(datetime.strptime(x,\"%Y-%m-%dT%H:%M:%S\")-time0).total_seconds()/(365.0*24.0*3600.0) for x in df.tstart]\n df.insert(6,\"delta_time\",pd.Series(stime,index=df.index))\n #\n print (f'Last 
observation t={df.delta_time.max():.2f} years')\n if (output_file is not None):\n df.to_csv(output_file)\n fp.close()\n return df", "def attr_merge(self):\n if SUBSAMPLE_DF_KEY not in self or self[SUBSAMPLE_DF_KEY] is None:\n _LOGGER.debug(\"No {} found, skipping merge\".\n format(CFG_SUBSAMPLE_TABLE_KEY))\n return\n for subsample_table in self[SUBSAMPLE_DF_KEY]:\n for n in list(subsample_table[self.sample_name_colname]):\n if n not in [s[SAMPLE_NAME_ATTR] for s in self.samples]:\n _LOGGER.warning((\"Couldn't find matching sample for \"\n \"subsample: {}\").format(n))\n for sample in self.samples:\n sample_colname = self.sample_name_colname\n if sample_colname not in subsample_table.columns:\n raise KeyError(\"Subannotation requires column '{}'.\"\n .format(sample_colname))\n _LOGGER.debug(\"Using '{}' as sample name column from \"\n \"subannotation table\".format(sample_colname))\n sample_indexer = \\\n subsample_table[sample_colname] == sample[SAMPLE_NAME_ATTR]\n this_sample_rows = subsample_table[sample_indexer].\\\n dropna(how=\"all\", axis=1)\n if len(this_sample_rows) == 0:\n _LOGGER.debug(\"No merge rows for sample '%s', skipping\",\n sample[SAMPLE_NAME_ATTR])\n continue\n _LOGGER.debug(\"%d rows to merge\", len(this_sample_rows))\n _LOGGER.debug(\"Merge rows dict: \"\n \"{}\".format(this_sample_rows.to_dict()))\n\n merged_attrs = {key: list() for key in this_sample_rows.columns}\n _LOGGER.debug(this_sample_rows)\n for subsample_row_id, row in this_sample_rows.iterrows():\n try:\n row[SUBSAMPLE_NAME_ATTR]\n except KeyError:\n row[SUBSAMPLE_NAME_ATTR] = str(subsample_row_id)\n rowdata = row.to_dict()\n\n def _select_new_attval(merged_attrs, attname, attval):\n \"\"\" Select new attribute value for the merged columns\n dictionary \"\"\"\n if attname in merged_attrs:\n return merged_attrs[attname] + [attval]\n return [str(attval).rstrip()]\n\n for attname, attval in list(rowdata.items()):\n if attname == sample_colname or not attval:\n _LOGGER.debug(\"Skipping KV: {}={}\".\n format(attname, attval))\n continue\n _LOGGER.debug(\"merge: sample '{}'; '{}'='{}'\".\n format(sample[SAMPLE_NAME_ATTR], attname,\n attval))\n merged_attrs[attname] = \\\n _select_new_attval(merged_attrs, attname, attval)\n\n # remove sample name from the data with which to update sample\n merged_attrs.pop(sample_colname, None)\n\n _LOGGER.debug(\"Updating Sample {}: {}\".\n format(sample[SAMPLE_NAME_ATTR], merged_attrs))\n sample.update(merged_attrs)", "def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating 
the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0", "def merge_energy_datatypes(osm_path): \n #extract line data\n df_line = powerline_limited(osm_path) #extract required data\n if 'asset' in df_line.columns:\n df_line['asset'] = list(map(lambda x: x.lower(), df_line['asset'])) #make sure that asset column is in lowercase characters\n #reclassify assets \n mapping_dict = {\n \"cable\" : \"cable\", #underground\n \"minor_cable\" : \"cable\", \n #\"generator\" : \"generator\", #device used to convert power from one form to another\n \"line\" : \"line\", #overground\n \"minor_line\" : \"minor_line\", #overground\n #\"plant\" : \"plant\", #place where power is generated\n #\"substation\" : \"substation\"\n }\n df_line['asset'] = df_line.asset.apply(lambda x : mapping_dict[x]) #reclassification \n\n if 'voltage' in df_line.columns:\n df_line = df_line.drop(['voltage'], axis=1) \n \n #extract polygon data\n df_poly = power_polygon(osm_path) #extract required data\n df_poly['geometry'] =pygeos.buffer(df_poly.geometry,0) #avoid intersection\n \n #extract point data\n df_point = power_point(osm_path) #extract required data\n \n return pandas.concat([df_line, df_poly, df_point], ignore_index=True)", "def merge_both_tables():\n old = Table.read('data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')\n wanted = Table.read('data/scocen_candidates_300k_only_spatial_cut.fits')\n additional = Table.read('data/scocen_candidates_300k_only_spatial_cut_200k_to_determine_bg_ols.fits')\n\n d_old = dict(zip(old['source_id'], old['background_log_overlap']))\n d_add = dict(zip(additional['source_id'], additional['background_log_overlap']))\n d_old.update(d_add)\n dct = d_old\n\n ln_bg_ols = [dct[source_id] for source_id in wanted['source_id']]\n print\n len(ln_bg_ols), len(wanted)\n\n wanted['background_log_overlap'] = ln_bg_ols\n print\n wanted\n\n wanted.write('data/scocen_candidates_300k_only_spatial_cut.fits', overwrite=True, format='fits')", "def first_found_merging(file_names, tree_name):\n\n tree_branches = {}\n for ifile, file_name in enumerate(file_names):\n file_ = uproot.open(file_name)\n if tree_name+\";1\" in file_.keys():\n tree = file_[tree_name].arrays()\n tree_branches = { k: tree[k] for k in tree.fields }\n break\n\n return tree_branches", "def merge_split_data(detail=True):\n\n merge_dirs = [\"../data/split_semeval_mic_train_and_test_by_parser\"]\n out_dirs = [\"../data/merge_semeval_mic_train_and_test_by_parser\"]\n\n for out_dir in out_dirs:\n os.system(\"rm -rf %s\" % out_dir)\n os.system(\"mkdir -p %s\" % out_dir)\n\n for i in range(0, len(merge_dirs)):\n merge_dir = merge_dirs[i]\n if detail:\n print(\"To merge %s\" % merge_dir)\n file_names = os.listdir(\"%s/train/\" % (merge_dir, ))\n for file_name in file_names:\n train_path = \"%s/train/%s\" % (merge_dir, file_name)\n test_path = \"%s/test/%s\" % (merge_dir, file_name)\n out_file = \"%s/%s\" % (out_dirs[i], file_name)\n os.system(\"cat %s >> %s; cat %s >> %s\"\n % (train_path, out_file, test_path, out_file))", "def merge_def(self, expanded_def, sdef, to_include = {}):\n for id in sdef['df'].keys():\n if (((id == 'description' and type(sdef['df'][id]) is str)\n and '_description' not in sdef['df'].keys()) or id == '_description'):\n # append this description to any other descriptions specified by 
previous merge\n description = \"%s:%s- %s\" % (sdef['ns'], sdef['id'], sdef['df'][id])\n self.description.append(description)\n continue\n if id == 'merge':\n continue\n # if id == 'attributes':\n # self.attributes.update(sdef['df'][id])\n # continue\n if id == 'parent_attributes':\n self.parent_attributes.update(sdef['df'][id])\n continue\n if id == 'include':\n # save includes for processing later\n # print \"in merge_def, found include:\"\n # pp.pprint(sdef['df'][id])\n to_include.update(sdef['df'][id])\n continue\n if id in expanded_def.keys():\n # means id from previous merge conflicts\n if id == 'attributes':\n self.merge_attribute_defs(expanded_def[id], sdef['df'][id])\n # if value for both are dictionaries, try recursive merge\n elif isinstance(expanded_def[id], dict) and isinstance(sdef['df'][id], dict):\n # print \"conflicting key (%s) in merge\" % id\n # print \"attempting to recursively merge expanded_def['%s]:\" % id\n # pp.pprint(expanded_def[id])\n # print \"with sdef['df']['%s']:\" % id\n # pp.pprint(sdef['df'][id])\n self.merge(expanded_def[id], sdef['df'][id])\n else:\n print \"** Error\"\n print \"Conflicting key (%s) when merging '%s' when doing\" % (id, sdef['id'])\n print \"make_group(%s, %s, path=%s)\" % (self.sdef['id'], self.name, self.path)\n print \"expanded_def is:\"\n pp.pprint(expanded_def)\n print \"sdef is:\"\n pp.pprint(sdef)\n traceback.print_stack()\n sys.exit(1)\n else:\n # no conflict, just copy definition for id\n # deep copy so future merges do not change original\n expanded_def[id] = copy.deepcopy(sdef['df'][id])", "def entry_parser():\n # from tools import file_importer, file_outporter\n from copy import copy\n from collections import defaultdict\n import os.path\n \n print(\"this is entry parser\")\n \n # inPathL = [\"bob/processed/proteinGroups - OST-1-09042017.txt\",\"bob/processed/proteinGroups_OST2.txt\",\"bob/processed/proteinGroups_OST3.txt\"]\n inpathL = []\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"txt_cav1ko-1-17082017\", \"proteinGroups.txt\"),\"r\")\n # outPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n fileCount = 1\n # outF = file_outporter(outPath)\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"w\")\n # newFlag = True\n \n finDict = defaultdict(list)\n cN = 0\n # for relPath in inPathL:\n outDict = {}\n # inpF = file_importer(relPath)\n headerFlag = True\n \n for inpLine in inpF:\n cN += 1\n if headerFlag:\n headerFlag = False\n headerLine = inpLine\n continue\n inpLine = inpLine.strip(\"\\n\\r\")\n inpItem = inpLine.split(\"\\t\")\n geneL = inpItem[0].split(\";\")\n lenS = len(geneL[0])\n curGene = geneL[0]\n for geneI in geneL: # find gene name with the shortest length\n if len(geneI) < lenS:\n lenS = len(geneI)\n curGene = geneI\n if \"__\" in curGene: continue # get rid of contaminant lines\n try: # get rid of wonky lines introduced by excel\n int(curGene)\n continue\n except ValueError: \n pass\n\n if curGene[-2] == \"-\":\n curGene = curGene[:-2]\n if curGene[-3] == \"-\":\n curGene = curGene[:-3]\n \n # remove ambiguities based on gene name from the entire entry:\n \n corrPos = geneL.index(curGene)\n corrLine = []\n targetCount = 46 # after the 45th item row in the list, peptide IDs and modification start to appear which are allowed to have multiple entries and do not need to be disambiguated\n currCount = 1\n pepFlag = True\n for inpE in inpItem:\n currCount += 1\n if currCount == 
targetCount:\n pepFlag = False\n # print inpE\n if \";\" in inpE and pepFlag:\n try:\n corrLine.append(inpE.split(\";\")[corrPos])\n except IndexError:\n corrLine.append(inpE.split(\";\")[0])\n else:\n corrLine.append(inpE.rstrip(\"\\n\"))\n\n \n if inpItem[6] == \"\":\n # print \"no protein name found. adding the uniprot ID.\"\n inpItem[6] = curGene\n \n \"\"\"\n try:\n for inpN in inpItem[4:10]:\n inpItem[inpItem.index(inpN)] = int(inpN)\n countFlag = True\n except ValueError:\n print inpItem[4:10]\n countFlag = False\n if countFlag:\n if sum(inpItem[4:10]) == 0: continue # there are some unexpressed proteins in there\n \n \"\"\"\n # print len(corrLine)\n if curGene in outDict: # handle duplicate protein entries and merge them together\n # print \"%s is duplicate\" % curGene\n if curGene == \"Protein IDs\": \n \"\"\"\n quickCount2 = 0\n for quickDictI in outDict[curGene]:\n print str(quickCount2) + \" \" + quickDictI\n quickCount2 += 1\n quickList = inpItem\n quickCount3 = 0\n for quickImp in quickList:\n print str(quickCount3) + \" \" + quickImp\n quickCount3 += 1 \n # print inpItem\n # print outDict[curGene]\n \"\"\"\n continue\n combList = []\n \n \"\"\"\n addL = []\n for i in outDict[curGene][3:]:\n addL.append(i)\n addL2 = []\n for j in corrLine[3:]:\n addL2.append(i)\n outL[3:] = map(add, addL, addL2) # admittedly this looks terrible\n \"\"\"\n \n indexN = 0\n for cItem in corrLine:\n # print indexN\n # print \"---\"\n # print len(corrLine)\n if indexN < 18 or 30 <= indexN <= 43:\n try:\n currC = int(cItem)\n currC = currC + int(outDict[curGene][indexN]) # numbers like peptide counts or LFQ values are added up during merge\n except ValueError:\n currC = cItem\n \n elif 18 <= indexN <= 25 or 28 <= indexN <= 29: # sequence coverage and scores\n currC = max([float(cItem),float(outDict[curGene][indexN])])\n \n elif 26 <= indexN <= 27 or indexN == 44:\n \"\"\"\n quickCount = 0\n for corrItem in corrLine:\n print str(quickCount) + \" \" + corrItem\n quickCount += 1\n \n import time\n \n print relPath\n print corrLine\n print outDict[curGene]\n print \"++++++++++++++++++++++++\"\n print indexN\n time.sleep(0.5)\"\"\"\n currC = cItem\n\n \n else:\n corrL = cItem.split(\";\")\n # print indexN\n # print corrLine\n # print outDict[curGene][indexN]\n dictL = outDict[curGene][indexN].split(\";\")\n mergeL = copy(dictL)\n for corrI in corrL:\n if corrI not in dictL:\n mergeL.append(corrI)\n \n currC = \";\".join(mergeL)\n\n combList.append(currC)\n\n \n indexN +=1\n \n \n combList[-1] = \"merged\" \n outDict[curGene] = combList \n # print \"merged:\"\n # print combList\n else:\n corrLine.append(\"unique\")\n outDict[curGene] = corrLine\n\n \n print(fileCount)\n \n\n # if not newFlag: print fileCount, testKey, finDict[testKey] \n # if newFlag:\n # newFlag = False\n \n for outKey,outValue in list(outDict.items()): \n if outKey in finDict: # add modified dicts together into single, unified dict\n # print fileCount, finDict[outKey]\n # print outValue\n outIndex = 0\n for outItem in outValue:\n finDict[outKey][outIndex].append(outItem)\n outIndex += 1\n # print finDict[outKey]\n\n else: # or just add new entries\n if fileCount == 1:\n for outItem in outValue:\n finDict[outKey].append([outItem])\n \n else: # fill up entries that were not present in the previous cycle\n loopCount = 0\n while loopCount < fileCount - 1:\n for i in range(len(outValue)):\n if len(finDict[outKey]) == i:\n finDict[outKey].append([])\n else:\n finDict[outKey][i].append(\"\")\n loopCount += 1\n outIndex = 0\n for outItem in 
outValue:\n # print finDict[outKey]\n finDict[outKey][outIndex].append(outItem) \n outIndex += 1\n\n for testKey in finDict: # fill up entries in result dict which were not present in previous file\n if len(finDict[testKey][0]) < fileCount:\n for i in range(len(finDict[testKey])):\n finDict[testKey][i].append(\"\")\n\n if len(inpathL) > 1: fileCount += 1 # this is needed if multiple files are parsed\n for finK, finV in list(finDict.items()):\n for finI in finV[-1]:\n if finI != \"unique\" and finI != \"\":\n print(finK, finV)\n\n \n \n outN = 0 \n # prepare header for file:\n headList = headerLine.strip(\"\\n\\r\").split(\"\\t\")\n if fileCount > 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\")\n headerCount = 1\n while headerCount < fileCount:\n outF.write(headerI + \"-\" + str(headerCount) + \"|\")\n headerCount += 1 \n outF.write(headerI + \"-\" + str(headerCount) + \"\\t\")\n \n headerCount = 1\n while headerCount < fileCount:\n outF.write(headList[-1] + \"-\" + str(headerCount) + \"|\")\n headerCount += 1\n \n outF.write(headList[-1] + \"-\" + str(headerCount) + \"\\n\")\n\n elif fileCount == 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\") \n outF.write(headerI + \"\\t\")\n outF.write(headList[-1].replace(\",\",\".\") + \"\\n\")\n \n else:\n print(\"number of input files should be at least one. Got less somehow\")\n raise ValueError\n \n \n for outDK, outDV in list(finDict.items()): # write out assembled results to a file\n outN += 1\n if len(outDK) > 30: print(\"this line should not be displayed\")\n # print outDV[1]\n # if outN == 100: break\n nameCount = 0\n for outI in outDV:\n # if nameCount == 0: print outI\n for outPiece in outI[:-1]:\n outU = outPiece.replace(\",\",\".\")\n if outU == \"\": outF.write(\"_|\")\n else: outF.write(str(outU) + \"|\")\n if outI[-1] == \"\": # handle missing entries\n if nameCount == 6: outF.write(outDV[0][0] + \"\\t\") # replace missing gene names with their uniprot ID\n else: outF.write(\"_\\t\")\n else: outF.write(str(outI[-1]).replace(\",\",\".\") + \"\\t\")\n nameCount += 1\n outF.write(\"\\n\")\n \n\n print(\"unique proteins: \", outN)\n print(\"lines parsed: \", cN)\n # print headerLine\n inpF.close()\n outF.close()", "def merge_rasters(self):\n for index, i in enumerate(self.months):\n month = str(index + 1)\n if len(month) < 2:\n month = '0' + month\n rasters = [str(x) for x in i.joinpath('subnational').iterdir() if not x.name.endswith('txt') if x.name.endswith('norm.tif')]\n outfile = i.joinpath(f'{self.country}_{month}_normalised.tif')\n tiffs = \" \".join(rasters)\n gdal_cmd = f\"gdal_merge.py -o {outfile} -a_nodata -99999.0 -of gtiff {tiffs}\"\n subprocess.call(gdal_cmd, shell=True)", "def write_merged(self, content = '', table=''):\n\n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True) \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n\n '''\n if os.path.isfile('dic_obstab_attributes.npy'):\n attrs_dic = np.load('dic_obstab_attributes.npy' , allow_pickle = True).item()\n else:\n attrs_dic = {}\n '''\n attrs_dic = {}\n\n \"\"\" Retrieving the attributes \"\"\"\n if content in ['observations_table','header_table','era5fb', 'station_configuration']:\n for var in table.keys():\n if var == 'comments':\n continue \n\n attrs_dic[var] = {}\n try:\n attrs_dic[var]['description'] = bytes( self.dic_type_attributes[content][var]['description'] , 'utf-8' )\n except:\n attrs_dic[var]['description'] = bytes( 'missing' , 
'utf-8' )\n #print(' FFF FAILING WITH DESCRIPTION: ', var , ' ' , self.dic_type_attributes[content][var]['description']) # FFF CHECK WHY SOME ARE FAILING\n\n try:\n attrs_dic[var]['external_table'] = bytes( self.dic_type_attributes[content][var]['external_table'] , 'utf-8' )\n except:\n attrs_dic[var]['external_table'] = bytes( 'missing' , 'utf-8' )\n #print(' FFF FAILING WITH EXTERNAL TABLE : ', var ) # FFF CHECK WHY SOME ARE FAILING \n\n\n if content == 'recordindex': # writing the recordindex, recordtimestamp, dateindex\n #logging.info('Writing the merged record indices to the netCDF output ')\n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n\n elif content == 'cdm_tables':\n for k in data['cdm_tables'].keys():\n table = data['cdm_tables'][k]\n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a', group = k)\n #logging.info('Writing the cdm table %s to the netCDF output ', k)\n \n elif content == 'source_configuration': \n table.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a', group = content)\n #logging.info('Writing the source_configuration table to the netCDF output ')\n\n elif content == 'station_configuration':\n for k in table.keys(): \n if k == 'station_name':\n print(0)\n var_type = self.dic_type_attributes[content][k]['type']\n\n ''' trying to convert the variable types to the correct types stored as attribute, read from the numpy dic file '''\n if type(table[k][0]) != var_type:\n try:\n table[k] = table[k].astype( var_type ) \n print('Done station_conf' , k )\n except:\n if k == 'secondary_id':\n table[k] = table[k].astype( bytes ) \n\n print ('FAILED converting column ' , k, ' type ', type(table[k][0]) , ' to type ', var_type )\n\n dic = {k:table[k]} \n write_dict_h5(out_name, dic , content, self.encodings[content], var_selection=[], mode='a', attrs = attrs_dic )\n \n \n # Writing the observations_table, header_table, era5fb \n elif content in ['observations_table', 'era5fb', 'header_table']: \n\n shape = ''\n for k in table.keys(): \n if k == 'index' or k == 'hdrlen' or 'string' in k :\n continue\n if k == 'station_name':\n print(0)\n \n var_type = self.dic_type_attributes[content][k]['type']\n\n ''' trying to convert the variable types to the correct types stored as attribute, read from the numpy dic file '''\n if type(table[k][0]) != var_type:\n\n if k == 'hdrlen': \n continue\n try:\n #table[k] = table[k].astype( bytes ) \n table[k] = table[k].astype( var_type ) \n \n except:\n print ('FAILED converting column ' , k, ' type ', type(table[k][0]) , ' to type ', var_type )\n\n #print('*** Writing the table ', content, ' variable ', k)\n #if k == 'duplicates':\n # table[k] = table[k].astype( bytes ) \n \n \n dic = {k:table[k]} # making a 1 colum dictionary\n shape = table[k].shape\n #print('SHAPE IS FFF ', table[k].shape )\n write_dict_h5(out_name, dic , content, self.encodings[content], var_selection=[], mode='a', attrs = attrs_dic )\n\n if content == 'observations_table' and not self.obstab_nans_filled :\n missing_cdm_var = [ v for v in self.dic_type_attributes[content].keys() if v not in self.observations_table_vars] # variables to be filled with nans \n for k in missing_cdm_var:\n if k not in ['advanced_assimilation_feedback']:\n var_type = self.dic_type_attributes[content][k]['type']\n if var_type == np.int32 :\n nan = np.int32(-2147483648)\n else:\n nan = np.float32(np.nan) \n logging.debug('Adding missing cdm colum with empty values: %s ' , k )\n dic={k:np.empty(shape,dtype=np.dtype(nan))}\n 
dic[k].fill(nan)\n write_dict_h5(out_name, dic, 'observations_table', self.encodings['observations_table'], var_selection=[], mode='a', attrs = attrs_dic ) ### TO DO\n self.obstab_nans_filled = True\n\n elif content == 'observations_table' and self.obstab_nans_filled:\n return", "def merge_root_histos(run, seqno, slices):\n inset = {\"hists\": \"hd_root.root\",\n \"tree_TS_scaler\": \"tree_TS_scaler.root\",\n \"tree_bcal_hadronic_eff\": \"tree_bcal_hadronic_eff.root\",\n \"tree_fcal_hadronic_eff\": \"tree_fcal_hadronic_eff.root\",\n \"tree_tof_eff\": \"tree_tof_eff.root\",\n \"tree_sc_eff\": \"tree_sc_eff.root\",\n \"tree_PSFlux\": \"tree_PSFlux.root\",\n \"tree_TPOL\": \"tree_TPOL.root\",\n }\n outset = {\"hists\": \"hd_root_{0:06d}_{1:03d}.root\",\n \"tree_TS_scaler\": \"tree_TS_scaler_{0:06d}_{1:03d}.root\",\n \"tree_bcal_hadronic_eff\": \"tree_bcal_hadronic_eff_{0:06d}_{1:03d}.root\",\n \"tree_fcal_hadronic_eff\": \"tree_fcal_hadronic_eff_{0:06d}_{1:03d}.root\",\n \"tree_tof_eff\": \"tree_tof_eff_{0:06d}_{1:03d}.root\",\n \"tree_sc_eff\": \"tree_sc_eff_{0:06d}_{1:03d}.root\",\n \"tree_PSFlux\": \"tree_PSFlux_{0:06d}_{1:03d}.root\",\n \"tree_TPOL\": \"tree_TPOL_{0:06d}_{1:03d}.root\",\n }\n badslices = []\n slicepatt = re.compile(r\"([1-9][0-9]*),([1-9][0-9]*)/\")\n for iset in inset:\n ofile = outset[iset].format(run, seqno)\n ifiles = [\"{0},{1}/\".format(sl[0], sl[1]) +\n inset[iset].format(run, seqno, sl[0], sl[1])\n for sl in slices\n ]\n cmd = subprocess.Popen([\"hadd\", ofile] + ifiles,\n stderr=subprocess.PIPE)\n elog = cmd.communicate()\n if cmd.returncode != 0:\n for eline in elog[1].decode(\"ascii\").split('\\n'):\n badslice = slicepatt.search(eline)\n if badslice:\n badslices.append(\"{0},{1}\".format(badslice.group(1),\n badslice.group(2)))\n sys.stderr.write(eline + '\\n')\n sys.stderr.write(\"Error on output file {0}\".format(ofile) +\n \" - root file merging failed!\\n\")\n sys.stderr.flush()\n continue\n odir = output_area + \"/\" + iset + \"/{0:06d}\".format(run)\n upload(ofile, odir)\n return badslices", "def reduce_and_save():\n ### Get the signature information\n sig_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_sig_info.txt\"), sep=\"\\t\")\n ### Columns are:\n ### Index([u'sig_id', u'pert_id', u'pert_iname', u'pert_type', u'cell_id',\n ### u'pert_dose', u'pert_dose_unit', u'pert_idose', u'pert_time',\n ### u'pert_time_unit', u'pert_itime', u'distil_id'],\n ### dtype='object')\n\n ### Filter for signature ids for small molecule pertubagens\n small_mol_sigs = sig_info['sig_id'][sig_info['pert_type'] == \"trt_cp\"]\n ### Results in 205034 signatures\n\n ### Read in the gene info\n gene_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_gene_info.txt\"), sep='\\t')\n ### Index([u'pr_gene_id', u'pr_gene_symbol', u'pr_gene_title', u'pr_is_lm',\n ### u'pr_is_bing'],\n ### dtype='object')\n\n landmark_gene_ids = gene_info['pr_gene_id'][gene_info['pr_is_lm'] == 1] #Filters for directly measured transcripts\n ### Results in the 978 landmark pr_gene_ids\n\n ### LOAD in the main file filtering the columns so that only the small molecules signatures are loaded and the\n ### rows such that only the landmark genes are loaded into their custom gctoo container type\n relevent_sigs_gctoo = parse(join(FILE_PATH, \"GSE92742_Broad_LINCS_Level5_COMPZ.MODZ_n473647x12328.gctx\"),\n cid=small_mol_sigs, rid=landmark_gene_ids)\n # print small_mol_sigs.data_df.shape\n ### Should write an intermediate file with dimensions (978, 205034)\n 
write_gctx.write(relevent_sigs_gctoo, join(FILE_PATH, \"lm_sm_aggz\"))", "def completeMerge(self):\n #--Remove lists that aren't the sum of at least two esps.\n srcMods = self.srcMods\n for levls in (self.levcs,self.levis):\n for listId in levls.keys():\n if len(srcMods[listId]) < 2 or levls[listId].isDeleted:\n self.records.remove(levls[listId])\n del levls[listId]\n del srcMods[listId]\n #--Log\n log = self.log\n for label, levls in (('Creature',self.levcs), ('Item',self.levis)):\n if not len(levls): continue\n log.setHeader(_('Merged %s Lists:') % (label,))\n for listId in sorted(levls.keys(),key=lambda a: a.lower() ):\n log(listId)\n for mod in srcMods[listId]:\n log(' '+mod)", "def merge_docs(self):", "def merge(self, skel):\n return Skeleton.simple_merge((self, skel)).consolidate()", "def __init__(self, out_dir = 'output' ):\n\n self.data = {} # will contain the data for each different dataset \n self.datasets = '' # will contain the input datasets (original dictionary)\n self.datasets_keys = '' # will contain the input datasets names only (i.e. keys of the datasets dictionary)\n #self.datasets_all = ['era5_2_2'] # all possibly available datasets \n\n self.unique_dates = {} \n self.attributes = {} # will keep the original attributes from the CDM tables, read from the netCDF files \n self.id_string_length = 14 # fixed length for record_id and observation_id values \n self.out_dir = out_dir \n self.variable_types = {}\n self.observation_ids_merged = { 'igra2':b'3' , \n 'ncar':b'4', \n 'bufr':b'5', \n 'era5_1':b'1' , \n 'era5_2':b'2', \n 'era5_1759' :b'6' , \n 'era5_1761':b'7' , \n 'era5_3188' :b'8' } # values used to convert original record_id to the merged record_id, see method merge_all_data \n\n logging.info('*** Initialising the Merging procedure ***' ) \n #self.era5b_columns = [] # stores the columns of the era5fb \n self.standard_cdm = [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n self.slice_size = 3000\n self.index_offset = 0 # will be replaced when running \n self.hour_time_delta = 60 * 60 * 2 # decide up to which time shift records are considered identical \n \n \n self.only_std_plevels = False # set to True to store only standard pressure level data \n self.std_plevs = [1000, 2000, 3000, 5000, 7000, 10000, 15000, 20000, 25000, 30000, 40000, 50000, 70000, 85000, 92500, 100000]" ]
[ "0.5888243", "0.56524366", "0.5617107", "0.5513934", "0.54756737", "0.5444855", "0.5439256", "0.5407577", "0.5280827", "0.52725893", "0.5247397", "0.52251565", "0.5191612", "0.5171436", "0.51419157", "0.5122677", "0.50893784", "0.50718045", "0.50425047", "0.50397617", "0.5014876", "0.50106746", "0.49942422", "0.49891105", "0.49888903", "0.49836224", "0.4979007", "0.4975996", "0.49716547", "0.49573913" ]
0.6208765
0
Simple generator of primes by trial division
def primes(): yield 2 candidate = 3 while True: for i in range(3, int(sqrt(candidate)) + 1, 2): if (candidate % i) == 0: break else: yield candidate candidate += 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1", "def gen_primes():\n\tyield 2\n\tyield 3\n\tprime_list = [2, 3]\n\twhile 1:\n\t\tnext = prime_list[-1] + 2\n\t\ti = 0\n\t\twhile i < len(prime_list):\n\t\t\tif next%prime_list[i] == 0:\n\t\t\t\tnext+=2\n\t\t\t\ti=0\n\t\t\telse:\n\t\t\t\ti+=1\n\t\tprime_list.append(next)\n\t\tyield next", "def primes():\n yield 2\n found_primes = [2]\n a = 3\n while True:\n for p in found_primes:\n if p**2 > a:\n found_primes.append(a)\n yield a\n a += 2\n break\n elif a % p == 0:\n a += 2\n break", "def primes():\n yield 2\n found = []\n for i in itertools.count(start=3, step=2):\n for p in found:\n if i % p == 0:\n break\n else:\n yield i\n found.append(i)", "def primes():\n yield 1\n primes = []\n for n in itertools.count(2):\n if not any(n % p == 0 for p in primes):\n # No divisor found among previous primes\n yield n\n primes.append(n)", "def gen_prime():\n\n n = 100\n if n == 2:\n return [2]\n elif n < 2:\n return []\n s = range(3, n + 1, 2)\n mroot = n ** 0.5\n half = (n + 1) / 2 - 1\n i = 0\n m = 3\n while m <= mroot:\n if s[i]:\n j = (m * m - 3) / 2\n s[j] = 0\n while j < half:\n s[j] = 0\n j += m\n i = i + 1\n m = 2 * i + 3\n primes = [2] + [x for x in s if x]\n return (primes[random.randint(1, len(primes) - 1)])", "def generate_primes():\n # David Eppstein, UC Irvine, 28 Feb 2002\n # Source : http://code.activestate.com/recipes/117119/\n yield 2\n\n D = {} # map composite integers to primes witnessing their compositeness\n for q in count(start=3, step=2):\n if q not in D:\n yield q # not marked composite, must be prime\n D[q*q] = [q] # first multiple of q not already marked\n else:\n for p in D[q]: # move each witness to its next multiple\n D.setdefault(2*p+q,[]).append(p)\n del D[q] # no longer need D[q], free memory", "def prime_generator() -> int:\n \n #Start with the first prime.\n counter = count(2)\n candidate = next(counter)\n cache: list = [candidate]\n yield candidate\n \n # Set a flag.\n divisible = False\n while True:\n candidate = next(counter)\n # Check if the candidate is prime.\n for number in cache:\n # If number is greater than the squareroot of candidate, we are done.\n if number * number > candidate:\n break\n # If number divides candidate, candidate is not prime.\n if candidate % number == 0:\n divisible = True\n break\n # If is is prime, add it to the list.\n if not divisible:\n cache.append(candidate)\n yield candidate\n # Reset the flag.\n divisible = False", "def get_primes(lower: int, upper: int) -> typing.Generator[int, None, None]:\r\n for num in range(lower, upper + 1):\r\n if num > 1:\r\n for i in range(2, int(math.sqrt(num)) + 1):\r\n if num % i == 0:\r\n break\r\n else:\r\n yield num", "def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i", "def generate():\n j = [2]\n i = 3\n while i:\n if is_prime(i):\n j.append(i)\n yield [j, j[-1]]\n i += 2", "def gen_primes():\n\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current number\n # being tested\n\n D = {}\n\n # The runing integer that is checked for primeness\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous 
iterations\n\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. Since we've reached q, we no longer\n # need it in the map, but we'll mark the next multiples\n # of its witnesses to prepare for larger numbers\n\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1", "def gen_primes():\n D = defaultdict(list)\n q = 2\n while True:\n if q not in D:\n\n yield q \n D[q * q] = [q]\n else:\n for p in D[q]:\n D[p + q].append(p)\n del D[q]\n q += 1", "def gen_primes():\n\n # Maps composites (=non-primes) to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\" indefinitely,\n # but only as long as required by the current number being tested.\n D = {}\n\n q = 1 # the running integer that is checked for primeness\n while (q := q+1):\n if q not in D:\n # q is a new prime. Yield it and mark its first multiple that is\n # not already marked in previous iterations\n yield q\n D[q*q] = [q]\n else:\n # q is composite. D[q] is the list of primes that divide it. Since\n # we have reached q, we no longer need it in the map, but we will\n # mark the next multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p+q, []).append(p)\n del D[q]", "def prime_gen():\n for i in memo_primes: yield i\n x = memo_primes[-1] + 1\n \n while True:\n if prime_with(x, memo_primes):\n yield x\n memo_primes.append(x)\n x += 1", "def prime_generator():\n i = 0 # prime numbers counter\n num = 0 # current number\n while True:\n num += 1\n if is_prime(num):\n i += 1\n yield i, num", "def factors(n, primes):\n\n for p in takewhile(lambda p: p*p < n, primes):\n exponent = 0\n\n while n % p == 0:\n exponent += 1\n n /= p\n\n if exponent > 0:\n yield p, exponent\n\n if n > 1:\n yield n, 1", "def test_prime_10(self):\n\t self.assertTrue(prime_generator(10), [2, 3, 5, 7])", "def main():\n prime = gen_prime(1, 100000)\n print(prime)", "def gen_primes(N):\n primes = set()\n for n in range(2, N):\n if all(n % p > 0 for p in primes):\n primes.add(n)\n yield n", "def test_prime_12(self):\n\t self.assertTrue(prime_generator(12), [2, 3, 5, 7, 11])", "def test_15(self):\n\t self.assertTrue(prime_generator(15), [2, 3, 5, 7, 11, 13])", "def prime_generator():\r\n for i in itertools.count(start=1):\r\n for j in ((6 * i) - 1, (6 * i) + 1):\r\n if is_prime(j): yield(j)", "def primes():\n D = {} # map composite integers to primes witnessing their compositeness\n q = 2 # first integer to test for primality\n while True:\n if q not in D:\n yield q # not marked composite, must be prime\n D[q*q] = [q] # first multiple of q not already marked\n else:\n for p in D[q]: # move each witness to its next multiple\n D.setdefault(p+q,[]).append(p)\n del D[q] # no longer need D[q], free memory\n q += 1", "def primes(n):\n sieve = [True]*n\n for p in range(2, n):\n if sieve[p]:\n yield p\n for i in range(p*p, n, p):\n sieve[i] = False", "def test_prime_2(self):\n\t self.assertTrue(prime_generator(2), [2])", "def Primes():\n candidate = 1\n _primes_so_far = [2] # first prime, only even prime\n yield _primes_so_far[-1]\n while True:\n candidate += 2 # check odds only from now on\n for prev in _primes_so_far:\n if prev**2 > candidate:\n yield candidate\n _primes_so_far.append(candidate)\n break\n if not divmod(candidate, prev)[1]: # no remainder!\n break # done looping", "def primes(m):\n if m <= 2:\n return ()\n sieve = [True] * m\n for i in sixn(m):\n if sieve[i]:\n yield i\n for mult in range(i * i, m, i):\n 
sieve[mult] = False", "def gen_primes():\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current\n # number being tested.\n\n D = {}\n\n # The running integer that's checked for primeness\n\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. Since we've reached q, we no longer\n # need it in the map, but we'll mark the next\n # multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1", "def prime_generator() -> Iterator[int]:\n\n num = 2\n while True:\n if is_prime(num):\n yield num\n num += 1" ]
[ "0.80137175", "0.77621424", "0.7665605", "0.7564029", "0.7559787", "0.7539917", "0.73725927", "0.727737", "0.71787924", "0.714312", "0.713863", "0.71249187", "0.7116757", "0.7101361", "0.70969033", "0.7089131", "0.7069633", "0.7069145", "0.70630926", "0.70569324", "0.704758", "0.7040995", "0.7005567", "0.6986781", "0.6962981", "0.6932179", "0.69228965", "0.69108266", "0.689328", "0.68637544" ]
0.7840028
1
Return the unique prime factorization of `number` assuming number <= 1 million cf the Fundamental Theorem of Arithmetic
def prime_factorization(number): global primes_under_1M assert number <= 1_000_000 factors = [] running_product = 1 current_number = number # Loop through the primes, iteratively dividing our # number by each prime `p` so long as `p` exactly # divides `current_number` for p in primes_under_1M: while (current_number % p) == 0: current_number = current_number // p factors.append(p) running_product *= p if running_product == number: return set(factors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_unique_factors(num):\n a = num\n m = int(num ** 0.5) if num > 100 else num\n factors = []\n primes = sieve(m)\n # Divide the number by compatible primes until it is 1\n # (or we run out of primes...)\n for p in primes:\n if a % p == 0:\n a = a / p\n factors.append(p)\n if a == 1:\n break\n return factors", "def problem3():\n def _prime_factorization(n):\n \"\"\"Returns the list of prime factors of a number n\"\"\"\n factors = []\n f = 2\n # Use trial division to add factors\n while f**2 <= n:\n while (n % f) == 0:\n factors.append(f)\n n //= f\n f += 1\n\n if n > 1:\n factors.append(n)\n\n return factors\n\n return max(_prime_factorization(600851475143))", "def prime_factorization(num):\n return prime_factors_p(num, _sieve)", "def factor(number):\n\tdividing_primes = sieve(number/2 + 1)\n\tfactors = []\n\t\n\twhile number != 1:\t\n\t\tif not dividing_primes:\n\t\t\treturn [number]\n\n\t\tnext_divisor = min(dividing_primes)\n\n\t\tif not number % next_divisor:\n\t\t\tfactors.append(next_divisor)\n\t\t\tnumber /= next_divisor\n\t\telse:\n\t\t\tdividing_primes.remove(next_divisor)\n\n\treturn factors", "def first_factor(cls, number):\n for prime in cls(maximum=math.sqrt(number)):\n if not number % prime:\n return prime\n return None", "def get_prime_factors(self, number):\n for prime in self.get_primes():\n while number % prime == 0:\n yield prime\n number /= prime\n \n if number == 1:\n break", "def factorize(num):\n factors = []\n while num not in primes_list:\n for prime in primes_list:\n if num % prime == 0:\n factors.append(prime)\n num /= prime\n break\n factors.append(num)\n factors = sorted(factors)\n return factors", "def prime_factors(number):\n all_factors = factors(number)\n return list(filter(lambda x: is_prime(x), all_factors))", "def factorization(n):\n pf = []\n for p in primeslist:\n if p*p > n : break\n count = 0\n while not n % p:\n n //= p\n count += 1\n if count > 0: pf.append((p, count))\n if n > 1: pf.append((n, 1))\n return pf", "def factorize(n):\n\n if n in (0, 1):\n return [(n, 1)]\n\n factors = []\n\n if n < 0:\n factors.append((-1, 1))\n n = -n\n\n # check 2, 3, then all integers in form q = 6k +- 1\n for q in chain((2, 3), range(5, isqrt(n) + 1, 6)):\n # q = 6k - 1\n a = 0\n while n % q == 0:\n # q is prime because n already divided by its prime factors\n n //= q\n a += 1\n if a > 0:\n factors.append((q, a))\n\n # 6k + 1\n q += 2\n a = 0\n while n % q == 0:\n # q is prime because n already divided by its prime factors\n n //= q\n a += 1\n if a > 0:\n factors.append((q, a))\n\n if n != 1:\n factors.append((n, 1))\n\n return factors", "def setFactors(self, number):\n self.number = number\n length = len(self.primes)\n p = self.primes[:self.closestPrimeIndex(self.primes, self.number**0.5) + 1]\n\n start = clock()\n self.facts = serial_factor(self.number, p)\n print \"Time taken ======================> \", clock() - start\n\n c = 1\n for fact in self.facts:\n c = c * fact\n\n if c != self.number:\n num = self.number / c\n for fact in self.facts:\n while num % fact == 0:\n num = num / fact\n\n if num != 1:\n self.facts.append(num)", "def factorize(self,num):\n def sieveOfEratosthenes(N, s): \n prime = [False] * (N+1) \n for i in range(2, N+1, 2): \n s[i] = 2\n for i in range(3, N+1, 2): \n if (prime[i] == False): \n s[i] = i \n for j in range(i, int(N / i) + 1, 2): \n if (prime[i*j] == False): \n prime[i*j] = True\n s[i * j] = i \n\n\n def generatePrimeFactors(N): \n ans=[]\n s = [0] * (N+1) \n sieveOfEratosthenes(N, s) \n curr = s[N] \n cnt = 1\n while (N > 
1): \n N //= s[N]\n if (curr == s[N]): \n cnt += 1\n continue\n\n ans.append((str(curr),str(cnt))) \n\n curr = s[N] \n cnt = 1\n return ans\n \n return generatePrimeFactors(num)", "def smallest_factor(number):\n for i in xrange(2, int(sqrt(number)+1)):\n if number % i == 0:\n return i\n return False", "def factor_gen(number):\n if number <= 0:\n raise ValueError(\"Not a valid number {}\".format(number))\n # O(sqrt(n)) sorted solution\n # For the unsorted solution, remove the queue and yield when found\n queue = []\n for f in count(1):\n if number != 1 and (f == 1 or f * f == number):\n yield f\n elif number <= f * f:\n yield from iter(queue)\n raise StopIteration\n elif number % f == 0:\n yield f\n queue.insert(0, number // f)", "def generate_prime_factors(number):\n if not isinstance(number, int):\n raise ValueError\n list_of_ints = []\n if number > 1:\n remainder = number\n divisor = 2\n while remainder != 1:\n if remainder % divisor == 0:\n list_of_ints.append(divisor)\n remainder = remainder / divisor\n else:\n divisor += 1\n return list_of_ints", "def prime_factors(number):\n factors = []\n\n if number == 0 : return factors\n\n # first round factors by two\n while number % 2 == 0:\n factors.append(2)\n number /= 2\n\n # other rounds goes by odd numbers only (no other even is prime)\n divisor = 3\n while divisor <= number:\n while number % divisor == 0:\n factors.append(divisor)\n number /= divisor\n divisor += 2\n\n return factors", "def prime_factors(number):\n prime_factors = []\n while ( smallest_factor(number) ):\n smallest = smallest_factor(number)\n prime_factors.append(smallest)\n number /= smallest\n prime_factors.append(number)\n #return prime_factors\n return number", "def primeFactors(number):\n factorlist=[]\n loop=2\n while loop<=number:\n if number%loop==0:\n number/=loop\n factorlist.append(loop)\n else: \n loop+=1\n return factorlist", "def prime_factors(num):\n if prime_checker(num):\n return num\n if num > 10^5:\n maxPrime = round(num**0.5) + 1\n else:\n maxPrime = round(num/2)+1\n primelist = prime_generator(maxPrime)\n factors = []\n\n while num > 1 and num not in primelist:\n for prime in primelist:\n if num % prime == 0:\n factors.append(prime)\n num = int(num / prime)\n break\n if not num == 1:\n factors.append(num)\n \n return factors", "def factor(cls, number):\n factors = []\n for prime in cls():\n if prime > number:\n break\n # print 'Checking to see if %d is a factor of %d' % (prime, number)\n # reduce the total iterations\n if prime > math.sqrt(number):\n factors.append(number)\n break\n while not number % prime:\n number /= prime\n factors.append(prime)\n return factors", "def prime_factorization(n):\n # Code taken directly from \"Prime factorization - list\" at\n # http://stackoverflow.com/a/16996439.\n primfac = []\n d = 2\n while d*d <= n:\n while (n % d) == 0:\n primfac.append(d) # supposing you want multiple factors repeated\n n //= d\n d += 1\n if n > 1:\n primfac.append(n)\n return Multiset(primfac)", "def factorone(n):\n\tif (is_prime(n)): return n\n\tfor fact in (2,3,5,7,11,13,17,19,23,29):\n\t\tif n%fact == 0: return fact\n\treturn factorPR(n) # Needs work - no guarantee that a prime factor will be returned", "def primish(n):\n\n factors = set()\n for i in range(n, 1, -1):\n\n # Find the smallest divisor of i.\n smallest = 2\n while (i % smallest) != 0:\n smallest += 1\n\n # Divide by that divisor until we have 1 or something else.\n remainder = i\n while (remainder % smallest) == 0:\n remainder /= smallest\n\n # Keep it if needed.\n if 
remainder == 1:\n factors.add(i)\n\n return factors", "def getPrimeFactors(num):\n n = num\n primes = {}\n\n p = 2\n sqrt = math.sqrt(num)\n\n def checkAndUpdate(inc):\n nonlocal n\n nonlocal p\n nonlocal primes\n if n % p == 0:\n if str(p) in primes.keys():\n primes[str(p)] += 1\n else:\n primes[str(p)] = 1\n n /= p\n else:\n p += inc\n \n while p == 2 and p <= n:\n checkAndUpdate(1)\n while p <= n and p <= sqrt:\n checkAndUpdate(2)\n if len(primes.keys()) == 0:\n primes[str(num)] = 1\n elif n != 1:\n primes[str(n)] = 1\n return primes", "def primefacs(num):\n facs=set()\n fac=2\n while (fac*fac <= num):\n if num%fac == 0:\n facs.add(fac)\n num = num//fac\n else:\n fac += 1\n if num != 1:\n facs.add(num)\n return facs", "def factorone(n):\r\n\tif (is_prime(n)): return n\r\n\tfor fact in [2,3,5,7,11,13,17,19,23,29]:\r\n\t\tif n%fact == 0: return fact\r\n\treturn factorPR(n) # Needs work - no guarantee that a prime factor will be returned\r", "def get_prime_factors_by_number(self, number):\n if int(number) < 2:\n print \"this method needs number >= 2\"\n return {}\n ret = {}\n import math\n # use math.sqrt for speedup\n if number >= 4:\n number_sqrt = math.sqrt(number)\n else:\n number_sqrt = 2\n primes = self.get_primes_by_limit_number(number_sqrt)\n num = number\n for p in primes:\n if num == 1:\n break\n while num % p == 0:\n num /= p\n if p in ret:\n ret[p] = ret[p] + 1\n else:\n ret[p] = 1\n if num == number:\n # in this case, number is prime\n ret[number] = 1\n elif num != 1:\n ret[num] = 1\n return ret", "def factorize(n):\n fct = [] # prime factor\n b, e = 2, 0 # base, exponent\n while b * b <= n:\n while n % b == 0:\n n = n // b\n e = e + 1\n if e > 0:\n fct.append((b, e))\n b, e = b + 1, 0\n if n > 1:\n fct.append((n, 1))\n return fct", "def prime_factors(number: int) -> dict:\n f = {}\n i = 2\n while number > 1 and number >= i:\n if number % i == 0:\n if i not in f:\n f[i] = 1\n else:\n f[i] += 1\n number //= i\n else:\n i += 1\n return f", "def prime_factorization(n):\r\n result = []\r\n for i in xrange(2, n+1):\r\n s = 0;\r\n while n / float(i) == floor(n/float(i)):\r\n n = n / float(i)\r\n s += 1\r\n if s > 0:\r\n for k in range(s):\r\n result.append(i)\r\n if n == 1:\r\n return result" ]
[ "0.7623067", "0.72447276", "0.72008526", "0.71466506", "0.71395624", "0.71260005", "0.7038484", "0.7021164", "0.6997865", "0.69853556", "0.69795007", "0.69734", "0.6948307", "0.69408995", "0.69343865", "0.6893117", "0.6893014", "0.68922955", "0.6884946", "0.6881858", "0.6873295", "0.68577373", "0.6857589", "0.6824426", "0.6809833", "0.68089217", "0.6803163", "0.6796038", "0.67811537", "0.67207485" ]
0.8159412
0
If the suffix of a child_id is numeric, the whole hierarchy is searchable to the leaf nodes. If the suffix of a child_id is alphabetic, the whole hierarchy is not searchable.
def _is_hierachy_searchable(child_id: str) -> bool: pieces_of_child_id_list = child_id.split('.') suffix = pieces_of_child_id_list[len(pieces_of_child_id_list) - 1] return suffix.isnumeric()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_subhalo(self, childid, parentid):\n if (childid in self._halos[parentid].properties['children']):\n return True\n else:\n return False", "def _is_child(self, parent, child): # type: (str, str) -> bool\n return child != parent and child.startswith(parent + \".\")", "def reorder_child_element(\n self,\n the_id: str,\n spaces: int = 1) -> bool:\n\n if the_id not in self._labels:\n raise KeyError('No id {}'.format(the_id))\n parent_id = self.get_parent(the_id)\n children = self.subtypes[parent_id]\n # get the current location\n current_index = children.index(the_id)\n # determine the feasible new location\n if spaces < 0:\n new_index = max(0, current_index + spaces)\n else:\n new_index = min(len(children) - 1, current_index + spaces)\n if current_index == new_index:\n return False # nothing to be done\n\n # pop our entry out of its current location\n children.pop(current_index)\n # insert it in its new location\n children.insert(new_index, the_id)\n return True", "def build_hierarchy_from_id_lookup(id_lookup_file=\"idlookups.csv\"):\n df_id_lookups = pd.read_csv(id_lookup_file, index_col=0)\n\n # The naming convention separates layers of the hierarchy with a colon ':', so we can break this into a list of descendents, and calculate the depth of the tree.\n df_id_lookups[\"parsed_name\"] = df_id_lookups.name.apply(lambda s: s.split(\": \"))\n df_id_lookups[\"depth\"] = df_id_lookups.parsed_name.apply(lambda d: len(d))\n\n # The two top nodes \"Biota\" and \"Physical\" are not prepended to their children, so we need to do this manually.\n # Manually define biota and physical children\n biota_kids = [\n \"Worms\",\n \"Sponges\",\n \"Seagrasses\",\n \"Molluscs\",\n \"Macroalgae\",\n \"Jellies\",\n \"Fishes\",\n \"Echinoderms\",\n \"Crustacea\",\n \"Cnidaria\",\n \"Bryozoa\",\n \"Bioturbation\",\n \"Bacterial mats\",\n \"Ascidians\",\n ]\n\n physical_kids = [\"Substrate\"]\n\n # Prepend them to name lists, and add to depth.\n biota_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in biota_kids)\n df_id_lookups.loc[biota_inds, \"depth\"] += 1\n df_id_lookups.loc[biota_inds, \"parsed_name\"] = df_id_lookups.loc[biota_inds, \"parsed_name\"].apply(\n lambda d: [\"Biota\"] + d\n )\n\n physical_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in physical_kids)\n df_id_lookups.loc[physical_inds, \"depth\"] += 1\n df_id_lookups.loc[physical_inds, \"parsed_name\"] = df_id_lookups.loc[physical_inds, \"parsed_name\"].apply(\n lambda d: [\"Physical\"] + d\n )\n\n # Create columns for ancestor and descendant lists.\n df_id_lookups[\"child_name\"] = df_id_lookups.parsed_name.apply(lambda d: d[-1])\n\n df_id_lookups[\"ancestor_id_list\"] = [get_ancestor_ids(d, df_id_lookups) for d in df_id_lookups.index]\n\n df_id_lookups[\"descendant_id_list\"] = [get_descendant_ids(d, df_id_lookups) for d in df_id_lookups.index]\n\n # Create a multilabel, one hot encoded bit vector for each class, taking into account the hierarchy of ancestors, and unspecified descendants.\n # We now want to represent this class hierarchy as a bit-vector. Each class index has a unique bit in the vector. A root level class will turn on a single bit. 
A depth 4 class will turn on 4 bits.\n df_id_lookups[\"bit_vector\"] = [get_bit_vector(d, df_id_lookups) for d in df_id_lookups.index]\n df_id_lookups\n\n return df_id_lookups", "def search(self, word: str):\n node = self.root\n for letter in word:\n if letter in node.child:\n node = node.child[letter]\n else:\n return False\n return node.is_leaf", "def search(self, word):\n if word[0] == '.' and len(word) > 1:\n ans = False\n for c in self.child:\n ans = ans or self.child[c].search(word[1:])\n return ans\n elif word[0] == '.' and len(word) == 1:\n ans = False\n for c in self.child:\n ans = ans or self.child[c].isend\n return ans\n elif word[0] not in self.child:\n return False\n elif len(word) > 1:\n return self.child[word[0]].search(word[1:])\n elif len(word) == 1:\n return self.child[word[0]].isend", "def verify_child(heights):\n dic = {}\n children = heights.columns[heights.columns.str.contains('^child_')] # Get children columns\n for child in children: # Loop through child_X\n dic.update({child:ks_permutation(heights, child, 'father')})\n return dic", "def has_child(self, term):\n for parent in self.children:\n if parent.id == term or parent.has_child(term):\n return True\n return False", "def GetExpandableIds(children, length_name):\n # I could write a list comprehension here. Would it make the code clearer?\n result = []\n for child_id, child in enumerate(children):\n if child.canExpand(length_name):\n result.append(child_id)\n return result", "def __contains__(self, key):\n\n if type(key) != self.type:\n return False\n\n first_char = key[:1]\n others = key[1:]\n\n if first_char not in self.children:\n return False\n\n if len(first_char) != 0 and len(others) == 0:\n node = self.children[first_char]\n\n if node.value is None:\n return False\n\n return True\n else:\n return others in self.children[first_char]", "def contains_child(self, pid):\n return pid in self._children_ids", "def createChildLookup(self):\n self.dictChildLookup = dict()\n \n for index, objChild in self.dfNodes.iterrows():\n if objChild[self.strParentNodeID] is not self.strRootID:\n parent_id = objChild[self.strParentNodeID]\n lstChildren = self.dictChildLookup.get(parent_id)\n if not lstChildren:\n lstChildren = self.dictChildLookup[parent_id] = list()\n lstChildren.append(objChild.copy())\n else:\n self.strParent = objChild.copy()", "def insert_suffix(self, prefix, idx):\n parent_pos = self.path_to_matching_prefix(prefix)[-1]\n\n has_inserted = False\n for child_pos in self.children(parent_pos):\n if child_pos.element()._label[0] == prefix[0]:\n # Intermediate node is added between parent and child.\n j = 0\n while j < len(child_pos.element()._label) and \\\n child_pos.element()._label[j] == prefix[j]:\n j += 1\n\n # Update tree structure\n intermediate_pos = self._add(parent_pos, self._SuffixNode(prefix[:j], -1))\n intermediate_node = self._validate(intermediate_pos)\n\n child_node = self._validate(child_pos)\n child_node._parent = intermediate_node\n intermediate_node._children[child_node] = child_node\n parent_node = self._validate(parent_pos)\n del parent_node._children[child_node]\n\n # Set label of child node to be unmatched part of child label.\n child_pos.element()._label = child_pos.element()._label[j:]\n # create new leaf node containing unmatched part of suffix.\n self._add(intermediate_pos, self._SuffixNode(prefix[j:], idx))\n # break from for loop.\n has_inserted = True\n break\n\n # New node is inserted as child of parent.\n if not has_inserted:\n self._add(parent_pos, self._SuffixNode(prefix, 
idx))", "def ChildOrMatch(self, other):\n return self._dir == other or other.startswith(self._dir + \"/\")", "def search(self, word: str) -> bool:\n \n def helper(n, sub):\n if not sub:\n return n.rec > 0\n \n for i, c in enumerate(sub):\n if c == \".\":\n for l in n.childs:\n if helper(n.childs[l], sub[i+1:]):\n return True\n return False\n else:\n if c not in n.childs:\n return False\n n = n.childs[c]\n \n return n.rec > 0\n \n trav = self.root\n \n for i, c in enumerate(word):\n if c == \".\":\n for l in trav.childs:\n if helper(trav.childs[l], word[i+1:]):\n return True\n return False\n else:\n if c not in trav.childs:\n return False\n trav = trav.childs[c]\n \n return trav.rec > 0", "def searchRecursive(self, node, word):\n if len(word) == 0:\n return node.size > 0\n char = word[0]\n if char == '.':\n for child in node.children.values():\n if self.searchRecursive(child, word[1:]):\n return True\n return False\n if char in node.children:\n child = node.children[char]\n return self.searchRecursive(child, word[1:])\n return False", "def is_child(self, kid, mother): \n mom_node = self.names_to_nodes[mother] \n child_node = self.names_to_nodes[kid]\n return mom_node.is_child(child_node)", "def fix(child, roots):\n link, root_url = child\n root = get_root(root_url)\n if len(roots[roots.root == root].children > 0) == 1 and (roots[roots.root == root].children > 0).values[0]:\n check = fix_url(link, root_url)\n if check != '' and check is not None:\n return (check,\n roots.loc[roots[roots.root == root].index].root.values[0],\n roots.loc[roots[roots.root == root].index].category.values[0])\n return", "def search(self, word: str) -> bool:\n if len(word) == 0:\n return True\n idx = ord(word[0]) - ord('a')\n if not self.children[idx]:\n return False\n if len(word) == 1:\n return self.children[idx].tail # switch for only once\n\n return self.children[idx].search(word[1:])", "def search_prefix(self, prefix):\n node = self.root\n for i in range(len(prefix)):\n current_letter = prefix[i]\n current_index = self.to_index(current_letter)\n if node.children[current_index]:\n node = node.children[current_index]\n else:\n return False\n\n # save last node after a search\n self.saved_node = node\n return True", "def filter(self, hierarchy: List[str]) -> bool:", "def search(self, key): \n \n current_node = self.root \n length = len(key) \n for level in range(length): \n index = self._charToIndex(key[level]) \n if not current_node.children[index]: \n return False\n current_node = current_node.children[index] \n \n return current_node != None and current_node.isEndOfWord", "def indexOfChild(self, child):\n self.__initChild()\n return self.__child.index(child)", "def __call__(self, node):\n if not node.children: return;\n ochildren = node.children;\n for n in ochildren:\n mark = self.ProperContainsMarker(n);\n if mark: raise ValueError(\"not implemented\");", "def get_serials_by_child_recid(recid):\n search = SeriesSearch().query(\n 'bool',\n filter=[\n Q('term', mode_of_issuance='SERIAL'),\n Q('term', _migration__children=recid),\n ]\n )\n for hit in search.scan():\n yield Series.get_record_by_pid(hit.pid)", "def has_child(self, character):\n # get the position of that character\n if self.num_children() > 0:\n character = character.upper()\n # get the character position the children list\n index = self._get_index(character)\n # if there is a value(not None) in that position then we know it\n # exists\n # print(f'we see child exists => index: {index}, char: {character}')\n return self.children[index] is not 
None\n return False", "def __setitem__(self, x, kid):\r\n x = str(x) # convert int to string\r\n if (len(x) > 1) and all([c in 'LR123456789' for c in x]): # binary string of the form LLLRLR or 1213 (or mixed)\r\n self._namedkid[x[0]][x[1:]] = kid\r\n elif x in self._namedkid:\r\n raise AttributeError, \"The subtree \" + x + \" already exists\"\r\n else:\r\n # Update coordinates\r\n kid.x += self.x[-1]\r\n kid.y += self.y[-1]\r\n kid.z += self.z[-1]\r\n if kid not in self.children:\r\n self.children.append(kid)\r\n self._namedkid[str(len(self.children))] = kid # numbered child\r\n self._namedkid[x] = kid", "def _duplicate_child_allowed_check(self):\n\n for rule in self.options[\n 'parent_allows_duplicate_child']:\n if self.lineage_test(rule):\n return True\n return False", "def is_ancestor(parent_alphabet, child_alphabet):\r\n alphabet = parent_alphabet\r\n while alphabet:\r\n if child_alphabet == alphabet:\r\n return True\r\n alphabet = alphabet.alphabet\r\n return False", "def test_team_template_folders_id_children_fk_get(self):\n pass" ]
[ "0.5488853", "0.5385023", "0.5233182", "0.50972", "0.50318193", "0.4960036", "0.49326897", "0.49024475", "0.48999116", "0.48729253", "0.4871233", "0.48577115", "0.48574862", "0.48468843", "0.4816291", "0.48138228", "0.47902393", "0.47179863", "0.47115165", "0.469999", "0.469299", "0.46747336", "0.46703523", "0.4669114", "0.4667849", "0.4665042", "0.46609467", "0.4660463", "0.4658895", "0.46529832" ]
0.8063963
0
Get the action method for an Algorithm. If method_name is None, the primary action method is returned (the one listed first in `Algorithm._action_methods`). Otherwise, the action method belonging to the respective name is returned.
def get_action_method(instance: Algorithm, method_name: Optional[str] = None) -> Callable: method_names = get_action_methods_names(instance) if method_name is not None: if method_name not in method_names: raise ValueError( "`method_name` must be one of the specified action methods of the algorithm. " f"Valid ones are {method_names}" ) else: method_name = method_names[0] return getattr(instance, method_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_method(self, name=None):\n return self.gateway and self.gateway.method or None", "def _get_action_from_http_method(self, http_method):\n http_method = http_method.lower()\n if http_method == 'get':\n return 'list' if issubclass(self.callback, ListModelMixin) else 'retrieve'\n if http_method not in self.method_actions:\n return http_method\n return self.method_actions[http_method]", "def get_method(self, request, action, content_type, body):\n\n # Look up the method\n if self.controller:\n return getattr(self.controller, action)\n else:\n return getattr(self, action)", "def _get_method(**params):\n try:\n return params['data']['method']\n except KeyError:\n return None", "def get_method_name(self) -> Optional[str]:\n current_mode = self.get_mode()\n # Check that 'Solvent' program is enabled.\n # Retreiving the remaining time without\n # this programm being selected first would trigger\n # a key error when unpacking the device reply.\n if current_mode != 'Method':\n self.logger.warning(\"Can't retreive selected method of the 'Method' \"\n \"program since this program is not currently \"\n f\"selected (selected program is '{current_mode}'). \"\n \"Select 'Method' program first.\")\n return None\n else:\n return self.send(self.cmd.GET_METHOD_NAME)", "def getcurrentmethod(self):\n if self._methodname == None:\n print(\"No method defined.\")\n else:\n return self._methodname", "def get_action_by_name(self, name):\n for action in self.all_actions:\n if action.name == name:\n return action\n return None", "def getMethod(self):\n return self.__get('method')", "def method(self,methodname):\n\t\tif methodname in self.methods:\n\t\t\treturn self.methods[methodname]\n\t\treturn None", "def get_action(action_name):\n action = justrok.Globals.action_collection.action(action_name)\n if action is None:\n justrok.logger.error('action %r not found', action_name)\n return lambda: None\n else:\n return action.trigger", "def _get_action_from_name(self, name):\n\n container = self._action\n if name is None:\n return None\n\n for action in container:\n if \"/\".join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action", "def _get_action_from_name(self, name):\n container = self._actions\n if name is None:\n return None\n for action in container:\n if '/'.join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action", "def _get_action_from_name(self, name):\n container = self._actions\n if name is None:\n return None\n for action in container:\n if '/'.join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action", "def getAction(self, nameOrAction):\n\t\tif isinstance(nameOrAction, Action):\n\t\t\treturn nameOrAction\n\t\telse:\n\t\t\treturn self.actions[nameOrAction]", "def get_method(name):\n for method in methods:\n if method.name == name:\n return method\n parser.error('{name!r} is an unsupported I/O method'.format(**locals()))", "def get_method(self) -> MethodStr:\n return METHODS.inverse[self.method()]", "def Method(self, default=None):\n return self.data.get('method', default)", "def get_test_method(self):\n return getattr(self, self.get_test_method_name())", "def _get_action_func(self):\n try:\n func = self.action_funcs[self.action]\n except KeyError:\n raise DaemonRunnerInvalidActionError(\n \"Unknown action: %(action)r\" % vars(self))\n return 
func", "def method(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"method\")", "def get_method(self):\n default_method = \"POST\" if self.data is not None else \"GET\"\n return getattr(self, 'method', default_method)", "def method(self):\n # type: () -> WebhookHttpMethod\n return self._method", "def get_optimization_method(self):\n optTask = self._getTask('optimization')\n optMethod = optTask.find(xmlns + 'Method')\n return optMethod.attrib['name']", "def method(self):\n return self._method", "def method(self):\n return self._method", "def method_func(klass, method_name):\r\n method = getattr(klass, method_name)\r\n # in Python 2 method will be an instancemethod, try to get its __func__\r\n # attribute and fall back to what we already have (for Python 3)\r\n return getattr(method, '__func__', method)", "def get_action(self, action):\n actions = {\n self.GO_ACTION: self.go,\n self.CLICK_ACTION: self.click,\n self.CHECK_ACTION: self.check,\n self.WAIT_ACTION: self.wait,\n self.FILL_FORM_ACTION: self.fill,\n self.SELECT_FORM_ACTION: self.select\n }\n try:\n return actions[action]\n except KeyError:\n raise Exception('{0} is not a valid action, the valid actions are: {1}'.format(action,\n \", \".join(actions.keys())))", "def get_action(self, name: str) -> Action:\n return self.get_session.query(self.action_model).filter_by(name=name).one_or_none()", "def get_action(self, json_object):\n\n # dictionary of supported actions this resource is capable of executing\n supported_action_methods = {\n 'connect': self.__connect_action,\n 'disconnect': self.__disconnect_action,\n 'cycle': self.__cycle_action\n }\n\n try:\n return supported_action_methods[json_object[\"action\"]]\n except KeyError:\n return None", "def get_action(self):\n return self.__action" ]
[ "0.6643434", "0.65293586", "0.6413036", "0.62758154", "0.61746657", "0.6106306", "0.6037263", "0.6004484", "0.5983328", "0.59728557", "0.5909148", "0.5905452", "0.5905452", "0.5890808", "0.5871449", "0.5817096", "0.58095455", "0.5774518", "0.57545245", "0.5741709", "0.57199466", "0.5671266", "0.56472576", "0.5640088", "0.5640088", "0.5636167", "0.5611991", "0.5593014", "0.5586086", "0.55738103" ]
0.8294379
0
Get the names of all action methods of a class. This basically returns `instance_or_cls._action_methods`, but ensures that the return type is a tuple.
def get_action_methods_names(instance_or_cls: Union[Type[Algorithm], Algorithm]) -> Tuple[str, ...]: method_names = instance_or_cls._action_methods if isinstance(method_names, str): method_names = (method_names,) if not isinstance(method_names, tuple) and len(method_names) == 0: if isclass(instance_or_cls): instance_or_cls = cast(Type[Algorithm], instance_or_cls) name = instance_or_cls.__name__ else: name = type(instance_or_cls).__name__ raise ValueError(f"`_action_methods` of {name} must either be a string or a tuple of strings.") return method_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def actions(cls):\n return [m for m in cls.__dict__ if not \"__\" in m]", "def _methods_of(cls):\n # The idea of unbound methods exists in Python 2 and was removed in\n # Python 3, so \"inspect.ismethod\" is used here for Python 2 and\n # \"inspect.isfunction\" for Python 3.\n all_methods = inspect.getmembers(\n cls, predicate=lambda x: inspect.ismethod(x) or inspect.isfunction(x))\n methods = [m for m in all_methods if not m[0].startswith(\"_\")]\n\n help_groups = {}\n for m in methods:\n group = getattr(m[1], \"help_group\", \"0\")\n help_groups.setdefault(group, []).append(m)\n\n if len(help_groups) > 1:\n # we should sort methods by groups\n methods = []\n for group in sorted(help_groups.items(), key=lambda x: x[0]):\n if methods:\n # None -> empty line between groups\n methods.append((None, None))\n methods.extend(group[1])\n return methods", "def get_all_action_types() -> List[str]:\n\n actions: List[str] = []\n for name, val in actions_module_dict.items(): # iterate through every module's attributes\n if inspect.isclass(val) and issubclass(val, Action) and val != Action:\n actions.append(name)\n\n return actions", "def get_method_list_from_classlist(self):\n method_list = []\n method_name_list = []\n for class_object in self.class_list:\n for name, obj in inspect.getmembers(class_object, inspect.ismethod):\n method_list.append(obj)\n method_name_list.append(name)\n return method_list", "def get_all_methods(instance):\n return [m for m in dir(instance) if callable(getattr(instance, m))]", "def declared_http_methods(cls):\n\tfor name, fn in inspect.getmembers(cls, predicate=inspect.isfunction):\n\t\tif name in http_method_funcs and is_instance_method(fn) and not inspect.ismethod(fn):\n\t\t\tyield name\n\n\t# for name, fn in ((n, getattr(cls, n)) for n in dir(cls)):\n\t# \tif inspect.ismethod(fn) and name in http_method_funcs:\n\t# \t\tyield name", "def get_available_actions() -> tuple:\n return tuple(method for method in dir(cli_commands) if callable(getattr(cli_commands, method)))", "def all_action_names() -> List[str]:\n\n return list(map(lambda c: c.name, LoggingActions))", "def classmethods(class_object):\n fn_tuple_list = inspect.getmembers(class_object, predicate=inspect.ismethod)\n fn_names = [\n f_name for (f_name, method) in fn_tuple_list if not f_name.startswith(\"_\")\n ]\n return fn_names", "def __methods(cls):\n _dict = {}\n __methodDict(cls, _dict)\n return _dict.keys()", "def actions(self):\n r = self.session.query(models.Action).all()\n return [x.type_name for x in r]", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def get_method_names(cls, prefix):\n names = []\n for name in dir(cls):\n if name.startswith(prefix):\n func = getattr(cls, name)\n names.append(name)\n return names", "def _get_local_method_names(cls: Any, exclude: Iterable[str] = ()) -> Tuple[str]:\n true_methods = set()\n for m in cls.__dict__:\n if callable(cls.__dict__[m]) and not inspect.isclass(cls.__dict__[m]):\n mtype = type(cls.__dict__[m])\n if mtype != staticmethod and mtype != classmethod:\n true_methods.add(m)\n return tuple(true_methods.difference(set(exclude)))", "def get_action_classes(app_name: str):\n from policyengine.models import PlatformAction\n actions = []\n for cls in apps.get_app_config(app_name).get_models():\n if issubclass(cls, PlatformAction) and hasattr(cls, \"action_codename\"):\n actions.append(cls)\n return actions", "def actions(self):\n return self._action_list", 
"def action_classes(cls, request_obj, model_cls, cls_options, method):\n\n module = locate_base_module(model_cls, 'actions')\n target_base_cls = cls_options[0] if 'id' not in request_obj.kwargs else cls_options[1]\n\n try:\n classes = []\n loaded_mod = get_module(module)\n for name, cls in loaded_mod.__dict__.items():\n # Class must be inherited from the target class\n if target_base_cls and isinstance(cls, type) and issubclass(cls, target_base_cls):\n # Class must not be imported\n if cls.__module__ != target_base_cls.__module__:\n # The model must match that of the target\n if cls.MODEL.__name__ == model_cls.__name__ and method in cls.ALLOWED_METHODS:\n if cls.ACTION not in getattr(model_cls, 'DISALLOWED_ACTIONS', []):\n classes.append(cls)\n except ImportError:\n pass\n\n # Include the base class if it has action name specified\n if target_base_cls and target_base_cls.ACTION and method in target_base_cls.ALLOWED_METHODS:\n if target_base_cls.ACTION not in getattr(model_cls, 'DISALLOWED_ACTIONS', []):\n classes.append(target_base_cls)\n\n # Include delete action as special action if id present\n if 'id' in request_obj.kwargs and method in DeleteAction.ALLOWED_METHODS:\n if DeleteAction.ACTION not in getattr(model_cls, 'DISALLOWED_ACTIONS', []):\n classes.append(DeleteAction)\n\n return classes", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def get_class_methods(class_ast):\n output = list()\n\n # only checks definitions immediately in body to avoid nested class methods\n for node in class_ast.body:\n if isinstance(node, ast.FunctionDef) or isinstance(node, ast.AsyncFunctionDef):\n output.append(node)\n\n return output", "def getActions():\n return getPlugins(IRenamingAction, plugins)", "def get_action_meanings(self) -> list[str]:\n keys = ale_py.Action.__members__.values()\n values = ale_py.Action.__members__.keys()\n mapping = dict(zip(keys, values))\n return [mapping[action] for action in self._action_set]", "def getActions(self):\n actions = self.actions[:]\n return actions", "def get_actions(self, state: TState = None) -> Sequence[TAction]:\n pass", "def actions(self):\n\n return self._actions.getSlice(0)", "def actions(self):\n\n return self._actions.getSlice(0)", "def getActions(self, state): \n util.raiseNotDefined()", "def _get_methods(self):\n\n methods = inspect.getmembers(self, predicate=callable)\n method_list = set()\n\n for name, _ in methods:\n if (name in ('proxy', 'start', 'stop', 'part', 'join',)\n or name[0] == '_'):\n continue\n\n method_list.add(name)\n\n return method_list", "def get_list_of_actions(self):\n return self.actions" ]
[ "0.6725654", "0.67109776", "0.665491", "0.64789116", "0.6402844", "0.6323588", "0.6282057", "0.62751466", "0.6228913", "0.61890525", "0.61869615", "0.6094848", "0.6094848", "0.6094848", "0.6071644", "0.6056207", "0.59731996", "0.59709716", "0.5959929", "0.59402436", "0.5936232", "0.59313184", "0.5924357", "0.59000784", "0.58949614", "0.5853336", "0.5853336", "0.58533275", "0.5831719", "0.5805595" ]
0.78314745
0
Get all "Action Params" / "Other Parameters" of the Algorithm. Action params are all parameters passed as input to the action method.
def get_action_params(instance: Algorithm) -> Dict[str, Any]: params = instance.get_params() attrs = { v: getattr(instance, v) for v in vars(instance) if not v.endswith("_") and not v.startswith("_") and v not in params } return attrs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_action_params(self, a):\n return self.params[a]", "def getParams(self):\n\n\t\tparams = {\"Nparticles\":self.__Nparticles,\"Nkicks\":self.__Nkicks,\"kappa\":self.__kappa, \"eta\":self.__eta,\"gamma\":self.__gamma, \"omega\":self.__omega,\n\t\t\"Kbt\":self.__Kbt, \"tk\":self.__tk}\n\n\t\treturn params", "def get_params(self):\n pass", "def get_params(self):\n return []", "def params(self):\n\t\treturn self.params_", "def get_params(self):\n return list(self.params.values())", "def _get_params(self):\r\n return self.k._get_params()", "def get_params(self):", "def get_params (self):\n return self.params", "def get_params (self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n return self.params", "def get_params(self):\n params = {}\n for step in self.steps:\n params[step[0]] = step[1].get_params()\n return params", "def getParameters(self):\n\n current_params = {'taux': self.taux, 'mu': self.mu, 'G': self.G, 'alpha_0': self.alpha_0,\n 'delta': self.delta, 'p': self.p, 'I0': self.I0, 'kparam': self.kparam}\n\n return (current_params)", "def get_params(self):\n raise NotImplementedError", "def params(self):\n return self._pars", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def params(self):\n return self._params", "def params(self):\n return self._params", "def params(self):\n return self._params", "def getParams(self):\n return self.__params", "def get_params(self):\n\n return self.params_", "def params():\n raise NotImplementedError", "def get_params(self):\n\n params={'f_star':self.get_f_star(), 'g_star':self.get_g_star(), \n 'Delta2_star':self.get_Delta2_star(), \n 'n_star':self.get_n_star(), 'alpha_star':self.get_alpha_star()}\n\n return params", "def params(self):\n return tuple(self._params)", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None" ]
[ "0.70934117", "0.6718168", "0.66945964", "0.6678622", "0.6638862", "0.66280246", "0.6627631", "0.65940255", "0.65821403", "0.65821403", "0.65637183", "0.65637183", "0.65637183", "0.64723307", "0.64681345", "0.64640665", "0.64528596", "0.64158535", "0.6391029", "0.6391029", "0.6391029", "0.6330091", "0.63265586", "0.632454", "0.630802", "0.6301744", "0.6285221", "0.6278944", "0.6278944", "0.6278944" ]
0.7409327
0
Check if the action method was already called/results were generated.
def is_action_applied(instance: Algorithm) -> bool: if len(get_results(instance)) == 0: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _result_already_returned(self):\n return self.deferred.called", "def final_check(self):\n for func in self.called.keys():\n self.assertTrue(self.called[func], \"%s was not called\" % (func,))", "def match_action(self, action):\n\n return hasattr(self, self._action_handler_name(action))", "def is_method(self):\n try:\n self.method\n except Transformation.DoesNotExist:\n return False\n return True", "def _action(self):\n pass", "async def before_action(self, action: str, *args, **kwargs) -> bool:\n return True", "async def before_action(self, action, *args, **kwargs):\n return True", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def should_run_action(self) -> bool: # pylint: disable=too-many-return-statements\n if self.must_run_action:\n return True\n\n if self.phony_outputs:\n # Either no output files (pure action) or missing output files.\n Logger.why(f\"Must run actions to satisfy the phony output: {self.phony_outputs[0]}\")\n return True\n\n if self.missing_output is not None:\n Logger.why(f\"Must run actions to create the missing output(s): {self.missing_output}\")\n return True\n\n if self.abandoned_output is not None:\n Logger.why(f\"Must run actions \" f\"because changed to abandon the output: {self.abandoned_output}\")\n return True\n\n if self.new_persistent_actions:\n # Compare with last successful build action.\n index = len(self.new_persistent_actions) - 1\n if index >= len(self.old_persistent_actions):\n Logger.why(\"Must run actions because changed to add action(s)\")\n return True\n new_action = self.new_persistent_actions[index]\n old_action = self.old_persistent_actions[index]\n if Invocation.different_actions(old_action, new_action):\n return True\n\n # All output files exist:\n\n if self.newest_input_path is None:\n # No input files (pure computation).\n Logger.debug(\"Can skip actions \" \"because all the outputs exist and there are no newer inputs\")\n return False\n\n # There are input files:\n\n if self.oldest_output_path is not None and self.oldest_output_mtime_ns <= self.newest_input_mtime_ns:\n # Some output file is not newer than some input file.\n Logger.why(\n f\"Must run actions because the output: {self.oldest_output_path} \"\n f\"is not newer than the input: {self.newest_input_path}\"\n )\n return True\n\n # All output files are newer than all input files.\n Logger.debug(\"Can skip actions \" \"because all the outputs exist and are newer than all the inputs\")\n return False", "def _confirm_action(self, action):\n\t\treturn True", "def __bool__(self):\n return bool(self._actions)", "def check(self):\r\n for action in self._actions:\r\n action.check()", "def has_side_effect(self):\n # XXX Need to handle OpExtInst correctly (it is conservative now)\n if self.result_id is None:\n return True\n return self.op_name in spirv.HAS_SIDE_EFFECT", "def helper_action_get_request_is_wrong(self, action_name):\n wrong = not util.safe_string_compare(action_name, self.last_request_get_dict[\"action\"][0])\n return wrong", "def action_done(self):", "def action_done(self):\n pass", "def call_action(self, action):\n pass", "def action(self):\n pass", "def action(self):\n pass", "def assert_called_anytime(self, method, url, data=None):\n expected = (method, url)\n\n assert self.http_client.callstack, \\\n \"Expected %s %s but no calls were made.\" % expected\n\n found = False\n for entry in self.http_client.callstack:\n if expected == entry[0:2]:\n found = True\n break\n\n assert found, 'Expected %s; got %s' % (expected,\n self.http_client.callstack)\n 
if data is not None:\n try:\n assert entry[2] == data\n except AssertionError:\n print(entry[2])\n print(\"!=\")\n print(data)\n raise\n\n self.http_client.callstack = []", "def take_action(self, *args, **kwargs):\r\n pass", "def check_called(self, func):\n self.called[func] = False\n def _check(*args, **kwargs):\n self.called[func] = True\n return func(*args, **kwargs)\n return _check", "def meth(self):\r\n return 1", "def is_triggered(self) -> bool:\n raise NotImplementedError()", "def will_call_detail(self):\n return self._will_call_detail", "def has_results(self):\n pass", "def test_results_exists(self):\n name_exists = 'results' in self.views_module_listing\n is_callable = callable(self.views_module.results)\n \n self.assertTrue(name_exists, f\"{FAILURE_HEADER}results() view does not exist{FAILURE_FOOTER}\")\n self.assertTrue(is_callable, f\"{FAILURE_HEADER}results() function does not exist or will not execute{FAILURE_FOOTER}\")", "def skip(self):\n if not self.helper_view.newDataOnly():\n return False\n\n if self.request.steps[-1].startswith(\"++add++\"):\n return False\n if self.request.method != \"PATCH\":\n # restapi calls\n return False\n return True", "def operation_check(self, methodName, isSucceed):\n if (isSucceed):\n self.log4py.info(\"method 【\" + methodName + \"】 运行通过!\");\n else:\n self.log4py.error(\"method 【\" + methodName + \"】 运行失败!\");" ]
[ "0.65325475", "0.6513868", "0.6394428", "0.635402", "0.61890197", "0.6127541", "0.6109208", "0.6106263", "0.6106263", "0.60338444", "0.6010057", "0.59864855", "0.59193176", "0.5911129", "0.5904034", "0.58456475", "0.5825794", "0.58197397", "0.57962", "0.57962", "0.5773618", "0.5765089", "0.576508", "0.57646", "0.5743073", "0.57379025", "0.57105124", "0.5703317", "0.56987417", "0.569426" ]
0.6948303
0
Mark a method as an "action" and apply a set of runtime checks to prevent implementation errors. This decorator marks a method as action. Each algorithm is expected to have at least one action method. For pipelines this action method is called "run". This means, when implementing a custom action or run method, it must always be wrapped in this decorator.
def make_action_safe( action_method: Callable[Concatenate[AlgorithmT, P], AlgorithmT] ) -> Callable[Concatenate[AlgorithmT, P], AlgorithmT]: if getattr(action_method, ACTION_METHOD_INDICATOR, False) is True: # It seems like the decorator was already applied and we do not want to apply it multiple times and run # duplicated checks. return action_method @wraps(action_method) def safe_wrapped(self: AlgorithmT, *args: P.args, **kwargs: P.kwargs) -> AlgorithmT: if action_method.__name__ not in get_action_methods_names(self): warnings.warn( "The `make_action_safe` decorator should only be applied to an action method " f"({get_action_methods_names(self)} for {type(self)}) of an algorithm or methods. " f"To register an action method add the following to the class definition of {type(self)}:\n\n" f"` _action_methods = ({action_method.__name__},)`\n\n" "Or append it to the tuple, if it already exists.", PotentialUserErrorWarning, stacklevel=2, ) return _check_safe_run(self, action_method, *args, **kwargs) setattr(safe_wrapped, ACTION_METHOD_INDICATOR, True) return safe_wrapped
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_action(*args, **kwargs):\n raise NotImplementedError()", "def do_action(self, action, **kwargs):\r\n print(action)\r\n action_method = getattr(self, action._method.__name__)\r\n if action_method:\r\n action_method(**kwargs)", "def take_action(self, action):\n\t\traise NotImplementedError", "def call_action(self, action):\n pass", "def act(self, action):\n action_name = action.op\n args = action.args\n list_action = first(a for a in self.actions if a.name == action_name)\n if list_action is None:\n raise Exception(\"Action '{}' not found\".format(action_name))\n if not list_action.check_precond(self.kb, args):\n raise Exception(\"Action '{}' pre-conditions not satisfied\".format(action))\n list_action(self.kb, args)", "def apply_action(self, action):\n return self.__environment.step(action)", "def do_action(self, _action: action.Action) -> None:\n if isinstance(_action, action.Attack):\n self.do_attack_action(_action)\n elif isinstance(_action, action.Move):\n self.do_move_action(_action)\n else:\n raise NotImplementedError(f\"Action {type(_action)} not implemented!\")", "def take_action(self, action):\n getattr(self, action['func'])(\n *action.get('args', ()), \n **action.get('kwargs', {})\n )", "def perform_action(self, action):\n method_name = action.text().lower()\n method_name = method_name + \"_action\"\n action_method = getattr(self, method_name)\n action_method()", "def execute_action(self, agent, action):\n raise NotImplementedError", "def execute_action(self, agent, action):\n raise NotImplementedError", "def call_method(self, action):\n\n\t\tif action[0] in self.methods:\n\t\t\tself.methods[action[0]](action[0:])\n\t\telse:\n\t\t\tself.no_such_method()", "def __call__(self,action=None):\n raise NYI", "def execute_action(self, agent, action):\n abstract", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "def _set_action(self, action):\n raise NotImplementedError()", "async def before_action(self, action: str, *args, **kwargs) -> bool:\n return True", "def __call__(self) -> Operation:\n if self.action:\n logger.debug(self.long_description)\n self.action(*self.args, **self.kwargs) # type: ignore\n return self", "async def before_action(self, action, *args, **kwargs):\n return True", "def do_action(self):\n func = self._get_action_func()\n func(self)", "def do_action(self, action, a=None, b=None):\n pass", "def expose(action):\n api_methods.append(action.__name__)\n return action", "def _act(self, action):\n self._set_action(action)", "def execute(action):\n\n def wrapped_action(context):\n try:\n action(context)\n except Exception as exc_info:\n if not context.is_error:\n context.set_error(exc_info)\n raise\n\n return wrapped_action", "def dispatch(self, *args, **kwargs):\r\n action = kwargs.pop('action', 'default')\r\n action_method = getattr(self, str(action), self.default)\r\n return action_method(*args, **kwargs)", "def action(self, action):\n allowed_values = [\"APPLY\", \"PRECHECK\"]\n if action not in allowed_values:\n raise ValueError(\n \"Invalid value for `action`, must be one of {0}\"\n .format(allowed_values)\n )\n self._action = action" ]
[ "0.64787984", "0.6455812", "0.6393445", "0.6273497", "0.6248149", "0.6213524", "0.61921144", "0.614397", "0.61419356", "0.6076854", "0.6076854", "0.60382336", "0.6000338", "0.5990066", "0.596705", "0.596705", "0.596705", "0.596705", "0.596705", "0.596705", "0.5950706", "0.5893134", "0.5892677", "0.585675", "0.58333206", "0.5791989", "0.57605636", "0.5745074", "0.57437545", "0.57254136" ]
0.73302245
0
Apply a set of runtime checks to a custom `self_optimize` method to prevent implementation errors.
def make_optimize_safe( self_optimize_method: Callable[Concatenate[OptimizableT, P], OptimizableT] ) -> Callable[Concatenate[OptimizableT, P], OptimizableT]: if getattr(self_optimize_method, OPTIMIZE_METHOD_INDICATOR, False) is True: # It seems like the decorator was already applied, and we do not want to apply it multiple times and run # duplicated checks. return self_optimize_method @wraps(self_optimize_method) def safe_wrapped(self: OptimizableT, *args: P.args, **kwargs: P.kwargs) -> OptimizableT: if self_optimize_method.__name__ not in ("self_optimize", "self_optimize_with_info"): warnings.warn( "The `make_optimize_safe` decorator is only meant for the `self_optimize` method, but you applied it " f"to the `{self_optimize_method.__name__}` method.", PotentialUserErrorWarning, stacklevel=2, ) try: return _check_safe_optimize(self, self_optimize_method, *args, **kwargs) except PicklingError as e: raise ValueError( "We had trouble hashing your class instance." "This is required to run the safety checks for the optimize method. " "This usually happens, if your pipeline or algorithm or one of its parameters is based " "on a dynamically defined class (e.g. a class defined within a function). " "Try defining your classes on a module level. " "If this is not possible for you, you need to disable the safety checks." ) from e setattr(safe_wrapped, OPTIMIZE_METHOD_INDICATOR, True) return safe_wrapped
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def accept_optimize():\n pass", "def self_optimize(self, dataset: DatasetT, **kwargs) -> Self:\n try:\n # This seems hacky, but is used to avoid infinite recursion\n setattr(type(self), \"__optimize_not_implemented__\", True)\n out = self.self_optimize_with_info(dataset, **kwargs)[0]\n delattr(type(self), \"__optimize_not_implemented__\")\n except NotImplementedError as e:\n raise NotImplementedError() from e\n return out", "def _optimise(self):\n pass", "def propose_optimize():\n pass", "def self_optimize_with_info(self, dataset: DatasetT, **kwargs) -> Tuple[Self, Any]:\n try:\n if getattr(type(self), \"__optimize_not_implemented__\", False):\n delattr(type(self), \"__optimize_not_implemented__\")\n raise NotImplementedError()\n except NotImplementedError as e:\n raise NotImplementedError() from e\n return self.self_optimize(dataset, **kwargs), NOTHING", "def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()", "def _check_safe_run(algorithm: AlgorithmT, old_method: Callable, *args: Any, **kwargs: Any) -> AlgorithmT:\n before_paras = algorithm.get_params()\n before_paras_hash = custom_hash(before_paras)\n output: AlgorithmT\n\n # In this case the method is already bound and we do not need to pass the algo as first argument\n output = old_method(*args, **kwargs) if hasattr(old_method, \"__self__\") else old_method(algorithm, *args, **kwargs)\n after_paras = algorithm.get_params()\n after_paras_hash = custom_hash(after_paras)\n if not before_paras_hash == after_paras_hash:\n raise ValueError(\n f\"Running `{old_method.__name__}` of {type(algorithm).__name__} did modify the parameters of the \"\n \"algorithm. \"\n \"This must not happen to make sure individual runs of the algorithm/pipeline are independent.\\n\\n\"\n \"This usually happens when you use an algorithm object or other mutable objects as a parameter to your \"\n \"algorithm/pipeline. \"\n \"In this case, make sure you call `algo_object.clone()` or more general `clone(mutable_input)` on the \"\n f\"object within the `{old_method.__name__}` method before modifying the mutable or running the nested \"\n \"algorithm.\"\n )\n if not isinstance(output, type(algorithm)):\n raise TypeError(\n f\"The `{old_method.__name__}` method of {type(algorithm).__name__} must return `self` or in rare cases a \"\n f\"new instance of {type(algorithm).__name__}. \"\n f\"But the return value had the type {type(output)}.\"\n )\n if not is_action_applied(output):\n raise ValueError(\n f\"Running the `{old_method.__name__}` method of {type(algorithm).__name__} did not set any results on the \"\n \"output. 
\"\n f\"Make sure the `{old_method.__name__}` method sets the result values as expected as class attributes and \"\n f\"all names of result attributes have a trailing `_` to mark them as such.\"\n )\n return output", "def optimize(cls, trials, score, evals_rounds, mon_cons, categorical):\n raise NotImplementedError", "def solve(self):\n start = time.time()\n instance = self.class_object()\n method = getattr(instance, self.method_str)\n method(*self.args, **self.kwargs)\n end = time.time()\n run_log.info(\n (\n f\"\\n###########################\"\n f\"\\n{self.name} took {end-start} to run\"\n f\"\\n###########################\\n\"\n )\n )", "def __call__(self): # run test\n\n try: # Check if any errors were raised during calling of self.func\n return abs(self.func(*self.args, **self.kwargs) - self.res) < self._tolerance\n\n except IndexError:\n return False", "def _helper_run_appropriate_fitter(self,lowerbounds_list: list,\n upperbounds_list: list,\n bounds_not_least_squares: sopt.Bounds):\n \n if self.fitmodel_input.minimization_method_str == \"least_squares\":\n fit_function_callable = getattr(fitmodels,self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.least_squares(fit_function_callable,\n np.array(list(self.fitmodel_input.start_paramdict.values())),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n bounds=(lowerbounds_list, upperbounds_list),\n loss=\"linear\", f_scale=1)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"minimize\":\n fit_function_callable = getattr(fitmodels,self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.minimize(sum_squares_decorator(fit_function_callable),\n np.array(list(self.fitmodel_input.start_paramdict.values())),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n bounds=bounds_not_least_squares,\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"basinhopping\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.basinhopping(\n sum_squares_decorator(fit_function_callable),\n np.array(list(self.fitmodel_input.start_paramdict.values())),\n minimizer_kwargs = {\"args\":(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n \"method\":\"trust-constr\"}, # TODO: figure out a smart thing to use here\n **self.fitmodel_input.fitter_options_dict)\n # The next lines is just for now the weirdness of basinhopping, it doesn't\n # have the global attribute called success\n setattr(optimization_output,\"success\",optimization_output.lowest_optimization_result.success)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"differential_evolution\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.differential_evolution(\n sum_squares_decorator(fit_function_callable),\n bounds_not_least_squares,\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"shgo\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.shgo(\n sum_squares_decorator(fit_function_callable),\n 
tuple(zip(lowerbounds_list,upperbounds_list)),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"dual_annealing\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.dual_annealing(\n sum_squares_decorator(fit_function_callable),\n tuple(zip(lowerbounds_list,upperbounds_list)),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"findmax\":\n # make a copy so that we can go about deleting the max value to find the next\n # max and so on\n peaks_xvals = []\n peaks_yvals = []\n data_array_copy = self.fitmodel_input.yvals.copy()\n # find max, then replace that point with the average, find the next max \n # and keep going until found as many maxima as requested\n for peak_num in range(self.fitmodel_input.start_paramdict[\"numpeaks\"]):\n peakval_y = np.nanmax(data_array_copy)\n peakcoord = np.argmax(data_array_copy)\n peakval_x = self.fitmodel_input.xvals[peakcoord]\n peaks_xvals.append(peakval_x)\n peaks_yvals.append(peakval_y)\n data_array_copy[peakcoord] = np.mean(data_array_copy)\n # we now have to build the optimization_output object that will look similar to what it looks like for regular fits\n param_dict_length = len(self.fitmodel_input.start_paramdict)\n optimization_output = types.SimpleNamespace() # this just initializes an empty class\n optimization_output.fun = -1 # objective function is -1, because it has no meaning here\n optimization_output.x = [peaks_xvals,peaks_yvals]\n # we now add the values to the \"output\" which are not real fit parameters\n # in normal fitting these are always fit parameters, but since this is a \"fake\" fit, we can simply add the initial parameters just to keep the interface constant\n for (idx,key) in enumerate(self.fitmodel_input.start_paramdict):\n if idx >= len(optimization_output.x):\n optimization_output.x.append(self.fitmodel_input.start_paramdict[key])\n optimization_output.success = True\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"findmin\":\n # make a copy so that we can go about deleting the max value to find the next\n # max and so on\n peaks_xvals = []\n peaks_yvals = []\n data_array_copy = self.fitmodel_input.yvals.copy()\n # find max, then replace that point with the average, find the next max \n # and keep going until found as many maxima as requested\n for peak_num in range(self.fitmodel_input.start_paramdict[\"numpeaks\"]):\n peakval_y = np.nanmin(data_array_copy)\n peakcoord = np.argmin(data_array_copy)\n peakval_x = self.fitmodel_input.xvals[peakcoord]\n peaks_xvals.append(peakval_x)\n peaks_yvals.append(peakval_y)\n data_array_copy[peakcoord] = np.mean(data_array_copy)\n # we now have to build the optimization_output object that will look similar to what it looks like for regular fits\n param_dict_length = len(self.fitmodel_input.start_paramdict)\n optimization_output = types.SimpleNamespace() # this just initializes an empty class\n optimization_output.fun = -1 # objective function is -1, because it has no meaning here\n optimization_output.x = [peaks_xvals,peaks_yvals]\n for (idx,key) in enumerate(self.fitmodel_input.start_paramdict):\n if idx >= len(optimization_output.x):\n 
optimization_output.x.append(self.fitmodel_input.start_paramdict[key])\n optimization_output.success = True\n return optimization_output\n else:\n print(\n \"\"\"Message from Class {:s} function _helper_run_appropriate_fitter: \n you tried to use the following optimizer: {}. \n This optimizer does not exist. Not doing any optimization\"\"\".format(\n self.__class__.__name__, self.fitmodel_input.minimization_method_str))\n return None", "def run_and_check(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def _compute_penalty(self):\n raise ValueError('Implement in a child class')", "def do_we_need_to_reoptimize(MFE):\n # check that we found a solution and run optimizer again if not\n MFE.CalculateMeritFunction()\n Nop = MFE.NumberOfOperands\n REOPTIMIZE = False\n for j in range(6):\n op = MFE.GetOperandAt(Nop - j)\n contribution = op.Contribution\n print(\"Contribution %i: %1.2e\" % (j, contribution))\n REOPTIMIZE = REOPTIMIZE or (contribution > 1e-7)\n op_margin = MFE.GetOperandAt(Nop - 7)\n reached_target = np.isclose(op_margin.Value,\n op_margin.Target, atol=10)\n print(\"Margin: %1.2e\" % op_margin.Value)\n REOPTIMIZE = REOPTIMIZE or not reached_target\n\n op_equa = MFE.GetOperandAt(Nop - 8)\n reached_target = op_equa.Value < 10\n print(\"Avg Deviation from edge shape: %1.2f\" % op_equa.Value)\n REOPTIMIZE = REOPTIMIZE or not reached_target\n return REOPTIMIZE", "def _optimize(self,x0,type,method,**kwargs):\n from scipy.optimize import fmin,fmin_powell\n\n if type == 'min':\n g=lambda x:self(x)\n elif type == 'max':\n g=lambda xs:-1*self(x)\n elif type == 'root':\n g=lambda x:np.abs(self(x))\n elif type == 'val':\n val = kwargs.pop('valtofind')\n g=lambda x:np.abs(self(x)-val)\n elif type == 'saddle':\n raise NotImplementedError\n else:\n raise ValueError('Unrecognized optimization type')\n\n if method == 'fmin':\n res = fmin(g,x0,**kwargs)\n elif method == 'fmin_powell':\n res = fmin_powell(g,x0,**kwargs)\n else:\n raise ValueError('Unrecognized method')\n\n self.lastOpt = res\n return res[0]", "def optimize(self): # 3\n res,resargs = self.__obj.optimizetrm()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _trmcode_return_value = resargs\n _trmcode_return_value = rescode(_trmcode_return_value)\n return _trmcode_return_value", "def setup(\n self,\n method: str = \"SLSQP\",\n tol: Union[None, float] = None,\n options: Union[None, Dict] = None,\n ):\n\n # Input check\n if self.opt_type in CONSTRAINED_OPT and (\n method not in ScipyMinimizeSolver.methods_handle_constraints\n ):\n raise TypeError(\n f\"optimization problem has constraints, the method '{method}' is not suitable\"\n )\n\n # Setup class attributes\n\n ## Container for the statistics.\n self._stats = None\n\n ## Method name.\n self.method = method\n\n # Setup minimize input parameters\n\n ## Input to the minimize method\n self.minimize_input = {\n \"fun\": self.f,\n \"method\": method,\n \"x0\": self.x0.toarray().flatten(),\n }\n\n if tol is not None:\n self.minimize_input[\"tol\"] = tol\n\n if options is not None:\n self.minimize_input[\"options\"] = options\n\n if method in ScipyMinimizeSolver.methods_req_jac:\n self.minimize_input[\"jac\"] = self.jac\n\n if method in ScipyMinimizeSolver.methods_req_hess:\n self.minimize_input[\"hess\"] = self.hess\n\n ## Constraints definition passed to the minimize method.\n self._constraints = {}\n if method in ScipyMinimizeSolver.methods_handle_constraints:\n if method != \"trust-constr\":\n if self.opt_type in CONSTRAINED_OPT:\n 
self._constraints[\"constr\"] = {\n \"type\": \"ineq\",\n \"fun\": self.v,\n \"jac\": self.dv,\n }\n else:\n if self.opt.nk:\n self._constraints[\"k\"] = LinearConstraint(\n A=csc_matrix(self.opt.M(self.p).toarray()),\n lb=-self.opt.c(self.p).toarray().flatten(),\n ub=self.opt.inf * np.ones(self.opt.nk),\n )\n\n if self.opt.na:\n eq = -self.opt.b(self.p).toarray().flatten()\n self._constraints[\"a\"] = LinearConstraint(\n A=csc_matrix(self.opt.A(self.p).toarray()),\n lb=eq,\n ub=eq,\n )\n\n if self.opt.ng:\n self._constraints[\"g\"] = NonlinearConstraint(\n fun=self.g,\n lb=np.zeros(self.opt.ng),\n ub=self.opt.inf * np.ones(self.opt.ng),\n jac=self.dg,\n hess=self.ddg,\n )\n\n if self.opt.nh:\n self._constraints[\"h\"] = NonlinearConstraint(\n fun=self.h,\n lb=np.zeros(self.opt.nh),\n ub=np.zeros(self.opt.nh),\n jac=self.dh,\n hess=self.ddh,\n )\n\n return self", "def _add_error_checks(self, other):\n raise NotImplementedError()", "def check_auto_contrast(method):\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n [cutoff, ignore], _ = parse_user_args(method, *args, **kwargs)\n type_check(cutoff, (int, float), \"cutoff\")\n check_value(cutoff, [0, 100], \"cutoff\")\n if ignore is not None:\n type_check(ignore, (list, tuple, int), \"ignore\")\n if isinstance(ignore, int):\n check_value(ignore, [0, 255], \"ignore\")\n if isinstance(ignore, (list, tuple)):\n for item in ignore:\n type_check(item, (int,), \"item\")\n check_value(item, [0, 255], \"ignore\")\n return method(self, *args, **kwargs)\n\n return new_method", "def _base_check(self, func, on_true, on_false, *args, otherwise=\"\"):\n\n # Retrieve information about the line number and filename of the check.\n frame = inspect.currentframe().f_back\n lineno = frame.f_lineno\n filepath = frame.f_globals[\"__file__\"]\n filename = os.path.basename(filepath)\n\n # Try and run the check. 
If we run into an exception, report it.\n try:\n if func(*args):\n result, data = SUCCESS, on_true.format(*args)\n else:\n result, data = FAILURE, on_false.format(*args)\n except Exception as e:\n result, data = ERROR, str(e)\n\n # Display and record our results.\n dots(result)\n self.results.append(\n Result(result=result, data=data, case=self.case, alt=otherwise,\n filename=filename, lineno=lineno)\n )", "def _optimized(self):\n return False", "def check_cutout(method):\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n [length, num_patches], _ = parse_user_args(method, *args, **kwargs)\n\n check_value(length, (1, FLOAT_MAX_INTEGER))\n check_value(num_patches, (1, FLOAT_MAX_INTEGER))\n\n return method(self, *args, **kwargs)\n\n return new_method", "def start_uncorrelated_self_optimizer_strategy(wf):\n\n optimizer_method = wf.execution_strategy[\"optimizer_method\"]\n info(\"> ExecStrategy | UncorrelatedSelfOptimizer\", Fore.CYAN)\n info(\"> Optimizer | \" + optimizer_method, Fore.CYAN)\n\n knobs = wf.execution_strategy[\"knobs\"]\n wf.totalExperiments = len(knobs) * wf.execution_strategy[\"optimizer_iterations\"]\n\n total_result = dict()\n # we fill the arrays and use the index to map from gauss-optimizer-value to variable\n for key in knobs:\n optimal_knob_value = optimizeOneVariable(wf, wf.execution_strategy[\"optimizer_iterations\"], key,\n (knobs[key][0], knobs[key][1]))\n total_result[key] = optimal_knob_value\n wf.change_provider[\"instance\"].applyChange(total_result)\n info(\">\")\n info(\"> FinalResult | Best Values: \" + str(total_result))", "def Optimize(self):\n return _gmat_py.ExternalOptimizer_Optimize(self)", "def _constraints_utility(self):\n\n def rule(model):\n total = summation(self.utilities, model.A)\n return model.A_total == total\n\n self.model.constrain_A_total = Constraint(rule=rule)\n\n def rule(model):\n total = 2 * summation(self.utilities, model.A2)\n return model.A2_total == total\n\n self.model.constrain_A2_total = Constraint(rule=rule)\n\n def rule(model):\n total = 3 * summation(self.utilities, model.A3)\n return model.A3_total == total\n\n self.model.constrain_A3_total = Constraint(rule=rule)\n\n def rule(model):\n total = 4 * summation(self.utilities, model.A4)\n return model.A4_total == total\n\n self.model.constrain_A4_total = Constraint(rule=rule)\n\n def rule(model):\n completion_bonus = self.task_completion_bonus * self.task_duration\n total = summation(completion_bonus, model.T_total)\n return model.Completion_total == total\n\n self.model.constrain_completion_total = Constraint(rule=rule)\n\n def rule(model):\n scaling = 0.2\n affinity = np.outer(c.AFFINITY_COGNITIVE, self.task_cognitive_load)\n\n # TODO(cathywu) replace this code when \"simple slicing\" is clarified\n zeros1 = np.zeros((1, self.num_tasks))\n zeros2 = np.zeros((2, self.num_tasks))\n zeros3 = np.zeros((3, self.num_tasks))\n\n total = summation(affinity, model.A)\n total += summation(affinity, model.A2)\n total += summation(affinity, model.A3)\n total += summation(affinity, model.A4)\n\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A2)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A3)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A4)\n\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A3)\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A4)\n\n total += summation(np.vstack((affinity[3:, :], zeros3)), model.A4)\n total *= scaling\n\n return model.Affinity_cognitive_total == total\n\n 
self.model.constrain_affinity_cognitive_total = Constraint(rule=rule)", "def solve(self, **kwargs) -> OptimizationResult:\n raise NotImplementedError", "def lint(self):\n raise NotImplementedError()", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.all_methods = list_subclass_methods(RegressionMetrics, True,\n additional_ignores=['calculate_hydro_metrics',\n # 'calculate_scale_dependent_metrics',\n # 'calculate_scale_independent_metrics'\n ])\n\n # if arrays contain negative values, following three errors can not be computed\n for array in [self.true, self.predicted]:\n\n assert len(array) > 0, \"Input arrays should not be empty\"\n\n if len(array[array < 0.0]) > 0:\n self.all_methods = [m for m in self.all_methods if m not in ('mean_gamma_deviance',\n 'mean_poisson_deviance',\n 'mean_square_log_error')]\n if (array <= 0).any(): # mean tweedie error is not computable\n self.all_methods = [m for m in self.all_methods if m not in ('mean_gamma_deviance',\n 'mean_poisson_deviance')]", "def run_check(self, ctx: RunContext):\n params = ctx.get_params(\"mccabe\")\n options = ctx.options\n if options:\n params.setdefault(\"max-complexity\", options.max_complexity)\n\n McCabeChecker.max_complexity = int(params.get(\"max-complexity\", 10))\n McCabeChecker._error_tmpl = \"%r is too complex (%d)\"\n number = McCabeChecker._code\n for lineno, offset, text, _ in McCabeChecker(ctx.ast, ctx.filename).run():\n ctx.push(\n col=offset + 1,\n lnum=lineno,\n number=number,\n text=text,\n type=\"C\",\n source=\"mccabe\",\n )", "def __init__(self, cost_func):\n super().__init__(cost_func)\n\n self.support_for_bounds = True\n self._popt = None\n self._status = None\n self._maxiter = None" ]
[ "0.6009816", "0.6001013", "0.59160036", "0.5702203", "0.5633489", "0.54485035", "0.54099053", "0.5349612", "0.52543825", "0.52409315", "0.5170419", "0.5148132", "0.51057553", "0.5033916", "0.49907643", "0.49848083", "0.4968956", "0.49565458", "0.49478036", "0.49183828", "0.4918197", "0.49069118", "0.48927885", "0.48535866", "0.48489335", "0.4846225", "0.48216787", "0.47498435", "0.471493", "0.4711531" ]
0.7030773
0
Strip the stderr of a Python process from potential debug output emitted by the interpreter. This will typically be run on the result of the communicate() method of a subprocess.Popen object.
def strip_python_stderr(stderr): stderr = re.sub(br"\[\d+ refs, \d+ blocks\]\r?\n?", b"", stderr).strip() return stderr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug(line):\n sys.stderr.write(line + \"\\n\")\n sys.stderr.flush()", "def stderr(self):\n if self._stderr is None:\n stderr = [p.stderr.read() for p in self.processes if p.stderr]\n output = b'\\n'.join(stderr).strip()\n if not isinstance(output, str):\n output = output.decode(self.encoding, 'ignore')\n self._stderr = output\n return self._stderr", "def __readStderr(self):\n if self.process is not None:\n self.errorGroup.show()\n s = str(self.process.readAllStandardError(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n self.errors.insertPlainText(s)\n self.errors.ensureCursorVisible()", "def nostderr():\n save_stderr = sys.stderr\n sys.stderr = cStringIO.StringIO()\n yield\n sys.stderr = save_stderr", "def hook_exceptions():\n\n if hasattr(sys.stdout, \"fileno\"): # when testing, sys.stdout is StringIO\n # reopen stdout in non buffered mode\n sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)\n # set the hook\n sys.excepthook = traceback_formatter", "def degsOutput(err, globalNameSpace):\n lineNumber = err.lineNumber\n columnNumber = err.columnNumber\n err.msg = '\\n' + err.msg + '\\n'\n print(err.msg, file=sys.stderr)\n if not lineNumber == None:\n positionReference = [\"Error caused at line %(lineNumber)i\" % locals()]\n if not columnNumber == None:\n positionReference.append(\", column %(columnNumber)i\" % locals())\n positionReference.append(\":\\n\")\n positionReference.append(globalNameSpace['inputScript'].splitlines(True)[lineNumber-1])\n if not columnNumber == None:\n positionReference.append(\" \"*(columnNumber-1) + \"^~~ here.\")\n print(''.join(positionReference) + '\\n', file=sys.stderr)\n if err.element:\n print(\"In element: \" + err.element.userUnderstandableXPath(), file=sys.stderr)\n else:\n print(\"Unknown element. 
Please report this error to %s\" % globalNameSpace['bugReportAddress'], file=sys.stderr)", "def nostderr():\n savestderr = sys.stderr\n\n class Devnull(object):\n def write(self, _):\n pass\n\n def flush(self):\n pass\n\n sys.stderr = Devnull()\n try:\n yield\n finally:\n sys.stderr = savestderr", "def print_unable_to_run(exc: \"CalledProcessError\"):\n _print(str(exc), level=MessageLevel.QUIET)", "def suppress_stdout_stderr():\n with open(devnull, 'w') as fnull:\n with redirect_stderr(fnull) as err, redirect_stdout(fnull) as out:\n yield (err, out)", "def redirect_stderr():\n\n class LoggerWriter:\n \"\"\"https://github.com/apache/airflow/pull/6767/files\"\"\"\n def __init__(self, target_logger, level=logging.INFO):\n self.logger = target_logger\n self.level = level\n\n def write(self, message):\n if message and not message.isspace():\n self.logger.log(self.level, message)\n\n def fileno(self):\n \"\"\"\n Returns the stdout file descriptor 1.\n For compatibility reasons e.g python subprocess module stdout redirection.\n \"\"\"\n return 1\n\n def flush(self):\n \"\"\"MUST define flush method to exit gracefully\"\"\"\n\n sys.stderr = LoggerWriter(logger, logging.ERROR)", "def idb_excepthook(type, value, tb):\n if hasattr(sys, \"ps1\") or not sys.stderr.isatty():\n sys.__excepthook__(type, value, tb)\n else:\n traceback.print_exception(type, value, tb)\n print\n pdb.pm()", "def _grab_sanitizer_trace(self):\n inside_sanitizer_trace = False\n self.crash_trace = []\n while True:\n captured_line = self.terminal_queue.get()\n if self.print_subprocess_output:\n print(captured_line.strip(\"\\n\"))\n if self.monitor_console:\n self.console_log.append(captured_line)\n if not inside_sanitizer_trace:\n if captured_line.find(\"ERROR: AddressSanitizer\") != -1 and captured_line.find(\"AddressSanitizer failed to allocate\") == -1:\n inside_sanitizer_trace = True\n if inside_sanitizer_trace and \\\n (captured_line.find(\"Stats: \") != -1 or\n captured_line.find(\"ABORTING\") != -1 or\n captured_line.find(\"ERROR: Failed\") != -1):\n inside_sanitizer_trace = False\n self.failure = True\n break\n if inside_sanitizer_trace:\n self.crash_trace.append(captured_line)\n if self.failure and self._IsRunning():\n self.process.terminate()\n self.process.kill()\n self.process = None", "def to_stderr(stderr: str) -> typing.Optional[bool]:\n if not (stderr + \"\").strip():\n return False\n\n out = []\n for line in filter(None, str(stderr).replace(\"\\\\n\", \"\\n\").split(\"\\n\")):\n out.append(\" \" + line.strip())\n\n eprint(\"\\nError:\\n\" + (\"-\" * 80))\n eprint(\"\\n\".join(out))\n eprint(\"-\" * 80)\n\n sys.exit(1)", "def stderr(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"stderr\", _args)\n return _ctx.execute_sync(str)", "def result_stderr(result):\n return result[1][1]", "def ddebug(msg, err=None): # pragma: no cover\n import os\n if err:\n err = ''.join(traceback.format_exception(*err))\n else:\n err = ''\n sys.__stdout__.write(\"({}) {} {}\".format(os.getpid(), msg, err)+'\\n')\n sys.__stdout__.flush()", "def redirect_stderr(x):\n\tif hasattr(contextlib, 'redirect_stderr'):\n\t\tresult = contextlib.redirect_stderr\n\telse:\n\t\[email protected]\n\t\tdef result(x):\n\t\t\t\"\"\" Stand-in for Python 3.5's `redirect_stderr`.\n\n\t\t\t\tNotes: Non-reentrant, non-threadsafe\n\t\t\t\"\"\"\n\t\t\told_stderr = sys.stderr\n\t\t\tsys.stderr = x\n\t\t\tyield\n\t\t\tsys.stder = old_stderr\n\n\treturn result(x)", "def readProcessStderrLog(self, name, offset, length):\r\n 
self._update('readProcessStderrLog')\r\n return self._readProcessLog(name, offset, length, 'stderr')", "def get_stderr(self):\n return self._get_log('stderr')", "def execute_suppress_stdout_stderr(cmd, log_cmd=True):\r\n return CommandUtil._execute_internal(cmd, False, False, log_cmd)", "def result(x):\n\t\t\told_stderr = sys.stderr\n\t\t\tsys.stderr = x\n\t\t\tyield\n\t\t\tsys.stder = old_stderr", "def _restoreStdStreams(self):\n if self.options.buffer:\n stdout = sys.stdout.getvalue()\n stderr = sys.stderr.getvalue()\n sys.stdout = self._original_stdout\n sys.stderr = self._original_stderr\n self._stdout_buffer.seek(0)\n self._stdout_buffer.truncate(0)\n self._stderr_buffer.seek(0)\n self._stderr_buffer.truncate(0)\n return stdout, stderr\n else:\n return None, None", "def redirect_stderr(new_target=None):\n\n if not new_target:\n new_target = StringIO()\n\n _ = sys.stderr\n try:\n sys.stderr = new_target\n yield new_target\n finally:\n sys.stderr = _", "def to_stderr(message):\n print >> sys.stderr, message", "def pipeline_stderr_handler(line, conf_info):\n\n if pl_stderr_ignore(line):\n pass\n elif s_make_error.search(line):\n LOGGER.error(\"make error detected; run failed\")\n return RUN_FAILED\n elif s_no_gnuplot.search(line):\n LOGGER.error(\"gnuplot not found\")\n return RUN_FAILED\n elif s_no_convert.search(line):\n LOGGER.error(\"imagemagick's convert command not found\")\n return RUN_FAILED\n elif s_no_ghostscript.search(line):\n LOGGER.error(\"ghostscript not found\")\n return RUN_FAILED\n else:\n LOGGER.debug('PIPE:STDERR:?: %s' % (line))\n\n return False", "def extract_errors(stdout: str) -> str:\n if not (stdout + \"\").strip():\n return \"\"\n\n out: typing.List[str] = []\n for line in filter(None, str(stdout).replace(\"\\\\n\", \"\\n\").split(\"\\n\")):\n if line.lower().startswith(\"ora-\") or line.lower().startswith(\"rman-\"):\n if not line.find(\"===\") > -1:\n out += textwrap.wrap(line.strip())\n\n return '\\n'.join(out)", "def write_err(self, text): # pragma: no cover\n # type: (str) -> None\n stderr = self.stderr\n if self.stderr.closed:\n stderr = sys.stderr\n stderr.write(decode_output(u\"\\r\", target_stream=stderr))\n stderr.write(decode_output(CLEAR_LINE, target_stream=stderr))\n if text is None:\n text = \"\"\n text = decode_output(u\"{0}\\n\".format(text), target_stream=stderr)\n self.stderr.write(text)\n self.out_buff.write(decode_output(text, target_stream=self.out_buff))", "def Cleanup(self, silent=False):\n if os.getpid() != self._parent_pid or self._output is None:\n return\n try:\n # Print output from subprocess.\n if not silent and logging.getLogger().isEnabledFor(logging.DEBUG):\n with open(self._output.name, 'r') as f:\n for line in f:\n logging.debug(line.rstrip('\\n'))\n finally:\n # Clean up our temporary file.\n osutils.SafeUnlink(self._output.name)\n self._output.close()\n self._output = None", "def to_stderr(self, message):\n message = self.ydl._bidi_workaround(message)\n output = message + '\\n'\n self.ydl._write_string(output, self.ydl._err_file)", "def test_capture_stderr():\n\n sys.stderr.write('Print to stderr')\n\n assert False" ]
[ "0.5967011", "0.589786", "0.5790933", "0.5673864", "0.5622882", "0.55554366", "0.5553385", "0.5504194", "0.5424705", "0.53817785", "0.5293466", "0.5291438", "0.52749884", "0.52310896", "0.52268976", "0.52113926", "0.5170546", "0.5169454", "0.51536196", "0.510776", "0.507081", "0.5055367", "0.50265163", "0.50019145", "0.49986106", "0.4968103", "0.49572212", "0.4947596", "0.49422443", "0.4941821" ]
0.69125473
0
Prints 3 plots for linear assumptions analysis. This function takes in a list of predicted y_hats and a list of the actual ys.
def analysis_plot(predictions, ys): fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 5)) residuals = ys - predictions # Plot 1 - Predicted vs Actual sns.scatterplot(predictions, ys, ax=ax1) ax1.set_title('Predicted vs Actual', fontsize=20) ax1.set(xlabel='Predicted Ys', ylabel='Actual Ys') # Plot 2 - Residuals PLot (predicted vs residuals) sns.scatterplot(predictions, residuals, ax=ax2) ax2.set_title('Residuals Plot', fontsize=20) ax2.set(xlabel='Predicted Ys', ylabel='Residuals') # Plot 3 - QQ Plot sm.qqplot(residuals, ax=ax3, line='s') ax3.set_title('QQ Plot- Distribution of Residuals', fontsize=20) plt.show();
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_observed_predicted(y_data, y_predict, ols_line=False, model_fit=None, figsize=(15, 10), save=False, end_name_fig='', folder='Charts'): \r\n\r\n end_name_fig = end_name_fig + '_' if end_name_fig is not None else ''\r\n\r\n fig, ax = plt.subplots(figsize=figsize)\r\n ax.scatter(y_data, y_predict)\r\n \r\n if ols_line == False:\r\n ax.plot([y_data.min(), y_data.max()], [y_data.min(), y_data.max()], 'k--', lw=4)\r\n\r\n else:\r\n line_fit = sm.OLS(y_data, sm.add_constant(y_predict, prepend=True)).fit()\r\n abline_plot(model_results=line_fit, ax=ax)\r\n\r\n ax.set_title('Predicted vs Observed')\r\n ax.set_ylabel('Observed values')\r\n ax.set_xlabel('Predicted values')\r\n\r\n if save == True:\r\n plt.savefig(folder + '/predict_observed_' + end_name_fig + '.png')\r\n\r\n if model_fit is not None:\r\n \r\n fig, ax = plt.subplots(figsize=figsize)\r\n ax.scatter(y_predict, model_fit.resid_pearson)\r\n ax.hlines(0, 0, 1)\r\n ax.set_xlim(0, 1)\r\n ax.set_title('Residual Dependence Plot')\r\n ax.set_ylabel('Pearson Residuals')\r\n ax.set_xlabel('Fitted values') \r\n\r\n if save == True:\r\n plt.savefig(folder + '/pearson_residuals_' + end_name_fig + '.png')\r\n\r\n\r\n fig, ax = plt.subplots(figsize=figsize)\r\n res_dev_residuals = model_fit.resid_deviance.copy()\r\n res_dev_residuals_std = stats.zscore(res_dev_residuals)\r\n ax.hist(res_dev_residuals_std, bins=25)\r\n ax.set_title('Histogram of standardized deviance residuals')\r\n\r\n if save == True:\r\n plt.savefig(folder + '/standard_deviance_residuals_' + end_name_fig + '.png')\r\n\r\n graphics.gofplots.qqplot(res_dev_residuals, line='r')\r\n\r\n if save == True:\r\n plt.savefig(folder + '/gofplot_' + end_name_fig + '.png')", "def plot_observed_predictions(self):\n \n # Plot of X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1) \n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def test_plot_lm(models, kwargs):\n idata = models.model_1\n if \"constant_data\" not in idata.groups():\n y = idata.observed_data[\"y\"]\n x1data = y.coords[y.dims[0]]\n idata.add_groups({\"constant_data\": {\"_\": x1data}})\n idata.constant_data[\"x1\"] = x1data\n idata.constant_data[\"x2\"] = x1data\n\n axes = plot_lm(idata=idata, y=\"y\", y_model=\"eta\", xjitter=True, **kwargs)\n assert np.all(axes)", "def plotPredictedError():\n\tglobal normalized\n\n\twarmthPred = []\n\twarmthObserved = []\n\tcompPred = []\n\tcompObserved = []\n\tSStotalWarmth = 0\n\tSSresWarmth = 0\n\tSStotalComp = 0\n\tSSresComp = 0\n\tkeys = parser.getMappings(normalized)[0].keys()\n\tfor key in keys:\n\n\t\tif \"_\" in key:\n\t\t\twarmthAxis, compAxis = 
getPlotData(key)\n\t\t\twarmthPred.append(warmthAxis[3])\n\t\t\twarmthObserved.append(warmthAxis[2])\n\t\t\tcompPred.append(compAxis[3])\n\t\t\tcompObserved.append(compAxis[2])\n\n\tmeanObservedWarmth = np.mean(warmthObserved)\n\tmeanObservedComp = np.mean(compObserved)\n\tfor i in range(0, len(warmthObserved)):\n\t\tSStotalWarmth += (warmthObserved[i] - meanObservedWarmth)**2\n\t\tSSresWarmth += (warmthObserved[i] - warmthPred[i])**2\n\t\tSStotalComp += (compObserved[i] - meanObservedComp)**2\n\t\tSSresComp += (compObserved[i] - compPred[i])**2\n\n\n\tplt.axis([0, 100, 0, 100])\n\tfig = plt.figure(1)\n\tax = fig.add_subplot(111)\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(warmthObserved, warmthPred)\n\tprint(r_value**2)\n\ttext = ax.text(60, 20, \"R^2 value: \" + str(r_value**2) , \\\n fontsize = 12, color = 'black')\n\tplt.title(\"Observed vs Predicted Warmth\")\n\tplt.ylabel(\"Predicted Value\")\n\tplt.xlabel(\"Observed Value\")\n\tplt.scatter(warmthObserved, warmthPred)\n\tplt.plot([0,100], [0,100])\n\tplt.show()\n\n\tfig = plt.figure(1)\n\tax = fig.add_subplot(111)\n\tslope, intercept, r_value, p_value, std_err = stats.linregress(compObserved, compPred)\n\tprint(r_value**2)\n\ttext = ax.text(60, 20, \"R^2 value: \" + str(r_value**2) , \\\n fontsize = 12, color = 'black')\n\tplt.axis([0, 100, 0, 100])\n\tplt.title(\"Observed vs Predicted Competence\")\n\tplt.ylabel(\"Predicted Value\")\n\tplt.xlabel(\"Observed Value\")\n\tplt.scatter(compObserved, compPred)\n\tplt.plot([0,100], [0,100])\n\tplt.show()", "def plot_pred(xy, y_prime, N=10, groundtruth=True):\n \n fig,ax = plt.subplots()\n pred_seq = y_prime.shape[2]\n obs_seq = xy.shape[1] - pred_seq\n \n if groundtruth:\n for i in range(N):\n # plot observation\n ax.plot(xy[i, :obs_seq, 2], xy[i, :obs_seq, 3], color='k')\n # plot ground truth\n ax.plot(xy[i, obs_seq-1:, 2], xy[i, obs_seq-1:, 3], color='r')\n for j, pred in enumerate(y_prime[i]):\n # concate the first step for visulization purpose\n pred = np.concatenate((xy[i, obs_seq-1:obs_seq, 2:4], pred), axis=0) \n ax.plot(pred[:, 0], pred[:, 1], color='b') \n else:\n x = xy\n obs_seq = x.shape[1] \n for i in range(N):\n # plot observation\n ax.plot(x[i, :, 2], x[i, :, 3], color='k')\n for j, pred in enumerate(y_prime[i]):\n # concate the first step for visulization\n pred = np.concatenate((x[i, obs_seq-1:obs_seq, 2:4], pred), axis=0) \n ax.plot(pred[:, 0], pred[:, 1], color='b') \n ax.set_aspect(\"equal\")\n plt.show()\n plt.gcf().clear()\n plt.close()", "def plot_variables(self, n, show=False, diagnostics=False):\n\n if diagnostics:\n fig, ax = plt.subplots(5, 1, sharex = True, figsize = (10, 10))\n else:\n fig, ax = plt.subplots(2, 1, sharex = True, figsize = (10, 10))\n\n plt.subplots_adjust(hspace = 0)\n end = len(n.history[\"det F\"])\n epochs = np.arange(end)\n a, = ax[0].plot(epochs, n.history[\"det F\"], label = 'Training data')\n b, = ax[0].plot(epochs, n.history[\"det test F\"], label = 'Test data')\n # ax[0].axhline(y=5,ls='--',color='k')\n ax[0].legend(frameon = False)\n ax[0].set_ylabel(r'$|{\\bf F}_{\\alpha\\beta}|$')\n ax[0].set_title('Final Fisher info on test data: %.3f'%n.history[\"det test F\"][-1])\n ax[1].plot(epochs, n.history[\"loss\"])\n ax[1].plot(epochs, n.history[\"test loss\"])\n # ax[1].set_xlabel('Number of epochs')\n ax[1].set_ylabel(r'$\\Lambda$')\n ax[1].set_xlim([0, len(epochs)]);\n \n if diagnostics:\n ax[2].plot(epochs, n.history[\"det C\"])\n ax[2].plot(epochs, n.history[\"det test C\"])\n # ax[2].set_xlabel('Number of 
epochs')\n ax[2].set_ylabel(r'$|{\\bf C}|$')\n ax[2].set_xlim([0, len(epochs)]);\n \n # Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,0]\n , color = 'C0', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"dμdθ\"])[:,0,1]\n , color = 'C0', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n \"\"\"\n\n # Test Derivative of first summary wrt to theta1 theta1 is 3rd dimension index 0\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,0]\n , color = 'C1', label=r'$\\theta_1$',alpha=0.5)\n \n \"\"\"\n # Test Derivative of first summary wrt to theta2 theta2 is 3rd dimension index 1\n ax[3].plot(epochs, np.array(n.history[\"test dμdθ\"])[:,0,1]\n , color = 'C1', ls='dashed', label=r'$\\theta_2$',alpha=0.5)\n ax[3].legend(frameon=False)\n \"\"\"\n\n ax[3].set_ylabel(r'$\\partial\\mu/\\partial\\theta$')\n # ax[3].set_xlabel('Number of epochs')\n ax[3].set_xlim([0, len(epochs)])\n\n # Mean of network output summary 1\n ax[4].plot(epochs, np.array(n.history[\"μ\"])[:,0],alpha=0.5)\n # Mean of test output network summary 1\n ax[4].plot(epochs, np.array(n.history[\"test μ\"])[:,0],alpha=0.5)\n ax[4].set_ylabel('μ')\n ax[4].set_xlabel('Number of epochs')\n ax[4].set_xlim([0, len(epochs)])\n \n\n print ('Maximum Fisher info on train data:',np.max(n.history[\"det F\"]))\n print ('Final Fisher info on train data:',(n.history[\"det F\"][-1]))\n \n print ('Maximum Fisher info on test data:',np.max(n.history[\"det test F\"]))\n print ('Final Fisher info on test data:',(n.history[\"det test F\"][-1]))\n\n if np.max(n.history[\"det test F\"]) == n.history[\"det test F\"][-1]:\n print ('Promising network found, possibly more epochs needed')\n\n plt.tight_layout()\n plt.savefig(f'{self.figuredir}variables_vs_epochs_{self.modelversion}.png')\n if show: plt.show()\n plt.close()", "def plot_actual_vs_predicted_by_equations(df, x_variable, y_variables, plot_title):\n #Plot results\n df.plot(x=x_variable, y=y_variables, title=plot_title)\n plt.show()", "def regression_plots(regression_results, dependant, independant):\n\n # Scatter of two variables\n ds = xr.Dataset()\n ds['dependant'] = dependant\n ds['independant'] = regression_results[f'prediction_skt']\n ds = ds.transpose(\"y\", \"x\", \"time\")\n\n expected = ds['dependant'].values.flatten()\n predicted = ds['independant'].values.flatten()\n \n plt.style.use('stylesheets/contour.mplstyle')\n fig, ax = plt.subplots(figsize=(5, 5))\n ax.axhline(0, color='k', alpha=0.5)\n ax.axvline(0, color='k', alpha=0.5)\n mask = np.isfinite(expected) * np.isfinite(predicted)\n X = expected[mask]\n Y = predicted[mask]\n counts, xedges, yedges = np.histogram2d(X, Y, bins=100)\n xedges = (xedges[1:] + xedges[:-1]) / 2\n yedges = (yedges[1:] + yedges[:-1]) / 2\n im = ax.contourf(xedges, yedges, counts, norm=LogNorm())\n ax.set_xlabel('Expected values')\n ax.set_ylabel('Predicted values')\n # ax.autoscale(False)\n plt.colorbar(im)\n plt.show()\n\n # gradient_expected = regression[f'{dependant}'].polyfit(\n # dim='time', deg=1, cov=True).sel(degree=1).polyfit_coefficients * 1e9*60*60*24*365\n # gradient_predicted, = regression[f'prediction_{independant}'].polyfit(\n # dim='time', deg=1, cov=True).sel(degree=1).polyfit_coefficients * 1e9*60*60*24*365\n\n\n # max_ = max([gradient_expected.max().max(), gradient_predicted.max().max()])\n # min_ = min([gradient_expected.min().min(), 
gradient_predicted.min().min()])\n\n # divnorm = TwoSlopeNorm(vmin=min_, vcenter=0, vmax=max_)\n # levels = np.arange(min_, max_, 0.1)\n\n # fig = plt.figure(figsize=(5, 10))\n\n # ax = fig.add_subplot(2, 1, 1, projection=ccrs.SouthPolarStereo())\n # im = ax.contourf(gradient_expected.x, gradient_expected.y, gradient_expected.transpose(\n # ), levels=levels, norm=divnorm, cmap='RdBu_r')\n # ax.coastlines()\n # ax.set_title('Expected Trends')\n # plt.colorbar(im, ax=ax)\n\n # ax = fig.add_subplot(2, 1, 2, projection=ccrs.SouthPolarStereo())\n # im = ax.contourf(gradient_predicted.x, gradient_predicted.y,\n # gradient_predicted.transpose(), levels=levels, norm=divnorm, cmap='RdBu_r')\n # ax.coastlines()\n # ax.set_title('Predicted Trends')\n # plt.colorbar(im, ax=ax)\n pass", "def show_plots(history):\n loss_vals = history['loss']\n val_loss_vals = history['val_loss']\n epochs = range(1, len(history['accuracy'])+1)\n \n f, ax = plt.subplots(nrows=1,ncols=2,figsize=(16,4))\n \n # plot losses on ax[0]\n ax[0].plot(epochs, loss_vals, color='navy',marker='o', linestyle=' ', label='Training Loss')\n ax[0].plot(epochs, val_loss_vals, color='firebrick', marker='*', label='Validation Loss')\n ax[0].set_title('Training & Validation Loss')\n ax[0].set_xlabel('Epochs')\n ax[0].set_ylabel('Loss')\n ax[0].legend(loc='best')\n ax[0].grid(True)\n \n # plot accuracies\n acc_vals = history['accuracy']\n val_acc_vals = history['val_accuracy']\n\n ax[1].plot(epochs, acc_vals, color='navy', marker='o', ls=' ', label='Training Accuracy')\n ax[1].plot(epochs, val_acc_vals, color='firebrick', marker='*', label='Validation Accuracy')\n ax[1].set_title('Training & Validation Accuracy')\n ax[1].set_xlabel('Epochs')\n ax[1].set_ylabel('Accuracy')\n ax[1].legend(loc='best')\n ax[1].grid(True)\n \n plt.show()\n plt.close()\n \n # delete locals from heap before exiting\n del loss_vals, val_loss_vals, epochs, acc_vals, val_acc_vals", "def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = [h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()", "def visualize_pred(y_test, y_pred, test_seq, window_out, num_plots, num_win_ser, cols_y, col_idx):\n \n \n ser_idx = [i for i in range(0, len(y_test), num_win_ser)]\n if num_plots > len(ser_idx):\n print(\"Too many plots, reduce the mumber\")\n else:\n indx = ser_idx[0:num_plots]\n days = range(num_win_ser)\n for idx in indx:\n CR = test_seq[idx][0][0][3]\n pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]\n true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]\n \n plt.title(\"Y_True V/S Y_Pred, CR: \"+ str(CR))\n plt.xlabel('Days')\n 
plt.ylabel(cols_y[col_idx])\n \n plt.plot(days, pred, label = 'Pred')\n plt.plot(days, true, label = 'True')\n \n plt.legend()\n plt.show()", "def plot_predictions(y, yhat, title=\"Predictions vs Actual\", output_dir=None):\n\n fig = plt.figure(figsize=(15, 6))\n plt.xlabel('Time')\n plt.ylabel('PM10')\n plt.plot(y, label=\"actual\", figure=fig)\n plt.plot(yhat, label=\"predicted\", figure=fig)\n plt.title(title)\n fig.legend()\n\n if output_dir != None:\n plt.savefig(os.path.join(output_dir, \"{}.png\".format(title)))\n\n plt.close(fig)", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def plot_multiple_linear_regression(data, x_1_name, x_2_name, y_name):\n model = LinearRegression()\n X, Y = get_features(data, [x_1_name, x_2_name], y_name)\n fit = model.fit(X, Y)\n\n fig = plt.figure(figsize=(10,10))\n ax = fig.add_subplot(111, projection='3d')\n\n x_surf = np.arange(0, max(data[x_1_name]), max(data[x_1_name])/10) # generate a mesh\n y_surf = np.arange(0, max(data[x_2_name]), max(data[x_2_name])/10)\n x_surf, y_surf = np.meshgrid(x_surf, y_surf)\n\n exog = pd.core.frame.DataFrame({x_1_name: x_surf.ravel(), x_2_name: y_surf.ravel()})\n out = fit.predict(exog)\n ax.plot_surface(x_surf, y_surf,\n out.reshape(x_surf.shape),\n rstride=1,\n cstride=1,\n color='None',\n alpha = 0.4)\n\n ax.scatter(data[x_1_name], data[x_2_name], data[y_name],\n marker='o',\n alpha=1)\n\n ax.set_xlabel(x_1_name)\n ax.set_ylabel(x_2_name)\n ax.set_zlabel(y_name)\n plt.title(\"Linear Model with 2 Explanatory Variables: \" + x_1_name + \" and \" + x_2_name + \" vs. 
\" + y_name, y=1.08, fontsize=16)\n\n plt.show();", "def test_plot_lm_list():\n y = [1, 2, 3, 4, 5]\n assert plot_lm(y=y, x=np.arange(len(y)), show=False)", "def regression_analysis(cls, y_true, y_pred, path=None):\n residual = y_true - y_pred\n print(\"Histogram\")\n cls.histogram(residual, \"Residual\")\n print(\"Scatter\")\n cls.scatter_plot(y_pred, residual, \"pred\", \"residual\", path=path)\n print(\"Scatter\")\n cls.scatter_plot( y_true, y_pred, \"y_test\", \"pred\", path=path)", "def ex1_plots(instance, destination, prefix, save, animate):\n \n plts = ukf_plots(instance, destination, prefix, save, animate)\n\n truths = truth_parser(instance)\n nan_array= nan_array_parser(instance, truths, instance.base_model)\n #obs, obs_key = obs_parser(instance, True)\n obs_key = obs_key_parser(instance, True)\n preds = preds_parser(instance, True)\n #forecasts = forecasts_parser(instance, True)\n \n ukf_params = instance.ukf_params\n index2 = ukf_params[\"index2\"]\n \n \"remove agents not in model to avoid wierd plots\"\n #obs *= nan_array\n truths *= nan_array\n preds *= nan_array\n #forecasts*= nan_array\n \n \"indices for unobserved agents\"\n not_index2 = np.array([i for i in np.arange(truths.shape[1]) if i not in index2])\n plts.pair_frame(truths, preds, obs_key, 10, destination)\n plts.error_hist(truths[::instance.sample_rate,index2], \n preds[::instance.sample_rate,index2],\"Observed Errors\")\n if len(not_index2)>0:\n plts.error_hist(truths[::instance.sample_rate, not_index2], \n preds[::instance.sample_rate, not_index2],\"Unobserved Errors\")\n \n #plts.path_plots(obs[::instance.sample_rate] , \"Observed\")\n plts.path_plots(preds[::instance.sample_rate], \"Predicted\")\n plts.path_plots(truths, \"True\")\n #plts.path_plots(forecasts[::instance.sample_rate], \"Forecasts\")\n\n if animate:\n #plts.trajectories(truths, \"plots/\")\n plts.pair_frames(truths, preds, obs_key,\n truths.shape[0], \"../../plots/\")", "def generate_plots(fixed, moving, warped, flows, train_loss, val_loss, reg_loss, epoch):\n moving = moving.detach().cpu().numpy()\n fixed = fixed.detach().cpu().numpy()\n warped = [w.detach().cpu().numpy() for w in warped]\n flows = [f.detach().cpu().numpy() for f in flows]\n\n fig = plt.figure(constrained_layout=True, figsize=(4 * 5, 4 * 3))\n ax_dict = fig.subplot_mosaic(\"\"\"\n FABCD\n LGHIE\n MKJWX\n \"\"\")\n\n ax_dict['F'].imshow(moving[0, 0, ...], cmap='gray')\n ax_dict['F'].set_title('Moving')\n\n ax_dict['W'].imshow(fixed[0, 0, ...], cmap='gray')\n ax_dict['W'].set_title('Fixed')\n\n for i, ax_name in enumerate(list(\"ABCDEX\")):\n ax_dict[ax_name].imshow(warped[i][0, 0, ...], cmap='gray')\n if ax_name == \"A\":\n ax_dict[ax_name].set_title(\"Affine\")\n else:\n ax_dict[ax_name].set_title(f\"Cascade {i}\")\n\n ax_dict['L'].plot(train_loss, color='red', label='train_loss')\n ax_dict['L'].plot(val_loss, label='val_loss', color='blue')\n ax_dict['L'].plot(reg_loss, label='train_reg_loss', color='green')\n ax_dict['L'].set_title(\"Losses\")\n ax_dict['L'].grid()\n ax_dict['L'].set_xlim(0, args.e)\n ax_dict['L'].legend(loc='upper right')\n ax_dict['L'].scatter(len(train_loss) - 1, train_loss[-1], s=20, color='red')\n ax_dict['L'].scatter(len(val_loss) - 1, val_loss[-1], s=20, color='blue')\n ax_dict['L'].scatter(len(reg_loss) - 1, reg_loss[-1], s=20, color='green')\n\n for i, ax_name in enumerate(list(\"GHIJKM\")):\n plot_grid(ax_dict[ax_name], flows[i][0, ...])\n if ax_name == \"G\":\n ax_dict[ax_name].set_title(\"Affine\")\n else:\n ax_dict[ax_name].set_title(f\"Cascade 
{i}\")\n\n plt.suptitle(f\"Epoch {epoch}\")\n plt.savefig(f'./ckp/visualization/epoch_{epoch}.png')", "def plot_model(model, inputs, outputs, tss=90):\n mdl = np.load(model)\n sys = ss(mdl['A'], mdl['B'], mdl['C'], mdl['D'],1)\n gain_matrix = dcgain(sys).T\n num_i = len(inputs)\n num_o = len(outputs)\n fig, axs = plt.subplots(num_i,num_o, figsize=(3*len(outputs), 2*len(inputs)), facecolor='w', edgecolor='k')\n fig.suptitle('Step responce: '+model)\n T = np.arange(tss)\n for idx_i in range(num_i):\n for idx_o in range(num_o):\n ax = axs[idx_i][idx_o]\n t,y_step = step_response(sys,T, input=idx_i, output=idx_o)\n gain = round(gain_matrix[idx_i][idx_o],4)\n ax.plot(t, y_step,color='r')\n if idx_i == 0:\n ax.set_title(outputs[idx_o], rotation='horizontal', ha='center', fontsize=10)\n if idx_o == 0:\n ax.set_ylabel(inputs[idx_i], rotation=90, fontsize=10)\n ax.grid(color='k', linestyle='--', linewidth=0.5)\n ax.tick_params(axis='x', colors='red',size=0,labelsize=4)\n ax.tick_params(axis='y', colors='red',size=0,labelsize=4)\n ax.annotate(str(gain),xy=(.72,.8),xycoords='axes fraction')\n # fig.tight_layout()\n plt.show()", "def generate_prediction_plots(y_true: np.ndarray,\n y_pred: np.ndarray,\n mses: np.ndarray,\n n_agents: int,\n n_options: int,\n save_path: Optional[Path] = None) -> None:\n n_to_plot = 10\n\n fig, axes = plt.subplots(n_to_plot * n_options, 2, figsize=(20, 60))\n trues, preds = select_predictions(mode='random', n_to_sample=n_to_plot, y_true=y_true, y_pred=y_pred)\n plot_preds(axes=axes, y_true=trues, y_pred=preds, n_agents=n_agents, n_options=n_options)\n plt.tight_layout()\n\n if save_path is not None:\n plt.savefig(save_path.joinpath(\"random_predictions.png\"), dpi=150)\n\n fig, axes = plt.subplots(n_to_plot * n_options, 2, figsize=(20, 60))\n trues, preds = select_predictions(mode='worst',\n n_to_sample=n_to_plot,\n y_true=y_true,\n y_pred=y_pred,\n mses=mses)\n plot_preds(axes=axes, y_true=trues, y_pred=preds, n_agents=n_agents, n_options=n_options)\n plt.tight_layout()\n\n if save_path is not None:\n plt.savefig(save_path.joinpath(\"worst_predictions.png\"), dpi=150)\n else:\n plt.show()\n\n plt.close('all')", "def plot_predictions(self):\n\n plt.title(\"Targets vs. 
Predictions\")\n plt.plot(self.T, label=\"Targets\")\n plt.plot(self.Y, label=\"Predictions\")\n plt.xlabel(\"Sample number\")\n plt.legend()\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def partial_dependence_plot(model, data, important_labels, feature_names):\n n_plots_per_row = 3\n n_plots = ceil(important_labels.shape[0] / n_plots_per_row)\n\n for plot_index, x_index in enumerate(important_labels, 1):\n target = X_train[:, x_index]\n unique_target = np.unique(target)\n n_unique = unique_target.shape[0]\n\n is_categorical = n_unique == 2\n if is_categorical:\n x_points = unique_target\n y_points = np.zeros_like(unique_target)\n else:\n # for numeric values, generate a fix number of values\n # in between the min and max value of the target column\n n_points = min(n_unique, 50)\n x_points = np.linspace(np.min(target), np.max(target), n_points)\n y_points = np.zeros_like(x_points)\n\n for i in range(x_points.shape[0]):\n x_data = data.copy()\n x_data[:, x_index] = x_points[i]\n y_pred = model.predict(x_data)\n y_points[i] = np.mean(y_pred)\n\n plt.subplot(n_plots, n_plots_per_row, plot_index)\n if is_categorical:\n plt.bar(x_points, y_points)\n else:\n plt.plot(x_points, y_points)\n\n plt.title(feature_names[x_index])\n\n plt.tight_layout()\n plt.show()", "def plot_predictions(self, names=None, min_=1, max_=1000):\n \n if not names:\n names = [*self.models.keys()] + [\"test\", \"final\"]\n\n arr = range(min_, max_, int(30/self.conf[\"time_step\"]))\n\n plt.figure(figsize=(16, 7), dpi=75)\n\n for name in names:\n plt.plot(np.concatenate(self.predict.iloc[arr][name].to_numpy()), label=name)\n\n plt.title(\"Predictions\")\n plt.legend()\n plt.show()", "def parity_plot(y_pred, y_act):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(y_act, y_pred)\n plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],\n lw=4, color='r')\n plt.xlabel('Actual')\n plt.ylabel('Predicted')\n\n return fig", "def display_All_T_Confidence(self):\n for s in ('300', '310', '323', '343'):\n SVM = ResampleSVM(self.FormatDataFromCSV(\"PD_P1_SVM/Experimental Data P1/ER2_T\" + s + \".csv\"))\n self.DisplayTernaryPhaseScatter(\n self.FormatDataFromCSV(\"PD_P1_SVM/Experimental Data P1/ER2_T\" + s + \".csv\")[1], 10, s + \" Lab Data\")\n optimal = pd.read_csv(\"T\" + s + \"-OUT.csv\").values.tolist()[0][1:3]\n print(\"Optimal Parameters used: \")\n print(optimal)\n 
self.DisplayTernaryPhaseScatter(SVM.generate_all_phase(SVM.data_in[1], optimal[0], optimal[1], .01), 7,\n s + \" SVM Optimized\")\n data_in = pd.read_csv(\"Confidence\" + s + \".csv\").values.tolist()\n\n conf = list(map(lambda x: x[4], data_in))\n fig = go.Figure(go.Scatterternary({\n 'mode': 'markers',\n 'a': list(map(lambda x: x[3], data_in)),\n 'b': list(map(lambda x: x[2], data_in)),\n 'c': list(map(lambda x: x[1], data_in)),\n 'text': conf,\n 'marker': {\n 'symbol': 0,\n 'color': conf,\n 'size': 7,\n 'colorbar': dict(title=\"Colorbar\"),\n 'colorscale': \"Viridis\"}\n })\n )\n\n fig.update_layout(\n title=go.layout.Title(\n text=\"Confidence T-\" + s))\n plotly.offline.plot(fig, filename=\"ConfidenceT\" + s + \".html\")\n return", "def draw_predictions(ax, outputs):\n for output in outputs:\n boxes = output['box3d_lidar'].cpu().detach().numpy()\n confidences = output['scores'].cpu().detach().numpy()\n classes = output['label_preds'].cpu().detach().numpy()\n class_txts = at(class_to_name, *classes)\n for k, box3d in enumerate(boxes):\n x, y, z, w, l, h, r = box3d\n drawBoundingBoxes(ax, x, y, z, w, l, h, r, col='green', linewidth=0.8)\n ax.text(x+(w/2.0)+1, y+(l/2.0)+2, z+h, f\"{class_txts[k]}<{confidences[k]:.2f}>\", color=(0.4, 0.95, 0.3), fontsize=8.0, rotation=math.degrees(r))", "def logit_model_plots(ds,Population = 'Population_%',Event_rate ='Event_rate',decile ='Band',Cumulative_Non_Event = 'Cumulative_Non_Event_%',Cumulative_Event= 'Cumulative_Event_%',sample_type ='Development'):\n \n import matplotlib.pyplot as plt\n fig, (ax1, ax2) = plt.subplots(1, 2,figsize=(15, 4))\n _= ax1.plot(plot_df[Cumulative_Non_Event],plot_df[Cumulative_Event])\n _= ax1.set_ylabel(Cumulative_Non_Event)\n _= ax1.set_title('Gini Curve : '+str(sample_type) +' sample')\n _= ax1.set_xlabel(Cumulative_Event)\n\n _= plot_df[Population].plot(kind='bar', color='b', width = 0.35,legend=True , label = Population)\n _= plot_df[Event_rate].plot(kind='line',color ='r', secondary_y=True,legend=True, label = Event_rate)\n _= ax2.set_xticklabels(plot_df[decile])\n _= ax2.set_ylim(0,plot_df[Event_rate].max()*0.15)\n _= ax2.right_ax.set_ylim(0,plot_df[Event_rate].max()*1.5)\n _= ax2.right_ax.set_ylabel(Event_rate)\n _= ax2.set_ylabel(Population)\n _= ax2.set_title('Decile Wise Event Rate : ' +str(sample_type) +' sample')\n _= ax2.set_xlabel(decile)\n plt.show()", "def plot(self) -> None:\n cw_l2_data_list = list(); cw_linf_data_list = list()\n\n for model in self.model_list:\n cw_l2_data_list.append(joblib.load(model + \"/stat/mse-rmse-si-mae-cw_l2_1.pkl\"))\n\n cw_l2_attack = list(zip(self.model_list, cw_l2_data_list))\n\n for model in self.model_list:\n cw_linf_data_list.append(joblib.load(model + \"/stat/mse-rmse-si-mae-cw_inf_1.pkl\"))\n\n cw_linf_attack = list(zip(self.model_list, cw_linf_data_list))\n\n # RMSE v.s. 
MAE over change budget\n # There will be one graph for each manipulation\n # CW_L2 ATTACK\n for datum in cw_l2_attack:\n ran_color_list = self._random_color_picker(2)\n fig, axis_1 = plt.subplots()\n\n # Generate x_axis\n x_axis = list()\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n # Sort data in datum[1]\n data_dict = self._sort_dict(x_axis, datum[1])\n\n # PLOT RMSE ON AXIS 1\n # Generate y_axis ticks for RMSE\n rmse_values = list()\n for key in data_dict:\n rmse_values.append(data_dict[key][\"rmse\"])\n\n # Generate 10 ticks for the y_axis\n y_axis_ticks = np.linspace(0.0, 0.6, num=10, endpoint=True)\n\n # Plot RMSE\n axis_1.plot(x_axis, rmse_values, color=ran_color_list[0], linestyle=\"solid\")\n axis_1.set_xlabel(\"Perturbation Budget\")\n axis_1.set_ylabel(\"Root Mean Squared Error (RMSE)\", color=ran_color_list[0])\n axis_1.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_1.get_yticklabels(), axis_1.get_yticklines()):\n tick_label.set_color(ran_color_list[0])\n tick_line.set_color(ran_color_list[0])\n\n # PLOT MAE ON AXIS 2\n axis_2 = axis_1.twinx()\n\n # Generate y-axis ticks for MAE\n mae_values = list()\n for key in data_dict:\n mae_values.append(data_dict[key][\"mae\"])\n\n\n # Plot MAE\n axis_2.plot(x_axis, mae_values, color=ran_color_list[1], linestyle=\"solid\")\n axis_2.set_ylabel(\"Mean Absolute Error (MAE)\", color=ran_color_list[1])\n axis_2.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_2.get_yticklabels(), axis_2.get_yticklines()):\n tick_label.set_color(ran_color_list[1])\n tick_line.set_color(ran_color_list[1])\n\n model_tag = datum[0].split(\"/\"); model_tag = model_tag[-1]\n plt.savefig(self.save_path + \"/{}_rmse-and-mae-as-perturbation-budget-increases-for-cw_l2-attack-on-model-{}.png\".format(self.plot_name, model_tag), \n bbox_inches=\"tight\")\n plt.close()\n\n # CW_Linf ATTACK\n for datum in cw_linf_attack:\n ran_color_list = self._random_color_picker(2)\n fig, axis_1 = plt.subplots()\n\n # Generate x_axis\n x_axis = list()\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n # Sort data in datum[1]\n data_dict = self._sort_dict(x_axis, datum[1])\n\n # PLOT RMSE ON AXIS 1\n # Generate y_axis ticks for RMSE\n rmse_values = list()\n for key in data_dict:\n rmse_values.append(data_dict[key][\"rmse\"])\n\n # Plot RMSE\n axis_1.plot(x_axis, rmse_values, color=ran_color_list[0], linestyle=\"solid\")\n axis_1.set_xlabel(\"Perturbation Budget\")\n axis_1.set_ylabel(\"Root Mean Squared Error (RMSE)\", color=ran_color_list[0])\n axis_1.set_yticks(y_axis_ticks)\n\n for tick_label, tick_line in zip(axis_1.get_yticklabels(), axis_1.get_yticklines()):\n tick_label.set_color(ran_color_list[0])\n tick_line.set_color(ran_color_list[0])\n\n # PLOT MAE ON AXIS 2\n axis_2 = axis_1.twinx()\n\n # Generate y-axis ticks for MAE\n mae_values = list()\n for key in data_dict:\n mae_values.append(data_dict[key][\"mae\"])\n\n # Plot MAE\n axis_2.plot(x_axis, mae_values, color=ran_color_list[1], linestyle=\"solid\")\n axis_2.set_ylabel(\"Mean Absolute Error (MAE)\", color=ran_color_list[1])\n axis_2.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_2.get_yticklabels(), axis_2.get_yticklines()):\n tick_label.set_color(ran_color_list[1])\n tick_line.set_color(ran_color_list[1])\n \n model_tag = datum[0].split(\"/\"); model_tag = model_tag[-1]\n plt.savefig(self.save_path + 
\"/{}_rmse-and-mae-as-perturbation-budget-increases-for-cw_linf-attack-on-model-{}.png\".format(self.plot_name, model_tag),\n bbox_inches=\"tight\")\n plt.close()\n \"RMSE and MAE as Perturbation Budget increases for CW_Linf attack on model {}\".format(model_tag)\n \n # Scattter Index over the change budget\n # All the manipulations will be put on the same graph.\n # CW_L2 ATTACK\n plt.figure()\n plt.xlabel(\"Perturbation Budget\"); plt.ylabel(\"Scatter Index\")\n ran_color_list = self._random_color_picker(len(cw_l2_attack)); i = 0\n\n # Find maximum scatter index value\n scatter_values = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n scatter_values.append(datum[1][key][\"scatter_index\"])\n\n # Generate y_axis ticks; generate 10 ticks\n y_axis_ticks = np.linspace(0.0, float(Decimal(str(max(scatter_values))) + Decimal(\"0.1\")), num=10, endpoint=True)\n plt.yticks(y_axis_ticks)\n\n # Generate x_axis\n x_axis = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n formal_names = FormalNameMap()\n for datum in cw_l2_attack:\n values = list()\n data_dict = self._sort_dict(x_axis, datum[1])\n for key in data_dict:\n values.append(data_dict[key][\"scatter_index\"])\n\n # Append values to the plot\n line_name = datum[0].split(\"/\"); line_name = line_name[-1]\n formal_name = formal_names.getformalname(line_name) if formal_names.hasname(line_name) else line_name\n if \"vanilla\" in line_name:\n plt.plot(x_axis, values, color=ran_color_list[i], linewidth=3, linestyle=self._random_linestyle(), label=formal_name)\n\n else:\n plt.plot(x_axis, values, color=ran_color_list[i], linestyle=self._random_linestyle(), label=formal_name)\n \n i += 1\n\n plt.legend()\n plt.savefig(self.save_path + \"/{}_scatter-index-as-perturbation-budget-increases-for-cw_l2-attack.png\".format(self.plot_name),\n bbox_inches=\"tight\")\n plt.close()\n\n # CW_Linf ATTACK\n plt.figure()\n plt.xlabel(\"Perturbation Budget\"); plt.ylabel(\"Scatter Index\")\n ran_color_list = self._random_color_picker(len(cw_linf_attack)); i = 0\n\n # Find maximum scatter index value\n scatter_values = list()\n for datum in cw_linf_attack:\n for key in datum[1]:\n scatter_values.append(datum[1][key][\"scatter_index\"])\n\n # Generate y_axis ticks; generate 10 ticks\n y_axis_ticks = np.linspace(0.0, float(Decimal(str(max(scatter_values))) + Decimal(\"0.1\")), num=10, endpoint=True)\n plt.yticks(y_axis_ticks)\n\n # Generate x_axis\n x_axis = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n formal_names = FormalNameMap()\n for datum in cw_linf_attack:\n values = list()\n data_dict = self._sort_dict(x_axis, datum[1])\n for key in data_dict:\n values.append(data_dict[key][\"scatter_index\"])\n\n # Append values to the plot\n line_name = datum[0].split(\"/\"); line_name = line_name[-1]\n formal_name = formal_names.getformalname(line_name) if formal_names.hasname(line_name) else line_name\n if \"vanilla\" in line_name:\n plt.plot(x_axis, values, color=ran_color_list[i], linewidth=3, linestyle=self._random_linestyle(), label=formal_name)\n\n else: \n plt.plot(x_axis, values, color=ran_color_list[i], linestyle=self._random_linestyle(), label=formal_name)\n \n i += 1\n\n plt.legend()\n plt.savefig(self.save_path + \"/{}_scatter-index-as-perturbation-budget-increases-for-cw_linf-attack.png\".format(self.plot_name),\n bbox_inches=\"tight\")\n plt.close()" ]
[ "0.65088475", "0.64450926", "0.64100355", "0.63321865", "0.63073844", "0.6301566", "0.6285488", "0.62601894", "0.62367177", "0.6230777", "0.62141186", "0.61841685", "0.6178527", "0.61648494", "0.6145391", "0.6144818", "0.61439085", "0.61359733", "0.6135566", "0.6133642", "0.6119269", "0.6114262", "0.6114262", "0.61073625", "0.60955757", "0.6095275", "0.6095035", "0.60944045", "0.60906076", "0.60814655" ]
0.73881763
0
Prints report of a cross validation. This method takes in a list of the r^2s and the coefficients and prints a neat report of the results of a cross validation.
def print_results(name, train_r2, val_r2, coeffs, poly): if poly: print(f"With Polynomial Features: degree = {poly}...\n") print(f'{name} Regression Scores: ', val_r2, '\n') print(f'{name}.R. Train - Mean R^2: {np.mean(train_r2):.3f} +- {np.std(train_r2):.3f}') print(f'{name}.R. Val - Mean R^2: {np.mean(val_r2):.3f} +- {np.std(val_r2):.3f}') print('\nCoefficients: ', coeffs) print('\n\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_validation_visualization(lambds, score_tr, score_te):\n plt.semilogx(lambds, score_tr, marker=\".\", color='b', label='train score');\n plt.semilogx(lambds, score_te, marker=\".\", color='r', label='test score');\n plt.xlabel(\"lambda\")\n plt.ylabel(\"score\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_test\")", "def tenfold_cross_validation(X, y):\n\n i = 0\n x_score = []\n y_score = []\n\n for i in range(1, 11):\n for train_index, test_index in KFold(10).split(X):\n x_train, x_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n # change the parameters to see how each parameter affects the l1inear classifier\n linear_classifier = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)\n\n # start training the classifier\n linear_classifier.fit(x_train, y_train)\n\n # create and plot the confusion matrix\n # cross validation done with cross_val_\n y_train_pred = cross_val_predict(linear_classifier, x_test, y_test, cv=10)\n\n print(\"\\n Statistics and Confusion matrix obtained with pandas_ml: \\n\")\n cm = ConfusionMatrix(y_test, y_train_pred)\n stats = cm.stats()\n\n file = open(\"linear_classification_9000_cross_validation_\" + str(i) + \".txt\", \"w\")\n file.write(str(stats))\n file.close()\n\n # cm.print_stats()\n # print confusion matrix\n cm.plot(normalized=True)\n plt.show()", "def visualize_cross_validation_results(cross_val_results, plots_filepath):\n\n pair_model_scores, pair_model_stds, \\\n siamese_model_scores_2, siamese_model_stds_2, \\\n siamese_model_scores_10, siamese_model_stds_10 = cross_val_results\n param_names = (\"NBCH1\", \"NBCH2\", \"NBFCH\", \"BATCH_NORM\", \"SKIP_CON\", \"LR\")\n\n def aggregate_results(scores, stds):\n \"\"\"\n Helper function to aggregate score means and standard deviations for a model across parameter values\n\n :param scores: dictionary of score means {param_combo: score_mean}\n :param stds: dictionary of score stds {param_combo: score_std}\n\n :returns: list of tuples of pandas.Dataframe objects containing aggregated mean and std data\n \"\"\"\n\n scores = pd.DataFrame(scores.values(),\n index=scores.keys(),\n columns=[\"SCORE MEAN\", ])\n stds = pd.DataFrame(stds.values(),\n index=stds.keys(),\n columns=[\"SCORE STD\", ])\n scores.index.name = param_names\n stds.index.name = param_names\n data = []\n for param_gropby_levels in ((0,), (1,), (2,), (3, 4), (5,)):\n aggregate_scores = scores.groupby(level=param_gropby_levels).mean()\n aggregate_stds = scores.groupby(level=param_gropby_levels).std()\n data.append((aggregate_scores, aggregate_stds))\n return data\n\n pair_model_data = aggregate_results(pair_model_scores, pair_model_stds)\n siamese_model_2_data = aggregate_results(siamese_model_scores_2, siamese_model_stds_2)\n siamese_model_10_data = aggregate_results(siamese_model_scores_10, siamese_model_stds_10)\n\n # Group results for all models\n model_names = (\"Pair\", \"Siamese 2\", \"Siamese 10\")\n grouped_data = []\n for pair_model_group_data, siamese_model_2_group_data, siamese_model_10_group_data in zip(pair_model_data,\n siamese_model_2_data,\n siamese_model_10_data):\n score_means = (pair_model_group_data[0], siamese_model_2_group_data[0], siamese_model_10_group_data[0])\n score_mean_data = pd.concat(score_means, axis=1)\n score_mean_data.columns = model_names\n\n score_stds = (pair_model_group_data[1], siamese_model_2_group_data[1], siamese_model_10_group_data[1])\n score_std_data = pd.concat(score_stds, axis=1)\n 
score_std_data.columns = model_names\n\n grouped_data.append((score_mean_data, score_std_data))\n\n plots_param_names = (\"nbch1\", \"nbch2\", \"nbfch\", \"batch_norm+skip_con\", \"lr\")\n for i, (plot_param_names, (score_mean_data, score_std_data)) in enumerate(zip(plots_param_names, grouped_data)):\n plt.figure(figsize=(10, 5))\n score_mean_data.plot(kind=\"line\" if plot_param_names == \"lr\" else \"bar\",\n yerr=score_std_data,\n capsize=5,\n ylim=(0.4, 1.1),\n colormap=colormap_brg_darker)\n plt.title(\"Cross validation results for parameters:\\n{}\".format(plot_param_names), fontsize=18)\n plt.xlabel(\"Parameter value\", fontsize=14)\n plt.ylabel(\"Average accuracy\", fontsize=14)\n plt.xticks(fontsize=12, rotation=30)\n plt.yticks(fontsize=12)\n plt.legend(title=\"Model\", title_fontsize=10)\n plt.tight_layout()\n plt.savefig(fname=plots_filepath + \"cross_validation_{}.eps\".format(plot_param_names),\n dpi=\"figure\", format=\"eps\")\n plt.close()", "def print_report(\n m, X_valid, y_valid, t=0.5, X_train=None, y_train=None, show_output=True\n):\n # X_train = X_train.values\n # X_valid = X_valid.values\n\n if isinstance(m, list):\n probs_valid = predict_ensemble(m, X_valid)\n y_val_pred = adjusted_classes(probs_valid, t)\n\n if X_train is not None:\n probs_train = predict_ensemble(m, X_train)\n y_train_pred = adjusted_classes(probs_train, t)\n else:\n probs_valid = m.predict_proba(X_valid)[:, 1]\n y_val_pred = adjusted_classes(probs_valid, t)\n\n if X_train is not None:\n probs_train = m.predict_proba(X_train)[:, 1]\n y_train_pred = adjusted_classes(probs_train, t)\n\n res = [\n roc_auc_score(y_valid, probs_valid),\n f1_score(y_valid, y_val_pred),\n confusion_matrix(y_valid, y_val_pred),\n ]\n result = f\"AUC valid: {res[0]} \\nF1 valid: {res[1]}\"\n\n if X_train is not None:\n res += [\n roc_auc_score(y_train, probs_train),\n f1_score(y_train, y_train_pred),\n ]\n result += f\"\\nAUC train: {res[3]} \\nF1 train: {res[4]}\"\n\n acc_train = m.score(X_train, y_train)\n acc_valid = m.score(X_valid, y_valid)\n\n if show_output:\n logging.info(f\"train acc: {acc_train}\")\n logging.info(f\"test acc: {acc_valid} \")\n\n logging.info(result)\n plot_confusion_matrix(\n m, X_valid, y_valid, display_labels=y_valid.unique()\n )\n logging.info(classification_report(y_valid, y_val_pred))\n plt.show()\n return {\n \"train\": {\"AUC\": res[3], \"F1\": res[4], \"acc\": acc_train},\n \"test\": {\"AUC\": res[0], \"F1\": res[1], \"acc\": acc_valid},\n }", "def visualization(epochs, mse_tr, mse_te):\n plt.semilogx(epochs, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(epochs, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"k\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation\")", "def poly_cross_validation_visualization(polys, accuracies):\n colors = ['r', 'b', 'y', 'g']\n labels = ['group_0', 'group_1', 'group_2', 'group_3']\n for i in range(len(accuracies)):\n plt.plot(polys, accuracies[i], marker=\".\", color=colors[i], label=labels[i])\n plt.xlabel(\"degree\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"./img/polynomial_cross_validation\")", "def print_evaluations(ytrue, ypred, model):\n\n print(f'How does model {model} score:')\n print(f'The accuracy of the model is: {round(accuracy_score(ytrue, ypred), 3)}')\n print(f'The precision of the model is: {round(precision_score(ytrue, ypred, pos_label=\"bastille_\" 
), 3)}')\n print(f'The recall of the model is: {round(recall_score(ytrue, ypred, pos_label=\"bastille_\"), 3)}')\n print(f'The f1-score of the model is: {round(f1_score(ytrue, ypred, pos_label=\"bastille_\"), 3)}')", "def _print_eval_results(loss_train, cls_loss_train, reg_loss_train, loss_val,\n cls_loss_val, reg_loss_val, map_score):\n sys.stdout.write(\"\\r\")\n sys.stdout.write(f\"loss={loss_train:.4f}/{loss_val:.4f} | \")\n sys.stdout.write(f\"cls_loss={cls_loss_train:.4f}/{cls_loss_val:.4f} | \")\n sys.stdout.write(f\"reg_loss={reg_loss_train:.4f}/{reg_loss_val:.4f} | \")\n sys.stdout.write(f\"mAP={map_score:.4f}\\n\")", "def ridge_cross_validation_visualization(lambdas, accuracies):\n colors = ['r', 'b', 'y', 'g']\n labels = ['group_0', 'group_1', 'group_2', 'group_3']\n for i in range(len(accuracies)):\n plt.semilogx(lambdas, accuracies[i], marker=\".\", color=colors[i], label=labels[i])\n plt.xlabel(\"lambda\")\n plt.ylabel(\"accuracy\")\n plt.xlim(1e-4, 1)\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"./img/ridge_cross_validation\")", "def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")", "def display_cross_val_comparison(self, X, y, cross_val= 5):\n from sklearn.model_selection import KFold, cross_val_score\n import pandas as pd\n import seaborn as sns\n import matplotlib.pyplot as plt\n\n folds = KFold(n_splits=cross_val, shuffle=True, random_state=11)\n d = pd.DataFrame({self.model_list[0][1]: list(range(cross_val))})\n for model_tuple in self.model_list:\n d[model_tuple[1]] = cross_val_score(model_tuple[0], X, y, cv=folds)\n\n sns.boxplot(data=d)\n plt.xlabel(\"Classifier\")\n plt.ylabel(\"R^2 Score (Higher is Better)\")\n plt.title(\"Comparison between models\")\n plt.show()\n return d", "def crossvalidate(*args, **kwargs):\n\n scores = []\n j = 0\n for i, _ in enumerate(data):\n if i in good_patients:\n\n if 'silent' in kwargs:\n if kwargs['silent']:\n pass\n else:\n print \"real patient index:\", i\n else:\n print \"real patient index:\", i\n\n kwargs['patient_index'] = j\n score, reconstruction = runmodel(*args, **kwargs)\n scores.append(score)\n\n if 'save_reconstruction' in kwargs:\n if kwargs['save_reconstruction']:\n scipy.misc.imsave(\"patient_{}_reconstruction.png\".format(i), reconstruction)\n j += 1\n\n cvmodel = args[0].__class__.__name__\n print \"{} overall cross validated score {}\".format(cvmodel, np.mean(scores))\n return np.mean(scores)", "def evaluate_print(true_labels, predicted_labels):\n accuracy, precision, recall, f1 = evaluate(true_labels, predicted_labels)\n \n print('Accuracy:', accuracy)\n print('Precision:', precision)\n print('Recall:', recall)\n print('F1:', f1)", "def cross_validation_visualization_accuracy_multiple(epochs, accs, save=False, filename=\"cross_validation_acc_multiple\"):\n \n for i in range(accs.shape[0]):\n plt.plot(epochs, accs[i], marker=\".\", color='r', label=str(i+1)+'th accuracy')\n \n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def cross_validation_visualization_accuracy(epochs, accs, save=False, filename=\"cross_validation_acc\"):\n 
plt.plot(epochs, accs, marker=\".\", color='r', label='accuracy')\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def print_results(self, regressor=False):\n if regressor:\n for i, model in enumerate(self.list_of_models):\n print \"Model: \", model\n print \"R2 score: \", self.r2_scores[i]\n print \"MSE: \", self.mse_scores[i]\n else:\n for i, model in enumerate(self.list_of_models):\n print \"Model: \", model\n print \"F1 score: \", self.f1_scores[i]\n print \"recall score: \", self.recall_scores[i]\n print \"precision score: \", self.precision_scores[i]\n print \"accuracy score: \", self.accuracy_scores[i]", "def print_score(classifier,X_test,y_test):\n print(\"Test results:\\n\")\n print('Accuracy Score: {0:.4f}\\n'.format(accuracy_score(y_test,classifier.predict(X_test))))\n print('Classification Report:\\n{}\\n'.format(classification_report(y_test,classifier.predict(X_test))))\n print('Confusion Matrix:\\n{}\\n'.format(confusion_matrix(y_test,classifier.predict(X_test))))", "def print_classification_report(y_train,y_test,y_train_preds,y_test_preds): \n try:\n for i in range(y_train.shape[-1]):\n test = (y_test.iloc[:,i].values, y_test_preds[:,i])\n train = (y_train.iloc[:,i].values, y_train_preds[:,i])\n print(f\"---------------{y_train.columns[i]}------train:-------------\")\n print(classification_report(*train))\n print(f\"----TEST---\")\n print(classification_report(*test))\n except Exception as e:\n try:\n print(f\"--------train:-------------\")\n print(classification_report(y_train, y_train_preds))\n print(f\"---------TEST--------------\")\n print(classification_report(y_test, y_test_preds))\n except Exception as e2:\n print('could not do report',e, e2)", "def roc_print(label, y_true, y_pred):\n for i in range(len(label)):\n FP = np.logical_and(y_true != y_pred[i], y_pred[i] == 1).sum()\n FN = np.logical_and(y_true != y_pred[i], y_pred[i] == 0).sum()\n TP = np.logical_and(y_true == y_pred[i], y_true == 1).sum()\n TN = np.logical_and(y_true == y_pred[i], y_true == 0).sum()\n FPR = 1. * FP / (FP + TN)\n TPR = 1. * TP / (TP + FN)\n PPV = 1. 
* TP / (TP + FP)\n print(\"FPR of \", label[i], \" is \", np.round(FPR, 5))\n print(\"TPR of \", label[i], \" is \", np.round(TPR, 5))\n print(\"PPV of \", label[i], \" is \", np.round(PPV, 5))\n print('')", "def cross_valid(model,x,folds,metric,verbose=True): \r\n\r\n score=[]\r\n \r\n\r\n kf = KFold(folds,shuffle=False,random_state=0) \r\n\r\n\r\n i=0\r\n for train_index, test_index in kf.split(x):\r\n\r\n xtrain = x[train_index,:]\r\n xtest = x[test_index,:]\r\n\r\n model.fit(xtrain[:,:-1],xtrain[:,-1])\r\n\r\n ypred = model.predict(xtest[:,:-1])\r\n\r\n ytrue= xtest[:,-1] \r\n \r\n \r\n if metric == 'mae':\r\n score.append(mae(ytrue,ypred))\r\n elif metric == 'mse':\r\n score.append(mse(ytrue,ypred))\r\n elif metric == 'rrmse':\r\n score.append(rrmse(ytrue,ypred))\r\n\r\n else:\r\n score.append(rmse(xtest[:,-1],ypred))\r\n\r\n if verbose:\r\n print('-'*30)\r\n print(f'\\nFold {i+1} out of {folds}')\r\n print(f'{metric}: {score[i]}')\r\n\r\n i+=1\r\n\r\n if verbose:\r\n print(f'\\n Overall Score:')\r\n print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')\r\n\r\n\r\n return score", "def report_cv_results(self, results, scores=['score'], filename=None, n_top=5):\n res = \"\"\n for score in scores:\n\n res += \"{}\\n\".format(score)\n res += \"-------------------------------\\n\"\n for i in range(1, n_top + 1):\n candidates = np.flatnonzero(results['rank_test_{}'.format(score)] == i)\n for candidate in candidates:\n res += \"Model with rank: {0}\\n\".format(i)\n res += \"Mean validation {0}: {1:.3f} (std: {2:.3f})\\n\".format(\n score,\n results['mean_test_{}'.format(score)][candidate],\n results['std_test_{}'.format(score)][candidate])\n res += \"Parameters: {0}\\n\".format(results['params'][candidate])\n res += \"\\n\"\n\n if filename is not None:\n with open(filename, 'w') as f:\n f.write(res)\n\n self._upload_to_bucket(filename, filename)\n\n logging.info(res)", "def cross_validation(self):\r\n kfold = KFold(10, shuffle=True, random_state=1)\r\n data = self.read_data()\r\n # error from each kth iteration\r\n errors = []\r\n for train, test in kfold.split(data):\r\n\r\n #Splitting into test and training data\r\n X_test, Y_test = data[test][:, 1], data[test][:, 2]\r\n X_train, Y_train = data[train][:, 1], data[train][:, 2]\r\n\r\n #Training on the split data\r\n weights, design_matrix = self.train(X_train, Y_train)\r\n\r\n y_pred = self.make_prediction(X_test, weights)\r\n self.plot(y_true=Y_test, y_pred=y_pred, x=X_test)\r\n\r\n #error matrix\r\n errors.append(np.mean(y_pred - Y_test) ** 2)\r\n\r\n #cross-validation parameter taken as mean of errors obtained from each iteration\r\n print(\"%0.10f mean with a standard deviation of %0.10f across the k-folds\" % (np.mean(errors), np.std(errors)))", "def show_metrics(y_true, y_pred, target_names):\n print(\"Hamming Loss: {}\".format(hamming_loss(y_true, y_pred)))\n print(\"Zero One Loss: {}\".format(zero_one_loss(y_true, y_pred)))\n print(\"Hamming Loss Non Zero: {}\\n\".format(hamming_loss_non_zero(y_true, np.array(y_pred))))\n print(classification_report(y_true, y_pred, target_names=target_names))", "def output_report(x, y, clf, test_flag=False):\n\n # Calculate recall at k time series\n y, y_prob, y_pred, recall_at_k, avg_recall = calculate_recall_at_k_time_series(x, y, clf)\n # Output confusion_matrix\n print(\"Confusion matrix: \\n\", confusion_matrix(y, y_pred))\n # Output recall_at_k\n print(\"Recall at {}, {}, {}, {}, {}: \".format(5, 10, 20, 50, 100), end=' ')\n for top in [5, 10, 20, 50, 100]:\n if top == 100:\n 
print(str(round(recall_at_k[top - 1], 2)) + \" accordingly\")\n else:\n print(\"{}, \".format(round(recall_at_k[top - 1], 2)), end='')\n print(\"Average recalls over 100: \", round(avg_recall, 2))\n if test_flag:\n _, recall_at_k, _, _, _ = calculate_recall_at_k(y_prob[:, 1], y, k_max=y.shape[0])\n print(\n \"Positions of escalation flags: \", ([1] if recall_at_k[0] != 0 else []) +\n [i + 1 for i in range(1, len(recall_at_k)) if\n recall_at_k[i] != recall_at_k[i - 1]])", "def report(results, n_top=1):\n for i in range(1, n_top + 1):\n candidates = np.flatnonzero(results['rank_test_score'] == i)\n for candidate in candidates:\n print(f\"Model with rank: {i}\")\n print(f\"Mean validation score: {results['mean_test_score'][candidate]} (std: {results['std_test_score'][candidate]}\")\n print(f\"Parameters: {results['params'][candidate]}\")", "def cross_validation(exp_name):\n click.echo(\"Mode: Cross-validation.\")\n # defaults = get_defaults()\n\n # fitted_model_filename = add_extension(fitted_model_filename)\n\n # derive final path for fitted model as base output path for fitted models + model filename\n # fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n # new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n # don't reserve dev set at this point since we need to do it in each cv fold\n boot_data = bootstrap(new_options=None, mode=\"cv\")\n\n defaults = boot_data['defaults']\n X_train, y_train = boot_data['data']\n\n cv = RepeatedStratifiedKFold(n_splits=defaults.EVAL.N_SPLITS,\n n_repeats=defaults.EVAL.N_REPEATS,\n random_state=defaults.MISC.SEED)\n\n s = time.time()\n outer_results, outer_preds = cross_validate(X=X_train, y=y_train,\n cv=cv,\n conf=defaults)\n print(\"Execution time: %s seconds.\" % (time.time() - s))\n\n # dump results\n # fitted_model_best_params_path = os.path.join(defaults.OUTPUT.PARAMS_PATH,\n # \"best_params_{}.pkl\".format(fitted_model_filename.split('.')[0]))\n\n outer_results_formatted = show_cross_val_results(outer_results, conf=defaults)\n\n cv_results_path = os.path.join(defaults.OUTPUT.RESULTS_PATH, \"cv_results_{}.csv\".format(exp_name))\n outer_results_formatted.to_csv(cv_results_path)\n\n # save predictions\n outer_preds_path = os.path.join(defaults.OUTPUT.PREDS_PATH, \"cv_pooled_preds_{}.pkl\".format(exp_name))\n save_obj(outer_preds, outer_preds_path)", "def ROC_plot(metrics, labels, title = '', filename = 'ROC') :\n\n for k in [0,1]: # linear & log\n\n fig,ax = plt.subplots()\n xx = np.logspace(-5, 0, 100)\n plt.plot(xx, xx, linestyle='--', color='black', linewidth=1) # ROC diagonal\n\n for i in range(len(metrics)) :\n plt.plot(metrics[i].fpr, metrics[i].tpr, label = '{}: AUC = {:.3f}'.format(labels[i], metrics[i].auc))\n\n plt.legend(loc=4)\n ax.set_xlabel('False Positive (background) rate $\\\\alpha$')\n ax.set_ylabel('True Positive (signal) rate $1-\\\\beta$')\n ax.set_title(title)\n\n if k == 0:\n plt.ylim(0.0, 1.0)\n plt.xlim(0.0, 1.0)\n ax.set_aspect(1.0/ax.get_data_ratio() * 1.0)\n plt.savefig(filename + '.pdf', bbox_inches='tight')\n\n if k == 1:\n plt.ylim(0.0, 1.0)\n plt.xlim(1e-4, 1.0)\n plt.gca().set_xscale('log')\n ax.set_aspect(1.0/ax.get_data_ratio() * 0.75)\n plt.savefig(filename + '_log.pdf', bbox_inches='tight')\n\n plt.close()", "def test_report(cv_rfc, lrc, x_test_variable, y_test_dep):\n # Ensure the function works\n try:\n cls.classification_report_image(\n cv_rfc, lrc, x_test_variable, y_test_dep)\n logging.info(\"Successfully Plotting Classification Results\")\n except 
Exception as err:\n logging.error(\"Errors in Plotting Classification Results\")\n raise err\n # Ensure the output exists\n for cols in [\"roc_curve\", \"explanation\"]:\n try:\n assert os.path.isfile(\"images/results/\"+cols+\".png\")\n except AssertionError as err:\n logging.error(\"Errors in generatingi %s classification file\", cols)\n raise err", "def cross_validate(X, Y, folds=5):\n\n log = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=200, multi_class='ovr', n_jobs=3,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False)\n \n\n \n\n\n scores_log = [] \n scores_forest = []\n index = np.arange(X.shape[0])\n score_log = 0\n score_forest = 0\n \n for i in range(folds):\n score_log = 0\n score_forest = 0\n \n test_index = np.random.choice(index, int(X.shape[0]*1/folds),replace=False)\n index = np.setdiff1d(np.arange(X.shape[0]),test_index)\n \n test_x = X[test_index]\n test_y = Y[test_index]\n\n log.fit(X[index],Y[index])\n pred_log = log.predict(test_x)\n \n ran.fit(X[index],Y[index])\n pred_ran = ran.predict(test_x)\n \n for i in range(len(test_y)):\n if(pred_log[i] == test_y[i]):\n score_log += 1\n if(pred_ran[i] == test_y[i]):\n score_forest += 1\n scores_log.append(score_log/len(test_y))\n scores_forest.append(score_forest/len(test_y))\n \n\n return (np.mean(scores_log),np.mean(scores_forest))", "def Classification_ROC_Report(X,Y,model): \n \n # Plot Classification report, Confustion Matrix, ROC\n labels = {0: 'CNV', 1: 'DME', 2: 'DRUSEN', 3: 'NORMAL'}\n\n # get predictions on the test set\n y_hat = model.predict(X)\n #\n Y_pred_classes = np.argmax(y_hat,axis = 1) \n Y_true = np.argmax(Y,axis = 1)\n\n # Classification report\n ax=plt.figure(figsize=(15,5))\n ax = plt.subplot(1,3,1)\n rpt = sklearn.metrics.classification_report(np.argmax(Y, axis=1), np.argmax(y_hat, axis=1), target_names=list(labels.values()))\n ax.axis('off')\n ax.annotate(rpt, \n xy = (1.0,0.5), \n xytext = (0, 0), \n xycoords='axes fraction', textcoords='offset points',\n fontsize=13, ha='right', va='center') \n\n # Plot confusion matrix\n cm_df = Confusion_Matrix(Y,y_hat,labels,normalization=True)\n ax = plt.subplot(1,3,2)\n sns.heatmap(cm_df, annot=True)\n score = model.evaluate(X, Y, verbose=1)\n ax.set_title('Confusion Matrix\\nresult: {0:.2f} - loss: {0:.2f}'.format(score[1], score[0]))\n ax.set_ylabel('True label')\n ax.set_xlabel('Predicted label')\n\n # Plot ROC\n lw=2\n n_classes = 4\n fpr, tpr, roc_auc = ROC(Y,y_hat,n_classes)\n\n # Plot all ROC curves\n ax = plt.subplot(1,3,3)\n ax.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n\n ax.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=4)\n\n colors = cycle(['aqua', 'darkorange', 'cornflowerblue', '#4DBD33'])\n for i, color in zip(range(n_classes), colors):\n ax.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='ROC curve of class {0} (area = {1:0.2f})'\n ''.format(labels[i], roc_auc[i]))\n\n ax.plot([0, 1], [0, 1], 'k--', lw=lw)\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.05])\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.set_title('ROC')\n ax.legend(loc=\"lower right\")\n\n plt.tight_layout()\n plt.show()\n\n return" ]
[ "0.62587345", "0.6133166", "0.60486656", "0.6015816", "0.59888154", "0.59455055", "0.5901886", "0.5839792", "0.5825753", "0.5815804", "0.58060807", "0.5728041", "0.57201964", "0.568926", "0.56739503", "0.56676227", "0.5591121", "0.55885464", "0.5580657", "0.5567078", "0.5560923", "0.5536441", "0.5524033", "0.5496001", "0.54878706", "0.5481007", "0.5467794", "0.5450828", "0.54144526", "0.5371783" ]
0.65536094
0
Returns predictions made by a given model. This function takes in an X_matrix and uses the imported model to return the predicted (yhat) values as a list.
def get_predicts(x_matrix, model, poly, scale, dummy_idx):
    x_matrix = np.array(x_matrix)
    # adding polynomial features and/or scaling before prediction
    temp_list = split_poly_scale_join([x_matrix], dummy_idx, poly, scale)
    x_matrix = temp_list[0]
    return model.predict(x_matrix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(x_tst, model):\n\n predictions = model.predict(x_tst)\n return predictions", "def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions", "def model_predict(classifier, X_test:list) -> list:\n y_predict = classifier.predict(X_test)\n return y_predict", "def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels", "def predict(model, X_test, batch_size):\r\n # test\r\n predict = model.predict(X_test, batch_size=batch_size, verbose=1)\r\n \r\n # if the model return result for every time, get only last time\r\n if predict.ndim == 3:\r\n extract = []\r\n for i in range(len(X_test)):\r\n index = np.arange(len(X_test[i]))\r\n if len(index[np.any(X_test[i] != 0, axis=1)]) == 0:\r\n extract.append(predict[i, -1, :])\r\n else:\r\n extract.append(predict[i, index[np.any(X_test[i] != 0.0, axis=1)][-1], :])\r\n\r\n # extract = np.array([predict[i,len(X_test[i])-1,:] for i in range(len(X_test))])\r\n return np.array(extract)\r\n else:\r\n return predict", "def predicts(self,X):\n return [self.predict(x) for x in X]", "def predict(self, model, x_test):\n pass", "def predictions(self, model):\n return get_predictions_from_df(\n model=model, df=self.prediction_df,\n fixed_effects=self.fixed_effects,\n random_effect=self.random_effect,\n spline=self.spline,\n offset=self.offset,\n )", "def predict(self, x):\n \n\n return predictions", "def predict(self, x_test, y_test, model_path):\n tf.reset_default_graph()\n with tf.compat.v1.Session() as sess:\n saver = tf.compat.v1.train.import_meta_graph(model_path + \".meta\")\n saver.restore(sess, model_path)\n graph = tf.compat.v1.get_default_graph()\n x = graph.get_operation_by_name(\"x_input\").outputs[0]\n y = tf.compat.v1.get_collection(\"network_architecture\")[0]\n no_samples = x_test.shape[0]\n predictions = []\n n_iteration = no_samples // self.batch_size\n for step in range(n_iteration):\n x_batch, y_batch = get_batch_data(x_test, y_test, iter_step=step, batch_size=self.batch_size)\n preds = sess.run(y, feed_dict={x: x_batch})\n predictions.append(preds)\n return predictions", "def predict(model, x):\n # Set model to evalution state to turn off dropout\n model.eval()\n x = to_Variable(x)\n yhat = model(x)\n _, tag = yhat.max(1)\n \n return tag.data.cpu().numpy()", "def predict(model, x):\n y = model.predict(x)\n print(\"y\")\n print(y)\n return y[0]", "def get_predictions(self, x_train_single):\n\n x_row = x_train_single.toarray()\n h = []\n for i in range(self.num_models):\n h.append(self.models[i].predict(x_row)[0])\n\n return h", "def predict(self, X: List[np.ndarray], **kwargs) -> List[np.ndarray]:", "def predict_data(data: pd.DataFrame, model: list):\n prediction = []\n for i, row in data.iterrows():\n prediction.append(predict_dataset(row, model))\n return prediction", "def model_predict(self, X):\n return self.cmodel.predict(X=X)", "def predict(self, x):\n pred = x\n for m in self.models:\n pred = m.predict(pred)\n\n return pred", "def predict(self, X):\n if self.model is None:\n print(\"%s.predict: implement me\" % (self.__class__.__name__))\n return np.zeros((1, self.odim))", "def predict(self, X, y=None): \n # 
Check to see if model has been trained\n if self.__fitted == False:\n print('Model not fitted yet.\\nReturning None.')\n return None\n \n # Append matrix, create design matrix\n X_appended = np.hstack((np.ones((X.shape[0], 1)), X))\n\n # Compute predictions\n predictions = X_appended @ self.__betas\n\n # If y is given, calculate and return MSE\n if not y.any() == None:\n mse = np.sum((predictions.T - y) ** 2) / X_appended.shape[0]\n print(f\"Prediction MSE is {mse}.\")\n return predictions.T, mse\n return predictions.T", "def predict(self,X):\n result = []\n for i in range(pd.DataFrame(X).shape[0]):\n prediction = self.predict_single(pd.DataFrame(X).iloc[i])\n result.append(prediction)\n print(result)\n return np.array(result)", "def predict(self, X: ArrayLike) -> np.ndarray:\n predictions = self._model.predict(X)\n return np.array([prediction for prediction in predictions])", "def predict(self, X):\n return self.model.predict(X)", "def predict(self, X):\n return self.model.predict(X)", "def predict(self, X):\n y_pred = []\n for i in range(len(X)):\n preds=[]\n X_test = X[X.index==i]\n for model in self.models:\n pred = model.predict(X_test)\n preds.append(pred[0])\n y_pred.append(mode(preds))\n return pd.Series(y_pred)", "def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)", "def predict(self, X):\n fmodel = self.estimators_[np.array(self.estimator_errors_).argmin()]\n predictions = fmodel.predict(X)\n return predictions", "def predict(self,X): \n return self._predict(X)", "def predict(self, X):\n Y = []\n for predictor in self.predictors:\n Y.append(predictor.predict(X))\n\n Y = np.array(Y).T\n return Y", "def predict(self, X):\n ...", "def predict(self, X):\n ..." ]
[ "0.73035485", "0.71050286", "0.70261675", "0.68585396", "0.68142384", "0.677259", "0.6760893", "0.6750874", "0.66930443", "0.6654089", "0.66475666", "0.66246563", "0.661087", "0.657713", "0.6550398", "0.65496695", "0.6531237", "0.6531102", "0.649448", "0.64675003", "0.642741", "0.6425504", "0.6425504", "0.64143604", "0.6406804", "0.64056337", "0.63946193", "0.6394547", "0.63676435", "0.63676435" ]
0.7119781
1
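Aside (illustrative, not part of the dataset rows): in the records shown here, document_rank looks consistent with the count of negatives whose negative_score exceeds document_score — in the record just above, a single negative scores 0.73035485 against a document_score of 0.7119781, matching the rank of 1. A minimal sketch of that check, assuming each row has been parsed into a Python dict keyed by the field names used above; the helper name and the string-to-float conversion are assumptions:

# Illustrative consistency check; field names follow the rows above, parsing is assumed.
def rank_from_scores(record):
    """Count the negatives that outscore the positive document."""
    doc_score = float(record["document_score"])
    return sum(float(s) > doc_score for s in record["negative_scores"])

# Values taken from the record above (negatives truncated to the top two):
example = {"document_score": "0.7119781",
           "negative_scores": ["0.73035485", "0.71050286"]}
assert rank_from_scores(example) == 1   # agrees with document_rank = 1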
Excludes dummy columns to add polynomial features and/or scale attributes. This function iterates over list of matrices and puts aside their dummy attributes in order to add polynomial features and scale the numerical values. Then, it combines the attributes and returns the resulting matrices.
def split_poly_scale_join(matrices, dummy_idx, poly, scale):
    adjusted_matrices = []
    for matrix in matrices:
        if dummy_idx:
            # Split the matrix into numerical and dummies
            matrix, matrix_dummies = np.split(matrix, [dummy_idx], axis=1)
        # add polynomial features to numerical attributes
        if poly:
            matrix = (PolynomialFeatures(poly).
                      fit_transform(matrix))
        # scale numerical attributes
        if scale:
            matrix = (StandardScaler().
                      fit_transform(matrix))
        if dummy_idx:
            # Join numerical and dummy attributes together again
            matrix = np.concatenate((matrix, matrix_dummies), axis=1)
        adjusted_matrices.append(matrix)
    return adjusted_matrices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = np.diag(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return features", "def prepare_features(groups, do_scale):\n feature_list = []\n for group in groups:\n for features in group.features:\n feature_list.append(features)\n\n feature_matrix = np.vstack(feature_list)\n feature_matrix = impute.impute(feature_matrix)\n if do_scale:\n feature_matrix, s_min, s_max = scale(feature_matrix)\n scale_params = zip(s_min, s_max)\n else:\n scale_params = None\n\n f_i = iter(feature_matrix)\n for group in groups:\n group.features = []\n for i in range(len(group.rs_list)):\n group.features.append(f_i.next())\n\n return scale_params", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1),dtype='float')\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n # return sparse_to_tuple(features)\r\n return features\r\n # print(features)\r\n # rowsum = np.array(features.sum(1),dtype='float')\r\n #\r\n # r_inv = np.power(rowsum, -1).flatten()\r\n # r_inv[np.isinf(r_inv)] = 0.\r\n # r_mat_inv = np.diag(r_inv)\r\n # features = r_mat_inv.dot(features)\r\n # # return sparse_to_tuple(features)\r\n # return features\r", "def _extract_imp_data(self) -> np.ndarray:\n \n mats = Material.objects.all()\n \n mat_arrays = []\n for mat in mats: # django queryset -> python list\n mat_features = []\n \n # Add data\n # Some data are missing here.\n #TODO: Delete those if sentences after cleaning the data.\n mat_features.append(mat.pvt_b5 if mat.pvt_b5!=None else 0)\n mat_features.append(mat.pvt_b6 if mat.pvt_b6!=None else 0)\n mat_features.append(mat.pvt_b1m if mat.pvt_b1m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b4m if mat.pvt_b4m!=None else 0)\n mat_features.append(mat.pvt_b1s if mat.pvt_b1s!=None else 0)\n mat_features.append(mat.pvt_b2s if mat.pvt_b2s!=None else 0)\n mat_features.append(mat.pvt_b3s if mat.pvt_b3s!=None else 0)\n mat_features.append(mat.pvt_b4s if mat.pvt_b4s!=None else 0)\n mat_features.append(mat.pvt_b7 if mat.pvt_b7!=None else 0)\n mat_features.append(mat.pvt_b8 if mat.pvt_b8!=None else 0)\n mat_features.append(mat.pvt_b9 if mat.pvt_b9!=None else 0)\n mat_features.append(mat.seven_params_n if mat.seven_params_n!=None 
else 0.)\n mat_features.append(mat.seven_params_Tau if mat.seven_params_Tau!=None else 0.)\n mat_features.append(mat.seven_params_D1 if mat.seven_params_D1!=None else 0.)\n mat_features.append(mat.seven_params_D2 if mat.seven_params_D2!=None else 0.)\n mat_features.append(mat.seven_params_D3 if mat.seven_params_D3!=None else 0.)\n mat_features.append(mat.seven_params_A1 if mat.seven_params_A1!=None else 0.)\n mat_features.append(mat.seven_params_A2 if mat.seven_params_A2!=None else 0.)\n \n mat_arrays.append(mat_features)\n \n # Get numpy arrays.\n mat_arrays = np.array(mat_arrays, dtype=np.float64)\n \n return mat_arrays", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense()", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.toarray() # densify -- these are tiny and we don't care", "def _extract_data(self) -> np.ndarray:\n \n mats = Material.objects.all()\n \n mat_arrays = []\n for mat in mats: # django queryset -> python list\n mat_features = []\n \n # Add data\n # Some data are missing here.\n #TODO: Delete those if sentences after cleaning the data.\n mat_features.append(mat.model_surface_temperature if mat.model_surface_temperature!=None else 0)\n mat_features.append(mat.melt_temperature if mat.melt_temperature!=None else 0)\n mat_features.append(mat.mold_temperature_range_min if mat.mold_temperature_range_min!=None else 0)\n mat_features.append(mat.mold_temperature_range_max if mat.mold_temperature_range_max!=None else 0)\n mat_features.append(mat.melt_temperature_range_min if mat.melt_temperature_range_min!=None else 0)\n mat_features.append(mat.melt_temperature_range_max if mat.melt_temperature_range_max!=None else 0)\n mat_features.append(mat.absolute_maximum_melt_temperature if mat.absolute_maximum_melt_temperature!=None else 0)\n mat_features.append(mat.ejection_temperature if mat.ejection_temperature!=None else 0)\n mat_features.append(mat.maximum_shear_stress if mat.maximum_shear_stress!=None else 0)\n mat_features.append(mat.maximum_shear_rate if mat.maximum_shear_rate!=None else 0)\n mat_features.append(mat.melt_density if mat.melt_density!=None else 0)\n mat_features.append(mat.solid_density if mat.solid_density!=None else 0)\n mat_features.append(mat.pvt_b5 if mat.pvt_b5!=None else 0)\n mat_features.append(mat.pvt_b6 if mat.pvt_b6!=None else 0)\n mat_features.append(mat.pvt_b1m if mat.pvt_b1m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b2m if mat.pvt_b2m!=None else 0)\n mat_features.append(mat.pvt_b4m if mat.pvt_b4m!=None else 0)\n mat_features.append(mat.pvt_b1s if mat.pvt_b1s!=None else 0)\n mat_features.append(mat.pvt_b2s if mat.pvt_b2s!=None else 0)\n mat_features.append(mat.pvt_b3s if mat.pvt_b3s!=None else 0)\n mat_features.append(mat.pvt_b4s if mat.pvt_b4s!=None else 0)\n mat_features.append(mat.pvt_b7 if mat.pvt_b7!=None else 0)\n mat_features.append(mat.pvt_b8 if mat.pvt_b8!=None else 
0)\n mat_features.append(mat.pvt_b9 if mat.pvt_b9!=None else 0)\n mat_features.append(mat.elastic_modulus_e1 if mat.elastic_modulus_e1!=None else 0)\n mat_features.append(mat.elastic_modulus_e2 if mat.elastic_modulus_e2!=None else 0)\n mat_features.append(mat.poisson_ratio_v12 if mat.poisson_ratio_v12!=None else 0)\n mat_features.append(mat.poisson_ratio_v23 if mat.poisson_ratio_v23!=None else 0)\n mat_features.append(mat.shear_modulus_g12 if mat.shear_modulus_g12!=None else 0.)\n mat_features.append(mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha1 if mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha1!=None else 0.)\n mat_features.append(mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha2 if mat.thermal_expansion_data_transverse_isotropic_coefficient_alpha2!=None else 0.)\n mat_features.append(mat.seven_params_n if mat.seven_params_n!=None else 0.)\n mat_features.append(mat.seven_params_Tau if mat.seven_params_Tau!=None else 0.)\n mat_features.append(mat.seven_params_D1 if mat.seven_params_D1!=None else 0.)\n mat_features.append(mat.seven_params_D2 if mat.seven_params_D2!=None else 0.)\n mat_features.append(mat.seven_params_D3 if mat.seven_params_D3!=None else 0.)\n mat_features.append(mat.seven_params_A1 if mat.seven_params_A1!=None else 0.)\n mat_features.append(mat.seven_params_A2 if mat.seven_params_A2!=None else 0.)\n mat_features.append(mat.c1 if mat.c1!=None else 0.)\n mat_features.append(mat.c2 if mat.c2!=None else 0.)\n mat_features.append(mat.conversion_temperature if mat.conversion_temperature!=None else 0.)\n mat_features.append(mat.MFR_temperature if mat.MFR_temperature!=None else 0.)\n mat_features.append(mat.MFR_loading if mat.MFR_loading!=None else 0.)\n mat_features.append(mat.measured_MFR if mat.measured_MFR!=None else 0.)\n \n mat_arrays.append(mat_features)\n \n # Get numpy arrays.\n mat_arrays = np.array(mat_arrays, dtype=np.float64)\n \n return mat_arrays", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return sparse_to_tuple(features)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return sparse_to_tuple(features)", "def nontuple_preprocess_features(features):\n rowsum = np.array(features.sum(1))\n ep = 1e-10\n r_inv = np.power(rowsum + ep, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def normalize_attributions(self, att_list, positive=False, normalizer='MinMaxScaler'):\n all_values = np.concatenate(att_list)\n all_values = all_values[all_values > 0] if positive else all_values\n\n if normalizer == 'QuantileTransformer':\n normalizer = sklearn.preprocessing.QuantileTransformer()\n elif normalizer == 'MaxAbsScaler':\n normalizer = sklearn.preprocessing.MaxAbsScaler()\n else:\n normalizer = sklearn.preprocessing.MinMaxScaler()\n normalizer.fit(all_values.reshape(-1, 1))\n \n new_att = []\n for att in att_list:\n normed_nodes = normalizer.transform(att.reshape(-1, 1)).ravel()\n new_att.append(normed_nodes)\n return new_att", "def matrices(self):\n return [ self.__class__(labels=self.labels,\n labels_map=self.labels_map,\n sets=[x]) for x in self.sets]", "def augment (self, *args):\n cols = 
list(self.columns())\n for aug in args:\n try:\n cols.extend(aug.columns())\n except AttributeError:\n cols.append(aug)\n return Matrix(*cols, columns=True)", "def replace_linear_combinations(list_of_3x3_matrices, force_constant_prefactor):\n result = []\n\n for matrix in list_of_3x3_matrices:\n new_matrix = []\n for row in matrix:\n new_row = []\n for entry in row:\n if isinstance(entry, Iterable):\n new_entry = 0\n for value, factor in entry:\n new_entry += value * factor\n new_row.append(new_entry * force_constant_prefactor)\n else:\n new_row.append(entry * force_constant_prefactor)\n new_matrix.append(new_row)\n result.append(new_matrix)\n\n return result", "def _derive_transformation_matrices(self):\n\n if hasattr(self, '_primaries') and hasattr(self, '_whitepoint'):\n if self._primaries is not None and self._whitepoint is not None:\n npm = normalised_primary_matrix(self._primaries,\n self._whitepoint)\n\n self._derived_RGB_to_XYZ_matrix = npm\n self._derived_XYZ_to_RGB_matrix = np.linalg.inv(npm)", "def scale_features(features_nxm):\n # type: (list) -> list\n temp_mxn = [[], # 0 Aspect ratio\n [], # 1 Black pixel count\n [], # 2 Black pixel average inclination\n [], # 3 CentroidX\n [], # 4 CentroidY\n [], # 5 Centroid inclination\n [], # 6 Kurtosis\n [], # 7 Normalized size\n [], # 8 Skewness\n [], # 9 Standard deviation\n []] # 10 Transitions\n\n # Take transpose of the n x m matrix\n for sign in features_nxm:\n for i in range(len(sign)):\n temp_mxn[i].append(sign[i])\n\n # Add minimum row value to each of the element of a row to shift negative values\n for i in range(len(temp_mxn)):\n for j in range(len(temp_mxn[i])):\n temp_mxn[i][j] += min(temp_mxn[i])\n\n # Divide each row element with max of that row to get a value in (0, 1)\n for i in range(len(temp_mxn)):\n for j in range(len(temp_mxn[i])):\n temp_mxn[i][j] /= max(temp_mxn[i])\n\n # Take transpose of the m x n matrix\n features_nxm = []\n for sign in temp_mxn:\n sign = []\n for i in range(len(sign)):\n sign.append(sign[i])\n\n features_nxm.append(sign)\n\n return features_nxm", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features).tocoo()\n return sparse_to_tensor(features)", "def __call__(self, features):\n norm = []\n for data in features:\n if all(x == 0 for x in data):\n norm.append(data)\n else:\n scale = sum(x*x for x in data) ** 0.5\n normalized_data = [x / scale for x in data]\n norm.append(normalized_data)\n \n return norm", "def construct_feature_columns(input_features):\n return set([tf.feature_column.numeric_column(my_feature)\n for my_feature in input_features])", "def dummify_features(df):\n colnames = df.columns\n le_dict = {}\n for col in colnames:\n le_dict[col] = preprocessing.LabelEncoder()\n le_dict[col].fit(df[col])\n df.loc[:, col] = le_dict[col].transform(df[col])\n\n enc = preprocessing.OneHotEncoder()\n enc.fit(df)\n X = enc.transform(df)\n\n dummy_colnames = [cv + '_' + str(modality) for cv in colnames for modality in le_dict[cv].classes_]\n # for cv in colnames:\n # for modality in le_dict[cv].classes_:\n # dummy_colnames.append(cv + '_' + modality)\n\n return X, dummy_colnames, enc", "def get_dummies_list(self, \n cols=['elite'],\\\n drop_=True):\n for col in cols:\n print \"Pre-processing \" + col + \"...\"\n temp = pd.get_dummies(self.df[col].apply(pd.Series).stack(),drop_first=True)\\\n .astype(int).sum(level=0).astype(int)\n # 
temp.columns.apply(str).apply(lambda x: col + \"_\" + x)\n if drop_:\n self.df.drop(col,axis = 1, inplace=True)\n self.df = pd.concat([self.df, temp],axis=1)", "def get_cols_dummy():", "def createFeatureArray(self, lyrFeats): \n featIdlist = []\n fullFeatureList= []\n #add features to the attribute list\n for feat in lyrFeats:\n if feat == NULL:\n feat = None\n featIdlist.append(feat.id())\n featAttributes = feat.attributes()\n fullFeatureList.extend(featAttributes)\n \n #get size of attribute table\n rows = len(featIdlist)\n cols = len(featAttributes)\n \n #create an array af attributes and return it\n featArray = np.array([fullFeatureList])\n featArray2 = np.reshape(featArray, (rows, cols))\n return featArray2", "def feature_scale(data_dict, features_list):\n for feature in features_list:\n tmp_list = []\n if feature == 'poi': \n continue\n else:\n for name in data_dict:\n value = data_dict[name][feature]\n if value == 'NaN':\n value = 0\n data_dict[name][feature] = 0\n tmp_list.append( [float(value)] )\n \n scaler = preprocessing.MinMaxScaler()\n scaler.fit(np.array(tmp_list))\n \n for name in data_dict:\n data_dict[name][feature] = scaler.transform([float(data_dict[name][feature])])[0]", "def __call__(self, features: List[List[float]]) -> List[List[float]]:\n if self.istest == 0: #for training only calculate this\n for col in range(len(features[0])):\n list = []\n for feature in features:\n list.append(feature[col]) #append column-wise\n self.min_set.append(min(list))#min of the column\n self.max_set.append(max(list))#max of the column\n #print(features)\n for col in range(len(features[0])):\n max_val = self.max_set[col]\n min_val = self.min_set[col]\n denominator = max_val - min_val\n for f in features:\n if denominator == 0:\n f[col] = 0\n else:\n f[col] = (f[col] - min_val) / denominator\n self.istest += 1\n return features\n #raise NotImplementedError" ]
[ "0.5560244", "0.55353713", "0.55022645", "0.55022645", "0.54975176", "0.54975176", "0.54898787", "0.5452808", "0.541256", "0.5363212", "0.53405267", "0.53396016", "0.53237796", "0.53237796", "0.5215232", "0.50954026", "0.50924695", "0.50693184", "0.5062487", "0.5029951", "0.49988356", "0.4995514", "0.49855307", "0.49482432", "0.4942685", "0.49405703", "0.49144158", "0.48688152", "0.48401433", "0.48263747" ]
0.6392005
0
Tests that add() correctly adds a Task to storage
def test_add(self):
    self.my_task.key = self.task_storage.add(self.my_task)
    new_task = self.task_storage.find(self.my_task.key)
    new_task.key = self.task_storage.add(new_task)
    self.assertNotEqual(self.my_task.key, new_task.key)
    self.task_storage.delete(new_task.key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, task):\n pass", "def add(self, task):\n raise NotImplementedError()", "def test_find(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n new_task = self.task_storage.find(self.my_task.key)\n\n self.assertEqual(self.my_task, new_task)", "def test_add_raises():\n tasks.add(task='not a Task object')", "def test_update(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.my_task.title = 'foo'\n key = self.task_storage.update(self.my_task)\n new_task = self.task_storage.find(key)\n\n self.assertEqual(self.my_task, new_task)", "def test_delete(self):\n new_task = task.Task()\n self.my_task.key = self.task_storage.add(self.my_task)\n\n key = self.task_storage.delete(self.my_task.key)\n new_task = self.task_storage.find(key)\n\n self.assertIsNone(new_task)", "def test_update_no_match(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.task_storage.delete(self.my_task.key)\n\n self.my_task.title = 'foo'\n\n self.key = self.task_storage.update(self.my_task)\n\n self.assertIsNone(self.key)", "def add(self, task):\n self._count += 1\n path = os.path.join(self._root, \"%d_%s\" % (self._count, task.guid))\n j.sal.fs.writeFile(path, self._serialize_task(task))", "def insert(task):\n tasks.insert_one(task.__dict__)", "def add_task(self, task):\n raise NotImplementedError()", "def test_task_add():\n pytest.fail('Not implemented yet.')", "def test_add_item_at_using_put(self):\n pass", "def test_get_all(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n task_list = self.task_storage.get_all()\n\n self.assertEqual(task_list[0], self.my_task)", "def test_splits_on_taskexists(self, queue_add_mock):\n from google.appengine.api import taskqueue\n from sosbeacon.utils import insert_tasks\n\n queue_add_mock.side_effect = taskqueue.TaskAlreadyExistsError\n\n tasks = [i for i in xrange(0, 10)]\n added = insert_tasks(tasks, 'default')\n\n self.assertEqual(added, 0)\n self.assertEqual(queue_add_mock.call_count, 19)", "def add_task(self, task):\n if task.task_id in self.tasks:\n print('duplicate task {}'.format(task.task_id))\n \n # Store Task (right after strategy constructs a task)\n # TODO: copy\n self.tasks[task.task_id] = copy.deepcopy(task)\n \n # Store/Update TradeStat (right after strategy constructs a task)\n if task.function_name == 'place_order':\n order = task.data\n # self.orders[order.entrust_no] = order\n self._update_trade_stat_from_order(order)\n \n elif task.function_name == 'place_batch_order':\n orders = task.data\n for order in orders:\n # self.orders[order.entrust_no] = order\n self._update_trade_stat_from_order(order)\n \n elif task.function_name == 'basket_order':\n # TODO: no Order class for basket_order\n raise NotImplementedError(\"basket_order\")\n \n elif task.function_name == 'goal_portfolio':\n # self._update_trade_stat_from_goal_positions(goal_positions)\n #orders = task.data\n for entrust_no, order in task.data.items():\n self._update_trade_stat_from_order(order)", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)", "def test_update_no_note(self):\n self.my_task.notes = None\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.my_task.title = 'foo'\n key = self.task_storage.update(self.my_task)\n new_task = self.task_storage.find(key)\n\n self.assertEqual(self.my_task, new_task)", "def test_add(self):\n r = 
main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n task = r.get(\"ToDo\")\n self.assertTrue(task, \"No such entry in DB. Adding failed.\")", "def add_task(self, task):\n\n self.log(INFO, \"Adding task {} in {}s\".format(task.task, task.wait))\n\n collection = self._get_collection()\n\n if not task.when:\n task.when = self._get_now() + task.wait\n\n result = collection.insert_one(\n {\n \"task\": task.task,\n \"args\": task.args,\n \"kwargs\": task.kwargs,\n \"wait\": task.wait,\n \"when\": task.when,\n \"recurring\": task.recurring,\n }\n )\n\n return str(result.inserted_id)", "def test_added_task_has_id_set():\n # GIVEN an initialized tasks db\n # AND a new task is added\n new_task = Task('sit in chair', owner='me', done=True)\n task_id = tasks.add(new_task)\n\n # WHEN task is retrieved\n task_from_db = tasks.get(task_id)\n\n # THEN task_id matches id field\n assert task_from_db.id == task_id", "def test_add_task_successfully(mock_add, name, note, type_, test_operator,\n header_fx):\n # pylint: disable=too-many-arguments\n task = test_operator.add_task(name, note, type_)\n mock_add.assert_called_with(header_fx)\n\n assert task.text == name\n\n if note:\n assert task.notes == note\n else:\n assert not task.notes\n\n if type_:\n assert task.tasktype == type_\n else:\n assert task.tasktype == \"todo\"", "def test_delete_no_match(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.task_storage.delete(self.my_task.key)\n\n self.key = self.task_storage.delete(self.my_task.key)\n\n self.assertIsNone(self.key)", "def add(self, task):\n queue = self.__get_task_queue(task)\n if _contained(task, queue):\n # no-op if the task has already been added\n return\n # add task to internal data structures\n queue.append(task)\n if self._store:\n try:\n self._tasks_by_id[task.persistent_id] = task\n except AttributeError:\n gc3libs.log.warning(\"Task %s has no persistent ID!\", task)\n task.attach(self)\n self.__update_task_counts(task, task.execution.state, +1)", "def add_task(self, task):\n\n # The pyrtm module is dynamic.\n # pylint: disable=no-member\n\n added = self.rtm.tasks.add(timeline=self.timeline,\n name=task['name'], list_id=self.list_id, parse=0)\n\n # TODO: record undoable transactions and undo them upon kb interrupt\n #if added.transaction.undoable == \"1\":\n #self.transactions.append(added.transaction.id)\n\n args = dict(\n timeline = self.timeline,\n list_id = self.list_id,\n taskseries_id = added.list.taskseries.id,\n task_id = added.list.taskseries.task.id,\n )\n\n if task.get('tags', None):\n # Should this be setTags?\n self.rtm.tasks.addTags(tags=','.join(task['tags']), **args)\n\n if task.get('due_date', None):\n self.rtm.tasks.setDueDate(due=task['due_date'],\n # TODO: Can we determine has_due_time?\n has_due_time=1,\n # We're using iso8601 so we don't need them to be specially parsed.\n parse=0,\n **args)\n\n if task.get('estimated', None):\n self.rtm.tasks.setEstimate(estimate=task['estimated'], **args)\n\n if task.get('priority', None):\n self.rtm.tasks.setPriority(priority=task['priority'], **args)\n\n if task.get('repeat', None):\n self.rtm.tasks.setRecurrence(repeat=task['repeat'], **args)\n\n if task.get('notes', None):\n if isinstance(task['notes'], list):\n notes = task['notes']\n else:\n notes = [ task['notes'] ]\n for note in notes:\n self.rtm.tasks.notes.add(note_title=note, note_text=note, **args)\n\n if task.get('url', None):\n self.rtm.tasks.setURL(url=task['url'], **args)\n\n # do the status changes last\n if 
task.get('completed', None):\n self.rtm.tasks.complete(**args)\n\n if task.get('deleted', None):\n self.rtm.tasks.delete(**args)\n\n return added", "def test_add_raises():\n with pytest.raises(TypeError):\n tasks.add(task='not a Task object')", "def test_add_raises_catch():\n with pytest.raises(TypeError):\n tasks.add(task='not a Task object')", "def add_task(self, task):\n self._tasks.append(task)" ]
[ "0.76441354", "0.7422072", "0.7398845", "0.73473823", "0.7283399", "0.726891", "0.719649", "0.7159277", "0.70757115", "0.7048197", "0.6881506", "0.68018115", "0.6794567", "0.67916185", "0.67905664", "0.6783384", "0.6783384", "0.6783384", "0.6783384", "0.67813575", "0.6722948", "0.6704852", "0.6703519", "0.669459", "0.66623825", "0.66337836", "0.66217023", "0.6577288", "0.6543621", "0.6533276" ]
0.8637676
0
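Aside (illustrative): the metadata attached to each record declares a single triplet objective over (query, document, negatives). A sketch of how one record could be expanded into training triplets under that reading — the function name, the dict-style access, and the flattening to one triplet per negative are assumptions, not something the dataset prescribes:

# Hypothetical expansion of one record into (anchor, positive, negative) triplets,
# mirroring the "triplet": [["query", "document", "negatives"]] entry in metadata.
def to_triplets(record):
    anchor = record["query"]        # natural-language docstring
    positive = record["document"]   # the matching code snippet
    for negative in record["negatives"]:
        yield anchor, positive, negative

# A record with 30 negatives yields 30 triplets:
# assert len(list(to_triplets(record))) == len(record["negatives"])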
Tests that find() correctly returns a Task given a key
def test_find(self):
    self.my_task.key = self.task_storage.add(self.my_task)
    new_task = self.task_storage.find(self.my_task.key)
    self.assertEqual(self.my_task, new_task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_task(self, key_task):\n task = None\n scanned_tasks = []\n\n with open(self.path_to_task_file, 'r') as file:\n for line in file:\n current_task = Task()\n current_task.load(line)\n\n if current_task.key == key_task:\n task = current_task\n else:\n scanned_tasks.append(line)\n\n self.check_time(task)\n self.save_scanned_tasks(scanned_tasks) # return unsuccessful tasks in file\n return task", "def get_task(self, key: str) -> Task:\n raise NotImplementedError", "def test_task_finder(test_operator, task_name, task_type):\n found_task = test_operator.find_task(task_name, task_type=task_type)\n assert found_task", "def find_task(self, task_str):\n task_str = task_str.replace('é', 'e').title()\n task_not_found = True\n custom_quest = False\n if \":\" in task_str:\n task_strs = task_str.split(\":\")\n task_str = task_strs[0]\n quest_str = task_strs[1]\n custom_quest = True\n while task_not_found:\n for task in self.tasks:\n if (task_str == task.reward.title()) or (task_str == task.quest.replace('é', 'e').title()) or (task_str in (reward.title() for reward in task.rewards)) or (task_str in (nickname.title() for nickname in task.nicknames)):\n out_task = task\n task_not_found = False\n if custom_quest:\n out_task = copy.copy(task)\n out_task.quest = quest_str.title()\n return out_task\n break\n if task_not_found:\n raise TaskNotFound()", "def find(self, task_id):\n for task_obj in self.queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in queue: '{}'\".format(task_id))", "def find(self, task_id):\n _structs = [\n self.stack,\n self.backlog,\n self.blocked,\n self.sleeping,\n ]\n for struct in _structs:\n try:\n task_obj = struct.find(task_id)\n return task_obj\n except LookupError:\n # not found; try next structure\n continue\n\n # the graveyard is just a list; search it\n for task_obj in self.graveyard:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task: '{}'\".format(task_id))", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "def get_by_name(task_name):\n return tasks.find_one({'name': task_name})", "def _get_task(self, task_id):\n if not task_id:\n return None\n task = objects.Transaction.get_by_uid(task_id, fail_if_not_found=False)\n if task and task.cluster_id == self.cluster.id:\n return task\n return False", "def find(self, task_id):\n for task_obj in self.stack:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in stack: '{}'\".format(task_id))", "def test_search(self):\n self.task_storage.add(self.my_task)\n search_task = task.Task(title='title', notes='note')\n task_search_list = self.task_storage.search(search_task)\n\n self.assertTrue(self.my_task in task_search_list)", "def get(self, guid):\n results = j.sal.fs.find(self._root, '*_%s' % guid)\n if len(results) <= 0:\n raise TaskNotFoundError(\"task %s not found\" % guid)\n if len(results) > 1:\n raise RuntimeError(\"found 2 tasks with same guid, this should not happen\")\n return self._deserialize_task(j.sal.fs.readFile(results[0]))", "def find(self, task_id):\n for task_obj in self._blocked_items:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in limbo: '{}'\".format(task_id))", "def getTask(self, name):\n for t in self.tasks:\n if isinstance(name, str):\n if t.name == name:\n return t\n else:\n if t.__class__ is 
name:\n return t\n return None", "def get_task_by_name(self, task_name):\n task_table = Table('task', self.metadata, autoload=True)\n try:\n parent_task = self.session.query(task_table).filter(task_table.c.name==str(task_name)).one()\n task = parent_task._asdict()\n return task\n except Exception as e:\n logger.info(f\"Error retrieving task {task_name}: {e}\")\n return False", "def test_search_not_found(self):\n self.task_storage.add(self.my_task)\n search_task = task.Task(title='title1', notes='note1')\n task_search_list = self.task_storage.search(search_task)\n\n self.assertEqual(task_search_list, None)", "def find_task_by_id(self, task_id):\n return self._tasks_by_id[task_id]", "def find_task_by_id(self, task_id):\n return self._tasks_by_id[task_id]", "def test_delete(self):\n new_task = task.Task()\n self.my_task.key = self.task_storage.add(self.my_task)\n\n key = self.task_storage.delete(self.my_task.key)\n new_task = self.task_storage.find(key)\n\n self.assertIsNone(new_task)", "def __getitem__(self, txid: int) -> asyncio.Task:\n return self._tasks[txid]", "def get_task_by_name(self, task_name):\n for task in self.tasks:\n if task.name == task_name:\n logger.debug(\"Returning task with name '%s': '%s'\", task_name, task.to_xml_string())\n return task\n raise ValueError(\"A step task with the name {} can not be found.\".format(task_name))", "def task(self, name):\n with self.db_lock:\n return self.rcon.hget(self.task_key, name)", "def test_update_no_match(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.task_storage.delete(self.my_task.key)\n\n self.my_task.title = 'foo'\n\n self.key = self.task_storage.update(self.my_task)\n\n self.assertIsNone(self.key)", "def get_free_key_task(self):\n keys = []\n\n try:\n with open(self.path_to_task_file, 'r') as file:\n for line in file:\n current_task = Task()\n current_task.load(line)\n keys.append(current_task.key)\n except:\n pass\n\n while True:\n if len(keys) == 500:\n return None\n key = random.randint(0, 500)\n if key not in keys:\n return str(key)", "def test_get_all(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n task_list = self.task_storage.get_all()\n\n self.assertEqual(task_list[0], self.my_task)", "def _get_task(self, task):\n try:\n return TASKS[task]\n except KeyError:\n raise ValueError(\"task %s \"\n \"is not supported. \" % task)", "def get_task_by_id(self, task_id):\n for task in self.tasks:\n if task.id == task_id:\n logger.debug(\"Returning task with ID '%s': '%s'\", task_id, task.to_xml_string())\n return task\n raise ValueError(\"A step task with the ID {} can not be found.\".format(task_id))", "def test_update(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.my_task.title = 'foo'\n key = self.task_storage.update(self.my_task)\n new_task = self.task_storage.find(key)\n\n self.assertEqual(self.my_task, new_task)", "def test_delete_no_match(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.task_storage.delete(self.my_task.key)\n\n self.key = self.task_storage.delete(self.my_task.key)\n\n self.assertIsNone(self.key)", "def get(self, **kwargs):\n clone = self.filter(**kwargs)\n num = len(clone)\n if num == 1:\n return clone._result_cache[0]\n if not num:\n raise Task.DoesNotExist(\n 'Task matching query does not exist. '\n 'Lookup parameters were {0}'.format(kwargs))\n raise ValueError(\n 'get() returned more than one Task -- it returned {0}! '\n 'Lookup parameters were {1}'.format(num, kwargs))" ]
[ "0.69084066", "0.6853333", "0.68443716", "0.64593273", "0.6452229", "0.64300704", "0.6424204", "0.6378312", "0.6299489", "0.6238787", "0.6218941", "0.61991155", "0.6158822", "0.6088962", "0.6004947", "0.59960586", "0.59647334", "0.59647334", "0.5919875", "0.5909606", "0.59089446", "0.588509", "0.5874315", "0.5864838", "0.5821641", "0.5809711", "0.5794041", "0.5788824", "0.5778721", "0.5753942" ]
0.7447334
0
Tests that get_all() returns a list of all Tasks
def test_get_all(self):
    self.my_task.key = self.task_storage.add(self.my_task)
    task_list = self.task_storage.get_all()
    self.assertEqual(task_list[0], self.my_task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all():\n return list(tasks.find({}))", "def get_all_tasks(self):\n \n sql = \"select * from tasks;\"\n return self._query_all(sql)", "def test_task_list():\n # Fake pyramid request, useful for testing.\n request = testing.DummyRequest()\n\n pytest.fail('Not implemented yet.')", "async def list_tasks():", "def test_list_tasks_no_args(self):\n rv = TEST_CLIENT.post(\"/tasks/list-tasks\", json={})\n result = rv.json()\n\n expected = util.MOCK_TASK_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "async def test_get_tasks(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # declare _scheduler task\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'get_tasks'\n interval_schedule.process_name = \"sleep5\"\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.exclusive = False\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(15)\n\n # Assert running tasks\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.INTERRUPTED)])\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"end_time\", \"=\", 'NULL'])\n assert tasks\n\n tasks = await scheduler.get_tasks(limit=50)\n states = [int(task.state) for task in tasks]\n\n assert len(tasks) > 1\n assert int(Task.State.RUNNING) in states\n assert int(Task.State.COMPLETE) in states\n\n tasks = await scheduler.get_tasks(1)\n assert len(tasks) == 1\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"]], offset=50)\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"], [\"start_time\", \"asc\"]])\n assert tasks\n\n tasks = await scheduler.get_tasks(or_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n tasks = await scheduler.get_tasks(and_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n await self.stop_scheduler(scheduler)", "def test_n_available_tasks_all_tasks_completed_authenticated_user(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app, state='completed')\r\n\r\n n_available_tasks = helpers.n_available_tasks(app.id, user_id=1)\r\n\r\n assert n_available_tasks == 0, n_available_tasks", "def list_all_tasks(self):\n task_table = Table('task', self.metadata, autoload=True)\n try:\n all_tasks = self.session.query(task_table).all()\n task_list = []\n for t in all_tasks:\n task_list.append(t._asdict())\n return task_list\n except Exception as e:\n logger.info(f\"Error retrieving list of tasks: {e}\")\n return False", "def db_get_all_tasks():\n sql = \"SELECT * FROM {};\".format(TABLE_NAME)\n return db_query(sql)", "def get_tasks(self):\n return self.tasks.all()", "def get_tasks(self):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks\")\n return res.fetchall()", "def get_all_tasks(self):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"SELECT task FROM goals\"\"\")\r\n\t\t\ttup_list = self.c.fetchall()\r\n\t\treturn [tup[0] for tup in tup_list]", "def list(self, _request):\n serializer = TaskSerializer(instance=TASKS.values(), many=True)\n return response.Response(serializer.data)", "def test_get(self):\n task_types = [1, 2]\n\n for task_type in task_types:\n 
self.john_gamer.tasks.start(task_type)\n\n self.client.force_login(self.john)\n resp = self.client.get(self.URL)\n\n self.assertListEqual(\n resp.json(),\n ['Type: 1, time left: 42s', 'Type: 2, time left: 42s'],\n \"Gamer can't get list of task via API!\"\n )", "async def get_all(request):\n pass", "def test_query_to_tasks(self):\n Org(id='test1', status=CONNECTED).put()\n Org(id='test2', status=CONNECTED).put()\n Org(id='test3', status=DISCONNECTED).put()\n\n count = task_utils.query_to_tasks(\n query=Org.query(Org.status == CONNECTED),\n queue=Queue('adapter-update'),\n task_generator=lambda key: Task(url='/something/{}'.format(key.string_id()))\n )\n\n self.assertEqual(count, 2)\n task_count = len(self.taskqueue.get_filtered_tasks())\n self.assertEqual(task_count, 2)", "def test_list_completed_tasks_view(self):\n list_tasks_url = reverse('list_completed_tasks')\n response = self.client.get(list_tasks_url)\n self.assertEqual(response.status_code, 200)\n tasks = Task.objects.filter(status=Task.STATUS_CHOICES.complete)\n self.assertEqual(len(response.context_data['task_list']), tasks.count())\n status_of_all_tasks = response.context_data['task_list'].values_list(\n 'status',\n flat=True)\n self.assertNotIn(Task.STATUS_CHOICES.ready_for_review, status_of_all_tasks)\n self.assertNotIn(Task.STATUS_CHOICES.incomplete, status_of_all_tasks)\n self.assertTemplateUsed(response, 'tasks/task_list.html')\n self.assertNotIn(str(self.task.get_absolute_url()),\n response.rendered_content)", "def test_list_tasks_view(self):\n list_tasks_url = reverse('list_tasks')\n response = self.client.get(list_tasks_url)\n self.assertEqual(response.status_code, 200)\n tasks = Task.objects.all().exclude(status=Task.STATUS_CHOICES.complete)\n self.assertEqual(len(response.context_data['task_list']), tasks.count())\n self.assertNotIn(\n Task.STATUS_CHOICES.complete,\n response.context_data['task_list'].values_list('status', flat=True))\n self.assertTemplateUsed(response, 'tasks/task_list.html')\n self.assertIn(str(self.task.get_absolute_url()),\n response.rendered_content)", "def test_n_available_tasks_all_tasks_answered_by_authenticated_user(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app, n_answers=2)\r\n user = UserFactory.create()\r\n taskrun = TaskRunFactory.create(task=task, user=user)\r\n\r\n n_available_tasks = helpers.n_available_tasks(app.id, user_id=user.id)\r\n\r\n assert task.state != 'completed', task.state\r\n assert n_available_tasks == 0, n_available_tasks", "def test_my_tasks(self):\n url, parsed = self.prepare_urls(\n 'v1:activity-my-tasks', subdomain=self.company.subdomain)\n \n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n \n content = json.loads(response.content)\n self.assertTrue(content.has_key('count'))\n self.assertTrue(content.has_key('next'))\n self.assertTrue(content.has_key('previous'))\n self.assertTrue(content.has_key('results'))", "def task_get_all(context, filters=None, marker=None, limit=None,\n sort_key='created_at', sort_dir='desc', admin_as_user=False):\n filters = filters or {}\n\n session = get_session()\n query = session.query(models.Task)\n\n if not (context.is_admin or admin_as_user) and context.owner is not None:\n query = query.filter(models.Task.owner == context.owner)\n\n _task_soft_delete(context, session=session)\n\n 
showing_deleted = False\n\n if 'deleted' in filters:\n deleted_filter = filters.pop('deleted')\n query = query.filter_by(deleted=deleted_filter)\n showing_deleted = deleted_filter\n\n for (k, v) in filters.items():\n if v is not None:\n key = k\n if hasattr(models.Task, key):\n query = query.filter(getattr(models.Task, key) == v)\n\n marker_task = None\n if marker is not None:\n marker_task = _task_get(context, marker,\n force_show_deleted=showing_deleted)\n\n sort_keys = ['created_at', 'id']\n if sort_key not in sort_keys:\n sort_keys.insert(0, sort_key)\n\n query = _paginate_query(query, models.Task, limit,\n sort_keys,\n marker=marker_task,\n sort_dir=sort_dir)\n\n task_refs = query.all()\n\n tasks = []\n for task_ref in task_refs:\n tasks.append(_task_format(task_ref, task_info_ref=None))\n\n return tasks", "def test_list_tasks_page_size_1_page_3(self):\n rv = TEST_CLIENT.post(\"/tasks/list-tasks\", json={\"page\": 3, \"page_size\": 1})\n result = rv.json()\n total = util.TestingSessionLocal().query(models.Task).count()\n expected = {\"tasks\": [util.MOCK_TASK_3], \"total\": total}\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def test_n_completed_tasks_with_all_tasks_completed(self):\r\n\r\n app = self.create_app_with_tasks(completed_tasks=4, ongoing_tasks=0)\r\n completed_tasks = cached_apps.n_completed_tasks(app.id)\r\n\r\n err_msg = \"Completed tasks is %s, it should be 4\" % completed_tasks\r\n assert completed_tasks == 4, err_msg", "def test_n_available_tasks_all_tasks_completed_anonymous_user(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app, state='completed')\r\n\r\n n_available_tasks = helpers.n_available_tasks(app.id, user_ip='127.0.0.1')\r\n\r\n assert n_available_tasks == 0, n_available_tasks", "async def test_get_all(self):\n await self.collection.create({'id': 'foo', 'token': 'foo:bar'})\n await self.collection.create({'id': 'baz', 'token': 'baz:qux'})\n expected = (\n {'id': 'baz', 'username': 'baz'},\n {'id': 'foo', 'username': 'foo'})\n self.assertEqual(expected, await self.resource.get_all())", "def list_tasks(ctx):\n ctx.run(\"invoke --list\")", "async def list_tasks(fields: Set[str] = None):\n tasks = celery_app.describe_tasks()\n tasks = [TaskOut(**task).dict(include=fields) for task in tasks]\n return tasks", "def get_all_tasks(self) -> APIResponse:\n return self._get(\"system_list\")", "def test_list_background_email_tasks(self, act):\r\n act.return_value = self.tasks\r\n url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n mock_factory = MockCompletionInfo()\r\n with patch('instructor.views.api.get_task_completion_info') as mock_completion_info:\r\n mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info\r\n response = self.client.get(url, {})\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # check response\r\n self.assertTrue(act.called)\r\n expected_tasks = [ftask.to_dict() for ftask in self.tasks]\r\n actual_tasks = json.loads(response.content)['tasks']\r\n for exp_task, act_task in zip(expected_tasks, actual_tasks):\r\n self.assertDictEqual(exp_task, act_task)\r\n self.assertEqual(actual_tasks, expected_tasks)", "def test_list_instructor_tasks_running(self, act):\r\n act.return_value = self.tasks\r\n url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n mock_factory = MockCompletionInfo()\r\n with patch('instructor.views.api.get_task_completion_info') as 
mock_completion_info:\r\n mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info\r\n response = self.client.get(url, {})\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # check response\r\n self.assertTrue(act.called)\r\n expected_tasks = [ftask.to_dict() for ftask in self.tasks]\r\n actual_tasks = json.loads(response.content)['tasks']\r\n for exp_task, act_task in zip(expected_tasks, actual_tasks):\r\n self.assertDictEqual(exp_task, act_task)\r\n self.assertEqual(actual_tasks, expected_tasks)" ]
[ "0.78161174", "0.72776675", "0.7262699", "0.7231321", "0.7122035", "0.69549555", "0.69116324", "0.6873156", "0.68698835", "0.6842177", "0.67857534", "0.67637753", "0.675407", "0.6740187", "0.6708301", "0.6706077", "0.66224027", "0.6617143", "0.66079676", "0.6603936", "0.6597648", "0.6591449", "0.65893215", "0.6584158", "0.6582155", "0.6575443", "0.65418684", "0.65391594", "0.65211", "0.6462424" ]
0.7713649
1
Tests that delete() correctly deletes a Task from storage
def test_delete(self):
    new_task = task.Task()
    self.my_task.key = self.task_storage.add(self.my_task)
    key = self.task_storage.delete(self.my_task.key)
    new_task = self.task_storage.find(key)
    self.assertIsNone(new_task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_no_match(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.task_storage.delete(self.my_task.key)\n\n self.key = self.task_storage.delete(self.my_task.key)\n\n self.assertIsNone(self.key)", "def test_delete(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # delete\n self.delete(id=task.id)\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertIsNone(task)", "def deleteTask():\n\tmarkOff(isdelete = 1)", "def delete(self):\n return super(Task, self).delete(None)", "def test_delete_task(self):\n check = False\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n main.List.delete(r, \"ToDo\", 1)\n task = main.List.pull_from_redis(r, \"ToDo\", False)\n for key in task.iterkeys():\n if key == \"1\":\n check = True\n self.assertFalse(check, \"Deleting task failed.\")", "def test_delete_task_cascade(self):\r\n task = TaskFactory.create()\r\n task_runs = TaskRunFactory.create_batch(3, task=task)\r\n url = '/api/task/%s?api_key=%s' % (task.id, task.app.owner.api_key)\r\n res = self.app.delete(url)\r\n\r\n assert_equal(res.status, '204 NO CONTENT', res.data)\r\n task_runs = db.session.query(TaskRun)\\\r\n .filter_by(task_id=task.id)\\\r\n .all()\r\n assert len(task_runs) == 0, \"There should not be any task run for task\"", "def test_delete_task(self):\n resp = self.app.delete('/api/2/inf/esrs',\n headers={'X-Auth': self.token},\n json={'name': \"myESRS\"})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def test_delete_run(self):\n pass", "def test_delete_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.delete, self.my_task.key)", "def test_DELETE_task(self):\n\t\ttask_id_1 = self.POST_task(TEST_TASK_DATA)\n\t\ttask_id_2 = self.POST_task(TEST_TASK_DATA)\n\t\ttask_id_3 = self.POST_task(TEST_TASK_DATA)\n\t\tdata = self.GET_data('/api/room/' + self.room_id)\n\t\tself.assertEqual(3, len(data[\"tasks\"]))\n\t\t# delete tasks 1 and 3 and verify just have task 2\n\t\tself.DELETE('/api/task/' + task_id_1)\n\t\tself.DELETE('/api/task/' + task_id_3)\n\t\tdata = self.GET_data('/api/room/' + self.room_id)\n\t\tself.assertEqual(1, len(data[\"tasks\"]))\n\t\tself.assertEqual(task_id_2, data[\"tasks\"][0])", "def _delete():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no deleting id input')\n\t\treturn 1\n\n\tfor ID in IDs:\n\t\tmyTask = query.get(ID)\n\t\tmyTaskSession.delete(myTask)\n\n\t\n\tmyTaskSession.commit()\n\n\treturn 0", "def test_delete_task_success(\n self,\n mock_background_tasks\n ):\n task_id = util.MOCK_UUID_4\n\n rv = TEST_CLIENT.delete(f\"/tasks/{task_id}\")\n result = rv.json()\n\n expected = {\"message\": \"Task deleted\"}\n self.assertDictEqual(expected, result)", "def delete(task_name):\n tasks.delete_one({'name': task_name})", "def delete(self, task_model):\n raise NotImplementedError()", "def delete():", "def test_delete(self):\n pass", "def test_update_no_match(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.task_storage.delete(self.my_task.key)\n\n self.my_task.title = 'foo'\n\n self.key = self.task_storage.update(self.my_task)\n\n 
self.assertIsNone(self.key)", "def delete(task_id, pfn):\n\n activated = PoolManager.db.query('UPDATE `standalone_deletion_tasks` SET `status` = \\'active\\' WHERE `id` = %s', task_id)\n if activated == 0:\n # task was cancelled\n return -1, None, None, '', '' \n\n return gfal_exec('unlink', (pfn,), deletion_nonerrors)", "def test_task_delete(self):\r\n admin = UserFactory.create()\r\n user = UserFactory.create()\r\n non_owner = UserFactory.create()\r\n app = AppFactory.create(owner=user)\r\n task = TaskFactory.create(app=app)\r\n root_task = TaskFactory.create(app=app)\r\n\r\n ## anonymous\r\n res = self.app.delete('/api/task/%s' % task.id)\r\n error_msg = 'Anonymous should not be allowed to update'\r\n assert_equal(res.status, '401 UNAUTHORIZED', error_msg)\r\n\r\n ### real user but not allowed as not owner!\r\n url = '/api/task/%s?api_key=%s' % (task.id, non_owner.api_key)\r\n res = self.app.delete(url)\r\n error_msg = 'Should not be able to update tasks of others'\r\n assert_equal(res.status, '403 FORBIDDEN', error_msg)\r\n\r\n #### real user\r\n # DELETE with not allowed args\r\n res = self.app.delete(url + \"&foo=bar\")\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert err['action'] == 'DELETE', err\r\n assert err['exception_cls'] == 'AttributeError', err\r\n\r\n # DELETE returns 204\r\n url = '/api/task/%s?api_key=%s' % (task.id, user.api_key)\r\n res = self.app.delete(url)\r\n assert_equal(res.status, '204 NO CONTENT', res.data)\r\n assert res.data == '', res.data\r\n\r\n #### root user\r\n url = '/api/task/%s?api_key=%s' % (root_task.id, admin.api_key)\r\n res = self.app.delete(url)\r\n assert_equal(res.status, '204 NO CONTENT', res.data)\r\n\r\n tasks = db.session.query(Task)\\\r\n .filter_by(app_id=app.id)\\\r\n .all()\r\n assert task not in tasks, tasks\r\n assert root_task not in tasks, tasks", "def test_add(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n new_task = self.task_storage.find(self.my_task.key)\n new_task.key = self.task_storage.add(new_task)\n\n self.assertNotEqual(self.my_task.key, new_task.key)\n self.task_storage.delete(new_task.key)", "def test_delete_item_using_delete(self):\n pass", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package()\n key = self.db.redis_key(pkg.filename)\n self.redis[key] = \"foobar\"\n self.db.delete(pkg)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def delete_task(dataset):\n Observation.delete_all(dataset)\n super(dataset.__class__, dataset).delete({DATASET_ID: dataset.dataset_id})", "async def delete(self, delete: TPayload) -> None:", "def delete(self, tube, task_id):\n cmd = tube.cmd('delete')\n args = (task_id,)\n\n return self.tnt.call(cmd, args)", "def delete_task(self, tid):\n self.task_controller.delete(tid)", "def delete_task(id):\n cursor = conn.cursor()\n cursor.execute(\"DELETE from tasks where id = %s;\", (id, ))\n conn.commit()\n print(\"Number of records deleted:\", cursor.rowcount)", "async def delete(self, task_id):\n args = (task_id,)\n res = await self.conn.call(self.__funcs['delete'], args)\n return self._create_task(res.body)", "def test_delete_services_with_tasks(self):\n\n with self.assertRaises(UserError):\n self.services_pigs.unlink()\n\n # click on the archive button\n self.services_pigs.write({'active': 
False})\n\n with self.assertRaises(UserError):\n self.services_pigs.unlink()", "def testDelete(self):\n response = requests.delete(url=self.url)\n headers = response.headers\n json_data = response.json()\n\n self.assertTrue(self.place == storage.get(Place, self.place_id))\n self.assertTrue(self.user == storage.get(User, self.user_id))\n self.assertTrue(self.city == storage.get(City, self.city_id))\n self.assertTrue(self.state == storage.get(State, self.state_id))\n self.assertEqual(response.status_code, 200, WRONG_STATUS_CODE_MSG)\n self.assertEqual(\n headers['Content-Type'], 'application/json', WRONG_TYPE_RETURN_MSG)\n self.assertEqual(len(json_data), 0)\n storage.reload()\n self.assertIsNone(storage.get(Place, self.place_id))" ]
[ "0.8145864", "0.7770039", "0.76899", "0.75120354", "0.74010766", "0.7174947", "0.71608794", "0.7150028", "0.7131688", "0.7089983", "0.7040682", "0.7018786", "0.69417983", "0.6921762", "0.68974316", "0.68915594", "0.6891311", "0.68886715", "0.68705547", "0.6846934", "0.68015915", "0.67761755", "0.67582023", "0.6727899", "0.6695768", "0.66902894", "0.66810477", "0.66657007", "0.66487944", "0.66337824" ]
0.86244047
0
Tests delete()'s handling of no matching key
def test_delete_no_match(self): self.my_task.key = self.task_storage.add(self.my_task) self.task_storage.delete(self.my_task.key) self.key = self.task_storage.delete(self.my_task.key) self.assertIsNone(self.key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_nonexist(self):\n promotion = PromotionFactory()\n promotion.id = '1cak41-nonexist'\n try:\n promotion.delete()\n except KeyError:\n self.assertRaises(KeyError)", "def testDeletingUnknownKey(self):\n\n memcache.delete('unknown')", "def test_delete_without_partition_key(self):\r\n with self.assertRaises(query.QueryException):\r\n TestModel.objects(attempt_id=0).delete()", "def delete(self, key):", "def test_delete_empty(empty_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n empty_bucket.delete(\"key 1\")", "def test_delete(self):\n fake_key_name = 'fake_key_name'\n\n with patch('iceit.backends.Key', spec=True) as mock_key:\n mock_key.return_value = mock_key\n backend = self.test_init_valid()\n backend.delete(fake_key_name)\n\n mock_key.assert_called_once_with(backend.bucket, fake_key_name)\n backend.bucket.delete_key.assert_called_once_with(mock_key)", "def test_delete_by_unique_key(self):\n id = get_rand_string()\n\n # Same data and user_id\n user_id = data = get_rand_string()\n\n self.conn.add(id=id, user_id=user_id, data=data)\n self.conn.commit()\n\n # Make sure it's been added\n results = self.conn.query(\"id:\" + id).results\n\n # Make sure the docs were in fact added.\n self.assertEquals(len(results), 1,\n \"No results returned for query id:%s\"% (id))\n\n # Delete the document and make sure it's no longer in the index\n self.conn.delete(id)\n self.conn.commit()\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 0,\n \"Document (id:%s) should've been deleted\"% (id))", "def test_delete():\n test_key = 'qmk_compiler_test_unique_key_name'\n\n # Make sure our test key doesn't exist\n try:\n qmk_storage.get(test_key)\n raise RuntimeError('%s exists on S3 when it should not!' % test_key)\n except Exception as e:\n if e.__class__.__name__ != 'NoSuchKey':\n raise\n\n # Store a test key we can delete\n qmk_storage.put(test_key, 'hello')\n assert qmk_storage.get(test_key) == 'hello'\n qmk_storage.delete(test_key)\n\n # Make sure it actually deleted\n try:\n qmk_storage.get(test_key)\n raise RuntimeError('%s exists on S3 when it should not!' 
% test_key)\n except Exception as e:\n if e.__class__.__name__ != 'NoSuchKey':\n raise", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package()\n key = self.db.redis_key(pkg.filename)\n self.redis[key] = \"foobar\"\n self.db.delete(pkg)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def test_delete(self):\n pass", "def delete(self,key):\n\n pass", "def delete(self, keyword, key):", "def test_delete_item_using_delete(self):\n pass", "def test_delitem(self):\n with self.assertRaises(QiitaDBNotImplementedError):\n del self.tester['1.SKM7.640188']", "def test_delete(self):\n\n self.feature_test.set_percentage(5)\n self.feature_test.delete()\n key = self.feature_test._get_redis_key()\n redis_data = cloak.redis.get(key)\n self.assertTrue(redis_data is None)\n\n set_key = Feature._get_redis_set_key()\n self.assertFalse(cloak.redis.sismember(set_key, key))", "def test_AlgorithmsIdHandler_DELETE_NotFound(self):\n searched_id = 'xyz1'\n right_list = []\n create_test_algorithm_list(right_list, 101)\n documents = []\n create_test_documents_list(right_list, documents, 101)\n index = search.Index(name=search_algorithm._INDEX_STRING)\n index.put(documents)\n # end of preparing data\n self.assertIsNone(index.get(searched_id), msg='Algorithm is there but should not be')\n response = self.testapp.delete('/algorithms/' + searched_id)\n self.assertEqual(200, response.status_int, msg='Wrong return code')\n self.assertIsNone(index.get(searched_id), msg='Algorithm is still there')", "def delete(self, key):\n pass", "def delete(self, key):\n pass", "def test_delete_without_any_where_args(self):\r\n with self.assertRaises(query.QueryException):\r\n TestModel.objects(attempt_id=0).delete()", "def test_delete_run(self):\n pass", "def test_delete_record(self):\n pass", "def test_delete1(self):\n pass", "def _delete_key(self):\n return self.connection.delete(self.key)", "def test_aws_service_api_keypair_delete(self):\n pass", "def test_delete_api_key(self):\n pass", "def test_delete7(self):\n pass", "async def _delete(self, key):\n return 1 if await self.client.delete(key) else 0", "def testDeletingItem(self):\n\n data = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n memcache.set('data', data)\n assert memcache.get('data') == data\n memcache.delete('data')\n assert memcache.get('data') == None", "def test_delete_case(self):\n pass", "def test_delete_single(single_bucket): # pylint: disable=redefined-outer-name\n single_bucket.delete(\"key 1\")\n\n assert single_bucket.is_empty() is True" ]
[ "0.79012454", "0.7562065", "0.75430506", "0.75408924", "0.7473593", "0.7439396", "0.74009305", "0.7335228", "0.7265693", "0.7248221", "0.7233685", "0.7212405", "0.71671504", "0.7114317", "0.70954394", "0.70858955", "0.7073204", "0.7073204", "0.7015491", "0.698004", "0.69772476", "0.6950299", "0.6940179", "0.6933076", "0.6921324", "0.68847615", "0.68795115", "0.6856387", "0.6852444", "0.6816194" ]
0.7573106
1
Tests that search() correctly returns a matching Task given a Task
def test_search(self): self.task_storage.add(self.my_task) search_task = task.Task(title='title', notes='note') task_search_list = self.task_storage.search(search_task) self.assertTrue(self.my_task in task_search_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_search_not_found(self):\n self.task_storage.add(self.my_task)\n search_task = task.Task(title='title1', notes='note1')\n task_search_list = self.task_storage.search(search_task)\n\n self.assertEqual(task_search_list, None)", "def test_GET_task_search(self):\n\t\tdata = self.GET_data('/api/task/search')\n\t\tself.assertEqual(0, len(data))\n\t\ttask_id = self.POST_task(TEST_TASK_DATA)\n\t\tdata = self.GET_data('/api/task/search')\n\t\tself.assertEqual(1, len(data))\n\t\tself.assertEqual(task_id, data[0]['_id'])\n\t\tself.assertDataMatch(TEST_TASK_DATA, data[0], keys=[k for k in TEST_TASK_DATA.keys()])", "def search_tasks(self, search_method):\n\n if search_method == 'Employee Name':\n employee_name = self.search_employee_name()\n result = self.task.find_task(search_method, employee_name)\n\n if not result:\n print(f'\\nNo task found with employee name \"{employee_name}\"')\n self.search_again()\n\n elif search_method == 'Keyword':\n keyword = self.search_keyword()\n result = self.task.find_task(search_method, keyword)\n\n if not result:\n print(f'\\nNo task found with \"{keyword}\" in task name or notes')\n self.search_again()\n\n elif search_method == 'Time Spent':\n time_spent = self.search_time_spent()\n result = self.task.find_task(search_method, time_spent)\n\n if not result:\n print(f'\\nNo task found with \"{time_spent}\" minutes time spent')\n self.search_again()\n\n elif search_method == 'Date':\n date = self.search_date()\n result = self.task.find_task(search_method, date)\n\n if not result:\n print(f'\\nNo task found with date of \"{date}\"')\n self.search_again()\n\n elif search_method == 'Date Range':\n date1 = self.search_date(\"starting date\")\n date2 = self.search_date(\"ending date\")\n dates = [date1, date2]\n result = self.task.find_task(search_method, dates)\n\n if not result:\n print(f'\\nNo task found between dates \"{date1}\" and \"{date2}\"')\n self.search_again()\n else:\n self.main_menu()\n\n self.print_tasks(result)", "def test_task_finder(test_operator, task_name, task_type):\n found_task = test_operator.find_task(task_name, task_type=task_type)\n assert found_task", "def find_task(self, task_str):\n task_str = task_str.replace('é', 'e').title()\n task_not_found = True\n custom_quest = False\n if \":\" in task_str:\n task_strs = task_str.split(\":\")\n task_str = task_strs[0]\n quest_str = task_strs[1]\n custom_quest = True\n while task_not_found:\n for task in self.tasks:\n if (task_str == task.reward.title()) or (task_str == task.quest.replace('é', 'e').title()) or (task_str in (reward.title() for reward in task.rewards)) or (task_str in (nickname.title() for nickname in task.nicknames)):\n out_task = task\n task_not_found = False\n if custom_quest:\n out_task = copy.copy(task)\n out_task.quest = quest_str.title()\n return out_task\n break\n if task_not_found:\n raise TaskNotFound()", "def search_tasks(self, search_string):\n\n\t\ttasks = self.tasklist.search(search_string.lower())\n\t\tif tasks:\n\t\t\tself.show_tasks(tasks)\n\t\telse:\n\t\t\tprint('\\nThere were no tasks containing \"{}\".\\n'.format(search_string))", "def test_find(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n new_task = self.task_storage.find(self.my_task.key)\n\n self.assertEqual(self.my_task, new_task)", "def findTaskInList(task, taskList):\n found = False\n for t in taskList:\n if t.tBegin == task.tBegin and t.tEnd == task.tEnd and t.batchSize == task.batchSize \\\n and t.order == task.order and t.machine == task.machine and t.processingUnit == 
task.processingUnit \\\n and t.operation == task.operation:\n found = True\n return found\n return found", "def search_task_name(self):\n\n task_name = input(\"\\nEnter a task name:\")\n\n if len(task_name) == 0:\n input(\"\\nTask name cannot be empty!\\n\")\n return self.search_task_name()\n else:\n return", "def test_query_to_tasks(self):\n Org(id='test1', status=CONNECTED).put()\n Org(id='test2', status=CONNECTED).put()\n Org(id='test3', status=DISCONNECTED).put()\n\n count = task_utils.query_to_tasks(\n query=Org.query(Org.status == CONNECTED),\n queue=Queue('adapter-update'),\n task_generator=lambda key: Task(url='/something/{}'.format(key.string_id()))\n )\n\n self.assertEqual(count, 2)\n task_count = len(self.taskqueue.get_filtered_tasks())\n self.assertEqual(task_count, 2)", "def find(self, task_id):\n for task_obj in self.queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in queue: '{}'\".format(task_id))", "def task_scanned(now_task):", "def search_task(var,username):\n cursor = conn.cursor()\n cursor.execute(\"SELECT category,task_name, deadline from tasks where (task_name=%s OR category=%s OR deadline=%s) AND username=%s;\", (var,var,var,username))\n rows = cursor.fetchall()\n return rows", "def search(self, search):\n raise NotImplementedError", "def test_search(self):\n d = self._search()\n self._response([2, 5, 10])\n self.assertEqual(self.successResultOf(d), [2, 5, 10])", "async def search(self, *args, **kwargs):\n pass", "def find_by_exact_match(self):\n while True: \n self.task_name_search = input(\"What is the keyword/s you are looking\"\n \" for? Press Q to quit to the main screen: \").strip()\n if self.task_name_search.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n x = self.dict_list\n return x\n self.find_by_exact_match_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(self.task_name_search, value):\n self.find_by_exact_match_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_exact_match_list)\n break\n self.del_or_edit()", "def search():\n pass", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "def test_search(self):\n pass", "def test_search(self):\n pass", "def test_search(self):\n pass", "def search_sale_tasks(start=None, end=None, f=None):\n s = Search(using=elastic, index=CONFIG['ES']['TASKS']) \\\n .query('term', doctype='task') \\\n .query('term', planned=True) \\\n .query('has_child', type='history', query=Q('term', workflow='sale'))\n\n if start is not None:\n s = s.query('bool', filter=Q('range', due={ 'gte': start }))\n if end is not None:\n s = s.query('bool', filter=Q('range', due={ 'lt': end }))\n if f is not None:\n s = s.query('bool', filter=f)\n\n return s.scan()", "def get(self, **kwargs):\n clone = self.filter(**kwargs)\n num = len(clone)\n if num == 1:\n return clone._result_cache[0]\n if not num:\n raise Task.DoesNotExist(\n 'Task matching query does not exist. '\n 'Lookup parameters were {0}'.format(kwargs))\n raise ValueError(\n 'get() returned more than one Task -- it returned {0}! 
'\n 'Lookup parameters were {1}'.format(num, kwargs))", "def localSearch(self, best, A, task, **kwargs):\n\t\treturn task.repair(best + self.epsilon * A * self.normal(0, 1, task.D), rnd=self.Rand)", "def test_22_get_specific_completed_task_anonymous(self):\r\n\r\n #model.rebuild_db()\r\n with self.flask_app.app_context():\r\n self.create()\r\n app = db.session.query(App).first()\r\n task = db.session.query(Task)\\\r\n .filter(App.id == app.id)\\\r\n .first()\r\n\r\n for i in range(10):\r\n task_run = TaskRun(app_id=app.id, task_id=task.id,\r\n user_ip=\"127.0.0.1\", info={'answer': 1})\r\n db.session.add(task_run)\r\n db.session.commit()\r\n\r\n ntask = Task(id=task.id, state='completed')\r\n\r\n assert ntask not in db.session\r\n db.session.merge(ntask)\r\n db.session.commit()\r\n\r\n res = self.app.get('app/%s/task/%s' % (app.short_name, task.id),\r\n follow_redirects=True)\r\n msg = 'You have already participated in this task'\r\n assert msg in res.data, res.data\r\n assert 'Try with another one' in res.data, res.data", "def get_by_name(task_name):\n return tasks.find_one({'name': task_name})", "def test_contain_tasks(self):\n dag = self.dagbag.get_dag(self.dag_id)\n tasks = dag.tasks\n task_ids = list(map(lambda task: task.task_id, tasks))\n self.assertIn(\"extract\", task_ids)", "def test_findtasks_found(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeModuleWithTasks)\n\n taskfile = 'a_0.py'\n\n sys.meta_path.append(TaskImporter(taskfile))\n taskmod = import_module(modpath)\n\n assert len(taskmod.__tasks__) == 1\n task = taskmod.__tasks__[0]\n assert task.__name__ == 'TestTask'\n assert isinstance(task, type)\n assert issubclass(task, TaskABC)", "def search_keyword(self):\n\n task_name = input(\"\\nEnter a search term:\")\n\n if len(task_name) == 0:\n input(\"\\nSearch Term cannot be empty!\\n\")\n return self.search_keyword()\n else:\n return task_name" ]
[ "0.75317067", "0.7350429", "0.69791824", "0.69752604", "0.6914059", "0.6566952", "0.64011717", "0.6261199", "0.6232779", "0.6019473", "0.5957226", "0.59462476", "0.59112656", "0.5889475", "0.5885827", "0.5874752", "0.58672816", "0.586257", "0.5858258", "0.5855104", "0.5855104", "0.5855104", "0.5845524", "0.584142", "0.5835542", "0.5815303", "0.5781309", "0.5777138", "0.576404", "0.575378" ]
0.794454
0
Tests add()'s handling of failed file reading
def test_add_read_fail(self): file_handler = open(self.test_task_filename, 'w') file_handler.write('Mock corrupt data') file_handler.close() os.chmod(self.test_task_filename, 000) self.assertRaises(IOError, self.task_storage.add, self.my_task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_append_missing_file(self):\n with (self.assertRaises(IOError)):\n FileWriter(self.bogus_path).append(self.ascii_string)", "def test_add1(self):\n try:\n TempfileManager.add_tempfile(tempdir + 'add1')\n self.fail(\"Expected IOError because file 'add1' does not exist\")\n except IOError:\n pass", "def test_add_write_fail(self):\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.add, self.my_task)", "def test_add_and_read_file(self, tmpdir, fs):\n with fs.add_file('my-dir', 'my-file') as f:\n f.write('hello')\n assert fs.read('my-dir', 'my-file') == 'hello'", "def test_adding_invalid_file(self):\n (added, status) = self.app.add_album(__file__)\n self.assertEqual(added, False)\n self.assertIn('Unable to load', status)\n self.assertEqual(self.get_album_count(), 0)", "def test_add_failure(self):\n self.protocol.addFailure(\n self.test, pysubunit.RemoteError(compat._u(\"boo qux\")))\n self.assertEqual(\n self.io.getvalue(),\n compat._b(\n ('failure: %s [\\n' +\n _remote_exception_str + ': boo qux\\n]\\n')\n % self.test.id()))", "def test_failures(self):\n reader = TextReader('jacksheet', subject='R1XXX', localization=0)\n with pytest.raises(FileNotFoundError):\n reader.load()", "def test_add_raises_catch():\n with pytest.raises(TypeError):\n tasks.add(task='not a Task object')", "def test_add_1(self):\n contents = testdata.get_words()\n d = testdata.create_dir()\n ts = {\n \"foo.txt\": [contents],\n }\n ds = d.add(ts)\n path = ds[0]\n self.assertTrue(os.path.isfile(path), \"{} does not exist\".format(path))\n self.assertEqual(contents, path.read_text())", "def test_file_append_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileWriter(self.bogus_path).append_utf8(self.unicode_string)", "def test_from_file_fail(self):\n with TemporaryDirectory() as tmp:\n fp = os.path.join(tmp, \"test.txt\")\n open(fp, 'a').close()\n assert os.path.exists(fp)\n with self.assertRaises(TypeError):\n BaseDataClass.from_file(fp)", "def test_file_error(self):\n my_reader = DataSetReader()\n covid_list = CovidCase.objects.all()\n\n with self.assertRaises(IOError):\n my_reader.writeFile(covid_list, \"Not_A_File.csv\")", "def test_additional_resource_size(self):\n\n testpath = \"./testfile.dat\"\n test_submission = Submission()\n self.addCleanup(os.remove, testpath)\n\n # Check with non-existant file\n with self.assertRaises(RuntimeError):\n test_submission.add_additional_resource(\"Some description\", testpath, copy_file=True)\n\n # Check with file that is too big\n size = int(2e8) # bytes in 200 MB\n with open(testpath, \"wb\") as testfile:\n testfile.write(bytes(\"\\0\" * size, \"utf-8\"))\n with self.assertRaises(RuntimeError):\n test_submission.add_additional_resource(\"Some description\", testpath, copy_file=True)\n\n # Clean up\n os.remove(testpath)\n\n # Check with file that is not too big.\n size = int(5e7) # bytes in 50 MB\n with open(testpath, \"wb\") as testfile:\n testfile.write(bytes(\"\\0\" * size, \"utf-8\"))\n try:\n test_submission.add_additional_resource(\"Some description\", testpath, copy_file=True)\n except RuntimeError:\n self.fail(\"Submission.add_additional_resource raised an unexpected RuntimeError.\")\n\n # Clean up\n self.doCleanups()", "def test_update_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.update, self.my_task)", "def test_add(self):\n self.assertEqual(work_file.add(10, 5), 15)\n self.assertEqual(work_file.add(-1, 
1), 0)\n self.assertEqual(work_file.add(-1, -1), -2)", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_error():\n file = gff.GFFFile()\n with pytest.raises(ValueError):\n # 'seqid' beginning with '>' is not legal\n file.append(\">xyz\", \"ab\", \"cd\", 1, 2, None, None, None, {\"Id\":\"foo\"})\n with pytest.raises(ValueError):\n # String fields must not be empty\n file.append(\"\", \"ab\", \"cd\", 1, 2, None, None, None, {\"Id\":\"foo\"})\n with pytest.raises(ValueError):\n # String fields must not be empty\n file.append(\"xyz\", \"\", \"cd\", 1, 2, None, None, None, {\"Id\":\"foo\"})\n with pytest.raises(ValueError):\n # String fields must not be empty\n file.append(\"xyz\", \"ab\", \"\", 1, 2, None, None, None, {\"Id\":\"foo\"})", "def test_read_fail2(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data2.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IEOF' end", "def test_existing_file_after_assert_error(exist_of_file):\n try:\n assert read_magic_number(exist_of_file)\n except AssertionError:\n print(\"Now lets do check of existing file\")", "def test_file_readlines_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines()", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()", "def testFailFiles(self):\n # Cleaning possible files already occupying the available set\n self.dummySubscription.failFiles([])\n\n # First test - Test if initial file (on available set) is inserted in the\n # failed set - no arguments\n\n dummyFile2 = File('/tmp/dummyfile2,8888', 1, 1, 1)\n # Insert dummyFile2 into the available files Set at dummySubscription\n self.dummySubscription.available.addFile(dummyFile2)\n\n S = self.dummySubscription.availableFiles()\n # Fail all files\n self.dummySubscription.failFiles(S)\n\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Second test - Test if target files are inserted at the failed set\n\n dummyFileList = []\n # Populating the dummy List with a random number of files\n for i in range(1, random.randint(100, 1000)):\n lfn = '/store/data/%s/%s/file.root' % (random.randint(1000, 9999),\n random.randint(1000, 9999))\n size = random.randint(1000, 2000)\n events = 1000\n run = random.randint(0, 2000)\n lumi = random.randint(0, 8)\n\n file = File(lfn=lfn, size=size, events=events,\n checksums={\"cksum\": \"1\"})\n file.addRun(Run(run, *[lumi]))\n dummyFileList.append(file)\n # Add the new files\n self.dummySubscription.available.addFile(dummyFileList)\n # and fail them\n self.dummySubscription.failFiles(files=dummyFileList)\n # Check there are no files available - everything should be failed\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Check if all files were inserted at subscription's failed files Set\n for x in dummyFileList:\n assert x in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Couldn\\'t make file failed %s' % x.dict['lfn']\n\n # Third test - Test if a replicate file is erased from the other Sets,\n # when a file is considered failed\n\n dummyFile3 = File('/tmp/dummyfile3,5555', 1, 1, 1)\n dummyFileList = []\n dummyFileList.append(dummyFile3)\n\n # 
Inserting dummyFile3 to be used as an argument, into each of the other\n # file sets\n self.dummySubscription.acquired.addFile(dummyFile3)\n self.dummySubscription.available.addFile(dummyFile3)\n self.dummySubscription.completed.addFile(dummyFile3)\n\n # Run the method failFiles\n self.dummySubscription.failFiles(files=dummyFileList)\n\n # Check if dummyFile3 was inserted at the failed Set\n assert dummyFile3 in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Replicated file could\\'nt be inserted at failed Set'\n\n # Check if dummyFile3 was erased from all the other Sets\n assert dummyFile3 not in self.dummySubscription.acquired.getFiles(type='set'), \\\n 'Failed file still present at acquired Set'\n assert dummyFile3 not in self.dummySubscription.completed.getFiles(type='set'), \\\n 'Failed file still present at completed Set'\n assert dummyFile3 not in self.dummySubscription.available.getFiles(type='set'), \\\n 'Failed file still present at available Set'", "def test_add_error(self):\n self.protocol.addError(\n self.test, pysubunit.RemoteError(compat._u(\"phwoar crikey\")))\n self.assertEqual(\n self.io.getvalue(),\n compat._b(('error: %s [\\n' +\n _remote_exception_str + \": phwoar crikey\\n\"\n \"]\\n\") % self.test.id()))", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_add1_dir(self):\n try:\n TempfileManager.add_tempfile(tempdir + 'add1')\n self.fail(\n \"Expected IOError because directory 'add1' does not exist\")\n except IOError:\n pass", "def test_add_raises():\n with pytest.raises(TypeError):\n tasks.add(task='not a Task object')", "def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))", "def test_add_filelist_to_cache():\n from nose.tools import raises\n\n tmp = FileListCache()\n assert tmp.add_filelist_to_cache() is False\n\n @raises(TypeError)\n def test_tmp():\n \"\"\" nost test \"\"\"\n tmp.add_filelist_to_cache(file_list=1)\n\n test_tmp()", "def test_read_fail1(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IMOD' start" ]
[ "0.7181701", "0.71707875", "0.70211554", "0.67583793", "0.6626335", "0.63298345", "0.63149416", "0.62887055", "0.6272493", "0.62378985", "0.6216637", "0.62026274", "0.6184095", "0.6158653", "0.61405927", "0.6139783", "0.613846", "0.6130343", "0.61097133", "0.60790586", "0.6076906", "0.6066122", "0.6063233", "0.60531473", "0.60531473", "0.6052702", "0.603529", "0.6018113", "0.5999337", "0.59990263" ]
0.73515815
0
Tests add()'s handling of failed file writing
def test_add_write_fail(self): os.chmod(self.test_task_filename, 0400) self.assertRaises(IOError, self.task_storage.add, self.my_task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_append_missing_file(self):\n with (self.assertRaises(IOError)):\n FileWriter(self.bogus_path).append(self.ascii_string)", "def test_add_read_fail(self):\n file_handler = open(self.test_task_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_task_filename, 000)\n\n self.assertRaises(IOError, self.task_storage.add, self.my_task)", "def test_add1(self):\n try:\n TempfileManager.add_tempfile(tempdir + 'add1')\n self.fail(\"Expected IOError because file 'add1' does not exist\")\n except IOError:\n pass", "def test_cannot_write_file(self):\n self.api.write_data('/some-fake/path/to-create-file/', 'some-string')", "def test_update_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.update, self.my_task)", "def test_append(self):\n with contextlib.closing(logfile.LogFile(self.name, self.dir)) as log:\n log.write(\"0123456789\")\n\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n self.assertEqual(log.size, 10)\n self.assertEqual(log._file.tell(), log.size)\n log.write(\"abc\")\n self.assertEqual(log.size, 13)\n self.assertEqual(log._file.tell(), log.size)\n f = log._file\n f.seek(0, 0)\n self.assertEqual(f.read(), b\"0123456789abc\")", "def test_add_and_read_file(self, tmpdir, fs):\n with fs.add_file('my-dir', 'my-file') as f:\n f.write('hello')\n assert fs.read('my-dir', 'my-file') == 'hello'", "def test_add_failure(self):\n self.protocol.addFailure(\n self.test, pysubunit.RemoteError(compat._u(\"boo qux\")))\n self.assertEqual(\n self.io.getvalue(),\n compat._b(\n ('failure: %s [\\n' +\n _remote_exception_str + ': boo qux\\n]\\n')\n % self.test.id()))", "def test_atomic_failure(self):\n with TemporaryDirectory() as tmp:\n fp = os.path.join(tmp, \"asdf.txt\")\n\n # raise fake error while writing file atomically\n with self.assertRaises(FakeFileFailure):\n with atomic_write(fp, \"w\") as f:\n tmpfile = f.name\n assert os.path.exists(tmpfile)\n raise FakeFileFailure()\n\n # ensure both the temp and destination files do not exist\n assert not os.path.exists(tmpfile)\n assert not os.path.exists(fp)", "def test_malformed(self):\n fdesc, fname = tempfile.mkstemp()\n tfile = os.fdopen(fdesc, 'w')\n tfile.write(self.file_str2)\n tfile.close()\n assert_raises(Exception, grades.writers.GradesFile, fname)\n os.unlink(fname)", "def test_adding_invalid_file(self):\n (added, status) = self.app.add_album(__file__)\n self.assertEqual(added, False)\n self.assertIn('Unable to load', status)\n self.assertEqual(self.get_album_count(), 0)", "def test_add1_dir(self):\n try:\n TempfileManager.add_tempfile(tempdir + 'add1')\n self.fail(\n \"Expected IOError because directory 'add1' does not exist\")\n except IOError:\n pass", "def test_file_append_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileWriter(self.bogus_path).append_utf8(self.unicode_string)", "def test_add3(self):\n OUTPUT = open(tempdir + 'add3', 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n TempfileManager.add_tempfile(tempdir + 'add3')", "def test_delete_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.delete, self.my_task.key)", "def test_add(self):\n self.assertEqual(work_file.add(10, 5), 15)\n self.assertEqual(work_file.add(-1, 1), 0)\n self.assertEqual(work_file.add(-1, -1), -2)", "def test_add_1(self):\n contents = testdata.get_words()\n d = 
testdata.create_dir()\n ts = {\n \"foo.txt\": [contents],\n }\n ds = d.add(ts)\n path = ds[0]\n self.assertTrue(os.path.isfile(path), \"{} does not exist\".format(path))\n self.assertEqual(contents, path.read_text())", "def test_add2(self):\n TempfileManager.add_tempfile(tempdir + 'add2', False)", "def test_fileAlreadyExistsNoOverwrite(self):\n fp = FilePath(self.mktemp())\n fp.touch()\n\n self.assertRaises(OSError, self.makeConnectedDccFileReceive, fp.path)", "def test_incorrect_input():\n content = 'hi'\n filename = {}\n\n with pytest.raises(TypeError):\n write_file(content, filename)\n\n content = {}\n filename = 'hi'\n\n with pytest.raises(TypeError):\n write_file(content, filename)", "def test_write_to_bug_file_if_good(self):\n mock = Mock(return_value=3)\n\n @write_error_to_file\n def everything_works_without_exceptions():\n mock()\n\n everything_works_without_exceptions()\n self.assertFalse(os.path.isfile(LOGFILENAME))", "def append_record_failure():\n\t\tpass", "def test_write_bug_to_file_if_exception(self):\n\n mock = Mock(side_effect=KeyError)\n\n @write_error_to_file\n def error_raising():\n mock()\n\n error_raising()\n self.assertRaises(KeyError)\n self.assertTrue(os.path.isfile(LOGFILENAME))\n data_in_log_file = open(LOGFILENAME, mode=\"r\").read()\n self.assertIn('There was an exception in error_raising', data_in_log_file)\n self.assertIn('Traceback', data_in_log_file)\n self.assertIn('KeyError', data_in_log_file)", "def testFailFiles(self):\n # Cleaning possible files already occupying the available set\n self.dummySubscription.failFiles([])\n\n # First test - Test if initial file (on available set) is inserted in the\n # failed set - no arguments\n\n dummyFile2 = File('/tmp/dummyfile2,8888', 1, 1, 1)\n # Insert dummyFile2 into the available files Set at dummySubscription\n self.dummySubscription.available.addFile(dummyFile2)\n\n S = self.dummySubscription.availableFiles()\n # Fail all files\n self.dummySubscription.failFiles(S)\n\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Second test - Test if target files are inserted at the failed set\n\n dummyFileList = []\n # Populating the dummy List with a random number of files\n for i in range(1, random.randint(100, 1000)):\n lfn = '/store/data/%s/%s/file.root' % (random.randint(1000, 9999),\n random.randint(1000, 9999))\n size = random.randint(1000, 2000)\n events = 1000\n run = random.randint(0, 2000)\n lumi = random.randint(0, 8)\n\n file = File(lfn=lfn, size=size, events=events,\n checksums={\"cksum\": \"1\"})\n file.addRun(Run(run, *[lumi]))\n dummyFileList.append(file)\n # Add the new files\n self.dummySubscription.available.addFile(dummyFileList)\n # and fail them\n self.dummySubscription.failFiles(files=dummyFileList)\n # Check there are no files available - everything should be failed\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Check if all files were inserted at subscription's failed files Set\n for x in dummyFileList:\n assert x in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Couldn\\'t make file failed %s' % x.dict['lfn']\n\n # Third test - Test if a replicate file is erased from the other Sets,\n # when a file is considered failed\n\n dummyFile3 = File('/tmp/dummyfile3,5555', 1, 1, 1)\n dummyFileList = []\n 
dummyFileList.append(dummyFile3)\n\n # Inserting dummyFile3 to be used as an argument, into each of the other\n # file sets\n self.dummySubscription.acquired.addFile(dummyFile3)\n self.dummySubscription.available.addFile(dummyFile3)\n self.dummySubscription.completed.addFile(dummyFile3)\n\n # Run the method failFiles\n self.dummySubscription.failFiles(files=dummyFileList)\n\n # Check if dummyFile3 was inserted at the failed Set\n assert dummyFile3 in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Replicated file could\\'nt be inserted at failed Set'\n\n # Check if dummyFile3 was erased from all the other Sets\n assert dummyFile3 not in self.dummySubscription.acquired.getFiles(type='set'), \\\n 'Failed file still present at acquired Set'\n assert dummyFile3 not in self.dummySubscription.completed.getFiles(type='set'), \\\n 'Failed file still present at completed Set'\n assert dummyFile3 not in self.dummySubscription.available.getFiles(type='set'), \\\n 'Failed file still present at available Set'", "def test_silent_write_errors():\n\n tracker = pawprint.Tracker(db=None, table=None)\n\n try:\n tracker.write(event=\"This will fail silently.\")\n except Exception:\n pytest.fail(\"Failed to fail silently.\")", "def test_additional_resource_size(self):\n\n testpath = \"./testfile.dat\"\n test_submission = Submission()\n self.addCleanup(os.remove, testpath)\n\n # Check with non-existant file\n with self.assertRaises(RuntimeError):\n test_submission.add_additional_resource(\"Some description\", testpath, copy_file=True)\n\n # Check with file that is too big\n size = int(2e8) # bytes in 200 MB\n with open(testpath, \"wb\") as testfile:\n testfile.write(bytes(\"\\0\" * size, \"utf-8\"))\n with self.assertRaises(RuntimeError):\n test_submission.add_additional_resource(\"Some description\", testpath, copy_file=True)\n\n # Clean up\n os.remove(testpath)\n\n # Check with file that is not too big.\n size = int(5e7) # bytes in 50 MB\n with open(testpath, \"wb\") as testfile:\n testfile.write(bytes(\"\\0\" * size, \"utf-8\"))\n try:\n test_submission.add_additional_resource(\"Some description\", testpath, copy_file=True)\n except RuntimeError:\n self.fail(\"Submission.add_additional_resource raised an unexpected RuntimeError.\")\n\n # Clean up\n self.doCleanups()", "def test_file_exists(self):\n with TemporaryDirectory() as tmp:\n # define path to file\n fp = os.path.join(tmp, \"asdf.txt\")\n\n # write atomically to file\n with atomic_write(fp, \"w\") as f:\n f.write(\"asdf\")\n\n # ensure file exists\n assert os.path.exists(fp)\n\n # ensure atomic_write to same file raises an error as it already exists\n try:\n with atomic_write(fp, \"w\") as f:\n f.write(\"asdf\")\n except FileExistsError as e:\n self.assertIsInstance(e, FileExistsError)", "def test_add_raises_catch():\n with pytest.raises(TypeError):\n tasks.add(task='not a Task object')", "def test_add_error(self):\n self.protocol.addError(\n self.test, pysubunit.RemoteError(compat._u(\"phwoar crikey\")))\n self.assertEqual(\n self.io.getvalue(),\n compat._b(('error: %s [\\n' +\n _remote_exception_str + \": phwoar crikey\\n\"\n \"]\\n\") % self.test.id()))", "def test_invalid_write(self, mock_progress):\n mock_status = \"status\"\n mock_message = \"message\"\n stub_dir_status = self.StubDirectoryStatus()\n # mock main call to test\n mock_progress.side_effect = progress.exceptions.DirectoryError(\"\", \"\")\n # run function\n with self.assertRaises(progress.exceptions.DirectoryError):\n 
upload_helpers._set_and_write_directory_status(stub_dir_status, mock_status, mock_message)" ]
[ "0.73165464", "0.7307308", "0.7226781", "0.69408065", "0.6796736", "0.6693423", "0.65727067", "0.6411899", "0.6393688", "0.6373964", "0.63519084", "0.63039166", "0.6282991", "0.6276873", "0.6274579", "0.6184783", "0.61819637", "0.61724293", "0.6146522", "0.61401504", "0.6136607", "0.61338246", "0.61289614", "0.61025316", "0.6100714", "0.60464543", "0.6035038", "0.6033646", "0.60077447", "0.6006966" ]
0.7493461
0
Tests find()'s handling of failed file reading
def test_find_read_fail(self): file_handler = open(self.test_task_filename, 'w') file_handler.write('Mock corrupt data') file_handler.close() os.chmod(self.test_task_filename, 000) self.assertRaises(IOError, self.task_storage.find, self.my_task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_read_no_file():\n filename = 'asdf'\n with pytest.raises(FileNotFoundError):\n read_file(filename)", "def test_read_raw_suggested(fname):\n with pytest.raises(ValueError, match='Try reading'):\n read_raw(fname)", "def test_failures(self):\n reader = TextReader('jacksheet', subject='R1XXX', localization=0)\n with pytest.raises(FileNotFoundError):\n reader.load()", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_no_such_file(self):\n\t\twith self.assertRaises(IOError):\n\t\t\tanalyse_text('foobar')", "def test_read_fail2(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data2.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IEOF' end", "def test_read_raw():\n # Use a file ending that does not exist\n f = 'file.bogus'\n with pytest.raises(ValueError, match='file name extension must be one of'):\n _read_raw(f)", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()", "def test_iter_files_negative():\n with pytest.raises(ValueError):\n x = list(iter_files(\"wrong_path\", ignore_errors=False))\n assert len(x) > 0\n\n with pytest.raises(RuntimeError):\n x = list(iter_files(\"http://foobar.baz.nonexistent\", ignore_errors=False))\n assert len(x) > 0\n\n with pytest.raises(RuntimeError):\n x = list(iter_files(\"http://google.com/X\", ignore_errors=False))\n assert len(x) > 0", "def test_read_fail1(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IMOD' start", "def test_file_readlines_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines()", "def test_existing_file_after_assert_error(exist_of_file):\n try:\n assert read_magic_number(exist_of_file)\n except AssertionError:\n print(\"Now lets do check of existing file\")", "def test_sam_parser_throw(self):\n # self.cleanup = False\n\n some_file = '%s/fake_results' % GOLDEN_DIR\n\n try:\n with open(some_file, 'r') as something:\n for line in something:\n parse_sam_line(line)\n # pylint: disable=broad-except\n except Exception:\n pass\n else:\n self.fail('Exception should have been called when parsing a non-SAM file.')", "def test_bad_file():\n\n bad_file = random_string()\n letter = random.choice(string.ascii_lowercase)\n rv, out = getstatusoutput('{} {} -f {}'.format(prg, letter, bad_file))\n assert rv != 0\n expected = \"No such file or directory: '{}'\".format(bad_file)\n assert re.search(expected, out)", "def test_invalid_path() -> None:\n path = rsc / \"does-not-exist.ods\"\n with pytest.raises(FileNotFoundError, match=\"does not exist\"):\n read_ods(path)", "def provoke_and_handle_FileNotFoundError():\n try:\n with open(\"NEIN.mp3\") as f:\n print(\"well\")\n except FileNotFoundError as fnfe:\n print(f\"Sorry! 
{fnfe}\")", "def test_file_read_bin_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_bin()", "def test_file_readas_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_as(\"utf-8\")", "def test_read_components_from_rulefile_rulefile_not_specified2(tmp_path):\n with pytest.raises(SystemExit):\n _read_components_from_rulefile()", "def test_file_readlines_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines_utf8()", "def test_nonfile(self):\n self.assertEqual(None,readfiles.read_file(\"tests.txt))", "def test_scan_dir_not_found(self, dir_path):\n with self.assertRaises(FileNotFoundError):\n self.file_scanner.scan(dir_path)", "def test_file_read_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_utf8()", "def test_02_not_exist(self):\n with self.assertLogs(\"borg\", \"WARNING\") as logger:\n self.api.extract(self.archive, self.file_3)\n message = logger.records[0].getMessage()\n self.assertRegex(\n message,\n r\".*?file_3.*never\",\n \"Warning not logged for bad path\",\n )", "def test_readable_error_if_file_not_found(self):\n fake_path = 'this/path/is/not/real'\n self.assertEqual(LoadJsonConfig.read_config_file(LoadJsonConfig(), fake_path), 'File not found at ' + fake_path)", "def test_info_fail(self):\n path = \"non_existing_audio.wav\"\n with self.assertRaisesRegex(RuntimeError, path):\n self._info(path)", "def test_read_raw_unsupported_multi(fname, tmp_path):\n fname = tmp_path / fname\n fname.write_text('')\n with pytest.raises(RuntimeError, match='Could not read.*using any'):\n read_raw(fname)", "def test_XMLParser_file_not_found():\n with pytest.raises(OSError):\n parser = XMLParser(\"no_such_file\")" ]
[ "0.69080895", "0.68955046", "0.68710303", "0.67394614", "0.67348474", "0.67348474", "0.6710361", "0.6645094", "0.65975255", "0.6564053", "0.6531037", "0.63860875", "0.63495845", "0.63137984", "0.6310357", "0.63024074", "0.6291224", "0.62738997", "0.62598825", "0.62516713", "0.62439764", "0.62289745", "0.6193946", "0.6174704", "0.61646503", "0.6161813", "0.61575913", "0.6145619", "0.6144041", "0.6140799" ]
0.7037131
0
Tests get_all()'s handling of failed file reading
def test_get_all_read_fail(self): file_handler = open(self.test_task_filename, 'w') file_handler.write('Mock corrupt data') file_handler.close() os.chmod(self.test_task_filename, 000) self.assertRaises(IOError, self.task_storage.get_all)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iter_files_negative():\n with pytest.raises(ValueError):\n x = list(iter_files(\"wrong_path\", ignore_errors=False))\n assert len(x) > 0\n\n with pytest.raises(RuntimeError):\n x = list(iter_files(\"http://foobar.baz.nonexistent\", ignore_errors=False))\n assert len(x) > 0\n\n with pytest.raises(RuntimeError):\n x = list(iter_files(\"http://google.com/X\", ignore_errors=False))\n assert len(x) > 0", "def test_bad_number_of_files():\n with pytest.raises(Exception):\n process_files(['resources/simple_data.json', 'resources/simple_data.json', 'resources/simple_data.json'])\n with pytest.raises(Exception):\n process_files([])", "def test_failures(self):\n reader = TextReader('jacksheet', subject='R1XXX', localization=0)\n with pytest.raises(FileNotFoundError):\n reader.load()", "def testFailedFiles(self):\n assert self.dummySubscription.failedFiles() == \\\n self.dummySubscription.failed.getFiles(type='set'), \\\n 'Method failedFiles does not return failed files Set'", "def test_read_fail2(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data2.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IEOF' end", "def test_read_file_to_list():\n new_file = \"Hello,World\"\n data = race.read_file_to_list()\n assert type(data) == list\n for x in data:\n assert type(x) == dict\n assert len(data) == 33\n with pytest.raises(FileNotFoundError):\n Race(new_file).read_file_to_list()", "def test_read_raw_suggested(fname):\n with pytest.raises(ValueError, match='Try reading'):\n read_raw(fname)", "def test_read_raw_unsupported_multi(fname, tmp_path):\n fname = tmp_path / fname\n fname.write_text('')\n with pytest.raises(RuntimeError, match='Could not read.*using any'):\n read_raw(fname)", "def test_read_bad_queries(file):\n\n # Bad topo-id\n with pytest.raises(DataNotFoundInFile) as excinfo:\n read(file, [QueryData(\"bad_topoid\")])\n assert \"bad_topoid\" in str(excinfo.value)\n \n # Bad reach name\n with pytest.raises(DataNotFoundInFile) as excinfo:\n read(file, [QueryData(\"topoid1\", \"bad_reach_name\")])\n assert \"bad_reach_name\" in str(excinfo.value)\n\n # Bad chainage\n with pytest.raises(DataNotFoundInFile) as excinfo:\n read(file, [QueryData(\"topoid1\", \"reach1\", 666)])\n assert \"666\" in str(excinfo.value)", "def test_read_fail1(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IMOD' start", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()", "def test_iter_files():\n for i in iter_files(\"wrong_path\"):\n assert False, \"no files should be yielded\"\n\n files = list(iter_files(\"test_data/\"))\n assert len(files) > 0\n assert (\"test_data/stopwords.txt\", \"test_data/stopwords.txt\") in files\n assert (\"test_data/directory/test_dir\", \"test_data/directory/test_dir\") in files\n\n files = list(iter_files(\"http://google.com\"))\n assert len(files) > 0\n\n files = list(iter_files(\"http://google.com/X\", ignore_errors=True))\n assert not files", "def test_reader(self):\n default_list_file = GAMEBASE + \"/Lists/char-LIST.tex\"\n output = name_pdfs.read_names_file(default_list_file)\n self.assertIsInstance(output, dict)\n self.assertGreater(len(output), 0)\n # Check that the function returns a dict ✓\n # Of greater than length 0 ✓\n fname = \"\"\n for example in self.bad_filenames:\n with self.subTest(fname=example):\n with 
self.assertRaises(OSError):\n name_pdfs.read_names_file(fname)", "def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass", "def test_get_file_fail(self):\n from django.contrib.messages import get_messages\n path = reverse(\"setting-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"hotel\", \"url\": \"http://rachel.wrongurltofetchdata.nl/djangocase/hotel.csv\",\n \"username\": \"py\", \"password\": \"30_bumps\", \"save\": \"on\"})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert \"Received an error\" in str(messages[0])", "def test_file_error(self):\n my_reader = DataSetReader()\n covid_list = CovidCase.objects.all()\n\n with self.assertRaises(IOError):\n my_reader.writeFile(covid_list, \"Not_A_File.csv\")", "def test_nonfile(self):\n self.assertEqual(None,readfiles.read_file(\"tests.txt))", "def test_file_readlines_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines()", "def test_file_reader(self) -> None:\n result = [['123', 'Jin He', 'Computer Science'],\n ['234', 'Nanda Koka', 'Software Engineering'],\n ['345', 'Benji Cai', 'Software Engineering']]\n # file have header\n self.assertTrue(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|', True)) == result)\n # file without header\n self.assertFalse(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|')) == result)\n # More than 3 datafield\n with self.assertRaises(ValueError):\n list(file_reader(\n 'C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 4, '|', True))\n # file not found\n with self.assertRaises(FileNotFoundError):\n list(file_reader('abc.txt', 3, '|', True))", "def test_get_all_failures(self):\n batch = [self.successfulresult, self.failedresult]\n self.db.insert_result_batch(results=batch)\n results = self.db.get_all_failures()\n self.assertTrue(len(results) == 1, msg=\"Retrieved more than a single failure unexpectedly.\")\n self.assertDictContainsSubset(self.failedresult, results[0].__dict__)", "def test_read_raw():\n # Use a file ending that does not exist\n f = 'file.bogus'\n with pytest.raises(ValueError, match='file name extension must be one of'):\n _read_raw(f)", "def test_get_filtered_list_fail(self):\n (flexmock(errata.requests)\n .should_receive(\"get\")\n .and_return(flexmock(status_code=404, text=\"_irrelevant_\")))\n\n self.assertRaises(exceptions.ErrataToolError, errata.get_filtered_list)", "def test_read_components_from_rulefile_rulefile_not_specified2(tmp_path):\n with pytest.raises(SystemExit):\n _read_components_from_rulefile()", "def test_get_all(client: FlaskClient):\n response1 = util.upload_file(\n client, DEFAULT_USER, get_example_file(ExampleFileType.Txt)\n )\n response2 = util.upload_file(\n client, DEFAULT_USER, get_example_file(ExampleFileType.Jpg)\n )\n response3 = util.upload_file(\n client, DEFAULT_USER, get_example_file(ExampleFileType.Png)\n )\n\n # Now retrieve them\n response_get = util.get_all_files(client, DEFAULT_USER)\n assert response_get.status == \"200 OK\"\n assert len(response_get.json) == 3\n assert response1.json in response_get.json\n assert response2.json in response_get.json\n assert response3.json in response_get.json", "def testFailFiles(self):\n # Cleaning possible files already occupying 
the available set\n self.dummySubscription.failFiles([])\n\n # First test - Test if initial file (on available set) is inserted in the\n # failed set - no arguments\n\n dummyFile2 = File('/tmp/dummyfile2,8888', 1, 1, 1)\n # Insert dummyFile2 into the available files Set at dummySubscription\n self.dummySubscription.available.addFile(dummyFile2)\n\n S = self.dummySubscription.availableFiles()\n # Fail all files\n self.dummySubscription.failFiles(S)\n\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Second test - Test if target files are inserted at the failed set\n\n dummyFileList = []\n # Populating the dummy List with a random number of files\n for i in range(1, random.randint(100, 1000)):\n lfn = '/store/data/%s/%s/file.root' % (random.randint(1000, 9999),\n random.randint(1000, 9999))\n size = random.randint(1000, 2000)\n events = 1000\n run = random.randint(0, 2000)\n lumi = random.randint(0, 8)\n\n file = File(lfn=lfn, size=size, events=events,\n checksums={\"cksum\": \"1\"})\n file.addRun(Run(run, *[lumi]))\n dummyFileList.append(file)\n # Add the new files\n self.dummySubscription.available.addFile(dummyFileList)\n # and fail them\n self.dummySubscription.failFiles(files=dummyFileList)\n # Check there are no files available - everything should be failed\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Check if all files were inserted at subscription's failed files Set\n for x in dummyFileList:\n assert x in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Couldn\\'t make file failed %s' % x.dict['lfn']\n\n # Third test - Test if a replicate file is erased from the other Sets,\n # when a file is considered failed\n\n dummyFile3 = File('/tmp/dummyfile3,5555', 1, 1, 1)\n dummyFileList = []\n dummyFileList.append(dummyFile3)\n\n # Inserting dummyFile3 to be used as an argument, into each of the other\n # file sets\n self.dummySubscription.acquired.addFile(dummyFile3)\n self.dummySubscription.available.addFile(dummyFile3)\n self.dummySubscription.completed.addFile(dummyFile3)\n\n # Run the method failFiles\n self.dummySubscription.failFiles(files=dummyFileList)\n\n # Check if dummyFile3 was inserted at the failed Set\n assert dummyFile3 in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Replicated file could\\'nt be inserted at failed Set'\n\n # Check if dummyFile3 was erased from all the other Sets\n assert dummyFile3 not in self.dummySubscription.acquired.getFiles(type='set'), \\\n 'Failed file still present at acquired Set'\n assert dummyFile3 not in self.dummySubscription.completed.getFiles(type='set'), \\\n 'Failed file still present at completed Set'\n assert dummyFile3 not in self.dummySubscription.available.getFiles(type='set'), \\\n 'Failed file still present at available Set'", "def test_get_data_fail(self):\n self.assertIsNone(get_data('this_must_fail', 5, 0))", "def test_list_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)", "def testEmpty(self):\n data = fetchers.fetch('http://unittest/200.txt').read()\n self.assertRaises(\n xrds.XRDSError,\n xrds.get_elements, data, [])", "def test_content_file(self):\n\n url=[\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\",\n 
\"http://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\"]\n cwd=os.getcwd()\n list_of_files=requester.batch_url_to_csv(url, fnames=[\"m1\", \"m2\",\"m3\"])\n total_rows=0\n reader_list=[]\n for j in range(len(list_of_files)):\n reader=csv.DictReader(list_of_files[j])\n for rows in reader:\n total_rows+=1\n reader_list.append(total_rows)\n\n unique=set((reader_list))\n if len(unique)!=len(reader_list):\n with self.assertRaises(AssertionError):\n requester.batch_url_to_csv(url,fnames=['m1','m2','m3'])", "def test_all_unknown_class(self):\n storage = FileStorage()\n with self.assertRaises(NameError):\n storage.all(dog)" ]
[ "0.67754906", "0.6768293", "0.66898245", "0.6532667", "0.64860004", "0.6446616", "0.6441123", "0.643655", "0.6398089", "0.6365367", "0.63103926", "0.6277082", "0.6246861", "0.61393595", "0.6119875", "0.61132956", "0.6075328", "0.6074766", "0.6072406", "0.60654676", "0.60591894", "0.60407203", "0.60222536", "0.60059065", "0.59655774", "0.5950969", "0.59508735", "0.5946686", "0.5921461", "0.5912472" ]
0.74396557
0
Tests update()'s handling of failed file writing
def test_update_write_fail(self): self.task_storage.add(self.my_task) os.chmod(self.test_task_filename, 0400) self.assertRaises(IOError, self.task_storage.update, self.my_task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cannot_write_file(self):\n self.api.write_data('/some-fake/path/to-create-file/', 'some-string')", "def test_file_update_delete_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n\n self.write_file(dir0, \"foo\", \"baz\")\n self.delete_file(dir1, \"foo\")\n self.sync_all()\n self.assertFileAbsent(dir0, \"foo\")\n self.assertFileAbsent(dir1, \"foo\")", "def test_atomic_failure(self):\n with TemporaryDirectory() as tmp:\n fp = os.path.join(tmp, \"asdf.txt\")\n\n # raise fake error while writing file atomically\n with self.assertRaises(FakeFileFailure):\n with atomic_write(fp, \"w\") as f:\n tmpfile = f.name\n assert os.path.exists(tmpfile)\n raise FakeFileFailure()\n\n # ensure both the temp and destination files do not exist\n assert not os.path.exists(tmpfile)\n assert not os.path.exists(fp)", "def test_skipped_update(self):\n dir0, dir1, dir2 = self.make_temp_dirs(3)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n\n # Update dir0 and sync dir0/dir1 but not dir2\n self.write_file(dir0, \"foo\", \"baz\")\n self.sync_dirs(dir0, dir1)\n self.assertFile(dir0, \"foo\", \"baz\")\n self.assertFile(dir1, \"foo\", \"baz\")\n self.assertFile(dir2, \"foo\", \"bar\")\n\n # dir2 should pick up the change when all are sync'd\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"baz\")\n self.assertFile(dir1, \"foo\", \"baz\")\n self.assertFile(dir2, \"foo\", \"baz\")", "def test_silent_write_errors():\n\n tracker = pawprint.Tracker(db=None, table=None)\n\n try:\n tracker.write(event=\"This will fail silently.\")\n except Exception:\n pytest.fail(\"Failed to fail silently.\")", "def test_file_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\")\n self.sync_all()\n\n self.write_file(dir0, \"foo\", \"bar\")\n time.sleep(0.1)\n self.write_file(dir1, \"foo\", \"baz\")\n self.sync_all()\n # File with later mtime wins\n self.assertFile(dir0, \"foo\", \"baz\")\n self.assertFile(dir1, \"foo\", \"baz\")", "def test_handle_install_existing_version_file(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n\n file_handle = open(\"last_firmware_version.txt\", \"w\")\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.ERROR,\n FirmwareUpdateErrorType.UNKNOWN,\n )\n\n firmware_update.handle_install(\"some_file\")\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )\n\n file_handle.close()\n os.remove(\"last_firmware_version.txt\")", "def test_update_case(self):\n pass", "def test_update(self):\n\n # boilerplate\n tempdir = tempfile.mkdtemp()\n for i in range(10):\n file(os.path.join(tempdir, str(i)), 'w').write(str(i))\n\n # First, make a manifest:\n manifest = convert([tempdir])\n newtempdir = tempfile.mkdtemp()\n manifest_file = os.path.join(newtempdir, 'manifest.ini')\n file(manifest_file,'w').write(manifest)\n manifest = ManifestParser(manifests=(manifest_file,))\n self.assertEqual(manifest.get('name'),\n [str(i) for i in range(10)])\n\n # All of the tests are initially missing:\n self.assertEqual([i['name'] for i in manifest.missing()],\n [str(i) for i in range(10)])\n\n # But then we copy one over:\n self.assertEqual(manifest.get('name', name='1'), ['1'])\n manifest.update(tempdir, name='1')\n self.assertEqual(sorted(os.listdir(newtempdir)),\n ['1', 
'manifest.ini'])\n\n # Update that one file and copy all the \"tests\":\n file(os.path.join(tempdir, '1'), 'w').write('secret door')\n manifest.update(tempdir)\n self.assertEqual(sorted(os.listdir(newtempdir)),\n ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'manifest.ini'])\n self.assertEqual(file(os.path.join(newtempdir, '1')).read().strip(),\n 'secret door')\n\n # clean up:\n shutil.rmtree(tempdir)\n shutil.rmtree(newtempdir)", "def test_overwrite_corrupted_files(overwrite_on_tape_topology, core_config_mock, caches_mock):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=True, did2_corrupted=True)\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_receive(job_params):\n for job in (job_params if isinstance(job_params, list) else [job_params]):\n for file in job.get('files', []):\n if (file.get('file_metadata', {}).get('dst_type') == 'TAPE'\n and file.get('file_metadata', {}).get('dst_file', {}).get('file_on_tape') is not None):\n # Fake that dst_file metadata contains file_on_tape == True\n # As we don't really have tape RSEs in our tests, file_on_tape is always false\n file['file_metadata']['dst_file']['file_on_tape'] = True\n return job_params\n\n with patch('rucio.daemons.conveyor.poller.FTS3Transfertool', _FTSWrapper):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Both transfers must be marked as failed because the file size is incorrect\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit the failed requests. They must fail again, because overwrite_corrupted_files is False\n # 2 runs: for multihop, finisher works one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Set overwrite to True before running the poller or finisher\n core_config.set('transfers', 'overwrite_corrupted_files', True)\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit one more time. 
Now the destination file must be overwritten\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'", "def test_update_users_Returns_false_for_invalid_field(\n self,mock_modify_users_file, mock_read_users_file\n ):\n mock_read_users_file.return_value = self.users_data\n mock_modify_users_file.return_value = True \n crud = CRUD()\n self.assertFalse(crud.update_users(\"1\",\"champs_inexistant\",\"Maxime\"))", "def test_write_to_bug_file_if_good(self):\n mock = Mock(return_value=3)\n\n @write_error_to_file\n def everything_works_without_exceptions():\n mock()\n\n everything_works_without_exceptions()\n self.assertFalse(os.path.isfile(LOGFILENAME))", "def test_malformed(self):\n fdesc, fname = tempfile.mkstemp()\n tfile = os.fdopen(fdesc, 'w')\n tfile.write(self.file_str2)\n tfile.close()\n assert_raises(Exception, grades.writers.GradesFile, fname)\n os.unlink(fname)", "def test_log_file_is_updated(self):\n import os\n\n file_size_before = os.path.getsize(self.chatbot.database.path)\n\n # Force the chatbot to update it's timestamp\n self.chatbot.log = True\n\n # Submit input which should cause a new log to be created\n input_text = \"What is the airspeed velocity of an unladen swallow?\"\n response = self.chatbot.get_response(input_text)\n\n file_size_after = os.path.getsize(self.chatbot.database.path)\n\n self.assertLess(file_size_before, file_size_after)", "def test_fileAlreadyExistsNoOverwrite(self):\n fp = FilePath(self.mktemp())\n fp.touch()\n\n self.assertRaises(OSError, self.makeConnectedDccFileReceive, fp.path)", "def test_update_unexpected_error(self, data_update, requests_mock, capsys):\n requests_mock.put(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.update(data_url, data=data_update)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out", "def test_update(tmp_path):\n _ = get_logger(verbosity=4)\n for osdk_version in [\"latest\", \"1.3.1\", \"1.3.1\"]:\n version = osdk_update.osdk_update(version=osdk_version, **tmp_path)\n file_data = osdk_update.OsdkFileData(version=version, **tmp_path)\n assert file_data.files_not_matching() == []\n for filename in file_data.downloads:\n try:\n os.remove(file_data.downloads[filename]['dst'])\n except Exception:\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_refresh_error_update_cache(self):\n self.host_updater.refresh_cache()\n mock_method_path = ('dbtobindzone.updaters.host_updater'\n '.HostUpdater.temp_cache_file')\n patch = mock.patch(mock_method_path, new_callable=mock.PropertyMock)\n with patch as mock_method:\n mock_method.return_value = '/TMP/DIR/NOT/EXISTS'\n result = 
self.host_updater.refresh_cache()\n self.assertFalse(result)", "def test_writing(self):\n with contextlib.closing(RiggedDailyLogFile(self.name, self.dir)) as log:\n log.write(\"123\")\n log.write(\"456\")\n log.flush()\n log.write(\"7890\")\n\n with open(self.path) as f:\n self.assertEqual(f.read(), \"1234567890\")", "def test_file_exists(self):\n with TemporaryDirectory() as tmp:\n # define path to file\n fp = os.path.join(tmp, \"asdf.txt\")\n\n # write atomically to file\n with atomic_write(fp, \"w\") as f:\n f.write(\"asdf\")\n\n # ensure file exists\n assert os.path.exists(fp)\n\n # ensure atomic_write to same file raises an error as it already exists\n try:\n with atomic_write(fp, \"w\") as f:\n f.write(\"asdf\")\n except FileExistsError as e:\n self.assertIsInstance(e, FileExistsError)", "def test_no_change_without_enough_results(self):\n MetadataUpdater.min_results_for_update = 2\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: FAIL\n \"\"\")\n self.update({\n 'results': [{\n 'test': '/fail.html',\n 'status': 'PASS',\n 'expected': 'FAIL',\n }],\n })\n self.assert_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: FAIL\n \"\"\")", "def test_update_no_version_change(\n dbbackup, update, version_file=None, orig_version=None\n):\n version_file = cli.version_file()\n open(version_file, \"w\").write(orig_version)\n cli.initialize()\n update.assert_not_called()\n dbbackup.assert_not_called()", "def test_update_http_error(self, data_update, requests_mock, capsys):\n requests_mock.put(data_url, status_code=300)\n with pytest.raises(RuntimeError):\n r = operations.update(data_url, data=data_update)\n assert 'HTTP error: 300' in capsys.readouterr().out", "async def test_setup_failed_update(hass: HomeAssistant, ufp: MockUFPFixture) -> None:\n\n ufp.api.update = AsyncMock(side_effect=NvrError)\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n assert ufp.entry.state == ConfigEntryState.SETUP_RETRY\n assert ufp.api.update.called", "def test_update_users_Returns_false_for_invalid_id(\n self, mock_modify_users_file, mock_read_users_file\n ):\n mock_read_users_file.return_value = self.users_data\n mock_modify_users_file.return_value = True \n crud = CRUD()\n self.assertFalse(crud.update_users(\"10\",\"name\",\"Maxime\"))", "def test_put_raises_on_overwriting(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src1 = os.path.join(uploads, 'demo-test.tar.gz')\n src2 = os.path.join(uploads, 'test.jpg')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put_variant(src1, id, 'demo-test.tar.gz')\n with assert_raises(x.FileExists):\n backend.put_variant(src2, id, 'demo-test.tar.gz')", "def test_update_system(self):\n pass" ]
[ "0.6860754", "0.66650736", "0.643922", "0.64237213", "0.6337336", "0.63254875", "0.6300389", "0.6256422", "0.62026453", "0.61951196", "0.61921984", "0.6148384", "0.6147777", "0.61377096", "0.6131382", "0.6124437", "0.6122346", "0.61113584", "0.61113584", "0.61113584", "0.6094106", "0.6090008", "0.60726935", "0.6069118", "0.6060784", "0.60565436", "0.6056207", "0.60483104", "0.6030516", "0.6013882" ]
0.7388159
0
Tests delete()'s handling of failed file writing
def test_delete_write_fail(self):
    self.task_storage.add(self.my_task)
    os.chmod(self.test_task_filename, 0400)

    self.assertRaises(IOError, self.task_storage.delete, self.my_task.key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))", "def test_file_update_delete_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n\n self.write_file(dir0, \"foo\", \"baz\")\n self.delete_file(dir1, \"foo\")\n self.sync_all()\n self.assertFileAbsent(dir0, \"foo\")\n self.assertFileAbsent(dir1, \"foo\")", "def test_write_delete(self):\n\n expected = \"Hello, World! I'm domain2idna\"\n File(\"hi\").write(expected)\n\n with open(\"hi\") as file:\n actual = file.read()\n\n self.assertEqual(expected, actual)\n\n expected = False\n File(\"hi\").delete()\n actual = PyFunceble.path.isfile(\"hi\")\n\n self.assertEqual(expected, actual)", "def test_read_delete(self):\n\n expected = \"Hello, World! This has been written by Fun Ilrys.\"\n File(\"hi\").write(expected)\n actual = File(\"hi\").read()\n\n self.assertEqual(expected, actual)\n\n expected = False\n File(\"hi\").delete()\n actual = PyFunceble.path.isfile(\"hi\")\n\n self.assertEqual(expected, actual)", "def test_raise_file_not_found_error_for_fileio_delete() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Write to the temporary file\n output_file_location = os.path.join(tmpdirname, \"foo.txt\")\n\n # Instantiate the file-io\n file_io = PyArrowFileIO()\n\n # Delete the non-existent file using the file-io implementations delete method\n with pytest.raises(FileNotFoundError) as exc_info:\n file_io.delete(output_file_location)\n\n assert \"Cannot delete file\" in str(exc_info.value)\n\n # Confirm that the file no longer exists\n assert not os.path.exists(output_file_location)", "def test_cannot_remove_file(self):\n self.api.remove_file('/some-fake/path/to-delete-file.json')", "def test_write_overwrite_delete(self):\n\n expected = \"Hello, World! I'm domain2idna\"\n File(\"hi\").write(expected)\n\n with open(\"hi\") as file:\n actual = file.read()\n\n self.assertEqual(expected, actual)\n\n expected = \"Hello, World! 
Python is great, you should consider learning it!\"\n File(\"hi\").write(expected, overwrite=True)\n\n with open(\"hi\") as file:\n actual = file.read()\n\n self.assertEqual(expected, actual)\n\n expected = False\n File(\"hi\").delete()\n actual = PyFunceble.path.isfile(\"hi\")\n\n self.assertEqual(expected, actual)", "def test_cleanup_file_deleted(fs: FakeFilesystem, requests_mock: Mocker) -> None:\n requests_mock.get(\"https://example.com/test.csv\", text=CONTENTS)\n\n adapter = CSVFile(\"https://example.com/test.csv\")\n assert adapter.path.exists()\n adapter.path.unlink()\n adapter.close()", "def test_cannot_write_file(self):\n self.api.write_data('/some-fake/path/to-create-file/', 'some-string')", "def test_delete_download_backup_errors(\n rotkehlchen_api_server: APIServer,\n data_dir: Path,\n username: str,\n):\n user_data_dir = Path(data_dir, username)\n # Make sure deleting file outside of user data dir fails\n undeletable_file = Path(data_dir / 'notdeletablefile')\n undeletable_file.touch()\n assert undeletable_file.exists()\n response = requests.delete(\n api_url_for(\n rotkehlchen_api_server,\n 'databasebackupsresource'),\n json={'files': [str(undeletable_file)]},\n )\n assert_error_response(\n response=response,\n contained_in_msg='is not in the user directory',\n status_code=HTTPStatus.CONFLICT,\n )\n response = requests.get(\n api_url_for(\n rotkehlchen_api_server,\n 'databasebackupsresource'),\n json={'file': str(undeletable_file)},\n )\n assert_error_response(\n response=response,\n contained_in_msg='is not in the user directory',\n status_code=HTTPStatus.CONFLICT,\n )\n undeletable_file.unlink() # finally delete normally\n\n response = requests.delete(\n api_url_for(\n rotkehlchen_api_server,\n 'databasebackupsresource'),\n json={'files': [str(Path(user_data_dir, 'idontexist'))]},\n )\n assert_error_response(\n response=response,\n contained_in_msg='does not exist',\n status_code=HTTPStatus.BAD_REQUEST,\n )\n response = requests.get(\n api_url_for(\n rotkehlchen_api_server,\n 'databasebackupsresource'),\n json={'file': str(Path(user_data_dir, 'idontexist'))},\n )\n assert_error_response(\n response=response,\n contained_in_msg='does not exist',\n status_code=HTTPStatus.BAD_REQUEST,\n )\n\n # test delete two files and only one exists\n undeletable_file.touch()\n response = requests.put(api_url_for(rotkehlchen_api_server, 'databasebackupsresource'))\n filepath = Path(assert_proper_response_with_result(response))\n response = requests.delete(\n api_url_for(\n rotkehlchen_api_server,\n 'databasebackupsresource'),\n json={'files': [str(undeletable_file), str(filepath)]},\n )\n assert_error_response(\n response=response,\n contained_in_msg='is not in the user directory',\n status_code=HTTPStatus.CONFLICT,\n )\n assert undeletable_file.exists()\n assert filepath.exists()", "def test_delete_file(self):\n # put file\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'test.tar.gz')\n\n id1 = utils.generate_id('test.tar.gz')\n backend.put(src, id1)\n\n # regression testing (only delete what requested)\n id2 = id1.split('-')\n id2[4] += 'ZZZ'\n id2 = '-'.join(id2)\n\n backend.put(src, id1, True)\n backend.put_variant(src, id1, 'demo.txt')\n backend.put(src, id2, True)\n backend.delete(id1)\n\n path1 = '/'.join(backend.id_to_path(id1)) + '/test.tar.gz'\n path2 = '/'.join(backend.id_to_path(id1)) + '/demo.txt'\n self.assertFalse(backend.exists(path1))\n self.assertFalse(backend.exists(path2))\n\n # assume only proper 
file deleted\n path3 = '/'.join(backend.id_to_path(id2)) + '/test.tar.gz'\n self.assertTrue(backend.exists(path3))", "def test_force_delete(mocker, tmp_path):\n ro_file = Path(tmp_path, 'bar')\n ro_file.write_text(\"Test data\")\n make_readonly(ro_file)\n\n rmtree = mocker.Mock()\n utils.force_delete(rmtree, ro_file, sys.exc_info())\n\n assert (ro_file.stat().st_mode & stat.S_IWRITE) == stat.S_IWRITE\n rmtree.assert_called_once_with(ro_file)\n\n utils.rmtree(tmp_path)", "def test_malformed(self):\n fdesc, fname = tempfile.mkstemp()\n tfile = os.fdopen(fdesc, 'w')\n tfile.write(self.file_str2)\n tfile.close()\n assert_raises(Exception, grades.writers.GradesFile, fname)\n os.unlink(fname)", "def test_deletion_fail(self):\n\n # Assert that a RelaxNoPipeError occurs when the data pipe does not exist.\n self.assertRaises(RelaxNoPipeError, pipes.delete, 'x')", "def testTempDirWhenDeleteEncounterError(self):\n self.Patch(os, \"chmod\")\n self.Patch(tempfile, \"mkdtemp\", return_value=\"/tmp/tempdir\")\n expected_error = OSError(\"Expected OS Error\")\n self.Patch(shutil, \"rmtree\", side_effect=expected_error)\n\n def _Call():\n with utils.TempDir():\n pass\n\n # Verify OSError should be raised.\n self.assertRaises(OSError, _Call)\n tempfile.mkdtemp.assert_called_once() #pylint: disable=no-member\n shutil.rmtree.assert_called_with(\"/tmp/tempdir\") #pylint: disable=no-member", "def test_delete_nonexistent(client: FlaskClient):\n response = util.delete_file(client, DEFAULT_USER, \"test-nonexistent\")\n assert response.status == \"404 NOT FOUND\"", "def test_atomic_failure(self):\n with TemporaryDirectory() as tmp:\n fp = os.path.join(tmp, \"asdf.txt\")\n\n # raise fake error while writing file atomically\n with self.assertRaises(FakeFileFailure):\n with atomic_write(fp, \"w\") as f:\n tmpfile = f.name\n assert os.path.exists(tmpfile)\n raise FakeFileFailure()\n\n # ensure both the temp and destination files do not exist\n assert not os.path.exists(tmpfile)\n assert not os.path.exists(fp)", "def tearDown(self):\n try:\n os.remove(self.junk_file)\n except OSError as doh:\n if doh.errno == 2:\n # No such File, ignore\n pass\n else:\n raise", "def test_cleanup_on_failure_when_preparing_file(self, mocker):\n remove_spy = mocker.spy(os, 'remove')\n self._retryable.side_effect = requests.HTTPError('Fail')\n\n payload = dict(id=\"B\", data={\"some\": \"data\"}, ai_service='A')\n headers = {'x-rh-identity': 'ABC'}\n self.client.post(self.url, json=payload, headers=headers)\n\n remove_spy.assert_called_once()", "def test_fileAlreadyExistsNoOverwrite(self):\n fp = FilePath(self.mktemp())\n fp.touch()\n\n self.assertRaises(OSError, self.makeConnectedDccFileReceive, fp.path)", "def test_no_deletion(self):\n\t\tanalyse_text(self.filename)\n\t\tself.assertTrue(os.path.exists(self.filename))", "def test_delete_run(self):\n pass", "def test_no_delete(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def test_delete(self):\n package = make_package()\n path = self.storage.get_path(package)\n os.makedirs(os.path.dirname(path))\n with open(path, 'w') as ofile:\n ofile.write('foobar')\n self.storage.delete(package)\n self.assertFalse(os.path.exists(path))", "def test_update_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.update, self.my_task)", "def test_no_deletion(self):\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def test_no_deletion(self):\n 
analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))", "def test_file_closed(self):\n try:\n with get_temp_file() as (fd, name):\n os.close(fd)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)", "def test_rmtree(tmp_path):\n file_path = Path(tmp_path, \"bar\")\n file_path.write_text(\"Test data\")\n make_readonly(file_path)\n\n utils.rmtree(tmp_path)\n\n assert not Path(tmp_path).exists()", "def test_delete_with_file(self):\n c1 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection())\n\n filepath = os.path.join(\n os.getcwd(), self.hiarc_util.TEST_FILE_PATH, 'Test.txt')\n f1 = self.hiarc_files.create_file(\n self.hiarc_util.create_file(), filepath)\n\n self.hiarc_collections.add_file_to_collection(\n hiarc.AddFileToCollectionRequest(f1.key), c1.key)\n\n self.hiarc_collections.delete_collection(c1.key)\n self.assertRaises(hiarc.rest.ApiException,\n self.hiarc_collections.get_collection, c1.key)\n\n r = self.hiarc_files.get_file(f1.key)\n assert f1 == r\n\n self.hiarc_files.delete_file(f1.key)\n self.assertRaises(hiarc.rest.ApiException,\n self.hiarc_files.get_file, f1.key)" ]
[ "0.78282595", "0.72518045", "0.72256637", "0.7099771", "0.70138484", "0.6928033", "0.6867104", "0.6863741", "0.6757048", "0.67564034", "0.67120224", "0.6711196", "0.6697561", "0.6683005", "0.6674397", "0.6671692", "0.6669273", "0.6648235", "0.664784", "0.6643082", "0.661105", "0.657755", "0.6551836", "0.65355945", "0.6518095", "0.64768535", "0.64768535", "0.6469384", "0.6420189", "0.6419869" ]
0.7606043
1
Tests that update() acts correctly when no note is specified
def test_update_no_note(self):
    self.my_task.notes = None
    self.my_task.key = self.task_storage.add(self.my_task)
    self.my_task.title = 'foo'

    key = self.task_storage.update(self.my_task)
    new_task = self.task_storage.find(key)

    self.assertEqual(self.my_task, new_task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_note(self):\n pass", "def dummy_update( self ):\r\n pass", "def test_update_case(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_note(self):\n\n url = reverse(\n 'crm-admin:note-update',\n kwargs={\n 'pk': self.object.id\n }\n )\n\n # Test that the page load first\n response = self.c.get(url)\n self.assertEqual(response.status_code, 200)\n\n # Send data\n data = {\n 'comment': 'other value'\n }\n response = self.c.post(url, data)\n self.assertEqual(response.status_code, 302)\n\n # Get the latest added object\n obj = Note.objects.get(id=self.object.id)\n self.assertEqual(obj.comment, 'other value')", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update_one(self):\n pass", "def test_noop_model_update(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n\n with patch.object(self.session, 'execute') as execute:\n m0.update()\n assert execute.call_count == 0\n\n with patch.object(self.session, 'execute') as execute:\n m0.update(count=5)\n assert execute.call_count == 0", "def test_update_record(self):\n pass", "def test_update_no_args(self):\n Base._Base__nb_objects = 0\n s = Square(1, 0, 0, 1)\n s.update()\n self.assertEqual(str(s), \"[Square] (1) 0/0 - 1\")", "def test_update_release_note_not_found(db, admin_client):\n release_note = ReleaseNoteFactory()\n release_note_id = to_global_id(\"ReleaseNoteNode\", release_note.kf_id)\n release_note.delete()\n\n variables = {\n \"releaseNote\": release_note_id,\n \"input\": {\"description\": \"Updated description\"},\n }\n resp = admin_client.post(\n \"/graphql\",\n format=\"json\",\n data={\"query\": UPDATE_RELEASE_NOTE, \"variables\": variables},\n )\n\n assert \"errors\" in resp.json()\n errors = resp.json()[\"errors\"]\n assert \"does not exist\" in errors[0][\"message\"]", "def noteUpdate(ownerId, noteId, text):\n query = QUERY_UPDATE_NOTE\n query = query.format(**{'owner_id':ownerId, 'note_id':noteId, 'text':text})\n\n try:\n cursor.execute(query)\n connection.commit()\n except Exception as e:\n return False, ERROR_UPDATE_NOTE, 'Note update failed'\n\n return True, NO_ERROR, 'Updated successfuly!'", "def updateNote(self, authenticationToken, note):\r\n pass", "def test_23_empty_update(self):\n r = Rectangle(1, 2, 3, 4, 5)\n r.update()\n self.assertEqual(r.__str__(), \"[Rectangle] (5) 3/4 - 1/2\")", "def test_update_release_note(db, admin_client):\n release_note = ReleaseNoteFactory()\n release_note_id = to_global_id(\"ReleaseNoteNode\", release_note.kf_id)\n\n variables = {\n \"releaseNote\": release_note_id,\n \"input\": {\"description\": \"Updated description\"},\n }\n resp = admin_client.post(\n \"/graphql\",\n format=\"json\",\n data={\"query\": UPDATE_RELEASE_NOTE, \"variables\": variables},\n )\n\n release = resp.json()[\"data\"][\"updateReleaseNote\"][\"releaseNote\"]\n assert ReleaseNote.objects.count() == 1\n assert release[\"kfId\"] == ReleaseNote.objects.first().kf_id\n assert release[\"description\"] == variables[\"input\"][\"description\"]", "def test_noop_model_update(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n\r\n with patch.object(ConnectionPool, 'execute') as execute:\r\n m0.update()\r\n assert execute.call_count == 0\r\n\r\n with patch.object(ConnectionPool, 'execute') as execute:\r\n m0.update(count=5)\r\n assert execute.call_count == 0", "def test_client_partial_update(self):\n pass", "def test_update9(self):\n pass", "def update_note(self, 
new_note):\r\n self.__note = new_note", "def update( ):\r\n pass", "def test_schedule_updated_notes(self):\n self.mockTicketAddMessage()\n # start five hours from now\n params = self._getNowAsDict(add_hours=5)\n params['description'] = 'This is a description'\n params['tzname'] = 'UTC'\n params['is_dst'] = '0'\n response = self.app.post(url_for(controller='maintenances', action='schedule', id=3),\n params=params)\n \n service_4_pre = db_sess.query(ScheduledService).get(4)\n service_4_start_time = service_4_pre.start_time\n\n params = self._getNowAsDict(add_hours=5)\n params['description'] = 'This is not a description'\n params['tzname'] = 'UTC'\n params['is_dst'] = '0'\n\n response = self.app.post(url_for(controller='maintenances', action='schedule', id=3),\n params=params)\n this_maintenance = db_sess.query(ScheduledMaintenance).get(3)\n service_4_post = db_sess.query(ScheduledService).get(4)\n self.assertEqual(service_4_start_time,service_4_post.start_time)\n self.assertEqual(this_maintenance.general_description,\n 'This is not a description')", "def test_update_scenario(self):\n pass", "def test_meeting_update(self):\n pass", "def test_client_update(self):\n pass", "def update(*args):", "def update():", "def update():", "def noteUpdateMetadata(ownerId, noteId, title = '', subject = ''):\n\n if title == '' and subject == '':\n return True, NO_ERROR, 'Nothing was changed'\n\n query = QUERY_UPDATE_NOTE_METADATA\n queryFields = ''\n if title != '':\n queryFields += 'title=\"' + title + '\"'\n\n if subject != '':\n if queryFields != '':\n queryFields += ', '\n queryFields += 'subject=\"' + subject + '\"'\n\n query = query.format(**{'owner_id':ownerId, 'note_id':noteId, 'fields':queryFields})\n \n try:\n cursor.execute(query)\n connection.commit()\n except Exception as e:\n return False, NO_ERROR, 'Note update failed'\n\n return True, NO_ERROR, 'Success'" ]
[ "0.8323263", "0.6799715", "0.6731598", "0.6721813", "0.6721813", "0.6721813", "0.66855013", "0.66341954", "0.66315436", "0.6610659", "0.6530954", "0.6468007", "0.6454314", "0.6421094", "0.6410134", "0.64047325", "0.6364371", "0.6359656", "0.6321669", "0.63181335", "0.6269567", "0.62548596", "0.6235255", "0.6162009", "0.6095763", "0.60935545", "0.60889345", "0.6074855", "0.6074855", "0.6054201" ]
0.70699596
1
Tests that get() updates and returns the correct key
def test_get(self):
    key = self.key_gen.get()
    key2 = self.key_gen.get()

    self.assertEqual(key, key2 - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_with_get(self):\n storage = Storage()\n storage.set('1', 1)\n self.assertEqual(1, storage.set('1', 2, get=True), \"Should return previous value\")\n self.assertEqual(2, storage.get('1'), 'Should get new value')\n self.assertEqual(None, storage.set('2', 1, get=True), \"Should return None as there was no key '2'\")", "def get(self, key):", "def get(self, key):", "def test_get(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1,2,'three'],\n '4': {1:'one', 2:'two'}}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key])\n\n values = [storage.get(key) for key in keys_to_set.keys()]\n true_values = [keys_to_set[key] for key in keys_to_set.keys()]\n self.assertEqual(true_values, values)\n self.assertRaises(StorageKeyError,storage.get, '0')", "def test_newkey(self):\n d = {\n \"action\": \"set\",\n \"node\": {\n \"expiration\": \"2013-09-14T00:56:59.316195568+02:00\",\n \"modifiedIndex\": 183,\n \"key\": u(\"/testkey\"),\n \"ttl\": 19,\n \"value\": \"test0\",\n },\n }\n\n res = self.client.put(d[\"node\"][\"key\"], d[\"node\"][\"value\"])\n zeroth = res.header.revision\n d[\"node\"][\"value\"] = \"test1\"\n res = self.client.put(d[\"node\"][\"key\"], d[\"node\"][\"value\"])\n self.assertEqual(zeroth + 1, res.header.revision)\n self.assertEqual(self.client.get(d[\"node\"][\"key\"])[0], b(d[\"node\"][\"value\"]))", "def get(self, key):\n pass", "def test_put_get(self):\n key = 1\n item = 'aaa'\n cache = LRUCache(5)\n cache.put(key, item)\n assert item == cache.get(key)\n assert 1 == cache.size", "def test_one_key(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n self.storage.set('1', 'one', moe=self.now + 1)\n self.now += 2\n self.gc.expire_random()\n self.assertRaises(StorageKeyError, self.storage.get, '1')", "def test_get_returns_value(self):\n lru = LRUCache(10)\n lru.put(\"test\", \"value\")\n\n self.assertEqual(lru.get(\"test\"), \"value\")", "def test_get_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'}}\n moes = {'1': time.time() + 5, '4': time.time() + 10}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key], moes.get(key))\n # test at moment t\n self.assertEqual(keys_to_set['1'], storage.get('1'), \"Key '1' should still exist.\")\n # test at moment t+6, one key should expire\n self.now += 6\n keys_to_set.pop('1')\n moes.pop('1')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertEqual(keys_to_set['4'], storage.get('4'), \"Key '4' should still exist.\")\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")\n # test at moment t+11\n self.now += 5\n keys_to_set.pop('4')\n moes.pop('4')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertRaises(StorageKeyError, storage.get, '4')\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")", "def get(self, key):\n pass", "def _get(self, key: str):\n pass", "def check_setget(self, key, value):\n\n\t\tyield self.conn.set(key, value)\n\n\t\tnew_value = yield self.conn.get(key)\n\t\tself.assertEqual(new_value, value)\n\n\t\tself.assert_((yield self.conn.delete(key)))\n\n\t\tnew_value = yield self.conn.get(key)\n\t\tself.assertEqual(new_value, None)", "def 
test_storeAndRetrieveKey(self):\n domain, username, password, key = \"domain\", \"user\", \"password\", \"key\"\n\n self.assertStored(domain, username, password, key)\n self.assertEqual(self.users.key(domain, username), key)", "def get(self, key: t.Hashable) -> t.Any:", "def get(self, key, default=None):", "def test_flush_key(self):\r\n a = Addon.objects.get(id=1)\r\n eq_(base.flush_key(a.cache_key), base.flush_key(a))", "def test_update_no_match(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.task_storage.delete(self.my_task.key)\n\n self.my_task.title = 'foo'\n\n self.key = self.task_storage.update(self.my_task)\n\n self.assertIsNone(self.key)", "def test_keys_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n storage.set('1', 'one', self.now + 5)\n storage.set('2', 'two')\n storage.set('3', 'three', self.now + 10)\n self.now += 6\n self.assertEqual(['2','3'], storage.keys('*'))\n self.assertEqual(['2','3'], list(storage._keys_dict.keys()))", "def test_key_string_values(self, mock_cache):\n ExampleKeyedConfig(left='left', right=u'〉☃', enabled=True, int_field=10, changed_by=self.user).save()\n mock_cache.get.return_value = None\n\n entry = ExampleKeyedConfig.current('left', u'〉☃')\n key = mock_cache.get.call_args[0][0]\n self.assertEqual(entry.int_field, 10)\n mock_cache.get.assert_called_with(key)\n self.assertEqual(mock_cache.set.call_args[0][0], key)\n\n mock_cache.get.reset_mock()\n entry = ExampleKeyedConfig.current(u'left', u'〉☃')\n self.assertEqual(entry.int_field, 10)\n mock_cache.get.assert_called_with(key)", "def check_get_and_set_method_for_key(self, tab, key):\r\n old_value = tab[key]\r\n new_value = 'New Value'\r\n tab[key] = new_value\r\n self.assertEquals(tab[key], new_value)\r\n tab[key] = old_value\r\n self.assertEquals(tab[key], old_value)", "def testGetKey(self):\n\n os.environ['APPLICATION_ID'] = ''\n assert memcache_stub.getKey('bar') == 'YmFy'\n assert (memcache_stub.getKey('b', namespace='a') == 'YS5i')\n os.environ['APPLICATION_ID'] = 'app'\n assert (memcache_stub.getKey('b', namespace='a') == 'YXBwLmEuYg==')\n del os.environ['APPLICATION_ID']\n memcache.set('counter', 0, namespace='me')\n assert memcache.get('counter', namespace='me') == 0", "def get(self, key, lock):\n raise NotImplementedError()", "def test_basic_user_foo():\n\n u = User(_id='joe',\n name='Joe Bar',\n gpg=\"8F049AD82C92066C7352D28A7B585B30807C2A87\",\n email=\"[email protected]\")\n\n assert 'joe' == u.save()\n u.save()\n\n joe = User.get_by_email('[email protected]')\n\n joe.pop('updated_at')\n u.pop('updated_at')\n\n assert joe == u\n\n joe = User.get_by_key(\"8F049AD82C92066C7352D28A7B585B30807C2A87\")\n\n joe.pop('updated_at')\n\n assert joe == u\n\n try:\n joe = User.get_by_key(\"foo\")\n assert True is False, \"KeyCheck failed\"\n except KeyError:\n pass", "def lookup(self, key):", "def test_redis_key(self):\n\n generated = self.feature_test._get_redis_key()\n expected = \"feature.1.testing\"\n self.assertEqual(generated, expected)", "def update(self, key):\n return self.state", "def get(self, key):\n raise NotImplementedError", "def test_get(schema, schemas, key, expected_value):\n returned_artifacts = artifacts.model.get(schemas, schema)\n\n assert getattr(returned_artifacts, key) == expected_value", "def test_set(self):\n storage = Storage()\n items_to_add = {'test_s': 'hello',\n 'test_list': [1,2,'three'],\n 'test_dict': {1:'one', 2:'two'}}\n moes = {'test_list': time.time() + 5}\n for key in 
items_to_add.keys():\n storage.set(key, items_to_add[key], moes.get(key))\n\n self.assertEqual(items_to_add, storage._keys_dict, 'Added keys are wrong.')\n self.assertEqual(moes, storage._moe_dict, 'Added moes are wrong.')" ]
[ "0.77135766", "0.7143771", "0.7143771", "0.700232", "0.697707", "0.6917543", "0.68096644", "0.68030596", "0.68010265", "0.679268", "0.6750709", "0.6750439", "0.66834974", "0.66759515", "0.66249615", "0.6555398", "0.65309304", "0.64803517", "0.64782", "0.6461552", "0.6438102", "0.64219856", "0.64197963", "0.64195305", "0.6352578", "0.63313735", "0.62883294", "0.62579846", "0.6257703", "0.62494504" ]
0.77496535
0
Tests get()'s handling of failed file writing
def test_get_write_fail(self):
    os.chmod(self.test_key_filename, 0400)

    self.assertRaises(IOError, self.key_gen.get)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_read_fail(self):\n file_handler = open(self.test_key_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_key_filename, 000)\n\n self.assertRaises(IOError, self.key_gen.get)", "def test_cannot_write_file(self):\n self.api.write_data('/some-fake/path/to-create-file/', 'some-string')", "def test_get_all_read_fail(self):\n file_handler = open(self.test_task_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_task_filename, 000)\n\n self.assertRaises(IOError, self.task_storage.get_all)", "def test_add_read_fail(self):\n file_handler = open(self.test_task_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_task_filename, 000)\n\n self.assertRaises(IOError, self.task_storage.add, self.my_task)", "def test_find_read_fail(self):\n file_handler = open(self.test_task_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_task_filename, 000)\n\n self.assertRaises(IOError, self.task_storage.find, self.my_task)", "def test_download_write_asset_item_error(self, mock_get):\n # Arrange\n mock_resp = MockResponse({}, 200, content=Exception(\"boom\"))\n mock_get.return_value = mock_resp\n m = mock_open()\n\n with patch(\"__main__.open\", m, create=True):\n\n # Act\n abc = download_write_collection_item_asset(\"dumy/image/url\", \"foo\")\n\n # Assert\n self.assertEquals(abc, False)", "def test_update_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.update, self.my_task)", "def test_malformed(self):\n fdesc, fname = tempfile.mkstemp()\n tfile = os.fdopen(fdesc, 'w')\n tfile.write(self.file_str2)\n tfile.close()\n assert_raises(Exception, grades.writers.GradesFile, fname)\n os.unlink(fname)", "def test_file_open_bug():\n \n value = Value('test', context, 'reentrant_test', clsmap['file'], data_dir='./cache')\n \n try:\n os.remove(value.namespacemanager.file)\n except OSError:\n pass\n \n value.set_value(\"x\")\n\n f = open(value.namespacemanager.file, 'w')\n f.write(\"BLAH BLAH BLAH\")\n f.close()\n \n # TODO: do we have an assertRaises() in nose to use here ?\n try:\n value.set_value(\"y\")\n assert False\n except:\n pass\n \n _synchronizers.clear()\n context.clear()\n value = Value('test', context, 'reentrant_test', clsmap['file'], data_dir='./cache')\n\n # TODO: do we have an assertRaises() in nose to use here ?\n try:\n value.set_value(\"z\")\n assert False\n except:\n pass", "def test_add_write_fail(self):\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.add, self.my_task)", "def test_write_to_bug_file_if_good(self):\n mock = Mock(return_value=3)\n\n @write_error_to_file\n def everything_works_without_exceptions():\n mock()\n\n everything_works_without_exceptions()\n self.assertFalse(os.path.isfile(LOGFILENAME))", "def test_file_append_missing_file(self):\n with (self.assertRaises(IOError)):\n FileWriter(self.bogus_path).append(self.ascii_string)", "def test_writing(self):\n with contextlib.closing(logfile.LogFile(self.name, self.dir)) as log:\n log.write(\"123\")\n log.write(\"456\")\n log.flush()\n log.write(\"7890\")\n\n with open(self.path) as f:\n self.assertEqual(f.read(), \"1234567890\")", "def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)", "def test_atomic_failure(self):\n with TemporaryDirectory() as tmp:\n fp = 
os.path.join(tmp, \"asdf.txt\")\n\n # raise fake error while writing file atomically\n with self.assertRaises(FakeFileFailure):\n with atomic_write(fp, \"w\") as f:\n tmpfile = f.name\n assert os.path.exists(tmpfile)\n raise FakeFileFailure()\n\n # ensure both the temp and destination files do not exist\n assert not os.path.exists(tmpfile)\n assert not os.path.exists(fp)", "def test_writing(self):\n with contextlib.closing(RiggedDailyLogFile(self.name, self.dir)) as log:\n log.write(\"123\")\n log.write(\"456\")\n log.flush()\n log.write(\"7890\")\n\n with open(self.path) as f:\n self.assertEqual(f.read(), \"1234567890\")", "def testRetrieveStateFile(self):\n # Test call was succesful\n self.assertEqual(resource_manager.RetrieveResourceState(), {})\n\n # Test file was created\n self.assertTrue(os.path.exists(config.RESOURCE_FILE))\n\n # Test bad resource file\n with open(config.RESOURCE_FILE, 'w') as fh:\n fh.write(\"blah\")\n fh.close()\n self.assertRaises(TurbiniaException, resource_manager.RetrieveResourceState)\n os.remove(config.RESOURCE_FILE)", "def test_write_bug_to_file_if_exception(self):\n\n mock = Mock(side_effect=KeyError)\n\n @write_error_to_file\n def error_raising():\n mock()\n\n error_raising()\n self.assertRaises(KeyError)\n self.assertTrue(os.path.isfile(LOGFILENAME))\n data_in_log_file = open(LOGFILENAME, mode=\"r\").read()\n self.assertIn('There was an exception in error_raising', data_in_log_file)\n self.assertIn('Traceback', data_in_log_file)\n self.assertIn('KeyError', data_in_log_file)", "def test_file_error(self):\n my_reader = DataSetReader()\n covid_list = CovidCase.objects.all()\n\n with self.assertRaises(IOError):\n my_reader.writeFile(covid_list, \"Not_A_File.csv\")", "def testFileOutSetException(self):\n def testFileOut():\n self.node.file_out = '../NewFile.ccc'\n\n self.assertRaises(\n AttributeError,\n testFileOut\n )", "def test_delete_write_fail(self):\n self.task_storage.add(self.my_task)\n os.chmod(self.test_task_filename, 0400)\n\n self.assertRaises(IOError, self.task_storage.delete, self.my_task.key)", "def test_invalid_write(self, mock_progress):\n mock_status = \"status\"\n mock_message = \"message\"\n stub_dir_status = self.StubDirectoryStatus()\n # mock main call to test\n mock_progress.side_effect = progress.exceptions.DirectoryError(\"\", \"\")\n # run function\n with self.assertRaises(progress.exceptions.DirectoryError):\n upload_helpers._set_and_write_directory_status(stub_dir_status, mock_status, mock_message)", "def test_incorrect_input():\n content = 'hi'\n filename = {}\n\n with pytest.raises(TypeError):\n write_file(content, filename)\n\n content = {}\n filename = 'hi'\n\n with pytest.raises(TypeError):\n write_file(content, filename)", "def testFileOutSetException(self):\n def testFileOut():\n self.cc.file_out = '../NewFile.cc'\n\n self.assertRaises(\n AttributeError,\n testFileOut\n )", "def test_is_not_google_file(self):\r\n bad_file = StringIO.StringIO()\r\n bad_file.write('failing tests please')", "def test_geturl_purpose(self):\n self.fs.create('foo')\n with self.assertRaises(errors.NoURL):\n self.fs.geturl('foo', '__nosuchpurpose__')", "def test_download_image(self, mock_get):\n\n # Test the good url first\n image_url = self.test_data[\"good_image_url\"][\"url\"]\n image_data = self.test_data[\"good_image_url\"][\"image_data\"]\n\n mock_get.return_value = self._build_mock_response(iter_content = [bytes.fromhex(image_data)])\n\n with tempfile.TemporaryDirectory() as dir_name: \n full_filename = 
self.retriever._download_image(dir_name, image_url)\n with open(full_filename, \"rb\") as read_back_file:\n chunk = read_back_file.read(100)\n\n self.assertEqual(bytes.fromhex(image_data), \\\n chunk, msg = \"For the 'good' image URL, the image data written must match the test data\")\n\n # Test the url that missing file name\n image_url = self.test_data[\"bad_image_url\"][\"url\"]\n image_data = self.test_data[\"bad_image_url\"][\"image_data\"]\n\n mock_get.return_value = self._build_mock_response(iter_content = [bytes.fromhex(image_data)])\n\n with tempfile.TemporaryDirectory() as dir_name:\n with self.assertRaises(ValueError, msg = \"URLs without file name in them must raise an exception\"):\n full_filename = self.retriever._download_image(dir_name, image_url)\n\n # Test the rection to a HTTP error\n image_url = self.test_data[\"good_image_url\"][\"url\"]\n image_data = self.test_data[\"good_image_url\"][\"image_data\"]\n\n mock_get.return_value = self._build_mock_response(status = 500, raise_for_status = HTTPError('Server-side error'))\n\n with tempfile.TemporaryDirectory() as dir_name: \n with self.assertRaises(Exception, msg = \"HTTP errors must raise an exception\"):\n full_filename = self.retriever._download_image(dir_name, image_url)", "def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))", "def testReadConfigFileInvalid(self):\n path = os.path.join(self.tempdir, 'foo.json')\n osutils.WriteFile(path, 'invalid contents')\n\n with self.assertRaises(workspace_lib.ConfigFileError):\n workspace_lib.ReadConfigFile(path)", "def _RaiseIfNotWritable(self):\n if not self._storage_file:\n raise IOError('Unable to write to closed storage writer.')" ]
[ "0.7072801", "0.6922023", "0.6912302", "0.66762865", "0.65602356", "0.6547754", "0.6545857", "0.63780993", "0.63550174", "0.6294372", "0.6274557", "0.62691563", "0.6246302", "0.61645716", "0.6151025", "0.61481947", "0.613047", "0.60995805", "0.6093023", "0.60695434", "0.6060338", "0.6017432", "0.59992063", "0.59610647", "0.5930424", "0.5927068", "0.5919983", "0.5903373", "0.58742595", "0.5856897" ]
0.70491755
1
Get information about past meeting recording
async def fetch_recording(meeting_id: str) -> dict:
    return await zoom_service.zoom.get_recording(meeting_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_past_meeting_details(self):\n pass", "def _parse_past_meetings(self, response):\n meetings = []\n for item in response.css('table.table-striped tbody tr'):\n dt_str = item.css('time::text').extract_first()\n meetings.append({\n 'start': {\n 'date': datetime.strptime(dt_str, '%b %d, %Y').date()\n },\n 'documents': self._parse_past_documents(item),\n })\n return meetings", "def get_exam_recording():\n try:\n # Users can get their own exam recordings, if they're an examiner they can get all of them\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n getting_own_results = is_self(user_id)\n\n if examiner or getting_own_results:\n results_query = db.session.query(User, Exam, ExamRecording, func.count(ExamWarning.exam_recording_id)).\\\n filter(User.user_id==ExamRecording.user_id).\\\n filter(Exam.exam_id==ExamRecording.exam_id).\\\n outerjoin(ExamWarning, ExamWarning.exam_recording_id==ExamRecording.exam_recording_id).\\\n group_by(ExamRecording.exam_recording_id)\n \n results, next_page_exists = filter_results(results_query, ExamRecording)\n\n exam_recordings = []\n in_progress = request.args.get('in_progress', default=None, type=int)\n if in_progress is not None: in_progress = in_progress==1\n for u, e, er, ew_count in results:\n updated = False\n duration = e.duration\n # If exam recording has not ended (or does not have a time_ended value)\n if er.time_started is not None and er.time_ended is None:\n # Check if the time now has surpassed the latest possible finish time (recording start time + exam duration)\n latest_finish_time = er.time_started + timedelta(hours=duration.hour, minutes=duration.minute)\n if latest_finish_time <= datetime.utcnow():\n # If so, set the value to latest possible time\n updated = True\n er.time_ended = latest_finish_time\n # Check so that when querying by in_progress = 1 / True, we dont include recordings that added time_ended to\n if not (updated and in_progress):\n exam_recordings.append({\n 'exam_recording_id':er.exam_recording_id,\n 'user_id':u.user_id,\n 'first_name':u.first_name,\n 'last_name':u.last_name,\n 'exam_id':e.exam_id,\n 'exam_name':e.exam_name,\n 'login_code':e.login_code,\n 'duration':e.duration.strftime(\"%H:%M:%S\"),\n 'subject_id':e.subject_id,\n 'time_started':datetime_to_str(er.time_started),\n 'time_ended':datetime_to_str(er.time_ended),\n 'video_link':er.video_link,\n 'warning_count':ew_count,\n 'document_link': e.document_link\n })\n db.session.commit()\n\n return jsonify({'exam_recordings':exam_recordings, 'next_page_exists':next_page_exists}), 200\n \n return jsonify({'user_id': user_id, 'message': \"access denied, invalid user.\" }), 403\n except (Exception, exc.SQLAlchemyError) as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500", "def test_list_past_meeting_polls(self):\n pass", "def test_past_meeting_participants(self):\n pass", "def test_past_meetings(self):\n pass", "def test_list_past_meeting_files(self):\n pass", "def extract_meeting_info():\n meeting = flask.request.get_json()\n\n try:\n meeting_type = str(meeting['meeting_type'])\n except KeyError as e:\n flask.abort(400, f'{e} not in JSON body')\n\n try:\n meeting_date = datetime.date.fromisoformat(meeting['meeting_date'])\n except KeyError as e:\n flask.abort(400, f'{e} not in JSON body')\n except ValueError as e:\n flask.abort(400, str(e))\n\n try:\n attendees = list(meeting['attendees'])\n except KeyError as e:\n flask.abort(400, f'{e} not in JSON body')\n except TypeError as e:\n flask.abort(400, 
'\\'attendees\\' must be an array')\n\n return meeting_type, meeting_date, attendees", "def test_api_livesession_read_attendances_no_timeline_video(self):\n\n started = int(to_timestamp(timezone.now())) - 10\n\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\"started_at\": str(int(to_timestamp(timezone.now())) - 10)},\n live_type=JITSI,\n )\n\n livesession = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"[email protected]\",\n live_attendance={\n started: {\"muted\": 1},\n started + 3: {\"onStage\": 0},\n started + 6: {\"muted\": 0},\n },\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n livesession.refresh_from_db()\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n\n with mock.patch.object(\n Video, \"get_list_timestamps_attendances\", return_value={}\n ):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(\n response.json(),\n {\n \"count\": 1,\n \"next\": None,\n \"previous\": None,\n \"results\": [\n {\n \"id\": str(livesession.id),\n \"display_name\": \"[email protected]\",\n \"is_registered\": False,\n \"live_attendance\": {},\n }\n ],\n },\n )", "def displayRecordedVideo(request, flightName=None, sourceShortName=None, time=None):\n \"\"\"\n Returns first segment of all sources that are part of a given episode.\n Used for both playing back videos from active episode and also\n for playing videos associated with each note.\n \"\"\"\n\n ctx = getVideoContext(request, flightName, sourceShortName, time)\n active = ctx['isLive']\n\n theTemplate = 'xgds_video/map_recorded_playbacks.html'\n if active:\n theTemplate = 'xgds_video/map_active_playbacks.html'\n\n return render(request,\n theTemplate,\n ctx)", "def meeting_info(meeting_id, conn):\n with conn.cursor() as cur:\n cur.execute(\n 'SELECT meeting_type, meeting_date '\n 'FROM meetings '\n 'WHERE meeting_id = %s',\n (meeting_id,)\n )\n\n if cur.rowcount == 0:\n return None\n\n (meeting_type, meeting_date) = cur.fetchone()\n\n with conn.cursor() as cur:\n cur.execute(\n 'SELECT uniqname '\n 'FROM attendance '\n 'WHERE meeting_id = %s',\n (meeting_id,)\n )\n attendees = [uniqname for (uniqname,) in cur.fetchall()]\n\n return {\n 'id': meeting_id,\n 'meeting_type': meeting_type,\n 'meeting_date': meeting_date.isoformat(),\n 'attendees': attendees\n }", "def process_meeting(self, meeting):\n return [meeting]", "def get_history(hdr):\n return hdr['HISTORY']", "def get():\n\n if not in_agent_mode():\n return responses.bad_request_resp(\n \"Episode data only recorded when in Agent mode\"\n )\n\n current_ep_file = bb_logging.EP_FILE\n if not current_ep_file:\n return responses.bad_request_resp(\"No episode being recorded\")\n\n cwd = os.getcwd()\n full_log_dir = os.path.join(cwd, bb_logging.INST_LOG_DIR)\n full_ep_file = os.path.join(cwd, current_ep_file)\n\n data = {\n \"inst_id\": bb_logging.INSTANCE_ID,\n \"cur_ep_id\": bb_logging.EP_ID,\n \"cur_ep_file\": full_ep_file,\n \"log_dir\": full_log_dir,\n }\n\n return responses.ok_resp(data)", "def get_recent_obsid():\n#\n#--- extract a list of the last two weeks of acis observations\n#\n stop = time.strftime('%Y:%j:%H:%M:%S', time.gmtime())\n stop = Chandra.Time.DateTime(stop).secs\n start = stop - 86400 * 14\n\n a_list = make_obsid_list(start, stop)\n\n 
return a_list", "def get_talk_between_time(self, event, room, startTime, endTime):\r\n query = QtSql.QSqlQuery(\"SELECT Id, Date FROM presentations \\\r\n WHERE Event='%s' AND Room='%s' \\\r\n AND Date BETWEEN '%s' \\\r\n AND '%s' ORDER BY Date ASC\" % (event, room, startTime, endTime))\r\n query.next()\r\n if query.isValid():\r\n return query.value(0)\r\n else:\r\n return None", "def get(self):\n\n response = openvidu().list_recordings()\n\n if response.status_code == 200:\n return response.json()[\"items\"]\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)", "def recording_data(self):\n return self._get('recording/data')", "def getActivityRecords(self):\n return self._ActivityRecords", "def recorded_messages(self):\n messages = []\n for time in sorted(self.reception_records):\n messages.extend(self.reception_records[time])\n return messages", "def get_experiment_speaker_info(db_root):\n seen_speakers = ['VCTK-speaker-p225-female',\n 'VCTK-speaker-p226-male',\n 'VCTK-speaker-p227-male',\n 'VCTK-speaker-p228-female',\n 'VCTK-speaker-p229-female',\n 'VCTK-speaker-p230-female',\n 'VCTK-speaker-p231-female',\n 'VCTK-speaker-p232-male',\n 'VCTK-speaker-p233-female',\n 'VCTK-speaker-p234-female',\n 'VCTK-speaker-p236-female',\n 'VCTK-speaker-p237-male',\n 'VCTK-speaker-p238-female',\n 'VCTK-speaker-p239-female',\n 'VCTK-speaker-p240-female',\n 'VCTK-speaker-p241-male',\n 'VCTK-speaker-p243-male',\n 'VCTK-speaker-p244-female',\n 'VCTK-speaker-p245-male',\n 'VCTK-speaker-p246-male',\n 'VCTK-speaker-p247-male',\n 'VCTK-speaker-p248-female',\n 'VCTK-speaker-p249-female',\n 'VCTK-speaker-p250-female',\n 'VCTK-speaker-p251-male',\n 'VCTK-speaker-p252-male',\n 'VCTK-speaker-p253-female',\n 'VCTK-speaker-p254-male',\n 'VCTK-speaker-p255-male',\n 'VCTK-speaker-p256-male',\n 'VCTK-speaker-p257-female',\n 'VCTK-speaker-p258-male',\n 'VCTK-speaker-p259-male',\n 'VCTK-speaker-p260-male',\n 'VCTK-speaker-p261-female',\n 'VCTK-speaker-p262-female',\n 'VCTK-speaker-p263-male',\n 'VCTK-speaker-p264-female',\n 'VCTK-speaker-p265-female',\n 'VCTK-speaker-p266-female',\n 'VCTK-speaker-p267-female',\n 'VCTK-speaker-p268-female',\n 'VCTK-speaker-p269-female',\n 'VCTK-speaker-p270-male',\n 'VCTK-speaker-p271-male',\n 'VCTK-speaker-p272-male',\n 'VCTK-speaker-p273-male',\n 'VCTK-speaker-p274-male',\n 'VCTK-speaker-p275-male',\n 'VCTK-speaker-p276-female',\n 'VCTK-speaker-p277-female',\n 'VCTK-speaker-p278-male',\n 'VCTK-speaker-p279-male',\n 'VCTK-speaker-p280-female',\n 'VCTK-speaker-p281-male',\n 'VCTK-speaker-p282-female',\n 'VCTK-speaker-p283-female',\n 'VCTK-speaker-p284-male',\n 'VCTK-speaker-p285-male',\n 'VCTK-speaker-p286-male',\n 'VCTK-speaker-p287-male',\n 'VCTK-speaker-p288-female',\n 'VCTK-speaker-p292-male',\n 'VCTK-speaker-p293-female',\n 'VCTK-speaker-p294-female',\n 'VCTK-speaker-p295-female',\n 'VCTK-speaker-p297-female',\n 'VCTK-speaker-p298-male',\n 'VCTK-speaker-p299-female',\n 'VCTK-speaker-p300-female',\n 'VCTK-speaker-p301-female',\n 'VCTK-speaker-p302-male',\n 'VCTK-speaker-p303-female',\n 'VCTK-speaker-p304-male',\n 'VCTK-speaker-p305-female',\n 'VCTK-speaker-p306-female',\n 'VCTK-speaker-p307-female',\n 'VCTK-speaker-p308-female',\n 'VCTK-speaker-p310-female',\n 'VCTK-speaker-p311-male',\n 'VCTK-speaker-p312-female',\n 'VCTK-speaker-p313-female',\n 'VCTK-speaker-p314-female',\n 'VCTK-speaker-p316-male',\n 'VCTK-speaker-p317-female',\n 'VCTK-speaker-p318-female',\n 'VCTK-speaker-p323-female',\n 
'VCTK-speaker-p326-male',\n 'VCTK-speaker-p329-female',\n 'VCTK-speaker-p330-female',\n 'VCTK-speaker-p333-female',\n 'VCTK-speaker-p334-male',\n 'VCTK-speaker-p335-female',\n 'VCTK-speaker-p336-female',\n 'VCTK-speaker-p339-female',\n 'VCTK-speaker-p340-female',\n 'VCTK-speaker-p341-female',\n 'VCTK-speaker-p343-female',\n 'VCTK-speaker-p345-male',\n 'VCTK-speaker-p347-male',\n 'VCTK-speaker-p351-female',\n 'VCTK-speaker-p360-male',\n 'VCTK-speaker-p361-female',\n 'VCTK-speaker-p362-female',\n 'VCTK-speaker-p363-male',\n 'VCTK-speaker-p364-male',\n 'VCTK-speaker-p374-male',\n 'VCTK-speaker-p376-male']\n\n # speaker index list for training and validation\n n_speaker = len(seen_speakers)\n\n # take all speakers in train and validation!!!\n train_speakers = seen_speakers\n valid_speakers = seen_speakers\n print('number of VCTK speakers = %d' % n_speaker)\n\n sp2id = {sp: i for i, sp in enumerate(seen_speakers)}\n id2sp = {i: sp for i, sp in enumerate(seen_speakers)}\n\n return seen_speakers, sp2id, id2sp", "def getCurrentObservation(self):\n\n if (len(self.observationHistory) == 0):\n return None\n\n return self.observationHistory[-1]", "def current(self):\n\t\treturn self.reading_set.latest(field_name='time')", "def get_agenda_without_datetime(message):\n meetings = database.get_meetings(message.chat_id)\n keyboard = []\n\n for meeting in meetings:\n if meeting.agenda:\n keyboard.append(\n [\n InlineKeyboardButton(\n meeting.formatted_datetime(),\n callback_data=f\"{consts.GET_AGENDA},{meeting.meeting_id}\",\n )\n ]\n )\n\n if keyboard:\n reply_markup = InlineKeyboardMarkup(keyboard)\n message.reply_text(\n \"Please select the meeting that you'll like to retrieve the agenda.\",\n reply_markup=reply_markup,\n )\n else:\n message.reply_text(\"No meeting agenda found.\")", "def past_shows(self):\n upcoming_shows = Show.query.filter(Show.start_time < datetime.now(), Show.artist_id == self.id).all()\n return [show.serialized_data for show in upcoming_shows]", "def getTimeframedData(self, website, timeframe, currentTime=time.time()):\n timeList = list(website.log.keys())\n # inside the dic from most recent to most ancient\n # reverse order\n # list of time of requests\n inFrame = []\n # getting the times within the timeframe\n for listind in range(len(timeList)):\n if (currentTime-timeList[len(timeList)-1-listind] <= timeframe):\n inFrame.append(timeList[len(timeList)-1-listind])\n # Indicators\n # Max\n maxTime = self.computeMaxResponseTime(website, inFrame)\n # Avg\n avgTime = self.computeAvgResponsetime(website, inFrame)\n # Availability\n availability = self.computeAvailability(website, inFrame)\n # Status\n status = self.computeStatus(website, currentTime)\n\n # Alert checking with 120 timeframe\n if (timeframe == 120):\n self.checkForIsDownAlert(website= website, availability= availability)\n self.checkForIsUpAlert(website=website, availability=availability)\n\n\n return {'website': website, 'frame': timeframe,'time': currentTime, 'indicators': {'maxTime': maxTime, 'avgTime': avgTime, 'availability': availability, 'status': status}}", "def info_current_event_json():\n event = Event.query.filter_by(is_current=True).first() or \\\n Event.query.order_by(Event.id.desc()).first_or_404()\n timeuntil = timesince(event.countdown, until=True)\n return jsonify(event=event.data, timeuntil=timeuntil)", "def _parse_combined_meetings(self, response):\n meetings = self._parse_past_meetings(response)\n meeting_dates = [meeting['start']['date'] for meeting in meetings]\n\n for meeting in 
response.meta.get('upcoming', []):\n if meeting['start']['date'] not in meeting_dates:\n meetings.append(meeting)\n\n for meeting in meetings:\n item = {\n '_type': 'event',\n 'name': 'Board of Commissioners',\n 'event_description': '',\n 'classification': BOARD,\n 'start': {\n 'date': meeting['start']['date'],\n 'time': time(8, 30),\n 'note': 'Times may change based on notice',\n },\n 'end': {\n 'date': meeting['start']['date'],\n 'time': time(13, 0),\n 'note': 'Times may change based on notice',\n },\n 'all_day': False,\n 'location': {\n 'address': '4859 S Wabash Chicago, IL 60615',\n 'name': 'Charles A. Hayes FIC',\n 'neighborhood': '',\n },\n 'documents': meeting['documents'],\n 'sources': meeting.get('sources', [{\n 'url': response.url,\n 'note': ''\n }]),\n }\n item['status'] = meeting.get('status', self._generate_status(item))\n item['id'] = self._generate_id(item)\n yield item", "def get_last_observation(data):\n date, time = data[0], data[1]\n \n last_observation = \"{} at {} GMT\".format(date, time)\n return last_observation", "def get_exercise_history():\n user_id = session.get(\"email\")\n\n history = fm.full_attempt_history(user_id)\n\n msg = \"Attempt history found for user: {}. {} records.\"\\\n .format(user_id, len(history))\n app.logger.info(msg)\n return jsonify({\"history\": history})" ]
[ "0.61916536", "0.5642174", "0.5593307", "0.5527492", "0.5521462", "0.5513049", "0.54530025", "0.53965956", "0.5392502", "0.5381713", "0.536658", "0.52653486", "0.5265268", "0.5221127", "0.5218197", "0.52050954", "0.51843894", "0.51745516", "0.5103511", "0.5078141", "0.5072908", "0.5058744", "0.5046621", "0.5046135", "0.5041831", "0.5041015", "0.5039388", "0.5037217", "0.50359315", "0.50022537" ]
0.58373797
1
Create a new incident priority.
def create_incident_priority( *, db_session: Session = Depends(get_db), incident_priority_in: IncidentPriorityCreate, ): incident_priority = create(db_session=db_session, incident_priority_in=incident_priority_in) return incident_priority
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(\n *,\n db_session,\n incident_priority: str,\n incident_type: str,\n reporter_email: str,\n title: str,\n status: str,\n description: str,\n tags: List[dict],\n visibility: str = None,\n) -> Incident:\n # We get the incident type by name\n if not incident_type:\n incident_type = incident_type_service.get_default(db_session=db_session)\n if not incident_type:\n raise Exception(\"No incident type specified and no default has been defined.\")\n else:\n incident_type = incident_type_service.get_by_name(\n db_session=db_session, name=incident_type[\"name\"]\n )\n\n # We get the incident priority by name\n if not incident_priority:\n incident_priority = incident_priority_service.get_default(db_session=db_session)\n if not incident_priority:\n raise Exception(\"No incident priority specified and no default has been defined.\")\n else:\n incident_priority = incident_priority_service.get_by_name(\n db_session=db_session, name=incident_priority[\"name\"]\n )\n\n if not visibility:\n visibility = incident_type.visibility\n\n tag_objs = []\n for t in tags:\n tag_objs.append(tag_service.get_or_create(db_session=db_session, tag_in=TagCreate(**t)))\n\n # We create the incident\n incident = Incident(\n title=title,\n description=description,\n status=status,\n incident_type=incident_type,\n incident_priority=incident_priority,\n visibility=visibility,\n tags=tag_objs,\n )\n db_session.add(incident)\n db_session.commit()\n\n event_service.log(\n db_session=db_session,\n source=\"Dispatch Core App\",\n description=\"Incident created\",\n incident_id=incident.id,\n )\n\n # We add the reporter to the incident\n reporter_participant = participant_flows.add_participant(\n reporter_email, incident.id, db_session, ParticipantRoleType.reporter\n )\n\n # We resolve the incident commander email\n incident_commander_email = resolve_incident_commander_email(\n db_session,\n reporter_email,\n incident_type.name,\n \"\",\n title,\n description,\n incident_priority.page_commander,\n )\n\n if reporter_email == incident_commander_email:\n # We add the role of incident commander the reporter\n participant_role_service.add_role(\n participant_id=reporter_participant.id,\n participant_role=ParticipantRoleType.incident_commander,\n db_session=db_session,\n )\n else:\n # We create a new participant for the incident commander and we add it to the incident\n participant_flows.add_participant(\n incident_commander_email,\n incident.id,\n db_session,\n ParticipantRoleType.incident_commander,\n )\n\n return incident", "def createPriority(self):\n return _libsbml.Event_createPriority(self)", "async def setIncident_priority(\n self,\n eventID: str,\n incidentNumber: int,\n priority: IncidentPriority,\n author: str,\n ) -> None:", "def update_incident_priority(\n *,\n db_session: Session = Depends(get_db),\n incident_priority_id: int,\n incident_priority_in: IncidentPriorityUpdate,\n):\n incident_priority = get(db_session=db_session, incident_priority_id=incident_priority_id)\n if not incident_priority:\n raise HTTPException(\n status_code=404, detail=\"The incident priority with this id does not exist.\"\n )\n\n incident_priority = update(\n db_session=db_session,\n incident_priority=incident_priority,\n incident_priority_in=incident_priority_in,\n )\n return incident_priority", "def insert(self, pri):\n heaps = self.priorities\n if pri > 10 or pri < 1:\n raise ValueError(\n 'Priority must be between 1 (high) - 10 (low)'\n )\n if pri not in heaps.keys():\n self._create_priorities(pri)\n\n priority = heaps.get(pri)\n 
priority.push(self._order)\n self._order += 1", "def _create_new_prio_gen ( self ):\n return roverlay.util.counter.SkippingPriorityGenerator (\n 10, skip=roverlay.static.hookinfo.get_priorities()\n )", "def test_task_priority(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n\r\n # By default, tasks without priority should be ordered by task.id (FIFO)\r\n tasks = db.session.query(Task).filter_by(app_id=1).order_by('id').all()\r\n res = self.app.get('api/app/1/newtask')\r\n task1 = json.loads(res.data)\r\n # Check that we received a Task\r\n err_msg = \"Task.id should be the same\"\r\n assert task1.get('id') == tasks[0].id, err_msg\r\n\r\n # Now let's change the priority to a random task\r\n import random\r\n t = random.choice(tasks)\r\n # Increase priority to maximum\r\n t.priority_0 = 1\r\n db.session.add(t)\r\n db.session.commit()\r\n # Request again a new task\r\n res = self.app.get('api/app/1/newtask')\r\n task1 = json.loads(res.data)\r\n # Check that we received a Task\r\n err_msg = \"Task.id should be the same\"\r\n assert task1.get('id') == t.id, err_msg\r\n err_msg = \"Task.priority_0 should be the 1\"\r\n assert task1.get('priority_0') == 1, err_msg", "def setPriority(self, p):\n self.priority = p", "def _create_priorities(self, pri):\n heaps = self.priorities\n heaps[pri] = MinBinaryHeap()", "def test_priority_add_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('priority add new_priority')\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def create_an_incident(self):\n sql = \"\"\"INSERT INTO incidences (createdOn,\\\n createdBy,\\\n type,\\\n location,\\\n status,\\\n comment)\\\n VALUES(\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\')\n RETURNING id\"\"\" % (\n self.createdOn,\n self.createdBy,\n self.incidence_type,\n self.location,\n self.status,\n self.comment\n )\n conn = self.db_obj.con\n curr = conn.cursor()\n curr.execute(sql, self)\n conn.commit()", "def _set_priority(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"priority must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def create_stp_instance(self, instance, priority):\n pass", "async def createIncident(self, incident: Incident, author: str) -> Incident:", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, 
restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def test_post_cve_id_reserve_priority(reg_user_headers):\n res = requests.post(\n f'{env.AWG_BASE_URL}{CVE_ID_URL}',\n headers=reg_user_headers,\n params={\n 'amount': '1',\n 'cve_year': f'{utils.CURRENT_YEAR}',\n 'short_name': reg_user_headers['CVE-API-ORG']\n }\n )\n ok_response_contains(res, f'CVE-{utils.CURRENT_YEAR}-')\n assert json.loads(res.content.decode())['cve_ids']\n assert len(json.loads(res.content.decode())['cve_ids']) == 1\n\n priority_id = json.loads(res.content.decode())['cve_ids'][0]['cve_id']\n assert int(priority_id.split('-')[-1]) < 20000\n for key in json.loads(res.content.decode())['meta'].keys(): # Check that remaining_quota is in response\n assert key == 'remaining_quota'", "def ticket_priority(self) -> \"TicketPriority\":\n return TicketPriority(connection=self)", "def setpriority(pid=None, priority=1):\n\n #import 
win32api,win32process,win32con\n from ctypes import windll\n\n priorityclasses = [0x40, # IDLE_PRIORITY_CLASS,\n 0x4000, # BELOW_NORMAL_PRIORITY_CLASS,\n 0x20, # NORMAL_PRIORITY_CLASS,\n 0x8000, # ABOVE_NORMAL_PRIORITY_CLASS,\n 0x80, # HIGH_PRIORITY_CLASS,\n 0x100, # REALTIME_PRIORITY_CLASS\n ]\n if pid is None:\n pid = windll.kernel32.GetCurrentProcessId()\n handle = windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, True, pid)\n windll.kernel32.SetPriorityClass(handle, priorityclasses[priority])", "def insert(self, id, priority):\n self.n += 1\n i = self.n\n while i > 1:\n pIdx = int(i/2)\n p = self.elements[pIdx]\n\n if priority > p[PRIORITY]:\n break\n self.elements[i] = list(p)\n self.positions[p[ID]] = 1\n i = pIdx\n\n self.elements[i][ID] = id\n self.elements[i][PRIORITY] = priority\n self.positions[id] = i", "def set_priority(priority=2, pid=None):\n print \"TODO: add os independent support\"\n priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n win32process.BELOW_NORMAL_PRIORITY_CLASS,\n win32process.NORMAL_PRIORITY_CLASS,\n win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n win32process.HIGH_PRIORITY_CLASS,\n win32process.REALTIME_PRIORITY_CLASS]\n if pid == None:\n pid = win32api.GetCurrentProcessId()\n handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n win32process.SetPriorityClass(handle, priorityclasses[priority])", "def get_incident_priority(*, db_session: Session = Depends(get_db), incident_priority_id: int):\n incident_priority = get(db_session=db_session, incident_priority_id=incident_priority_id)\n if not incident_priority:\n raise HTTPException(\n status_code=404, detail=\"The incident priority with this id does not exist.\"\n )\n return incident_priority", "def add(self, item, priority=0) -> None:\n if item in self.entry_finder:\n self.remove(item)\n count = next(self.counter)\n entry = (priority, count, [item])\n self.entry_finder[item] = entry\n heapq.heappush(self.priority_queue, entry)", "def setpriority(self, pid=None, priority=5):\n\t \n\t import win32api,win32process,win32con\n\t \n\t priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n\t win32process.BELOW_NORMAL_PRIORITY_CLASS,\n\t win32process.NORMAL_PRIORITY_CLASS,\n\t win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n\t win32process.HIGH_PRIORITY_CLASS,\n\t win32process.REALTIME_PRIORITY_CLASS]\n\t if pid == None:\n\t pid = win32api.GetCurrentProcessId()\n\t handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n\t win32process.SetPriorityClass(handle, priorityclasses[priority])", "def insert(self, value, priority=2):\n if not isinstance(priority, int):\n raise TypeError(\"Priority must be an integer\")\n if priority in self.priority_queue:\n self.priority_queue[priority].append(value)\n else:\n self.priority_queue[priority] = [value]\n print(self.priority_queue)", "def incident(self, name, requester, priority, status, assignee, description):\n payload = {\n \"incident\":{\n \"name\": name,\n \"requester\": {\"email\": requester},\n \"priority\": priority,\n \"state\": status,\n \"assignee\":{\"email\": assignee},\n \"description\": description\n }\n }\n response = self.session.post(\n \"{0}/incidents.json\".format(self.uri),\n json=payload\n )\n return response.text", "def add_condition(self):\n m = self.get_current_measurement()\n result = PriorityDialog()\n if result.exec_():\n # Update Survey.priority based on the input\n key, val1, val2, weight = result.key, result.val1, result.val2, result.weight\n \n # If the condition is x == val1, determine whether val1 is str or int\n if 
result.type == 'value':\n val1 = get_type(val1)(val1)\n\n # Add the condition to Survey.priority\n arr = np.array([[val1, val2, weight]])\n if key not in m.priority:\n m.priority[key] = np.zeros(shape=(0, 3))\n m.priority[key] = np.append(m.priority[key], arr, axis=0)\n \n self.mgr.changed = True\n \n self.load_conditions()", "def priority(self, priority):\n self._priority = priority", "def add(self, item, priority=0):\n if item in self.set:\n self.remove(item)\n count = next(self.counter)\n entry = [priority, count, item]\n self.set[item] = entry\n hpq.heappush(self.heap, entry)" ]
[ "0.6736081", "0.6539175", "0.65382916", "0.59715176", "0.5838012", "0.5808987", "0.57756734", "0.57389534", "0.5729797", "0.5616003", "0.5595802", "0.559525", "0.5535251", "0.5528951", "0.54703873", "0.54703873", "0.54703873", "0.5454121", "0.54461855", "0.5445957", "0.5444199", "0.53654426", "0.5357505", "0.5320137", "0.5285338", "0.5258109", "0.52361375", "0.5201766", "0.5197745", "0.5187024" ]
0.8211468
0
Update an existing incident priority.
def update_incident_priority( *, db_session: Session = Depends(get_db), incident_priority_id: int, incident_priority_in: IncidentPriorityUpdate, ): incident_priority = get(db_session=db_session, incident_priority_id=incident_priority_id) if not incident_priority: raise HTTPException( status_code=404, detail="The incident priority with this id does not exist." ) incident_priority = update( db_session=db_session, incident_priority=incident_priority, incident_priority_in=incident_priority_in, ) return incident_priority
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def setIncident_priority(\n self,\n eventID: str,\n incidentNumber: int,\n priority: IncidentPriority,\n author: str,\n ) -> None:", "def setPriority(self, p):\n self.priority = p", "def update(self, index, priority=-1):\n if (priority == -1):\n priority = self._max_priority\n elif (priority > self._max_priority):\n self._max_priority = priority\n\n # Search for index\n node = self.findIndex(index)\n\n # Replace with new priority\n diff = priority - node.priority\n node.priority = priority\n\n # Update value\n self._updateValue(node.parent, diff)", "def _update(self, priority, key):\n i = self._index[key]\n item = self._heap[i]\n old_priority = item.priority\n item.priority = priority\n if priority < old_priority:\n self._sift_up(i)\n else:\n self._sift_down(i)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p**self.alpha)", "def increase_priority(self):\n if self._priority > 0:\n self._priority -= 1", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities): self.tree.val_update(i, float(p**self.alpha))", "def priority(self, priority):\n self._priority = priority", "def _update_priority(self, task, prio, worker):\n task.priority = prio = max(prio, task.priority)\n for dep in task.deps or []:\n t = self._state.get_task(dep)\n if t is not None and prio > t.priority:\n self._update_priority(t, prio, worker)", "def update(self, idx: int, new_priority: T.Union[int, float]):\n old_priority, item = self.__heap[idx]\n self.__heap[idx] = (new_priority, item)\n\n if new_priority < old_priority:\n self.__sift_up(idx)\n else:\n self.__sift_down(idx)", "def set_priority(self, priority):\n self.options[\"priority\"] = priority", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"priority must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def delete_and_update_priority(self):\r\n for pbi in PBI.objects.filter(priority__gt=self.priority, project=self.project):\r\n pbi.priority -= 1\r\n pbi.save()\r\n\r\n self.delete()", "def set_priority(self, priority):\n self._priority = priority", "def change_priority(self, elem, prio):\n pos = self.pos[elem]\n currPrio = self.A[pos][1]\n self.A[pos] = (elem, prio)\n if self.cmpFn(prio, currPrio):\n self.insert_loop(pos, pos // 2) # Up heapify\n else:\n self.combine(pos) # Down heapify", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def setpriority(self, pid=None, priority=5):\n\t \n\t import win32api,win32process,win32con\n\t \n\t priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n\t win32process.BELOW_NORMAL_PRIORITY_CLASS,\n\t win32process.NORMAL_PRIORITY_CLASS,\n\t win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n\t win32process.HIGH_PRIORITY_CLASS,\n\t win32process.REALTIME_PRIORITY_CLASS]\n\t if pid == None:\n\t pid 
= win32api.GetCurrentProcessId()\n\t handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n\t win32process.SetPriorityClass(handle, priorityclasses[priority])", "def set_priority(self, priority):\n self.options['priority'] = priority", "def setpriority(pid=None, priority=1):\n\n #import win32api,win32process,win32con\n from ctypes import windll\n\n priorityclasses = [0x40, # IDLE_PRIORITY_CLASS,\n 0x4000, # BELOW_NORMAL_PRIORITY_CLASS,\n 0x20, # NORMAL_PRIORITY_CLASS,\n 0x8000, # ABOVE_NORMAL_PRIORITY_CLASS,\n 0x80, # HIGH_PRIORITY_CLASS,\n 0x100, # REALTIME_PRIORITY_CLASS\n ]\n if pid is None:\n pid = windll.kernel32.GetCurrentProcessId()\n handle = windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, True, pid)\n windll.kernel32.SetPriorityClass(handle, priorityclasses[priority])", "def setPriority(self, *args):\n return _libsbml.Event_setPriority(self, *args)", "def change_priority(self, priority, key):\n index = self.__position[key]\n current = self.__heap[index][0]\n self.__heap[index][0] = priority\n\n if priority > current:\n self.__bubble_down(index)\n else:\n self.__bubble_up(index)", "def priority_update(self, error,batch_index):\n pass", "def set_priority(priority=2, pid=None):\n print \"TODO: add os independent support\"\n priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n win32process.BELOW_NORMAL_PRIORITY_CLASS,\n win32process.NORMAL_PRIORITY_CLASS,\n win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n win32process.HIGH_PRIORITY_CLASS,\n win32process.REALTIME_PRIORITY_CLASS]\n if pid == None:\n pid = win32api.GetCurrentProcessId()\n handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n win32process.SetPriorityClass(handle, priorityclasses[priority])" ]
[ "0.73270506", "0.65260607", "0.64954793", "0.6305196", "0.61920965", "0.61805874", "0.6173031", "0.6173031", "0.6085935", "0.60838604", "0.6056914", "0.6028682", "0.59914374", "0.5985253", "0.5985253", "0.5985253", "0.5980391", "0.5977671", "0.5963382", "0.59602666", "0.5934204", "0.5934204", "0.5934204", "0.5926656", "0.5924749", "0.58984417", "0.58554626", "0.5834776", "0.58052", "0.5792313" ]
0.82435286
0
Get an incident priority.
def get_incident_priority(*, db_session: Session = Depends(get_db), incident_priority_id: int): incident_priority = get(db_session=db_session, incident_priority_id=incident_priority_id) if not incident_priority: raise HTTPException( status_code=404, detail="The incident priority with this id does not exist." ) return incident_priority
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def priority(self) -> int:\n return pulumi.get(self, \"priority\")", "def priority(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"priority\")", "def get_priority(self):\n return self.options[\"priority\"]", "def getPriority(self):\n return self.priority", "def get_priority(self):\n return self._priority", "def get_priority(self):\n return self._priority", "def get_priority(self, item):\n try:\n return self.set[item][0]\n except KeyError:\n print(\"Can't get priority of non-existing item\")", "def get_priority(self):\n return self.options['priority']", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> str:\n return pulumi.get(self, \"priority\")", "def get_priority(self):\n priorities = dict(PRIORITY_CHOICES)\n return priorities.get(self.priority, \"N/A\")", "def get_priority(self):\n return str(self.priority)", "def get_priority(self, elem):\n pos = self.pos[elem]\n return self.A[pos][1]", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def getpriority(self, name):\n\t\tif name not in self:\n\t\t\treturn None\n\t\treturn self.attributes[name].priority", "def getPriority(self, *args):\n return _libsbml.Event_getPriority(self, *args)", "def priority(self):\n return self._pri", "def priority(node):\n return node.priority", "def find_priority(x):\n pat = r\"priority\\s*(\\d*)\"\n result = re.search(pat, str(x), flags=re.IGNORECASE)\n if result:\n return int(result.group(1))", "def getPriority(self):" ]
[ "0.71666795", "0.71666795", "0.71666795", "0.71666795", "0.7143092", "0.71222526", "0.71222526", "0.70545536", "0.7038056", "0.7033286", "0.7033286", "0.7011243", "0.6980747", "0.6961434", "0.6961434", "0.6961434", "0.6961434", "0.69483155", "0.6813188", "0.6794462", "0.67939043", "0.67374575", "0.67374575", "0.67374575", "0.672802", "0.67048216", "0.670217", "0.6637103", "0.65574414", "0.6541855" ]
0.80712813
0
"Get run value" Optimization step, run_mem is the memory when running the program with a certain argument, and to get a value we first check if we have it in the running memory, and if not we get it from the "immutable" saved state (self.program)
def __getitem__(self, pos): try: return self.run_mem[pos] except KeyError: self.run_mem[pos] = self.program[pos] return self.run_mem[pos]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, run_number):\n return self[self.run_cache[run_number]]", "def get_val(self, arg_idx):\n\t\tidx = arg_idx-1\n\t\tif idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:\n\t\t\treturn self.memory[self.memory[self.ptr+arg_idx]]\n\t\telif self.__par_modes[idx] == 1:\n\t\t\treturn self.memory[self.ptr + arg_idx]", "def run(self):\n return self.opt().eval()", "def run(self):\n\n self.publisher.publish('run-start', vm=self)\n\n self.halt = False\n\n while not self.halt:\n self.publisher.publish('step', vm=self)\n\n if self.exec_ptr > (len(self.memory) - 1):\n raise OverflowError(\"Execution pointer has overran memory: \" + str(self.exec_ptr))\n\n a = b = c = None\n\n instruction = self.memory[self.exec_ptr]\n\n if instruction == HALT:\n self.halt = True\n continue\n elif instruction == SET:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_set(a, b)\n\n self.exec_ptr += 3\n elif instruction == PUSH:\n a = self.get_a_param()\n\n self.instruction_push(a)\n self.exec_ptr += 2\n elif instruction == POP:\n a = self.get_a_param()\n\n self.instruction_pop(a)\n\n self.exec_ptr += 2\n elif instruction == EQ:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_eq(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == GT:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_gt(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == JMP:\n a = self.get_a_param()\n\n self.instruction_jmp(a)\n elif instruction == JT:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_jt(a, b)\n elif instruction == JF:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_jf(a, b)\n elif instruction == ADD:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_add(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == MULT:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_mult(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == MOD:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_mod(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == AND:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_and(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == OR:\n a = self.get_a_param()\n b = self.get_b_param()\n c = self.get_c_param()\n\n self.instruction_or(a, b, c)\n\n self.exec_ptr += 4\n elif instruction == NOT:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_not(a, b)\n\n self.exec_ptr += 3\n elif instruction == RMEM:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_rmem(a, b)\n\n self.exec_ptr += 3\n elif instruction == WMEM:\n a = self.get_a_param()\n b = self.get_b_param()\n\n self.instruction_wmem(a, b)\n\n self.exec_ptr += 3\n elif instruction == CALL:\n a = self.get_a_param()\n\n self.instruction_call(a)\n elif instruction == RET:\n self.instruction_ret()\n elif instruction == OUT:\n a = self.get_a_param()\n\n self.instruction_out(a)\n\n self.exec_ptr += 2\n elif instruction == IN:\n a = self.get_a_param()\n\n self.instruction_in(a)\n\n self.exec_ptr += 2\n elif instruction == NOOP:\n self.exec_ptr += 1\n continue\n else:\n raise ValueError(\"Unknown instruction \" + str(instruction))\n\n self.publisher.publish('run-end', vm=self)", "def work_mem(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"work_mem\")", "def 
get_a_param(self):\n value = self.memory[self.exec_ptr + 1]\n Vm.validate_value(value)\n return value", "def get_address_value(cls, addr):\n\t\tprint \" Called get_address_value({})\".format(addr)\n\t\ttype = abs(addr) // 1000 # integer division\n\t\trelative_address = abs(addr) - (type * 1000)\n\t\tprint \"> Get mem value: type = {}, addr = {}\".format(type, relative_address)\n\t\t# use heap for search if addr is negative, else the current local mem\n\t\tif addr >= 14000:\n\t\t\tprint \"> Const vars memory: {}\".format(cls.const_vars)\n\t\t\treturn cls.const_vars[addr]\n\t\telif addr < 0:\n\t\t\tprint \"> Heap memory: {}\".format(cls.heap.memory)\n\t\t\treturn cls.heap.memory[type][abs(relative_address)]\n\t\telse:\n\t\t\tprint \"> Stack memory: {}\".format(cls.stack.peek().memory)\n\t\t\treturn cls.stack.peek().memory[type][relative_address]", "def get_value(self, index, mode):\n address = self.get_address(index, mode)\n try:\n return self.program[address]\n except KeyError:\n return 0", "def run_program(program):\n halt = False\n instruction_pointer = 0\n\n while not halt:\n halt = process_instruction(instruction_pointer, program)\n instruction_pointer += STEP_SIZE\n\n return program", "def getValue(self, value=None):\n if self.data and self.source & COMMANDLINE:\n return self.data\n\n if self.environ and str(self.environ) in os.environ:\n self.source = ENVIRONMENT\n self.file = None\n return self.cast(os.environ[str(self.environ)])\n\n if self.data:\n return self.data\n\n if self.default:\n self.source = BUILTIN\n self.file = None\n return self.default\n\n self.source = CODE\n self.file = None\n\n if value is None:\n return []\n\n return value", "def run_info ( run_num ) : \n global _rinfos_\n rinfo = _rinfos_.get ( run_num , None )\n if rinfo : return rinfo \n \n try :\n \n #\n url = run_url.format ( run_num )\n _obj = urllib.urlopen ( url )\n rinfo = json.load ( _obj )\n\n rinfo = rinfo if rinfo else None\n _rinfos_ [ run_num ] = rinfo \n return rinfo\n \n except:\n return None \n\n return None", "def run(self) -> Tuple[Any, Log]:\n return self._value", "def _readmem(self, address, command):\n # Build list of commands to read register.\n address = '{0:08X}'.format(address) # Convert address value to hex string.\n commands = [\n '{0} {1} 1'.format(command, address),\n 'q'\n ]\n # Run command and parse output for register value.\n output = self.run_commands(commands)\n match = re.search('^{0} = (\\S+)'.format(address), output,\n re.IGNORECASE | re.MULTILINE)\n if match:\n return int(match.group(1), 16)\n else:\n raise AdaLinkError('Could not find expected memory value, are the JLink and board connected?')", "def run(name, program, arguments, cache_enabled, filename):\n output = None\n if cache_enabled:\n output = get_output_from_cache(name, filename)\n\n if output is None:\n call_arguments = [program] + arguments + [filename]\n try:\n output = subprocess.check_output(\n call_arguments, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n output = error.output\n except OSError:\n return {\n filename: {\n 'error': [('Could not execute \"%s\".%sMake sure all ' +\n 'required programs are installed') %\n (' '.join(call_arguments), os.linesep)]\n }\n }\n output = output.decode('utf-8')\n if cache_enabled:\n save_output_in_cache(name, filename, output)\n return output", "def getvalue(program, index, mode=POSITION_MODE):\n if mode == POSITION_MODE:\n return program[program[index]]\n elif mode == IMMEDIATE_MODE:\n return program[index]\n raise Exception(f\"unknown mode: 
{mode}\")", "def load_program(self, program):\n for idx, val in enumerate(program):\n self.memory[idx] = val", "def prog():\n global program\n return program", "def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))", "def user_input(self, op):\n params = 1\n a = self.read_memory(op, 0, params)\n \n input_value = self.input_value()\n if input_value is None:\n return {\"ptr\": self.ptr, \"yield\": True}\n else:\n self.write_memory(a, int(input_value))\n return self.ptr + params + 1", "def get_runObj(run):\n\n if os.path.exists(os.path.join(run, 'runParameters.xml')):\n run_parameters_file = \"runParameters.xml\"\n elif os.path.exists(os.path.join(run, 'RunParameters.xml')):\n run_parameters_file = \"RunParameters.xml\"\n else:\n logger.error(\"Cannot find RunParameters.xml or runParameters.xml in \"\n \"the run folder for run {}\".format(run))\n return None\n\n rppath = os.path.join(run, run_parameters_file)\n try:\n rp = RunParametersParser(os.path.join(run, run_parameters_file))\n except OSError:\n logger.warn(\"Problems parsing the runParameters.xml file at {}. \"\n \"This is quite unexpected. please archive the run {} manually\".format(rppath, run))\n return None\n else:\n # This information about the run type \n try:\n # Works for recent control software\n runtype = rp.data['RunParameters'][\"Setup\"][\"Flowcell\"]\n except KeyError:\n # Use this as second resource but print a warning in the logs\n logger.warn(\"Parsing runParameters to fecth instrument type, \"\n \"not found Flowcell information in it. Using ApplicaiotnName\")\n # here makes sense to use get with default value \"\" ->\n # so that it doesn't raise an exception in the next lines\n # (in case ApplicationName is not found, get returns None)\n runtype = rp.data['RunParameters'][\"Setup\"].get(\"ApplicationName\", \"\")\n\n if \"NextSeq\" in runtype:\n return NextSeq_Run(run, CONFIG[\"analysis\"][\"NextSeq\"])\n else:\n logger.warn(\"Unrecognized run type {}, cannot parse the run {}. 
\"\n \"The sequencer must be NextSeq\".format(runtype, run))\n return None", "def run_program(self, log_level):\n \n if log_level >= 1:\n next_input = self.input_parameters[self.input_position]\n print(f\"Run program for amplifier {self.name}, next input: {next_input}\")\n \n retval = intcode_computer.run(self.memory, self.code_position,\n self.input_parameters, self.input_position, 0, False, log_level)\n \n self.stop_code = retval[0]\n self.output = retval[1][0] #First output is the only relevant output\n self.code_position = retval[2]\n self.input_position = retval[3]\n\n if self.stop_code > 0: #Error\n self.output = 0\n\n return self.output", "def get_slot_value(self, slot_name_or_uri):\n # print(\"MemoryManager: Implement Me\")\n if slot_name_or_uri not in self.memory.keys():\n raise Exception(\"Has no memory for key: %s\" % slot_name_or_uri)\n return self.memory[slot_name_or_uri]", "def run_and_measure(\n self,\n quil_program: Program,\n qubits: Optional[List[int]] = None,\n trials: int = 1,\n memory_map: Optional[Dict[str, List[Union[int, float]]]] = None,\n ) -> np.ndarray:\n if qubits is None:\n qubits = sorted(cast(Set[int], quil_program.get_qubits(indices=True)))\n\n if memory_map is not None:\n quil_program = self.augment_program_with_memory_values(quil_program, memory_map)\n\n request = self._run_and_measure_request(\n quil_program=quil_program,\n qubits=qubits,\n trials=trials,\n )\n response = self._qvm_client.run_and_measure_program(request)\n return np.asarray(response.results)", "def get_run(arn=None):\n pass", "def get_array_value(cls, quad):\n\t\taddr = quad.left_operand\n\t\tsub_index = cls.get_address_value(quad.right_operand)\n\t\ttype = abs(addr) // 1000 # integer division\n\t\trelative_address = abs(addr) - (type * 1000)\n\t\tprint \"> Rel = {} - {}\".format(abs(addr), (type * 1000))\n\t\tprint \"> Get ARR mem value: type = {}, rel = {}, sub = {}, set_to = {}\".format(type, relative_address, sub_index, quad.result)\n\n\t\ttry:\n\t\t\tif addr < 0:\n\t\t\t\tif len(cls.heap.memory[type][abs(relative_address)]) > sub_index and sub_index >= 0 :\n\t\t\t\t\tval = cls.heap.memory[type][abs(relative_address)][sub_index]\n\t\t\t\telse:\n\t\t\t\t\tprint \"> Error on Heap memory: {}\".format(cls.heap.memory)\t\t\t\t\n\t\t\t\t\tError.out_of_bounds(len(cls.heap.memory[type][abs(relative_address)]), sub_index)\n\t\t\telse:\n\t\t\t\tif len(cls.stack.peek().memory[type][relative_address]) > sub_index and sub_index >= 0 :\n\t\t\t\t\tval = cls.stack.peek().memory[type][relative_address][sub_index]\n\t\t\t\telse:\n\t\t\t\t\tError.out_of_bounds(len(cls.stack.peek().memory[type][relative_address]), sub_index)\n\t\texcept TypeError:\n\t\t\tError.type_array()\n\n\t\tcls.set_address_value(quad.result, val)", "def getCompiled(self):\n if self.isCompiled():\n return self.program\n else:\n raise Exception(\"el programa no ha sido compilado aun\")", "def exec(self):\n while self.i < len(self.prog):\n opcode = self.prog[self.i] % 100\n if opcode == 99:\n return True\n mode_first = (self.prog[self.i] // 100) % 10\n mode_second = (self.prog[self.i] // 1000) % 10\n mode_third = (self.prog[self.i] // 10000) % 10\n param1 = self.get_param(mode_first,self.i+1)\n if opcode in [7,8,1,2]:\n if mode_third == 0:\n index_third = self.prog[self.i + 3]\n elif mode_third == 2:\n index_third = self.prog[self.i + 3] + self.base\n if opcode != 3 and opcode != 4 and opcode != 9:\n param2 = self.get_param(mode_second,self.i+2)\n if opcode == 1:\n self.prog[index_third] = param1 + param2\n self.i += 4\n elif opcode 
== 2:\n self.prog[index_third] = param1 * param2\n self.i += 4\n elif opcode == 3:\n if len(self.input) == 0:\n return False\n if mode_first == 0:\n self.prog[self.prog[self.i + 1]] = self.input.pop(0)\n elif mode_first == 2:\n self.prog[self.prog[self.i + 1] + self.base] = self.input.pop(0)\n self.i += 2\n elif opcode == 4:\n # print(param1)\n self.i += 2\n self.output.append(param1)\n elif opcode == 5:\n if param1 != 0:\n self.i = param2\n else:\n self.i += 3\n elif opcode == 6:\n if param1 == 0:\n self.i = param2\n else:\n self.i += 3\n elif opcode == 7:\n if param1 < param2:\n self.prog[index_third] = 1\n else:\n self.prog[index_third] = 0\n self.i += 4\n elif opcode == 8:\n if param1 == param2:\n self.prog[index_third] = 1\n else:\n self.prog[index_third] = 0\n self.i += 4\n elif opcode == 9:\n self.base += param1\n self.i += 2\n else:\n print(\"unknown opcode\", opcode)\n # print(self.prog)\n print(\"Error: reached end of program without halt instruction\")\n return True", "def exe():\n e = entry()\n if e:\n return load(e)", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()" ]
[ "0.631511", "0.6118623", "0.599009", "0.58504933", "0.56327003", "0.54532564", "0.54278064", "0.53919244", "0.5292854", "0.52785087", "0.5211099", "0.52065086", "0.5205116", "0.5199101", "0.5145069", "0.51335573", "0.51126", "0.51109827", "0.5099991", "0.5098417", "0.50968105", "0.5066784", "0.50435597", "0.50309217", "0.5028502", "0.50161135", "0.50091225", "0.50049335", "0.49970806", "0.49970806" ]
0.65592486
0
For every C function defined in the Complex wrapper section, tell ctypes how to handle its inputs and outputs
def setup_ctypes(): lib.createComplex.argtypes = [ctypes.c_double, ctypes.c_double] lib.createComplex.restype = ctypes.c_void_p lib.deleteComplex.argypes = [ctypes.c_void_p] lib.deleteComplex.restype = None lib.getRealPart.argypes = [ctypes.c_void_p] lib.getRealPart.restype = ctypes.c_double lib.getImaginaryPart.argypes = [ctypes.c_void_p] lib.getImaginaryPart.restype = ctypes.c_double lib.add.argypes = [ctypes.c_void_p, ctypes.c_void_p] lib.add.restype = ctypes.c_void_p lib.equals.argtypes = [ctypes.c_void_p, ctypes.c_void_p] lib.equals.restype = ctypes.c_bool
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_capi(lines):\n pattern = r'(\\w+)\\s+(\\**)\\s*(\\w+)\\((.*)\\)' # Float32 *sin(...)\n pexcept = r'except (\\??)(.*)'\n\n functions = []\n for line in lines:\n if line.strip():\n m = re.match(pattern, line)\n restype, stars, fname, argtypes = m.groups()\n rest = line[len(m.group(0)):].strip()\n if rest:\n maybe, badval = re.match(pexcept, rest).groups()\n else:\n maybe, badval = None, None\n\n restype = parse_type(\"%s %s\" % (restype, \" \".join(stars)))\n argtypes = map(parse_type, argtypes.split(','))\n signature = Function(restype, argtypes)\n functions.append(Py_Function(fname, signature, maybe, badval))\n\n return functions", "def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array +\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n 
if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n Py_DECREF(it);\r\n return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n };\r\n\r\n PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)", "def complex(real, imag):", 
"def complex_sum(c_1,c_2):\n return c_1 + c_2", "def test_jit_example(self):\n source = io.StringIO(\"\"\"\n int mega_complex_stuff(int* a, int* b, int count) {\n int sum = 0;\n int i;\n for (i=0; i < count; i++)\n sum += a[i] * b[i];\n return sum;\n }\n \"\"\")\n arch = get_current_arch()\n html_filename = make_filename(self.id()) + '.html'\n with html_reporter(html_filename) as reporter:\n obj = cc(source, arch, debug=True, reporter=reporter)\n m = load_obj(obj)\n # print(m.x.argtypes)\n count = 6\n T = ctypes.c_int * count\n a = T()\n a[:] = 1, 0, 2, 0, 3, 0\n b = T()\n b[:] = 5, 0, 4, 0, 9, 0\n y = m.mega_complex_stuff(a, b, count)\n self.assertEqual(40, y)", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = 
arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n 
node._PTR_F_C_index = C_new._function_index\n return True", "def load_c_functions(self):\n\n # Load shared object\n lib = ctypes.cdll.LoadLibrary(os.path.join(self.working_directory,\"models/doubly_constrained/flow_forward_models.so\"))\n lib2 = ctypes.cdll.LoadLibrary(os.path.join(self.working_directory,\"models/doubly_constrained/potential_function.so\"))\n\n # Load DSF procedure flow inference\n self.infer_flows_dsf_procedure = lib.infer_flows_dsf_procedure\n self.infer_flows_dsf_procedure.restype = ctypes.c_double\n self.infer_flows_dsf_procedure.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_size_t,\n ctypes.c_bool,\n ctypes.c_bool]\n\n\n # Load Newton Raphson procedure flow inference\n self.infer_flows_newton_raphson = lib.infer_flows_newton_raphson\n self.infer_flows_newton_raphson.restype = None #ctypes.c_double\n self.infer_flows_newton_raphson.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_double,\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_size_t]\n\n # Load Iterative proportional filtering procedure flow inference\n self.infer_flows_ipf_procedure = lib.infer_flows_ipf_procedure\n self.infer_flows_ipf_procedure.restype = ctypes.c_double\n self.infer_flows_ipf_procedure.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_bool]\n\n # Load Iterative proportional filtering procedure flow inference\n self.infer_flows_ipf_procedure_singly = lib.infer_flows_ipf_procedure_singly\n self.infer_flows_ipf_procedure_singly.restype = ctypes.c_double\n self.infer_flows_ipf_procedure_singly.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_bool]\n\n # Load potential function\n self.potential_stochastic = lib2.potential_stochastic\n self.potential_stochastic.restype = ctypes.c_double\n self.potential_stochastic.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t]", "def 
make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def test_callback_from_c(self):\n source = io.StringIO(\"\"\"\n int add(int x, int y);\n int x(int a) {\n return add(a + 1, 13);\n }\n \"\"\")\n arch = get_current_arch()\n obj = cc(source, arch, debug=True)\n def my_add(x: int, y: int) -> int:\n return x + y + 2\n imports = {\n 'add': my_add\n }\n m = load_obj(obj, imports=imports)\n y = m.x(101)\n self.assertEqual(117, y)", "def complex_difference(c_1,c_2):\n return c_1 - c_2", "def boost_initialization():\n global Lib_c \n Lib_c = ctypes.CDLL('./integral_function.so')\n Lib_c.set.restype = None\n Lib_c.set.argtypes = (ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)\n Lib_c.set_target.restype = None\n Lib_c.set_target.argtypes = (ctypes.c_int,)\n Lib_c.function.restype = ctypes.c_double\n Lib_c.function.argtypes = (ctypes.c_int,ctypes.c_double)", "def Python_to_C(c_object):\n try :\n cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])\n\n return cast_func", "def _gen_code(self):\r\n #TODO: maybe generate one C function only to save compile time? 
Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. #\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // _K_code\r\n // Code for computing the 
covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // _dKdiag_dtheta_code\r\n // Code for 
computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. 
\r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r", "def _fc_function_definitions(self) -> str:\n result = 'extern \"C\" {\\n\\n'\n for namespace in self.namespaces:\n for member in namespace.members:\n result += member.fortran_c_wrapper()\n\n result += '}\\n\\n'\n return result", "def complex_integral(self,func,a,b):\r\n \r\n import scipy\r\n from scipy import array\r\n \r\n def quad_routine(func, a, b, x_list, w_list):\r\n c_1 = (b-a)/2.0\r\n c_2 = (b+a)/2.0\r\n eval_points = map(lambda x: c_1*x+c_2, x_list)\r\n func_evals = list(map(func, eval_points)) # Python 3: make a list here\r\n return c_1 * sum(array(func_evals) * array(w_list))\r\n \r\n def quad_gauss_7(func, a, b):\r\n x_gauss = [-0.949107912342759, -0.741531185599394, -0.405845151377397, 0, 0.405845151377397, 0.741531185599394, 0.949107912342759]\r\n w_gauss = array([0.129484966168870, 0.279705391489277, 0.381830050505119, 0.417959183673469, 0.381830050505119, 0.279705391489277,0.129484966168870])\r\n return quad_routine(func,a,b,x_gauss, w_gauss)\r\n \r\n def quad_kronrod_15(func, a, b):\r\n x_kr = [-0.991455371120813,-0.949107912342759, -0.864864423359769, -0.741531185599394, -0.586087235467691,-0.405845151377397, -0.207784955007898, 0.0, 0.207784955007898,0.405845151377397, 0.586087235467691, 0.741531185599394, 0.864864423359769, 0.949107912342759, 0.991455371120813]\r\n w_kr = [0.022935322010529, 0.063092092629979, 0.104790010322250, 0.140653259715525, 0.169004726639267, 0.190350578064785, 0.204432940075298, 0.209482141084728, 0.204432940075298, 0.190350578064785, 0.169004726639267, 0.140653259715525, 0.104790010322250, 0.063092092629979, 0.022935322010529]\r\n return quad_routine(func,a,b,x_kr, w_kr)\r\n \r\n class Memorize: # Python 3: no need to inherit from object\r\n def __init__(self, func):\r\n self.func = func\r\n self.eval_points = {}\r\n def __call__(self, *args):\r\n if args not in self.eval_points:\r\n self.eval_points[args] = self.func(*args)\r\n return self.eval_points[args]\r\n \r\n def quad(func,a,b):\r\n ''' Output is the 15 point estimate; and the estimated error '''\r\n func = Memorize(func) # Memorize function to skip repeated function calls.\r\n g7 = quad_gauss_7(func,a,b)\r\n k15 = quad_kronrod_15(func,a,b)\r\n # I don't have much faith in this error estimate taken from wikipedia\r\n # without incorporating how it should scale with changing limits\r\n return [k15, (200*scipy.absolute(g7-k15))**1.5]\r\n \r\n return quad(func,a,b)", "def complex_integral(self,func,a,b):\r\n \r\n import scipy\r\n from scipy import array\r\n \r\n def quad_routine(func, a, b, x_list, w_list):\r\n c_1 = (b-a)/2.0\r\n c_2 = (b+a)/2.0\r\n eval_points = map(lambda x: c_1*x+c_2, x_list)\r\n func_evals = list(map(func, eval_points)) # Python 3: make a list here\r\n return c_1 * sum(array(func_evals) * array(w_list))\r\n \r\n def quad_gauss_7(func, a, b):\r\n x_gauss = [-0.949107912342759, -0.741531185599394, -0.405845151377397, 0, 0.405845151377397, 0.741531185599394, 0.949107912342759]\r\n w_gauss = array([0.129484966168870, 0.279705391489277, 0.381830050505119, 0.417959183673469, 0.381830050505119, 0.279705391489277,0.129484966168870])\r\n return 
quad_routine(func,a,b,x_gauss, w_gauss)\r\n \r\n def quad_kronrod_15(func, a, b):\r\n x_kr = [-0.991455371120813,-0.949107912342759, -0.864864423359769, -0.741531185599394, -0.586087235467691,-0.405845151377397, -0.207784955007898, 0.0, 0.207784955007898,0.405845151377397, 0.586087235467691, 0.741531185599394, 0.864864423359769, 0.949107912342759, 0.991455371120813]\r\n w_kr = [0.022935322010529, 0.063092092629979, 0.104790010322250, 0.140653259715525, 0.169004726639267, 0.190350578064785, 0.204432940075298, 0.209482141084728, 0.204432940075298, 0.190350578064785, 0.169004726639267, 0.140653259715525, 0.104790010322250, 0.063092092629979, 0.022935322010529]\r\n return quad_routine(func,a,b,x_kr, w_kr)\r\n \r\n class Memorize: # Python 3: no need to inherit from object\r\n def __init__(self, func):\r\n self.func = func\r\n self.eval_points = {}\r\n def __call__(self, *args):\r\n if args not in self.eval_points:\r\n self.eval_points[args] = self.func(*args)\r\n return self.eval_points[args]\r\n \r\n def quad(func,a,b):\r\n ''' Output is the 15 point estimate; and the estimated error '''\r\n func = Memorize(func) # Memorize function to skip repeated function calls.\r\n g7 = quad_gauss_7(func,a,b)\r\n k15 = quad_kronrod_15(func,a,b)\r\n # I don't have much faith in this error estimate taken from wikipedia\r\n # without incorporating how it should scale with changing limits\r\n return [k15, (200*scipy.absolute(g7-k15))**1.5]\r\n \r\n return quad(func,a,b)", "def helper_cccc(standardized_output: dict):\n\n for module in standardized_output[\"classes\"]:\n WMC = 0\n n_func = 0\n module_name = module[\"class name\"]\n for file in standardized_output[\"files\"]:\n for func in file[\"functions\"]:\n if \"class name\" in func and func[\"class name\"] == module_name:\n WMC += func[\"CC\"]\n n_func += 1\n module[\"WMC\"] = WMC\n module[\"no. 
functions\"] = n_func", "def caller_calculate_cpc(self):\n int_regs = 0\n fp_regs = 0\n\n #Calculate number of int-ptr arguments used in context \n if self.rdi_set is False:\n int_regs = 0\n elif self.rdi_set is True and self.rsi_set is False:\n int_regs = 1\n elif self.rsi_set is True and self.rdx_set is False:\n int_regs = 2\n #special handling for syscalls where r10 is used\n elif self.rdx_set is True and self.rcx_set is False and self.r10_set is False:\n int_regs = 3\n elif (self.rcx_set is True or self.r10_set is True) and self.r8_set is False:\n int_regs = 4\n elif self.r8_set is True and self.r9_set is False:\n int_regs = 5\n elif self.r9_set is True:\n int_regs = 6\n\n #Calculate number of fp arguments used in context\n if self.xmm0_set is False:\n fp_regs = 0\n elif self.xmm0_set is True and self.xmm1_set is False:\n fp_regs = 1\n elif self.xmm1_set is True and self.xmm2_set is False:\n fp_regs = 2\n elif self.xmm2_set is True and self.xmm3_set is False:\n fp_regs = 3\n elif self.xmm3_set is True and self.xmm4_set is False:\n fp_regs = 4\n elif self.xmm4_set is True and self.xmm5_set is False:\n fp_regs = 5\n elif self.xmm5_set is True and self.xmm6_set is False:\n fp_regs = 6\n elif self.xmm6_set is True and self.xmm7_set is False:\n fp_regs = 7\n elif self.xmm7_set is True:\n fp_regs = 8\n\n return int_regs + fp_regs + self.extra_args", "def call_ccall(x):\n ret = c_call(x)\n return ret, cython.typeof(ret)", "def complex_multiplication(c1,c2,cr):\n cr[0] = c1[0]*c2[0] - c1[1]*c2[1]\n cr[1] = c1[0]*c2[1] + c1[1]*c2[0]\n return cr", "def write_fortran_wrappers(out, decl, return_val):\n delegate_name = decl.name + f_wrap_suffix\n out.write(decl.fortranPrototype(delegate_name, [\"static\"]))\n out.write(\" { \\n\")\n\n call = FortranDelegation(decl, return_val)\n\n if decl.name == \"MPI_Init\":\n # Use out.write() here so it comes at very beginning of wrapper function\n out.write(\" int argc = 0;\\n\");\n out.write(\" char ** argv = NULL;\\n\");\n call.addActual(\"&argc\");\n call.addActual(\"&argv\");\n call.write(out)\n out.write(\" *ierr = %s;\\n\" % return_val)\n out.write(\"}\\n\\n\")\n\n # Write out various bindings that delegate to the main fortran wrapper\n write_fortran_binding(out, decl, delegate_name, \"MPI_INIT\", [\"fortran_init = 1;\"])\n write_fortran_binding(out, decl, delegate_name, \"mpi_init\", [\"fortran_init = 2;\"])\n write_fortran_binding(out, decl, delegate_name, \"mpi_init_\", [\"fortran_init = 3;\"])\n write_fortran_binding(out, decl, delegate_name, \"mpi_init__\", [\"fortran_init = 4;\"])\n return\n\n # This look processes the rest of the call for all other routines.\n for arg in decl.args:\n if arg.name == \"...\": # skip ellipsis\n continue\n\n if not (arg.pointers or arg.array):\n if not arg.isHandle():\n # These are pass-by-value arguments, so just deref and pass thru\n dereferenced = \"*%s\" % arg.name\n call.addActual(dereferenced)\n else:\n # Non-ptr, non-arr handles need to be converted with MPI_Blah_f2c\n # No special case for MPI_Status here because MPI_Statuses are never passed by value.\n call.addActualMPI2(\"%s_f2c(*%s)\" % (conversion_prefix(arg.type), arg.name))\n call.addActualMPICH(\"(%s)(*%s)\" % (arg.type, arg.name))\n\n else:\n if not arg.isHandle():\n # Non-MPI handle pointer types can be passed w/o dereferencing, but need to\n # cast to correct pointer type first (from MPI_Fint*).\n call.addActual(\"(%s)%s\" % (arg.castType(), arg.name))\n else:\n # For MPI-1, assume ints, cross fingers, and pass things straight through.\n 
call.addActualMPICH(\"(%s*)%s\" % (arg.type, arg.name))\n conv = conversion_prefix(arg.type)\n temp = \"temp_%s\" % arg.name\n\n # For MPI-2, other pointer and array types need temporaries and special conversions.\n if not arg.isHandleArray():\n call.addTemp(arg.type, temp)\n call.addActualMPI2(\"&%s\" % temp)\n\n if arg.isStatus():\n call.addCopy(\"%s_f2c(%s, &%s);\" % (conv, arg.name, temp))\n call.addWriteback(\"%s_c2f(&%s, %s);\" % (conv, temp, arg.name))\n else:\n call.addCopy(\"%s = %s_f2c(*%s);\" % (temp, conv, arg.name))\n call.addWriteback(\"*%s = %s_c2f(%s);\" % (arg.name, conv, temp))\n else:\n # Make temporary variables for the array and the loop var\n temp_arr_type = \"%s*\" % arg.type\n call.addTemp(temp_arr_type, temp)\n call.addTemp(\"int\", \"i\")\n\n # generate a copy and a writeback statement for this type of handle\n if arg.isStatus():\n copy = \" %s_f2c(&%s[i], &%s[i])\" % (conv, arg.name, temp)\n writeback = \" %s_c2f(&%s[i], &%s[i])\" % (conv, temp, arg.name)\n else:\n copy = \" temp_%s[i] = %s_f2c(%s[i])\" % (arg.name, conv, arg.name)\n writeback = \" %s[i] = %s_c2f(temp_%s[i])\" % (arg.name, conv, arg.name)\n\n # Generate the call surrounded by temp array allocation, copies, writebacks, and temp free\n count = \"*%s\" % arg.countParam().name\n call.addCopy(\"%s = (%s)malloc(sizeof(%s) * %s);\" %\n (temp, temp_arr_type, arg.type, count))\n call.addCopy(\"for (i=0; i < %s; i++)\" % count)\n call.addCopy(\"%s;\" % copy)\n call.addActualMPI2(temp)\n call.addWriteback(\"for (i=0; i < %s; i++)\" % count)\n call.addWriteback(\"%s;\" % writeback)\n call.addWriteback(\"free(%s);\" % temp)\n\n call.write(out)\n if decl.returnsErrorCode():\n out.write(\" *ierr = %s;\\n\" % return_val)\n else:\n out.write(\" return %s;\\n\" % return_val)\n out.write(\"}\\n\\n\")\n\n # Write out various bindings that delegate to the main fortran wrapper\n write_fortran_binding(out, decl, delegate_name, decl.name.upper())\n write_fortran_binding(out, decl, delegate_name, decl.name.lower())\n write_fortran_binding(out, decl, delegate_name, decl.name.lower() + \"_\")\n write_fortran_binding(out, decl, delegate_name, decl.name.lower() + \"__\")", "def complex_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Complex):\n name = type(var).__name__\n raise ComplexError(\n 'Function {} expected complex number, {} got instead.'.format(func, name))", "def generic_function(self, node, ordered_functions):\n for generic in node.fortran_generic:\n new = node.clone()\n ordered_functions.append(new)\n self.append_function_index(new)\n new._generated = \"fortran_generic\"\n fmt = new.fmtdict\n # XXX append to existing suffix\n if generic.fmtdict:\n fmt.update(generic.fmtdict)\n fmt.function_suffix = fmt.function_suffix + generic.function_suffix\n new.fortran_generic = {}\n new.wrap.assign(fortran=True)\n new.ast.declarator.params = generic.decls\n\n # Try to call original C function if possible.\n # All arguments are native scalar.\n need_wrapper = False\n if new.ast.declarator.is_indirect():\n need_wrapper = True\n \n for arg in new.ast.declarator.params:\n if arg.declarator.is_indirect():\n need_wrapper = True\n break\n elif arg.typemap.sgroup == \"native\":\n pass\n else:\n need_wrapper = True\n break\n\n if need_wrapper:\n # The C wrapper is required to cast constants.\n # generic.yaml: GenericReal\n new.C_force_wrapper = True\n new.wrap.c = True\n new._PTR_C_CXX_index = node._function_index\n else:\n new._PTR_F_C_index = 
node._function_index\n \n # Do not process templated node, instead process\n # generated functions above.\n # node.wrap.c = False\n node.wrap.fortran = False", "def write_c_wrapper(out, decl, return_val, write_body):\n if generate_gotcha:\n write_gotcha_c_wrapper(out, decl, return_val, write_body)\n else:\n write_pmpi_c_wrapper(out, decl, return_val, write_body)", "def setUp(self):\n import sys, os\n from ctypes import CDLL, c_int, c_double, POINTER, byref\n from numpy import arange\n from solvcon.dependency import guess_dllname\n from solvcon.conf import env\n # load the dll created by f2py and bind the subroutine.\n libpath = os.path.join(env.libdir, guess_dllname('sc_solvcontest'))\n self.lib_c_ctypes = CDLL(libpath)\n # create test arrays.\n self.a = arange(10, dtype='int32')\n self.b = arange(20, dtype='float64').reshape((4,5))\n # prepare arguments.\n self.args = [\n byref(c_int(0)), # placeholder.\n byref(c_double(0.0)), # placeholder.\n byref(c_int(self.a.shape[0])),\n self.a.ctypes._as_parameter_,\n byref(c_int(self.b.shape[1])),\n byref(c_int(self.b.shape[0])),\n self.b.ctypes._as_parameter_,\n ]", "def __complex__(self) -> complex:\n return self._translate_in_type(complex, self.integer, self.float_num)", "def build(self, cres):\n _launch_threads()\n # Build wrapper for ufunc entry point\n ctx = cres.target_context\n library = cres.library\n signature = cres.signature\n llvm_func = library.get_function(cres.fndesc.llvm_func_name)\n wrapper, env = build_gufunc_wrapper(library, ctx, llvm_func,\n signature, self.sin, self.sout,\n fndesc=cres.fndesc,\n env=cres.environment)\n\n ptr = library.get_pointer_to_function(wrapper.name)\n\n # Get dtypes\n dtypenums = []\n for a in signature.args:\n if isinstance(a, types.Array):\n ty = a.dtype\n else:\n ty = a\n dtypenums.append(as_dtype(ty).num)\n\n return dtypenums, ptr, env", "def complex_inverse(c1,cr):", "def helper_test_cccc(standardized_output: dict, output: dict):\n\n tot_loc = 0\n tot_cloc = 0\n\n for file in standardized_output[\"files\"]:\n for function in file[\"functions\"]:\n tot_loc += function[\"LOC\"]\n tot_cloc += function[\"CLOC\"]\n\n output[\"LOC\"] = tot_loc\n output[\"CLOC\"] = tot_cloc\n output[\"classes\"] = standardized_output[\"classes\"]\n output[\"files\"] = standardized_output[\"files\"]\n\n for module in output[\"classes\"]:\n WMC = 0\n n_func = 0\n module_name = module[\"class name\"]\n for file in output[\"files\"]:\n for func in file[\"functions\"]:\n if \"class name\" in func and func[\"class name\"] == module_name:\n WMC += func[\"CC\"]\n n_func += 1\n module[\"WMC\"] = WMC\n module[\"no. functions\"] = n_func" ]
[ "0.5993062", "0.57701015", "0.5712526", "0.5676194", "0.5634198", "0.54955506", "0.549056", "0.54669434", "0.5429584", "0.53910625", "0.5366752", "0.53551114", "0.53387433", "0.53340805", "0.5324377", "0.52511126", "0.52511126", "0.5248503", "0.5233666", "0.51930535", "0.5190048", "0.51849306", "0.5121008", "0.5091067", "0.5071545", "0.5047761", "0.50225973", "0.5014687", "0.5010409", "0.500627" ]
0.68730485
0
Configure a task logger to generate site- and task-specific logs.
def _setup_task_logger(logger): if logger.handlers: # Already processed return parts = logger.name.split(".") if len(parts) < 4: # Malformed return site = parts[2] task = parts[3] _ensure_dirs(os.path.join(_log_dir, site)) formatter = Formatter( fmt="[%(asctime)s %(levelname)-7s] %(message)s", datefmt=_DATE_FORMAT) infohandler = TimedRotatingFileHandler( os.path.join(_log_dir, site, task + ".log"), "midnight", 1, 30) infohandler.setLevel("INFO") debughandler = FileHandler( os.path.join(_log_dir, site, task + ".log.verbose"), "w") debughandler.setLevel("DEBUG") errorhandler = RotatingFileHandler( os.path.join(_log_dir, site, task + ".err"), maxBytes=1024**2, backupCount=4) errorhandler.setLevel("WARNING") for handler in [infohandler, debughandler, errorhandler]: handler.setFormatter(formatter) logger.addHandler(handler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _configure_logging(self):\n pass", "def on_celery_setup_logging(**_):\n # underscore is a throwaway-variable, to avoid code style warning for\n # unused variable\n pass", "def _alter_logger(*args, **kwargs):\n\n # TODO: dress up root logger here under Celery\n # configure_logging()\n pass", "def log_settings(config):\n LOGGER.propagate = False\n formatter = ViseronLogFormat(config.logging)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.addFilter(DuplicateFilter())\n LOGGER.addHandler(handler)\n\n LOGGER.setLevel(LOG_LEVELS[config.logging.level])\n logging.getLogger(\"apscheduler.scheduler\").setLevel(logging.ERROR)\n logging.getLogger(\"apscheduler.executors\").setLevel(logging.ERROR)", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def setup_class(cls):\n if os.path.exists(logfilename):\n os.remove(logfilename)\n log = logutils.get_logger(__name__)\n log.root.handlers = []\n logutils.config(mode='standard', console_lvl='stdinfo',\n file_name=logfilename)", "def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()", "def configure(base_path):\n\n log_path = os.path.join(\n base_path,\n 'logs',\n )\n current_time = datetime.datetime.now().strftime(\"%d.%m.%Y %H:%M:%S\")\n\n log_fmt = '%(asctime)s [%(threadName)-12.12s] [%(levelname)-3.4s] %(message)s'\n\n logging.basicConfig(\n level=logging.INFO,\n format=log_fmt,\n handlers=[\n TimedRotatingFileHandler(\n filename=f\"{log_path}/analysis-service.({current_time}).log\",\n encoding='utf-8',\n when=\"d\"\n ),\n logging.StreamHandler()\n ]\n )", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def _config_log(self):\n config_worker = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'handlers': {\n 'queue': {\n 'class': 'hqc_meas.utils.log.tools.QueueHandler',\n 'queue': self.log_queue,\n },\n },\n 'root': {\n 'level': 'DEBUG',\n 'handlers': ['queue']\n },\n }\n logging.config.dictConfig(config_worker)", "def _app(ctx, logfile, verbose):\n log_levels = {\n 0: logging.WARNING,\n 1: logging.INFO,\n 2: logging.DEBUG,\n }\n loglevel = log_levels.get(verbose, logging.DEBUG)\n # TODO more flexible logging config\n logging.basicConfig(format='%(name)s: %(levelname)s: %(message)s',\n level=loglevel, filename=logfile)\n\n tasks = ctx.obj['tasks']\n tasks.context = ctx", "def task_logger(self, handler: Handler, msg: str) -> None:\n handler(\"HacsTask<%s> %s\", self.slug, msg)", "def setup_logging_with_config(config: DynaBox):\n global logger\n logger = setup_logging_threatbus(config, logger_name)", "def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = 
getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)", "def setup_log(self):\n self.logger, _ = get_logger(\"datatransform\")", "def setup_logging():\n log.setup('keystone')", "def config_logger( self, ):\r\n logger = logging.getLogger( self.logger_id )\r\n\r\n logger.handlers = []\r\n logger.setLevel( self.parameters.logging_level ) # DEBUG , INFO WARNING ERROR CRITICAL\r\n\r\n # create the logging file handler.....\r\n fh = logging.FileHandler( self.parameters.pylogging_fn )\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n fh.setFormatter( formatter )\r\n logger.addHandler( fh )\r\n\r\n msg = \"Done config_logger\"\r\n print( msg )\r\n logger.info( msg ) # .debug .info .warn .error\r\n AppGlobal.set_logger( logger )\r\n\r\n return logger", "def configure_logger (max_threads):\n\t\t# Hack for log line separator\n\t\twith open(\"pinger.log\", \"a\") as log:\n\t\t\tlog.write(\n\t\t\t\t\"==============================================================================================\\n\")\n\n\t\tlogging.basicConfig(filename=\"pinger.log\", level=logging.DEBUG, filemode='a',\n\t\t format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%d.%m.%Y %H:%M:%S')\n\t\tlogging.info(\"Started with max threads: %d\", max_threads)", "def configure(cls):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger_handler = logging.StreamHandler()\n logger.addHandler(logger_handler)\n logger_handler.setFormatter(logging.Formatter('%(message)s'))\n cls.logger = logger", "def setup_logging(config: Any) -> Logger:\n green = \"\\033[32m\"\n reset = \"\\033[0m\"\n logger = setup_logger(\n name=f\"{green}[ignite]{reset}\",\n level=logging.DEBUG if config.debug else logging.INFO,\n format=\"%(name)s: %(message)s\",\n filepath=config.output_dir / \"training-info.log\",\n )\n return logger", "def configLogging():\n # define a basic logger to write to file\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='/tmp/execute_pomset.log',\n filemode='w')\n\n # end def configureLogging\n pass", "def _configure_system_job(config, job_exe, system_logging_level):\n logging_env_vars = {'SYSTEM_LOGGING_LEVEL': system_logging_level}\n config.add_to_task('main', env_vars=logging_env_vars, resources=job_exe.get_resources())", "def configure_logging(config):\n logging.basicConfig(level=logging.getLevelName(config.logging.level),\n format=config.logging.format)\n\n if config.subtask_debug:\n logging.getLogger('mercury.rpc.ping').setLevel(logging.DEBUG)\n logging.getLogger('mercury.rpc.ping2').setLevel(logging.DEBUG)\n logging.getLogger('mercury.rpc.jobs.monitor').setLevel(logging.DEBUG)\n\n if config.asyncio_debug:\n logging.getLogger('mercury.rpc.active_asyncio').setLevel(logging.DEBUG)", "def dask_logger_config(\n time_interval=60.0,\n info_interval=1.0,\n log_path=\"logs/\",\n n_tasks_min=1,\n filemode=\"a\",\n additional_info=None,\n config_path=None,\n additional_logger_names=None,\n):\n\n def dask_logger(dask_client):\n pathlib.Path(log_path).mkdir(parents=True, exist_ok=True)\n\n config_logger(dask_client, log_path, 
config_path)\n\n dask_client.versions_logger = threading.Thread(\n target=versions_logger, args=(dask_client, log_path, additional_info)\n )\n dask_client.versions_logger.start()\n\n dask_client.task_logger = threading.Thread(\n target=task_logger,\n args=(dask_client, log_path, time_interval, n_tasks_min, filemode),\n )\n dask_client.task_logger.do_run = True\n dask_client.task_logger.force_log = False\n dask_client.task_logger.start()\n\n dask_client.info_logger = threading.Thread(\n target=info_logger, args=(dask_client, log_path, info_interval, filemode)\n )\n dask_client.info_logger.do_run = True\n dask_client.info_logger.start()\n\n if additional_logger_names:\n logger_logger(id(dask_client), log_path, additional_logger_names)\n\n dask_client.get = graph_logger_config(dask_client.get, log_path=log_path)\n\n return dask_client\n\n return dask_logger", "def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n self.app.logger.setLevel(logging.DEBUG)\n elif self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % self.app.config['LOG'])", "def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger", "def task(ctx, config):\n log.info('{config}'.format(config=config))", "def setup_logging(log_file):\n\tglobal logger\n\tif log_file:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=log_file,filemode='w',level=logging.INFO)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO)\n\tlogger = logging.getLogger('default')", "def configure_logging():\n class TimeFormatter(logging.Formatter):\n def formatTime(self, record, datefmt=None):\n datefmt = datefmt or '%Y-%m-%d %H:%M:%S'\n return time.strftime(datefmt, time.localtime(record.created))\n\n class SeverityFilter(logging.Filter):\n def filter(self, record):\n record.severity = record.levelname[0]\n return True\n\n if not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n log_file = logging.handlers.RotatingFileHandler(LOG_FILE, backupCount=100)\n log_file.addFilter(SeverityFilter())\n log_file.setFormatter(TimeFormatter('%(asctime)s %(severity)s: %(message)s'))\n logger.addHandler(log_file)\n\n # Log all uncaught exceptions.\n def log_exception(exception_type, value, stack_trace):\n logging.error(\n ''.join(traceback.format_exception(exception_type, value, stack_trace)),\n )\n sys.excepthook = log_exception\n\n # Rotate log files once on startup to get per-execution log files.\n if os.path.exists(LOG_FILE):\n log_file.doRollover()", "def setup_logger(self):\n setup_logger(logger, 'mayavi.log', mode=self.log_mode)" ]
[ "0.6633256", "0.6393813", "0.6320544", "0.62959045", "0.6273784", "0.6260842", "0.6259181", "0.6179772", "0.61361146", "0.61344683", "0.6133988", "0.6130853", "0.6126949", "0.6111299", "0.61107033", "0.6107245", "0.60685533", "0.60507756", "0.6001248", "0.5997276", "0.5990514", "0.59712404", "0.59639144", "0.5948641", "0.5938431", "0.5936211", "0.5924024", "0.59052616", "0.5897442", "0.5887731" ]
0.799023
0
Set up the logging infrastructure. If log_dir is given, logs will be written to that directory. If quiet is True, logs below ERROR level will not be written to standard error.
def setup_logging(log_dir=None, quiet=False): global _setup_done if _setup_done: return _setup_done = True _root.handlers = [] # Remove any handlers already attached _root.setLevel("DEBUG") stream = StreamHandler() stream.setLevel("ERROR" if quiet else "DEBUG") stream.setFormatter(_ColorFormatter()) _root.addHandler(stream) if log_dir: _setup_file_logging(log_dir) if quiet: _disable_pywikibot_logging()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_logging(debug=False, quiet=0):\n fmt = '%(asctime)s: %(levelname)-7s: '\n if debug:\n fmt += '%(filename)s:%(funcName)s: '\n fmt += '%(message)s'\n\n # 'Sat, 05 Oct 2013 18:58:50 -0400 (EST)'\n datefmt = '%a, %d %b %Y %H:%M:%S %z'\n tzname = time.strftime('%Z', time.localtime())\n if tzname and ' ' not in tzname and len(tzname) <= 5:\n # If the name is verbose, don't include it. Some systems like to use\n # \"Eastern Daylight Time\" which is much too chatty.\n datefmt += f' ({tzname})'\n\n if debug:\n level = logging.DEBUG\n elif quiet <= 0:\n level = logging.INFO\n elif quiet <= 1:\n level = logging.WARNING\n elif quiet <= 2:\n level = logging.ERROR\n elif quiet <= 3:\n level = logging.CRITICAL\n\n formatter = ColoredFormatter(fmt, datefmt)\n handler = logging.StreamHandler(stream=sys.stdout)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger()\n logger.addHandler(handler)\n logger.setLevel(level)", "def setup_logging(log_dir: Optional[str] = None) -> None:\n config: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"formatters\": {\"console\": {\"format\": \"%(asctime)s:\\t%(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"WARNING\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n LOG_NAME: {\"handlers\": [\"console\"], \"level\": \"DEBUG\", \"propagate\": False}\n },\n }\n if log_dir is not None:\n config[\"loggers\"][LOG_NAME][\"handlers\"].append(\"file\")\n config[\"formatters\"][\"file\"] = {\n \"format\": \"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\n }\n config[\"handlers\"][\"file\"] = {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"formatter\": \"file\",\n \"filename\": os.path.join(log_dir, LOG_NAME + \".log\"),\n \"maxBytes\": 1000000,\n \"backupCount\": 3,\n }\n logging.config.dictConfig(config)", "def logging_setup(args, log_dir):\n timestamp_file = datetime.now().strftime(\"%Y%m%d-%H.%M_rcf_abb.log\")\n log_file = Path(log_dir) / timestamp_file\n\n handlers = []\n\n if not args.skip_logfile:\n handlers.append(log.FileHandler(log_file, mode=\"a\"))\n if not args.quiet:\n handlers.append(log.StreamHandler(sys.stdout))\n\n log.basicConfig(\n level=log.DEBUG if args.debug else log.INFO,\n format=\"%(asctime)s:%(levelname)s:%(funcName)s:%(message)s\",\n handlers=handlers,\n )", "def __initLogger(self, logLevel=logging.INFO, quiet=None, logFile=None):\n # If you call basicConfig more than once without removing handlers\n # it is effectively a noop. In this program it is possible to call\n # __initLogger more than once as we learn information about what\n # options the user has supplied in either the config file or\n # command line; hence, we will need to load and unload the handlers\n # to ensure consistently fomatted output.\n log = logging.getLogger()\n for h in list(log.handlers):\n log.removeHandler(h)\n\n if quiet:\n if logFile:\n # Case: Batch and log file supplied. Log to only file\n self.__log_to_file(logFile, logLevel)\n else:\n # If the user elected quiet mode *and* did not supply\n # a file. 
We will be *mostly* quiet but not completely.\n # If there is an exception/error/critical we will print\n # to stdout/stderr.\n logging.basicConfig(\n level=logging.ERROR,\n format=STREAM_LOG_FORMAT\n )\n else:\n if logFile:\n # Case: Not quiet and log file supplied.\n # Log to both file and stdout/stderr\n self.__log_to_file(logFile, logLevel)\n self.__log_to_stream(logLevel)\n else:\n # Case: Not quiet and no log file supplied.\n # Log to only stdout/stderr\n self.__log_to_stream(logLevel)", "def setup_logs(arg_log_dir, log_level='debug'):\n assert log_level.lower() in ('debug', 'info', 'warning', 'error', 'critical')\n global logger\n cl_logger = log.LogManager(app_name=APP_NAME,\n log_name=__name__,\n log_dir=arg_log_dir)\n logger = cl_logger.logger\n logger.setLevel(log_level.upper())", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def setup_logging(log_basedir=\"logs\"):\n BASEDIR = os.path.abspath(os.path.dirname(__file__))\n LOGDIR = os.path.join(BASEDIR,log_basedir)\n \n # Check if the logs directory exists and is writable\n if not os.path.isdir(LOGDIR):\n print('ERROR: Log directory {} does not exist.'.format(LOGDIR))\n sys.exit(1)\n if not os.access(LOGDIR, os.W_OK):\n print('ERROR: No permissions to write to log directory {}.'.format(LOGDIR))\n sys.exit(1)\n\n # Set the log message format\n fmt = '%(levelname)s - %(asctime)s.%(msecs).03d %(process)d [%(filename)s:%(lineno)d] %(message)s'\n datefmt = '%m%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt)\n\n # Log to console\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n root.addHandler(console_handler)\n\n # Log to file, use a rotating file\n file_name = os.path.join(LOGDIR, '{}.log'.format(\"flask_api_otrs\") )\n\n file_handler = logging.handlers.RotatingFileHandler(file_name, backupCount=7)\n file_handler.setFormatter(formatter)\n root.addHandler(file_handler)", "def setup_root_logger(loglevel=logging.DEBUG, logdir=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Logs'),\n log_config_file=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Utils', 'cent_logger.json')):\n try:\n\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n if log_config_file is not None and os.path.exists(log_config_file):\n with open(log_config_file, 'rt') as logconf:\n config = json.load(logconf)\n # create absolute path for logfile\n config['handlers']['file_handler']['filename'] = logdir + '/' + config['handlers']['file_handler']['filename']\n config['handlers']['longterm']['filename'] = logdir + '/' + config['handlers']['longterm']['filename']\n config['handlers']['single_run']['filename'] = logdir + '/' + config['handlers']['single_run']['filename']\n root_logger = logging.getLogger(\"framework\")\n 
logging.config.dictConfig(config)\n logger.info(\"I initialized the framework logger\")\n root_logger.info(\"Configured basic root logger from: {}\".format(log_config_file))\n test_logger = logging.getLogger(\"tests\")\n logging.config.dictConfig(config)\n logger.info(\"I initialized the tests logger\")\n test_logger.info(\"Configured basic tests logger from: {}\".format(log_config_file))\n\n # disable logs from below external modules\n for disabled_module in config['disable_module_logs']:\n root_logger.debug('Disabled logging for module: {}'.format(disabled_module))\n logging.getLogger(disabled_module).disabled = True\n\n except Exception as e:\n print(\"Error configuring logger: {}\".format(e), file=sys.stderr)\n raise e#", "def setup_logger(config):\n filename = config[\"LOGGER_FILE\"]\n log_dir = '/'.join(filename.split('/')[0:-1]) + \"/\"\n\n check_and_create_directory(log_dir)\n\n level = config[\"LOGGER_LOGLEVEL\"].upper()\n filemode = 'a'\n _format = '%(asctime)s %(name)8s %(module)15s %(funcName)12s %(' \\\n 'levelname)7s: %(message)s'\n _dateformat = '(%d.%m.%Y, %H:%M:%S)'\n\n logging.basicConfig(filename=filename, filemode=filemode, level=level,\n format=_format, datefmt=_dateformat)\n\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"werkzeug\").setLevel(logging.WARNING)\n\n # Display log simultaneously on console\n if config[\"CONSOLE_LOGGING\"]:\n add_terminal_logging(_format, level)", "def setup_logging():\n logging.basicConfig(format='%(levelname)s: %(message)s', level=LOGLEVEL)", "def set_loglevel(loglevel=\"INFO\", quiet=False):\n config = {}\n config[\"handlers\"] = [{\n \"sink\": sys.stdout,\n \"format\": LOGFORMAT,\n \"level\": loglevel,\n \"colorize\": True,\n }]\n logger.configure(**config)\n \n if quiet == True:\n logger.disable(\"LOCOA\")\n else:\n logger.enable(\"LOCOA\")", "def _setup_file_logging(log_dir):\n global _file_logging_enabled, _log_dir\n _file_logging_enabled = True\n\n _log_dir = log_dir\n _ensure_dirs(log_dir)\n\n formatter = Formatter(\n fmt=\"[%(asctime)s %(levelname)-7s] %(name)s: %(message)s\",\n datefmt=_DATE_FORMAT)\n\n infohandler = _WatchedRotatingFileHandler(\n os.path.join(log_dir, \"all.log\"), maxBytes=32 * 1024**2, backupCount=4)\n infohandler.setLevel(\"INFO\")\n\n errorhandler = _WatchedRotatingFileHandler(\n os.path.join(log_dir, \"all.err\"), maxBytes=32 * 1024**2, backupCount=4)\n errorhandler.setLevel(\"WARNING\")\n\n for handler in [infohandler, errorhandler]:\n handler.setFormatter(formatter)\n _root.addHandler(handler)", "def setup_logging():\n lvl = os.getenv(\"LOG_LEVEL\")\n path = os.getenv(\"LOG_PATH\")\n\n logger = get_logger()\n logger.setLevel(lvl)\n\n filehandler = logging.FileHandler(path)\n filehandler.setLevel(lvl)\n filehandler.setFormatter(logging.Formatter(\n \"[%(asctime)s] %(levelname)s: %(message)s\",\n datefmt=\"%Y-%d-%m %H:%M:%S\"\n ))\n\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(lvl)\n streamhandler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n logger.addHandler(filehandler)\n logger.addHandler(streamhandler)", "def setupLogging():\n global enabled, dummyInstance\n from pyemma.util.config import conf_values\n args = conf_values['Logging']\n\n if args.enabled:\n if args.tofile and args.file:\n filename = args.file\n else:\n filename = None\n try:\n logging.basicConfig(level=args.level,\n format=args.format,\n datefmt='%d-%m-%y %H:%M:%S',\n filename=filename,\n filemode='a')\n except IOError as ie:\n import warnings\n warnings.warn('logging could 
not be initialized, because of %s' % ie)\n return\n \"\"\" in case we want to log to both file and stream, add a separate handler\"\"\"\n if args.toconsole and args.tofile:\n ch = logging.StreamHandler()\n ch.setLevel(args.level)\n ch.setFormatter(logging.Formatter(args.format))\n logging.getLogger('').addHandler(ch)\n else:\n dummyInstance = dummyLogger()\n\n enabled = args.enabled", "def setup():\n config['global']['log.access_file'] = ''\n config['global']['log.error_file'] = ''\n config['global']['log.screen'] = False\n log_level = getattr(logging, config.log_level)\n logging.root.setLevel(logging.NOTSET)\n file_log.setLevel(log_level)\n logging.root.addHandler(file_log)\n if config.log_screen:\n console_log.setLevel(log_level)\n logging.root.addHandler(console_log)", "def setup_std_logging (logger, log_file, verbose):\n class debug_filter(logging.Filter):\n \"\"\"\n Ignore INFO mesages\n \"\"\"\n def filter(self, record):\n return logging.INFO != record.levelno\n\n class NullHandler(logging.Handler):\n \"\"\"\n for when there is no logging\n \"\"\"\n def emit(self, record):\n pass\n\n # We are interesting in all messages\n logger.setLevel(logging.DEBUG)\n has_handler = False\n\n # log to file if that is specified\n if log_file:\n handler = logging.FileHandler(log_file, delay=False)\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)6s - %(message)s\"))\n handler.setLevel(MESSAGE)\n logger.addHandler(handler)\n has_handler = True\n\n # log to stderr if verbose\n if verbose:\n stderrhandler = logging.StreamHandler(sys.stderr)\n stderrhandler.setFormatter(logging.Formatter(\" %(message)s\"))\n stderrhandler.setLevel(logging.DEBUG)\n if log_file:\n stderrhandler.addFilter(debug_filter())\n logger.addHandler(stderrhandler)\n has_handler = True\n\n # no logging\n if not has_handler:\n logger.addHandler(NullHandler())", "def setup_logger():\n root = logging.getLogger()\n root.setLevel(LOGGING_LEVEL)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(LOGGING_LEVEL)\n ch.setFormatter(formatter)\n root.addHandler(ch)", "def setup_std_logging (logger, log_file, verbose):\n class debug_filter(logging.Filter):\n \"\"\"\n Ignore INFO messages\n \"\"\"\n def filter(self, record):\n return logging.INFO != record.levelno\n\n class NullHandler(logging.Handler):\n \"\"\"\n for when there is no logging\n \"\"\"\n def emit(self, record):\n pass\n\n # We are interesting in all messages\n logger.setLevel(logging.DEBUG)\n has_handler = False\n\n # log to file if that is specified\n if log_file:\n handler = logging.FileHandler(log_file, delay=False)\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)6s - %(message)s\"))\n handler.setLevel(MESSAGE)\n logger.addHandler(handler)\n has_handler = True\n\n # log to stderr if verbose\n if verbose:\n stderrhandler = logging.StreamHandler(sys.stderr)\n stderrhandler.setFormatter(logging.Formatter(\" %(message)s\"))\n stderrhandler.setLevel(logging.DEBUG)\n if log_file:\n stderrhandler.addFilter(debug_filter())\n logger.addHandler(stderrhandler)\n has_handler = True\n\n # no logging\n if not has_handler:\n logger.addHandler(NullHandler())", "def setup():\n global log_handler\n\n if vaex.settings.main.logging.setup:\n logger.setLevel(logging.DEBUG)\n\n # create console handler and accept all loglevels\n if vaex.settings.main.logging.rich:\n from rich.logging import RichHandler\n log_handler = RichHandler()\n else:\n log_handler = 
logging.StreamHandler()\n\n # create formatter\n formatter = logging.Formatter('%(levelname)s:%(threadName)s:%(name)s:%(message)s')\n\n\n # add formatter to console handler\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n\n # add console handler to logger\n logger.addHandler(log_handler)\n\n logging.getLogger(\"vaex\").setLevel(logging.ERROR) # default to higest level\n _set_log_level(vaex.settings.main.logging.error, logging.ERROR)\n _set_log_level(vaex.settings.main.logging.warning, logging.WARNING)\n _set_log_level(vaex.settings.main.logging.info, logging.INFO)\n _set_log_level(vaex.settings.main.logging.debug, logging.DEBUG)\n # VAEX_DEBUG behaves similar to VAEX_LOGGING_DEBUG, but has more effect\n DEBUG_MODE = os.environ.get('VAEX_DEBUG', '')\n if DEBUG_MODE:\n _set_log_level(DEBUG_MODE, logging.DEBUG)", "def setup_logging(log_file, verbose):\n if verbose:\n log_level = logging.DEBUG\n else:\n log_level = logging.INFO\n\n logger.setLevel(log_level)\n\n log_format = logging.Formatter('%(asctime)-15s %(message)s')\n\n console_log = logging.StreamHandler()\n console_log.setLevel(log_level)\n console_log.setFormatter(log_format)\n\n file_log = logging.FileHandler(log_file)\n file_log.setFormatter(log_format)\n file_log.setLevel(log_level)\n\n root_logger = logging.getLogger()\n root_logger.addHandler(console_log)\n root_logger.addHandler(file_log)", "def setup_logger(app_name, log_directory, log_level):\n # Setting up logger\n # log_levels: NOTSET=0, DEBUG=10, INFO=20, WARN=30, ERROR=40, and CRITICAL=50\n # TODO - on linux we want /var/log ... error on MacOs ... protected directory\n # log_file_name = Path('/var/log/{}.log'.format(app_name))\n log_file_name = Path('{}/{}.log'.format(log_directory, app_name))\n\n short_file_format = \"%(asctime)s:%(levelname)s:%(message)s\"\n long_file_format = \"%(asctime)s %(HOST)s %(AppId)d %(AppVersion)s %(levelname)s %(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s %(uid)\"\n long_file_format = \"%(asctime)s %(levelname)s %(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s\"\n # long_file_format = \"%(asctime)s:%(levelname)s%(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s\"\n log_file_format = short_file_format\n\n # make sure valid log level is passed in, default to DEBUG ...\n valid_log_levels = [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, logging.CRITICAL]\n if log_level not in valid_log_levels:\n log_level = logging.DEBUG\n\n extra_attributes = {'Host': '10.0.0.1',\n 'AppId': 1024,\n 'AppVersion': '1.0.0',\n 'uid': 12345}\n logger = logging.getLogger()\n logging.LoggerAdapter(logger, extra_attributes)\n\n # add in our custom UTC timezone converter\n logging.Formatter.converter = time_tz\n logging.basicConfig(level=log_level, filename=log_file_name, filemode=\"a\",\n format=log_file_format)\n\n # configure stdout same as file\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(logging.Formatter(log_file_format))\n logging.getLogger().addHandler(sh)\n\n logging.info('App:{} startup'.format(app_name))\n return", "def setupLogger():\n 
logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)s %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='prepareToSubmit.log',\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)", "def setup_std_logging (logger, log_file, verbose):\n class debug_filter(logging.Filter):\n \"\"\"\n Ignore INFO messages\n \"\"\"\n def filter(self, record):\n return logging.INFO != record.levelno\n\n class NullHandler(logging.Handler):\n \"\"\"\n for when there is no logging \n \"\"\"\n def emit(self, record):\n pass\n\n # We are interesting in all messages\n logger.setLevel(logging.DEBUG)\n has_handler = False\n\n # log to file if that is specified\n if log_file:\n handler = logging.FileHandler(log_file, delay=False)\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)6s - %(message)s\"))\n handler.setLevel(MESSAGE)\n logger.addHandler(handler)\n has_handler = True\n\n # log to stderr if verbose\n if verbose:\n stderrhandler = logging.StreamHandler(sys.stderr)\n stderrhandler.setFormatter(logging.Formatter(\" %(message)s\"))\n stderrhandler.setLevel(logging.DEBUG)\n if log_file:\n stderrhandler.addFilter(debug_filter())\n logger.addHandler(stderrhandler)\n has_handler = True\n\n # no logging\n if not has_handler:\n logger.addHandler(NullHandler())", "def initialize_logger():\n if not os.path.exists(LOGGING_DIRECTORY):\n os.makedirs(LOGGING_DIRECTORY)\n os.chmod(LOGGING_DIRECTORY, 0o777)", "def configure_logger(debug=False, logfile=None, verbose=False):\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n\n stream = logging.StreamHandler(sys.stdout)\n if debug and verbose:\n stream.setLevel(logging.DEBUG)\n elif verbose:\n stream.setLevel(logging.INFO)\n else:\n stream.setLevel(logging.WARNING)\n\n stream.setFormatter(logging.Formatter(\"%(asctime)s - %(message)s\"))\n root.addHandler(stream)\n\n if logfile:\n file = logging.FileHandler(logfile, \"a\")\n if debug:\n file.setLevel(logging.DEBUG)\n else:\n file.setLevel(logging.INFO)\n\n file.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n root.addHandler(file)", "def setup_logging():\r\n import ConfigParser # change this to configparser for Python 3\r\n # import logging\r\n import logging.config\r\n global logger\r\n\r\n try:\r\n \tlogging.config.fileConfig(\"celog.conf\")\r\n except ConfigParser.NoSectionError: \r\n\t# if there is no configuration file setup a default configuration\r\n logging.basicConfig(filename='code_extract.log',level= _logging_level,\r\n\t\t\tformat='%(asctime)s %(levelname)s - %(message)s',\r\n\t\t\tdatefmt='%Y %b %d, %a %H:%M:%S'\r\n\t\t\t)\r\n \r\n logger = logging.getLogger('%s' % __name__)\r\n\r\n logger.debug('logger ready')", "def configure_logging(logdir=None):\n logconfig = LOGCONFIG_DICT.copy()\n if logdir:\n debugfile = os.path.join(logdir, DEBUGFILE)\n logconfig['handlers']['debugfile']['filename'] = debugfile\n errorfile = os.path.join(logdir, ERRORFILE)\n logconfig['handlers']['errorfile']['filename'] = errorfile\n\n logging.config.dictConfig(logconfig)", "def set_log_dir(dir):\r\n LogOptions._LOG_DIR = dir", "def _setup_std_logging 
(logger, log_file, verbose):\n class debug_filter(logging.Filter):\n \"\"\"\n Ignore INFO messages\n \"\"\"\n def filter(self, record):\n return logging.INFO != record.levelno\n\n class NullHandler(logging.Handler):\n \"\"\"\n for when there is no logging\n \"\"\"\n def emit(self, record):\n pass\n\n # We are interesting in all messages\n logger.setLevel(logging.DEBUG)\n has_handler = False\n\n # log to file if that is specified\n if log_file:\n handler = logging.FileHandler(log_file, delay=False)\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)6s - %(message)s\"))\n handler.setLevel(MESSAGE)\n logger.addHandler(handler)\n has_handler = True\n\n # log to stderr if verbose\n if verbose:\n stderrhandler = logging.StreamHandler(sys.stderr)\n stderrhandler.setFormatter(logging.Formatter(\" %(message)s\"))\n stderrhandler.setLevel(logging.DEBUG)\n if log_file:\n stderrhandler.addFilter(debug_filter())\n logger.addHandler(stderrhandler)\n has_handler = True\n\n # no logging\n if not has_handler:\n logger.addHandler(NullHandler())", "def _setup_logging(log_config: Path = LOG_CONFIG_FILE, silent: bool = False) -> None:\n\n if not log_config.is_file():\n raise RuntimeError(\n \"Logging file {log_file} not found\".format(log_file=log_config)\n )\n\n with log_config.open() as log_file:\n config_orig = yaml.safe_load(log_file.read()) # type: Any\n\n def prepare_filenames(config: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Prepend `LOGS_DIR` to all 'filename' attributes listed for handlers in logging.yaml\n :param config: Configuration dictionary\n :return: Configuration with 'filename's prepended with LOGS_DIR\n \"\"\"\n for handler_name in config[\"handlers\"].keys():\n handler_config = config[\"handlers\"][handler_name]\n if \"filename\" in handler_config:\n filename = Path(handler_config[\"filename\"]).name\n handler_config[\"filename\"] = str(LOGS_DIR.joinpath(filename))\n return config\n\n config = prepare_filenames(config_orig)\n # for some reason, pyright fails with \"'config' is not a known member of module\"\n # even though this is an officially documented member of logging\n # for now we ignore the type\n logging.config.dictConfig(config) # type: ignore\n if silent:\n _remove_non_file_handlers()" ]
[ "0.71798515", "0.6696411", "0.66884494", "0.66336703", "0.6422789", "0.6404984", "0.629583", "0.62205315", "0.6207769", "0.60925233", "0.60813147", "0.60172576", "0.6015258", "0.6000806", "0.5992214", "0.5985083", "0.5964345", "0.59621847", "0.5936569", "0.59219706", "0.5915377", "0.5909019", "0.59036213", "0.58938605", "0.58860964", "0.58845466", "0.58610654", "0.5857321", "0.5848249", "0.58324075" ]
0.80954593
0
Runs a shell command and returns the output.
def run_shell_command(command, checkReturnValue=True, verbose=False): process = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, bufsize=1) outText = "" for line in iter(process.stdout.readline, ''): if verbose: sys.stdout.write(line) outText += line process.communicate()[0] """ returnValue = process.returncode if checkReturnValue and (returnValue != 0): raise Exception(outText) """ return outText
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_command(shell_command, get_output):\n command_ran = subprocess.run(shell_command, capture_output=get_output)\n return command_ran", "def _run_shell(self, cmd):\n self._logger.info(\"Running command\\n{}\".format(\" \".join(cmd)))\n\n out = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n stdout, stderr = out.communicate()\n result = stdout.decode(encoding='utf-8')\n if stderr:\n error_msg = stderr.decode(encoding='utf-8')\n print(error_msg)\n raise Exception(error_msg)\n\n return result", "def run(command):\n\n out = \"\"\n try:\n out = str(subprocess.check_output(command,\n shell=True,\n universal_newlines=True))\n except subprocess.CalledProcessError as e:\n raise RuntimeError(\n 'Failed to execute command %s: %s' % (e.cmd, e.returncode))\n else:\n return out", "def runCommand(command):\n process = subprocess.Popen(command, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n return process.communicate()", "def execute_shell_with_output(command):\n logger.debug(\"Execute with output shell command: %s\" % command)\n outputs = os.popen(command).readlines()\n outputs = [output.strip('\\n') for output in outputs]\n return outputs", "def run_command(command: str) -> str:\n path_command = f\"PATH={shell_path()} {command}\"\n status, output = getstatusoutput(path_command)\n if status == 0:\n return output\n raise ShellError(status, output)", "def run_command(command):\n\n return subprocess.run(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)", "def system_call(command):\n p = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)\n return p.stdout.read()", "def run_command(command):\n process = subprocess.Popen(\n command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n response, error = process.communicate()\n return response.decode().rstrip('\\n'), error.decode().rstrip('\\n')", "def shell_command(command, shell=True):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=shell)\n result = p.communicate()[0]\n if result == \"command not known\":\n LOGGER.info(\"command not known \" + err)\n\n return result.strip()", "def shell(cmd, check=True, stdin=None, stdout=None, stderr=None):\n return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr)", "def run_command(cmd, shell=False):\n\tlog.debug(\"Running command: \" + ' '.join(cmd))\n\tprocess = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=shell)\n\tcmd_out = ''\n\tcmd_err = ''\n\twhile True:\n\t\tout = process.stdout.readline()\n\t\tif out == '' and process.poll() != None:\n\t\t\tcmd_err = process.stderr.read()\n\t\t\tbreak\n\t\tif out != '':\n\t\t\tsys.stdout.write(out)\n\t\t\tsys.stdout.flush()\n\t\t\tcmd_out += out\n\t\t\t\n\tif cmd_err != '':\n\t\tlog.warning(\"Error running command: \" + cmd_err)\n\treturn cmd_out, cmd_err, process.returncode", "def shell_command_output(command):\n process = subprocess.Popen(\n args=['/bin/bash', '-c', command],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n process.wait()\n output = process.stdout.read().decode()\n assert process.returncode == 0, ('Shell command failed: %r : %s' % (command, output))\n return output", "def execute(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n return process.communicate()", "def Run(command_line):\n print >> sys.stderr, command_line\n return subprocess.check_output(command_line, shell=True)", "def sys_exec(command):\n print('Running: {}'.format(command))\n return 
os.popen(command).read().rstrip()", "def shell(commandline, verbose=SHELL_VERBOSE):\n if verbose:\n sys.stderr.write(\"[Executing: \" + commandline + \"]\\n\")\n return sp.check_output(commandline, shell=True)", "def shell(cmd):\n return subprocess.check_output(cmd, shell=True).decode(\"utf-8\")", "def ShellCommandOutput(command):\n process = subprocess.Popen(\n args=['/bin/bash', '-c', command],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n process.wait()\n output = process.stdout.read().decode()\n assert process.returncode == 0, (\n 'Shell command failed: %r : %s' % (command, output))\n return output", "def Run(cmd):\n return os.popen(cmd).read()", "def shell_output(cmd):\n return subprocess.check_output(cmd.split()).decode().strip()", "def shell_call(cmd):\n try:\n x = subprocess.run(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True\n )\n ret = (x.returncode, str(x.stdout, \"utf-8\"), str(x.stderr, \"utf-8\"))\n return ret\n except subprocess.SubprocessError as e:\n logger.error(\"System error running command: \" + str(cmd))\n logger.error(str(e.output))\n raise RuntimeError()", "def shell_cmd(self, cmd):\n cmd_ex = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n output = cmd_ex.communicate()[0]", "def _Shell(*cmd, **kw):\n _LOGGER.info('Executing %s.', cmd)\n prog = subprocess.Popen(cmd, shell=True, **kw)\n\n stdout, stderr = prog.communicate()\n if prog.returncode != 0:\n raise RuntimeError('Command \"%s\" returned %d.' % (cmd, prog.returncode))\n return (stdout, stderr)", "def run_subprocess(command):\n if verbose:\n print \"Running \" + str(command)\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if verbose:\n print \"Output: \" + output\n\n if proc.returncode != 0:\n raise CalledProcessError(command, proc.returncode, output)\n else:\n return output", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n print output", "def run(cmd, shell=False, cwd=None):\n try:\n out = check_output(cmd, shell=shell, cwd=cwd, stderr=STDOUT)\n except CalledProcessError as ex:\n return ex.returncode, ex.output\n else:\n return 0, out", "def run(cmd: str, verbose: bool = False):\n\n if verbose:\n print(cmd)\n\n out = subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n if verbose:\n print(out)\n\n return out", "def get_shell_cmd_output(cmd):\n output = subprocess.check_output(cmd, shell=True, universal_newlines=True)\n return output", "def run_command(*args):\n cmd = sp.Popen(args, shell=True, stdout=sp.PIPE, stderr=sp.STDOUT, encoding='utf-8')\n stdout, _ = cmd.communicate()\n\n if cmd.returncode != 0:\n raise ValueError(f\"Running `{args[0]}` failed with return code {cmd.returncode}, output: \\n {stdout}\")\n else:\n return stdout.strip('\\n')" ]
[ "0.8129673", "0.76473814", "0.762463", "0.7579825", "0.75197285", "0.7503632", "0.7502268", "0.74991596", "0.74890614", "0.74848115", "0.7482531", "0.7450168", "0.7447455", "0.7440299", "0.7410105", "0.74087685", "0.7403609", "0.7398195", "0.73907506", "0.73829544", "0.7367979", "0.7365718", "0.7358288", "0.7346887", "0.7345273", "0.7330634", "0.73176855", "0.73049504", "0.72961277", "0.72832507" ]
0.7687183
1
Convenience function to write text to a file at the specified path
def txtWrite(text, path, mode="w"):
    dirMake(os.path.dirname(path))
    textFile = open(path, mode)
    # Write the given text to the file before closing it
    textFile.write(text)
    textFile.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(cls, path, text):\n with cls.open(path, 'wt') as fd:\n return fd.write(text)", "def save_file(path, text):\n with path.open(mode='w') as f_stream:\n f_stream.write(text)", "def writefile(path: Union[str, Path], txt: str) -> None:\n with open(path, 'w') as outfile:\n outfile.write(txt)", "def write_text_file(path: Path, data: str) -> None:\n path.write_text(data, encoding='utf-8')", "def save_text_file(text, path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, \"w\") as f:\n f.write(text)", "def writeFile(self, name, text):\n\t\ttry:\n\t\t\tf = open(name, 'w')\n\t\t\tf.write (text)\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\tprint \"Error writing file %s\" % name", "def write(file, text):\n with open(file, 'w') as f:\n f.write(text)", "def write_text(file, text):\n\n with open(file, \"w\") as fin:\n fin.write(text)", "def write_into_file(pathname, text):\n with open(pathname+\".txt\", \"a\") as myfile:\n myfile.write(text)\n myfile.write('\\n')", "def store_file(text: str, file_path: str) -> None:\n with open(file=file_path, mode='w', encoding='utf8') as f:\n f.write(text)", "def write_file(filename=\"\", text=\"\"):\n with open(filename, \"w\") as f:\n return(f.write(text))", "def writeFile(file_name, file_text, mode='w+'):\n with open(file_name, mode) as file:\n file.write(file_text)", "def write_text_tofile(text):\n try:\n with open(os.path.join(script_dir, 'output_file.txt'), 'a') as output:\n output.write(text + '\\n')\n except:\n pass", "def write_file(filename=\"\", text=\"\"):\n with open(filename, 'w') as f:\n return f.write(text)", "def txt_file_writer(path):\n return open(path, 'w', encoding=cfg.ENCODING)", "def writeFile(fileName, text):\n with open(fileName, 'w', encoding='utf-8') as f:\n f.write(text)", "def save_file(self, file_name, text):\n\n with open(file_name, 'w') as content_file:\n content = content_file.write(text)", "def write_file(filename=\"\", text=\"\"):\n with open(filename, mode=\"w\", encoding=\"utf-8\") as m:\n return m.write(text)", "def write_file(filename=\"\", text=\"\"):\n with open(filename, 'w') as fl:\n wr = fl.write(text)\n return wr", "def file_write(stuff, file_path):\n with open(file_path, \"wt\") as fo:\n fo.write(stuff)", "def write_file(filename=\"\", text=\"\"):\n with open(filename, 'w', encoding='utf-8') as f:\n return f.write(text)", "def write_file(rel_path, text, *args, **kwargs):\n path = os.path.join(os.path.dirname(__file__), \"resources\", rel_path)\n with open(path, 'w+', *args, **kwargs) as _file:\n _file.write(text)", "def write(self,path,content):\n file_path = os.path.join( self.directory, path)\n with open(file_path, \"w\") as file:\n file.write( content )", "def fwrite(filename, text):\n basedir = os.path.dirname(filename)\n if not os.path.isdir(basedir):\n os.makedirs(basedir)\n\n with open(filename, 'w') as f:\n f.write(text)", "def writeText(outputText, fileName):\n with open(fileName,\"w\") as fileObject:\n fileObject.write(outputText)", "def write_txt(data, out_path, type=\"w\"):\n with open(out_path, type) as f:\n f.write(data.encode(\"utf-8\"))", "def write_to_file(file_name, content):\n with open(file_name, \"w\") as text_file:\n text_file.write(str(content))", "def write(self, path, content):\n this_file = open(path, 'w')\n this_file.write(content)\n this_file.close()", "def file_writer(path, data):\n with open(path, \"a\") as file:\n file.write(data + \"\\n\")", "def write_text(file_path, text):\n # Check if file ends with txt\n if not file_path.endswith('.txt'):\n raise 
IllegalArgumentError(f\"{file_path} needs to have a .txt extension\")\n\n # Write file\n with open(file_path, 'w') as file:\n if isinstance(text, str):\n file.write(text)\n elif isinstance(text, list):\n file.writelines(text)\n else:\n raise IllegalArgumentError(\"text variable is not a string or list of strings\")\n\n return True" ]
[ "0.8543954", "0.8357027", "0.8341502", "0.8121914", "0.8031457", "0.7922882", "0.7885296", "0.7877682", "0.7796177", "0.76735586", "0.76676124", "0.7660709", "0.7648635", "0.76421416", "0.76414704", "0.75855273", "0.7556236", "0.7516805", "0.7516794", "0.75154793", "0.74886644", "0.74714774", "0.7447214", "0.7436259", "0.73494554", "0.73432606", "0.72757834", "0.7227783", "0.7227159", "0.7201913" ]
0.83784384
1
Write sitk image to path.
def imgWrite(img, path): dirMake(os.path.dirname(path)) sitk.WriteImage(img, path) # Reformat files to be compatible with CIS Software #ext = os.path.splitext(path)[1].lower() #if ext == ".vtk": vtkReformat(path, path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(img, path):\n create_directories_for_file_name(path)\n writer = sitk.ImageFileWriter()\n writer.Execute(img, path, True)", "def write_image(self, name: str, image_path: str):\n # TODO: implement\n raise NotImplementedError(\"We are working on this!\")", "def write(self, uri):\n img_to_write = self.msiToWrite.get_image()\n\n # sitk can only write images of dimension 2,3,4. This hack is\n # to fake 1d images as being 2d. 1d images e.g. occure after taking\n # the mean of an image.\n if len(img_to_write.shape) == 1:\n img_to_write = np.reshape(img_to_write, (1, 1, img_to_write.shape[0]))\n\n img = sitk.GetImageFromArray(img_to_write, isVector=True)\n sitk.WriteImage(img, uri)\n logging.info(\"written file \" + uri + \" to disk\")\n return None", "def write(self, image):\n raise NotImplementedError()", "def write_image(self, image_name, image):\n raise NotImplementedError", "def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")", "def write_stitched_image(self):\r\n\r\n self.write_debug(\"End of train detected. Writing stitched image.\")\r\n cv2.imwrite(os.path.join(self.output_dir_stitched, 'stitched.jpg'), self.stitched_image)", "def write_itk_image(image, path):\n\n writer = itk.ImageFileWriter()\n writer.SetFileName(path)\n\n if os.path.splitext(path)[1] == '.nii':\n Warning('You are converting nii, ' + \\\n 'be careful with type conversions')\n\n writer.Execute(image)", "def save_image(self):\n img = self.driver.find_element_by_xpath(web_map[self.region][img_path]).get_attribute(\"src\")\n img = requests.get(img, stream=True)\n self.search_results.export_image(self.region, img)", "def write_img_to_fs(name, data):\n with open(name, \"wb\") as fout:\n fout.write(data)", "def save(img, path, file_name):\n\n name = os.path.join(path,file_name).replace('/', os.sep)\n\n io.imsave(name,img)", "def save_image(image):\n if config['save_images']['enabled']:\n directory = config['save_images']['destination']\n filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S%f\") + '.jpg'\n destination = os.path.join(directory, filename)\n logging.debug('saving image to %s', destination)\n f = open(destination, 'wb')\n f.write(image)\n f.close", "def write(\n path: Union[Path, str],\n image: np.ndarray) -> None:\n raise NotImplementedError()", "def write(self, filename):\n\n self.__image.save(filename)", "def imwrite(image, path):\n return scipy.misc.imsave(path, to_range(image, 0, 255, np.uint8))", "def save_output_image_to_directory(self):\n curr_directory = os.path.dirname(os.path.abspath(__file__))\n images_dir = curr_directory + \"/images/\"\n if not os.path.exists(images_dir):\n os.makedirs(images_dir)\n self.output_image_name = md5(str(uuid4()).encode()).hexdigest() + \".png\"\n image_file_name = images_dir + self.output_image_name\n self.output_image.save(image_file_name)\n logger.info(\"Image file saved locally : %s\", image_file_name)", "def saveImageAs(self, name):\n\t\tself.image.save(name)", "def imwrite(image, path):\n\n if image.ndim == 3 and image.shape[2] == 1: # for gray image\n image = np.array(image, copy=True)\n image.shape = image.shape[0:2]\n\n imgarray=((image+1.0)*127.5).astype(np.uint8)\n img=Image.fromarray(imgarray)\n img.save(path)", "def write_nifti(self, output_path):\n nib.save(self.niftiImage, output_path)\n print('Image saved at: {}'.format(output_path))", "def save_image(self):\n self.table_to_image.img.save(self.file_name)\n aws.AWSHandler().upload_image(self.file_name)", "def _write(self, stream):\n\n 
self._img.append(self.make_path())\n self._img.append(self.make_border())\n self._img.append(self.make_text())\n\n ET.ElementTree(self._img).write(stream, encoding=\"UTF-8\", xml_declaration=True)", "def _save_image(self, image_name, image, output_dir):\n dst = '{}/{}'.format(output_dir, self._image_filename(image_name))\n os.makedirs(output_dir, exist_ok=True)\n try:\n with open(dst, 'wb') as f:\n for chunk in image.save(named=self.image_registry_name(image_name)):\n f.write(chunk)\n log.info('Image {} saved as {}'.format(image_name, dst))\n except Exception as err:\n if os.path.isfile(dst):\n os.remove(dst)\n raise err", "def save(self, filepath):\n self.drawer.flush()\n self.img.save(filepath)", "def write_mhd_and_raw(Data, path):\n if not isinstance(Data, sitk.SimpleITK.Image):\n print('Please check your ''Data'' class')\n return False\n\n data_dir, file_name = os.path.split(path)\n if not os.path.isdir(data_dir):\n os.makedirs(data_dir)\n\n sitk.WriteImage(Data, path, True)\n\n return True", "def saveImage(self):\n\t\tself.getStackView().saveImage()", "def write_image(self, filename):\n cv2.imwrite(filename, self.image)", "def write_image(path, image):\n image = tf.image.encode_jpeg(image, quality=100)\n return tf.io.write_file(path, image)", "def write_image(img, img_name):\n\n cv2.imwrite(img_name, img)", "def save_screenshot(self, img, file_name: str):\n img.save(str(self.info.screenshots_path / file_name))", "def save_image(image, file_name):\n io.imsave(file_name,image)" ]
[ "0.82689285", "0.69438845", "0.6797211", "0.67003214", "0.665736", "0.6627939", "0.6531617", "0.65256447", "0.64676696", "0.6366418", "0.6359249", "0.63326037", "0.63315624", "0.6317838", "0.6264821", "0.62386143", "0.6221924", "0.61992943", "0.6194874", "0.6182149", "0.6181042", "0.6152047", "0.6140826", "0.61075765", "0.6094104", "0.6085288", "0.60838366", "0.605972", "0.60437185", "0.60386556" ]
0.78326446
1
Reformats vtk file so that it can be read by CIS software.
def vtkReformat(inPath, outPath): # Get size of map inFile = open(inPath,"rb") lineList = inFile.readlines() for line in lineList: if line.lower().strip().startswith("dimensions"): size = map(int,line.split(" ")[1:dimension+1]) break inFile.close() if dimension == 2: size += [0] outFile = open(outPath,"wb") for (i,line) in enumerate(lineList): if i == 1: newline = line.lstrip(line.rstrip("\n")) line = "lddmm 8 0 0 {0} {0} 0 0 {1} {1} 0 0 {2} {2}".format(size[2]-1, size[1]-1, size[0]-1) + newline outFile.write(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vF3d_VTK(field,name,VTKformat): \n if VTKformat == 'vtu':\n vf3d_vtu(field,name)\n elif VTKformat == None:\n print 'Please select a VTK format'\n else:\n print 'The selected format has not been developed yet'\n return #nothing, since functions output the written VTK file", "def loadVtk(self, fname):\n\n reader = vtk.vtkUnstructuredGridReader()\n reader.SetFileName(filename)\n reader.Update()\n self._vtk = reader.GetOutput()", "def ParseVTK(self, use_cython=True, force_linear=False):\n if not vtk_loaded:\n raise Exception('Unable to load VTK module. Cannot parse raw cdb data')\n return\n \n \n if self.CheckRaw():\n raise Exception('Missing key data. Cannot parse into unstructured grid') \n \n # Convert to vtk style arrays\n if use_cython and cython_loaded:\n cells, offset, cell_type, numref = CDBparser.Parse(self.raw,\n force_linear)\n \n else:\n cells, offset, cell_type, numref = PythonParser.Parse(self.raw, \n force_linear)\n\n # Check for missing midside nodes\n if force_linear or np.all(cells != -1):\n nodes = self.raw['nodes'][:, :3].copy()\n nnum = self.raw['nnum']\n else:\n mask = cells == -1\n \n nextra = mask.sum()\n maxnum = numref.max() + 1\n cells[mask] = np.arange(maxnum, maxnum + nextra)\n \n nnodes = self.raw['nodes'].shape[0]\n nodes = np.zeros((nnodes + nextra, 3))\n nodes[:nnodes] = self.raw['nodes'][:, :3]\n \n # Add extra node numbers\n nnum = np.hstack((self.raw['nnum'], np.ones(nextra, np.int32)*-1))\n \n if cython_loaded:\n # Set new midside nodes directly between their edge nodes\n temp_nodes = nodes.copy()\n _relaxmidside.ResetMidside(cells, temp_nodes)\n nodes[nnodes:] = temp_nodes[nnodes:]\n \n \n # Create unstructured grid\n uGrid = Utilities.MakeuGrid(offset, cells, cell_type, nodes)\n\n # Store original ANSYS cell and node numbering\n Utilities.AddPointScalars(uGrid, nnum, 'ANSYSnodenum')\n\n # Add node components to unstructured grid\n ibool = np.empty(uGrid.GetNumberOfPoints(), dtype=np.int8)\n for comp in self.raw['node_comps']:\n ibool[:] = 0 # reset component array\n\n # Convert to new node numbering\n nodenum = numref[self.raw['node_comps'][comp]]\n \n ibool[nodenum] = 1\n Utilities.AddPointScalars(uGrid, ibool, comp)\n \n # Add tracker for original node numbering\n Utilities.AddPointScalars(uGrid,\n np.arange(uGrid.GetNumberOfPoints()),\n 'VTKorigID')\n self.vtkuGrid = uGrid\n \n return uGrid", "def saveVelocityAndPressureVTK_binary(pressure,u,v,w,x,y,z,filename,dims):\n numEl_size = u.size; numEl = np.prod(numEl_size);\n # open the file and write the ASCII header:\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('Binary\\n\\n')\n file.write('DATASET STRUCTURED_GRID\\n')\n file.write('DIMENSIONS %d %d %d \\n'%(dims[0],dims[1],dims[2]))\n file.write('POINTS %d float\\n'%(numEl))\n file.close()\n \n # append binary x,y,z data\n file = open(filename,'ab')\n for i in range(len(x)): # there really needs to be a better way.\n pt = [x[i],y[i],z[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n \n file.close()\n \n # append an ASCII sub header\n file = open(filename,'a')\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('VECTORS velocity_vectors float\\n')\n file.close()\n \n # append binary u,v,w data\n file = open(filename,'ab')\n for i in range(len(u)):\n pt = [u[i],v[i],w[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n file.close()\n \n # append ASCII sub header for scalar velocity magnitude 
data\n file = open(filename,'a')\n file.write('SCALARS VelocityMagnitude float\\n')\n file.write('LOOKUP_TABLE default\\n')\n \n file.close()\n \n file = open(filename,'ab')\n v_mag = np.sqrt(u**2+v**2+w**2)\n file = open(filename,'ab')\n p_buf = array('f',v_mag); p_buf.byteswap()\n file.write(p_buf)\n file.close()\n \n \n # append another ASCII sub header for the scalar pressure data\n file = open(filename,'a')\n file.write('SCALARS Pressure float\\n')\n file.write('LOOKUP_TABLE default\\n')\n file.close()\n \n # append binary pressure data\n file = open(filename,'ab')\n p_buf = array('f',pressure); p_buf.byteswap()\n file.write(p_buf)\n file.close()", "def render_vtk(file_name):\n import vtk\n\n # Read the source file.\n reader = vtk.vtkUnstructuredGridReader()\n reader.SetFileName(file_name)\n reader.Update() # Needed because of GetScalarRange\n output = reader.GetOutput()\n scalar_range = output.GetScalarRange()\n\n # Create the mapper that corresponds the objects of the vtk.vtk file\n # into graphics elements\n mapper = vtk.vtkDataSetMapper()\n mapper.SetInputData(output)\n mapper.SetScalarRange(scalar_range)\n\n # Create the Actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n # Create the Renderer\n renderer = vtk.vtkRenderer()\n renderer.AddActor(actor)\n renderer.SetBackground(1, 1, 1) # Set background to white\n\n # Create the RendererWindow\n renderer_window = vtk.vtkRenderWindow()\n renderer_window.AddRenderer(renderer)\n\n # Create the RendererWindowInteractor and display the vtk_file\n interactor = vtk.vtkRenderWindowInteractor()\n interactor.SetRenderWindow(renderer_window)\n interactor.Initialize()\n interactor.Start()", "def _writeVTKOutput(self):\n\n sigma = numpy.ones((self.numStations, 3), dtype=numpy.float64)\n sigma[:, 0] *= self.sigmaEast\n sigma[:, 1] *= self.sigmaNorth\n sigma[:, 2] *= self.sigmaUp\n\n vtkHead = \"# vtk DataFile Version 2.0\\n\" + \\\n \"Synthetic GPS stations\\n\" + \\\n \"ASCII\\n\" + \\\n \"DATASET POLYDATA\\n\" + \\\n \"POINTS \" + repr(self.numStations) + \" double\\n\"\n\n v = open(self.vtkOutputFile, 'w')\n v.write(vtkHead)\n numpy.savetxt(v, self.coords)\n\n numConnect = 2 * self.numStations\n connectHead = \"VERTICES %d %d\\n\" % (self.numStations, numConnect)\n v.write(connectHead)\n verts = numpy.arange(self.numStations, dtype=numpy.int64)\n sizes = numpy.ones_like(verts)\n outConnect = numpy.column_stack((sizes, verts))\n numpy.savetxt(v, outConnect, fmt=\"%d\")\n \n dispHead = \"POINT_DATA \" + repr(self.numStations) + \"\\n\" + \\\n \"VECTORS displacement double\\n\"\n v.write(dispHead)\n numpy.savetxt(v, self.dispNoise)\n\n sigHead = \"VECTORS uncertainty double\\n\"\n v.write(sigHead)\n numpy.savetxt(v, sigma)\n v.close()\n \n return", "def writeVelocityPlot(self):\n name = \"velocity.vtk\"\n chargeFile = open(name,'w')\n chargeFile.write(\"%s\\n\"%(\"# vtk DataFile Version 2.0\"))\n chargeFile.write(\"%s\\n\"%(\"obtained via hydraulicmodule\"))\n chargeFile.write(\"%s\\n\"%(\"ASCII\"))\n chargeFile.write(\"%s\\n\"%(\"DATASET UNSTRUCTURED_GRID\"))\n chargeFile.write(\"%s %i %s\\n\"%(\"POINTS\",len(self.points),\"double\"))\n dim = self.mesh.getSpaceDimensions()\n if (dim==2): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n 0.))\n pass\n pass\n elif (dim==3): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n self.points[ind][2]))\n pass\n pass\n 
else:\n raise Exception(\" error in mesh dimension \") \n numberOfCells = self.mesh.getNumberOfCells()\n connectivity = self.mesh.getConnectivity()\n\n cellListSize = 0\n for i in range(0,numberOfCells): # gmsh meshes: type of elements\n gmshType = connectivity[i][1]\n if gmshType == 1: # 2-node line\n cellListSize += 3\n pass\n elif gmshType == 2: # 3-node triangles\n cellListSize += 4\n pass\n elif gmshType == 3: # 4-node quadrangles\n cellListSize += 5\n pass\n elif gmshType == 4: # 4-node tetrahedron\n cellListSize += 5\n pass\n elif gmshType == 5: # 8-node hexahedrons\n cellListSize += 9\n pass\n pass\n chargeFile.write(\"CELLS %i %i\\n\"%(numberOfCells,cellListSize))\n ind = 0\n for cell in connectivity:\n ind = cell[2]+3\n# print \" ctm dbg cell \",vtkTyp,ind,cell,\" perm \",permutation[ind],permutation[ind+1],permutation[ind+2],permutation[ind+3]\n # \n vtkTyp = _vtkGmsh(cell[1])\n if (vtkTyp==3): # 2-node line\n ind = cell[2]+3\n chargeFile.write(\"%i %i %i\\n\"%(\n 2,\\\n cell[ind]-1,\\\n cell[ind+1]-1)\n )\n pass\n \n elif (vtkTyp==5): # triangles\n chargeFile.write(\"%i %i %i %i\\n\"%(\n 3, \n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1)\n )\n pass\n elif (vtkTyp==9): # quadr\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==10): # tetra\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==12): # hexahedron\n chargeFile.write(\"%i %i %i %i %i %i %i %i %i\\n\"%(\n 8,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1,\\\n cell[ind+4]-1,\\\n cell[ind+5]-1,\\\n cell[ind+6]-1,\\\n cell[ind+7]-1)\n )\n pass\n pass\n chargeFile.write(\"%s %i\\n\"%(\"CELL_TYPES\",numberOfCells))\n#\n for i in range(0,numberOfCells):\n gmshType = connectivity[i][1]\n\n if (gmshType)==1:\n cellTyp = 3\n pass\n elif (gmshType)==2:\n cellTyp = 5\n pass\n elif (gmshType)==3:\n cellTyp = 9\n pass\n elif (gmshType)==4:\n cellTyp = 10\n pass\n elif (gmshType)==5:\n cellTyp = 12\n pass\n elif (gmshType)==6:\n cellTyp = 13\n pass\n elif gmshType == 7:\n cellTyp = 14\n pass\n else:\n raise Exception(\" check gmshtype \")\n chargeFile.write(\"%i\\n\"%(cellTyp))\n chargeFile.write(\"%s %d\\n\"%(\"POINT_DATA\",len(self.points)))\n chargeFile.write(\"%s\\n\"%(\"VECTORS vectors float\"))\n for velocityComponent in self.velocity:\n chargeFile.write(\" %e %e %e\\n \"%(velocityComponent[0], velocityComponent[1], velocityComponent[2]))\n chargeFile.write(\"%s\\n\"%(\"SCALARS charge double\"))\n chargeFile.write(\"%s\\n\"%(\"LOOKUP_TABLE default\"))\n#\n \n chargeDataFile=open(\"./\" + self.flowComponent.meshDirectoryName + \"/\" + \"HeVel.dat\",'r')\n line = chargeDataFile.readline()\n while \"Number Of Nodes\" not in line:\n line = chargeDataFile.readline()\n#line.split()\n nodesNumber = line.split()[-1]\n while \"Perm\" not in line:\n line = chargeDataFile.readline()\n#\n# We read the permutation\n#\n for i in range(int(nodesNumber)): chargeDataFile.readline()\n#\n# We read the charge\n#\n for i in range(int(nodesNumber)): chargeFile.write(\" %15.10e\\n \"%(float(chargeDataFile.readline())))", "def vtp(self, f_vtu, f_vtp):\r\n reader = vtk.vtkXMLUnstructuredGridReader()\r\n reader.SetFileName(f_vtu)\r\n reader.Update()\r\n ugrid = reader.GetOutput()\r\n geometryFilter = vtk.vtkGeometryFilter()\r\n geometryFilter.SetInputData(ugrid)\r\n geometryFilter.Update()\r\n polydata = 
geometryFilter.GetOutput()\r\n writer =vtk.vtkXMLPolyDataWriter()\r\n writer.SetFileName(f_vtp)\r\n writer.SetInputData(polydata)\r\n writer.Write()\r\n print(\"vtp file created.\")", "def onevtkfile():\n basedir = '/home/amit/WorkSpace/UCLA/simulations/PhaseDiagram/RawData'\n with hp.File('VTKFile.h5', 'w') as onefile:\n allvtk = np.empty((600, 500, 3, 216), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n vtkfilepath = '{}/Run{}/VTKFile-{}.h5'.format(basedir, i, j+1)\n with hp.File(vtkfilepath, 'r') as vtkfile:\n for t in range(500):\n allvtk[j, t, i, :] = vtkfile['T{}/Points'.format(2*t)][:].ravel()\n onefile.create_dataset('Points', data=allvtk, chunks=(1, 50, 3, 216), \n compression='gzip', compression_opts=9)", "def ReadVTK(self, filename, element_type=None):\n\n try:\n import vtkInterface as vtki\n except IOError:\n raise IOError(\"vtkInterface is not installed. Please install it first using 'pip install vtkInterface'\")\n\n self.__reset__()\n\n vmesh = vtki.UnstructuredGrid(filename)\n flat_elements = np.copy(np.delete(vmesh.cells, vmesh.offset))\n\n if not np.all(vmesh.celltypes == vmesh.celltypes[0]):\n raise IOError(\"Cannot read VTK files with hybrid elements\")\n\n cellflag = vmesh.celltypes[0]\n\n if cellflag == 5:\n self.element_type = \"tri\"\n divider = 3\n elif cellflag == 9:\n self.element_type = \"quad\"\n divider = 4\n elif cellflag == 10:\n self.element_type = \"tet\"\n divider = 4\n elif cellflag == 12:\n self.element_type = \"hex\"\n divider = 8\n elif cellflag == 3:\n self.element_type = \"line\"\n divider = 2\n else:\n raise IOError(\"VTK element type not understood\")\n\n if element_type is not None:\n if self.element_type != element_type:\n raise ValueError(\"VTK file does not contain {} elements\".format(element_type))\n\n\n self.elements = np.ascontiguousarray(flat_elements.reshape(int(flat_elements.shape[0]/divider),divider), dtype=np.uint64)\n self.points = np.ascontiguousarray(vmesh.points, dtype=np.float64)\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n\n if self.points.shape[1] == 3:\n if np.allclose(self.points[:,2],0.):\n self.points = np.ascontiguousarray(self.points[:,:2])\n\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetEdges()\n self.GetBoundaryEdges()\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetFaces()\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()\n\n return", "def create_pvsm_file(vtk_files, pvsm_filename, relative_paths=True):\n from xml.etree.ElementTree import Element, SubElement, Comment\n\n\n import os.path as op\n\n\n\n top = Element('ParaView')\n\n comment = Comment('Generated for PyMOTW')\n top.append(comment)\n\n numberi = 4923\n # vtk_file = \"C:\\Users\\miros\\lisa_data\\83779720_2_liver.vtk\"\n\n sms = SubElement(top, \"ServerManagerState\", version=\"5.4.1\")\n file_list = SubElement(sms, \"ProxyCollection\", name=\"sources\")\n for vtk_file_orig in vtk_files:\n numberi +=1\n dir, vtk_file_head = op.split(vtk_file_orig)\n if relative_paths:\n vtk_file = vtk_file_head\n else:\n vtk_file = vtk_file_orig\n number = str(numberi)\n proxy1 = SubElement(sms, \"Proxy\", group=\"sources\", type=\"LegacyVTKFileReader\", id=number, servers=\"1\")\n property = SubElement(proxy1, \"Property\", name=\"FileNameInfo\", id=number + \".FileNameInfo\", number_of_elements=\"1\")\n element = SubElement(property, \"Element\", index=\"0\", value=vtk_file)\n property2 = SubElement(proxy1, \"Property\", name=\"FileNames\", id=number + 
\".FileNames\", number_of_elements=\"1\")\n pr2s1 = SubElement(property2, \"Element\", index=\"0\", value=vtk_file)\n pr2s2 = SubElement(property2, \"Domain\", name=\"files\", id=number + \".FileNames.files\")\n\n # < Property\n # name = \"Opacity\"\n # id = \"8109.Opacity\"\n # number_of_elements = \"1\" >\n # < Element\n # index = \"0\"\n # value = \"0.28\" / >\n # < Domain\n # name = \"range\"\n # id = \"8109.Opacity.range\" / >\n # < / Property >\n\n fn1 = SubElement(file_list, \"Item\", id=number, name=vtk_file_head)\n\n xml_str = prettify(top)\n # logger.debug(xml_str)\n with open(op.expanduser(pvsm_filename), \"w\") as file:\n file.write(xml_str)\n\n # ElementTree(top).write()", "def reformat():\n toolkit.reformat()", "def test_convert_MeshVTK():\n mesh = MeshVTK(\n path=join(TEST_DATA_DIR, \"StructElmer\"), name=\"case_t0001\", format=\"vtu\"\n )\n\n meshmat = mesh.convert(meshtype=\"MeshMat\", scale=1)\n\n # meshsol = MeshSolution(mesh=[meshmat])\n # meshsol.plot_mesh(is_show_fig=False)", "def exportVTK(self, fname):\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n sWrite = vtk.vtkXMLStructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vts\")\r\n sWrite.Write()\r\n elif self.GridType == \"vtkUnstructuredGrid\":\r\n sWrite = vtk.vtkXMLUnstructuredGridWriter()\r\n sWrite.SetInputData(self.Grid)\r\n sWrite.SetFileName(filename + \".vtu\")\r\n sWrite.Write()\r\n else:\r\n print(\"Grid type is not recognized\")", "def WriteVTK(self, filename=None, result=None, fmt=\"binary\", interpolation_degree=10, ProjectionFlags=None):\n\n self.__do_essential_memebers_exist__()\n\n if fmt == \"xml\":\n pass\n elif fmt == \"binary\":\n try:\n from pyevtk.hl import pointsToVTK, linesToVTK, gridToVTK, unstructuredGridToVTK\n from pyevtk.vtk import VtkVertex, VtkLine, VtkTriangle, VtkQuad, VtkTetra, VtkPyramid, VtkHexahedron\n except ImportError:\n raise ImportError(\"Could not import evtk. Install it using 'pip install pyevtk'\")\n else:\n raise ValueError(\"Writer format not understood\")\n\n elements = np.copy(self.elements)\n\n cellflag = None\n if self.element_type =='tri':\n cellflag = 5\n offset = 3\n if self.elements.shape[1]==6:\n cellflag = 22\n offset = 6\n elif self.element_type =='quad':\n cellflag = 9\n offset = 4\n if self.elements.shape[1]==8:\n cellflag = 23\n offset = 8\n if self.element_type =='tet':\n cellflag = 10\n offset = 4\n if self.elements.shape[1]==10:\n cellflag = 24\n offset = 10\n # CHANGE NUMBERING ORDER FOR PARAVIEW\n para_arange = [0,4,1,6,2,5,7,8,9,3]\n elements = elements[:,para_arange]\n elif self.element_type == 'hex':\n cellflag = 12\n offset = 8\n if self.elements.shape[1] == 20:\n cellflag = 25\n offset = 20\n elif self.element_type == 'line':\n cellflag = 3\n offset = 2\n\n if filename is None:\n warn('File name not specified. 
I am going to write one in the current directory')\n filename = os.path.join(PWD(__file__), \"output.vtu\")\n if \".vtu\" in filename and fmt == \"binary\":\n filename = filename.split('.')[0]\n if \".vtu\" not in filename and fmt == \"xml\":\n filename = filename + \".vtu\"\n\n\n if self.InferPolynomialDegree() > 1:\n try:\n from Florence.PostProcessing import PostProcess\n from Florence.VariationalPrinciple import DisplacementFormulation\n except ImportError:\n raise RuntimeError(\"Writing high order elements to VTK is not supported yet\")\n if result is not None and result.ndim > 1:\n raise NotImplementedError(\"Writing vector/tensor valued results to binary vtk not supported yet\")\n return\n else:\n if result is None:\n result = np.zeros_like(self.points)[:,:,None]\n if result.ndim == 1:\n result = result.reshape(result.shape[0],1,1)\n pp = PostProcess(3,3)\n pp.SetMesh(self)\n pp.SetSolution(result)\n pp.SetFormulation(DisplacementFormulation(self,compute_post_quadrature=False))\n pp.WriteVTK(filename,quantity=0,interpolation_degree=interpolation_degree, ProjectionFlags=ProjectionFlags)\n return\n\n\n if self.InferSpatialDimension() == 2:\n points = np.zeros((self.points.shape[0],3))\n points[:,:2] = self.points\n else:\n points = self.points\n\n if result is None:\n if fmt == \"xml\":\n write_vtu(Verts=self.points, Cells={cellflag:elements},fname=filename)\n elif fmt == \"binary\":\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag)\n else:\n if isinstance(result, np.ndarray):\n if result.ndim > 1:\n if result.size == result.shape[0]:\n result = result.flatten()\n\n if fmt == \"xml\":\n if result.ndim > 1:\n if result.shape[0] == self.nelem:\n write_vtu(Verts=self.points, Cells={cellflag:elements},\n cvdata={cellflag:result.ravel()},fname=filename)\n elif result.shape[0] == self.points.shape[0]:\n write_vtu(Verts=self.points, Cells={cellflag:elements},\n pvdata=result.ravel(),fname=filename)\n else:\n if result.shape[0] == self.nelem:\n write_vtu(Verts=self.points, Cells={cellflag:elements},cdata=result,fname=filename)\n elif result.shape[0] == self.points.shape[0]:\n write_vtu(Verts=self.points, Cells={cellflag:elements},pdata=result,fname=filename)\n elif fmt == \"binary\":\n if result.ndim <= 1:\n if result.shape[0] == self.nelem:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n cellData={'result':np.ascontiguousarray(result.ravel())})\n elif result.shape[0] == self.points.shape[0]:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n pointData={'result':np.ascontiguousarray(result.ravel())})\n else:\n if result.shape[1] == 3:\n result_data = {'result':tuple(( np.ascontiguousarray(result[:,0]), np.ascontiguousarray(result[:,1]), np.ascontiguousarray(result[:,2]) ))}\n elif result.shape[1] == 2:\n result_data = {'result':tuple(( np.ascontiguousarray(result[:,0]), np.ascontiguousarray(result[:,1]) ))}\n else:\n raise NotImplementedError(\"Writing vector/tensor 
valued results > 3 to binary vtk not supported yet\")\n\n if result.shape[0] == self.nelem:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n cellData=result_data)\n elif result.shape[0] == self.points.shape[0]:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n pointData=result_data)", "def VTKByteOrder():\n return _vtk.VTKByteOrder()", "async def transform(self, file):\n\t\tpass", "def convert_from_libsvm_format(infile, outfile=None):\n ofh = -1\n if not outfile is None:\n ofh = open(outfile, 'a')\n\n with open(infile, 'r') as ifh:\n while True:\n line = ifh.readline()\n if not line:\n break\n standard_line = re.sub(\" \\d+?\\:\", \" \", line)\n print standard_line\n if ofh > 0:\n ofh.write(standard_line)\n\n # close open files\n ifh.close()\n if ofh > 0:\n ofh.close()", "def LoadSphere():\n return vtkInterface.PolyData(spherefile)", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def toVTK(self):\n from vtk.util import numpy_support as nps\n output = self.geometry.toVTK()\n shp = self.geometry.shape\n # Add data to output\n for data in self.data:\n arr = data.array.array\n arr = np.reshape(arr, shp).flatten(order='F')\n c = nps.numpy_to_vtk(num_array=arr, deep=True)\n c.SetName(data.name)\n output.GetCellData().AddArray(c)\n return output", "def loadVTK(self, filename, folder):\n import vtk\n print('Extracting Dataset')\n start = time.time()\n reader = vtk.vtkPolyDataReader()\n reader.SetFileName(folder + filename)\n reader.Update()\n polydata = reader.GetOutput()\n n = polydata.GetNumberOfPoints()\n self.data = np.array([0, 0, 0])\n\n for i in range(0, n, 1):\n vraw = list(polydata.GetPoint(i))\n inRange = np.all([vraw[0] > self.ranges[0,0], vraw[0] < self.ranges[0,1], vraw[1] > self.ranges[1,0], vraw[1] < self.ranges[1,1], vraw[2] > self.ranges[2,0], vraw[2] < self.ranges[2,1]])\n if inRange:\n self.data = np.vstack((self.data, np.array(vraw)))\n if i % 50000 == 0:\n print(' Out of the ' + str(n) + ' particles in the dataset, ' + str(i) + ' (' + str(round(i*100/n, 3)) + ' %) have been processed, and ' + str(len(self.data) - 1) + ' have been stored.')\n\n self.data = self.data[1:, :]\n rangeStr = '_x[' + str(self.ranges[0,0]) + ',' + str(self.ranges[0,1]) + ']_y[' + str(self.ranges[1,0]) + ',' + str(self.ranges[1,1]) + ']_z[' + str(self.ranges[1,0]) + ',' + str(self.ranges[1,1]) + '].npy'\n np.save(folder + 'VoronoiData' + rangeStr, self.data)\n print('Elapsed Time: ' + str(round(time.time() - start, 3)))", "def SetStructuredData(self, file): # real signature unknown; restored from __doc__\n pass", "def update_dat_file(\n dat_folder: str,\n m: np.array,\n vpvs: bool,\n depth: bool,\n produce_tvel: bool = True,\n tvel_name: str = \"Test\",\n):\n M_tt = m[1] # / 1e14\n M_pp = m[2] # / 1e14\n M_rr = m[0] # / 1e14\n M_rp = m[4] # / 1e14\n M_rt = m[3] # / 1e14\n M_tp = m[5] # / 1e14\n\n focal_mech_update = (\n f\"{M_tt:10.4f}{-M_tp+0:10.4f}{M_rt:10.4f}{M_pp:10.4f}{-M_rp+0:10.4f}{M_rr:10.4f}\\n\"\n )\n\n with open(join(dat_folder, \"crfl.dat\"), \"r+\") as f:\n data = f.readlines()\n skiprows = 3 # Always need to skip first 3 lines\n \"\"\" Updating the moment tensor in .dat file\"\"\"\n data[-8] = focal_mech_update\n \"\"\" Updating the structural parameters in .dat file \"\"\"\n if vpvs and depth == False:\n print(\"vpvs are changed in dat file starting from depth 0\")\n n_params = int((len(m) - 6) / 2)\n vp = m[6 : 6 + n_params]\n vs = m[6 + n_params : 6 + 2 * n_params]\n for i in range(n_params):\n line = data[skiprows + i * 2]\n \"\"\" search for and create floats from the string line \"\"\"\n flt = np.array(re.findall(\"\\d+\\.\\d+\", line), dtype=float)\n \"\"\" replace vp and vs \"\"\"\n text = f\"{flt[0]:10.4f}{vp[i]:10.4f}{flt[2]:10.4f}{vs[i]:10.4f}{flt[4]:10.4f}{flt[5]:10.4f}{1:10d}\\n\"\n data[skiprows + i * 2] = text\n if i != n_params - 1:\n line = data[skiprows + i * 2 + 1]\n \"\"\" search for and create floats from the string line \"\"\"\n flt1 = np.array(re.findall(\"\\d+\\.\\d+\", line), dtype=float)\n \"\"\" replace depth \"\"\"\n text = f\"{flt1[0]:10.4f}{vp[i]:10.4f}{flt[2]:10.4f}{vs[i]:10.4f}{flt[4]:10.4f}{flt[5]:10.4f}{1:10d}\\n\"\n data[skiprows + i * 2 + 1] = text\n elif 
depth and vpvs == False:\n n_params = int((len(m) - 6))\n depth = m[6 : 6 + n_params]\n if n_params == 1:\n print(\"depth of MOHO (from TAYAK) will be changed\")\n flt = np.array(re.findall(\"\\d+\\.\\d+\", data[9]), dtype=float)\n data[\n 9\n ] = f\"{depth[0]:10.4f}{flt[1]:10.4f}{flt[2]:10.4f}{flt[3]:10.4f}{flt[4]:10.4f}{flt[5]:10.4f}{1:10d}\\n\"\n flt = np.array(re.findall(\"\\d+\\.\\d+\", data[8]), dtype=float)\n data[\n 8\n ] = f\"{depth[0]:10.4f}{flt[1]:10.4f}{flt[2]:10.4f}{flt[3]:10.4f}{flt[4]:10.4f}{flt[5]:10.4f}{1:10d}\\n\"\n else:\n print(\"depths are changed in dat file starting from depth 0\")\n for i in range(n_params):\n line = data[skiprows + i * 2]\n \"\"\" search for and create floats from the string line \"\"\"\n flt = np.array(re.findall(\"\\d+\\.\\d+\", line), dtype=float)\n \"\"\" replace vp and vs \"\"\"\n text = f\"{depth[i]:10.4f}{flt[1]:10.4f}{flt[2]:10.4f}{flt[3]:10.4f}{flt[4]:10.4f}{flt[5]:10.4f}{1:10d}\\n\"\n data[skiprows + i * 2] = text\n if i != n_params - 1:\n line = data[skiprows + i * 2 + 1]\n \"\"\" search for and create floats from the string line \"\"\"\n flt1 = np.array(re.findall(\"\\d+\\.\\d+\", line), dtype=float)\n \"\"\" replace depth \"\"\"\n text = f\"{depth[i+1]:10.4f}{flt[1]:10.4f}{flt[2]:10.4f}{flt[3]:10.4f}{flt[4]:10.4f}{flt[5]:10.4f}{1:10d}\\n\"\n data[skiprows + i * 2 + 1] = text\n\n elif depth and vpvs:\n n_params = int((len(m) - 6) / 3)\n depth = m[6 : 6 + n_params]\n vp = m[6 + n_params : 6 + 2 * n_params]\n vs = m[6 + 2 * n_params : 6 + 3 * n_params]\n for i in range(n_params):\n line = data[skiprows + i * 2]\n \"\"\" search for and create floats from the string line \"\"\"\n flt = np.array(re.findall(\"\\d+\\.\\d+\", line), dtype=float)\n \"\"\" replace vp and vs \"\"\"\n text = f\"{depth[i]:10.4f}{vp[i]:10.4f}{flt[2]:10.4f}{vs[i]:10.4f}{flt[4]:10.4f}{flt[5]:10.4f}{1:10d}\\n\"\n data[skiprows + i * 2] = text\n if i != n_params - 1:\n line = data[skiprows + i * 2 + 1]\n \"\"\" search for and create floats from the string line \"\"\"\n flt1 = np.array(re.findall(\"\\d+\\.\\d+\", line), dtype=float)\n \"\"\" replace depth \"\"\"\n text = f\"{depth[i+1]:10.4f}{vp[i]:10.4f}{flt[2]:10.4f}{vs[i]:10.4f}{flt[4]:10.4f}{flt[5]:10.4f}{1:10d}\\n\"\n data[skiprows + i * 2 + 1] = text\n f.close()\n with open(join(dat_folder, \"crfl.dat\"), \"w\") as f:\n f.write(\"\".join(data))\n f.close()\n \"\"\" automatically create .tvel file \"\"\"\n if produce_tvel:\n depth = np.zeros(len(data[3:-18]))\n vp = np.zeros(len(data[3:-18]))\n vs = np.zeros(len(data[3:-18]))\n dens = np.zeros(len(data[3:-18]))\n for i, line in enumerate(data[3:-18]):\n \"\"\" search for and create floats from the string line \"\"\"\n flt = np.array(re.findall(\"\\d+\\.\\d+\", line), dtype=float)\n depth[i] = flt[0]\n vp[i] = flt[1]\n vs[i] = flt[3]\n dens[i] = flt[5]\n create_tvel_file(depth, vp, vs, dens, dat_folder, tvel_name)", "def LoadAirplane():\n return vtkInterface.PolyData(planefile)", "def read_kitti_Tr_velo_to_cam(filename):\n\n with open(filename) as f:\n for line in f:\n data = line.split(' ')\n if data[0] == 'Tr_velo_to_cam:':\n calib = np.array([float(x) for x in data[1:13]])\n calib = calib.reshape(3, 4)\n return _extend_matrix(calib)\n\n raise Exception(\n 'Could not find entry for P2 in calib file {}'.format(filename))", "def office_generate_model_vtk(parser, args, params):\n parser.add_argument('--num_slices', type=int,\n help='Number of slices (processors)',\n metavar='', required=True)\n local_args = parser.parse_known_args(args)\n num_slices = 
local_args[0].num_slices\n\n control.generate_model_vtk(params, num_slices)", "def mrtrix_mesh2vox(surface_path, template_path, temp_dir, output_prefix):\n # Adapt affine translation using metadata\n template = nib.load(template_path)\n _, _, meta = read_geometry(surface_path, read_metadata=True)\n\n template = nib.as_closest_canonical(template)\n affine = template.affine.copy()\n affine[:-1, -1] = template.affine[:-1, -1] - meta['cras']\n\n new_template = nib.Nifti1Image(template.dataobj, affine)\n new_template_path = temp_dir / 'template.mgz'\n nib.save(new_template, new_template_path)\n\n # Reconstruct volume from mesh\n subprocess.run(['mesh2voxel', surface_path, new_template_path, temp_dir / f'{output_prefix}_output.mgz'])\n\n # Save the reconstructed volume with the right affine\n output = nib.load(temp_dir / f'{output_prefix}_output.mgz')\n new_output = nib.Nifti1Image(output.dataobj, template.affine)\n # nib.save(new_output, output_path)\n\n return new_output", "def save(filename, points3, tris, metadata):\n logging.info(\"saving mesh: %s\"%filename)\n cells = {'triangle':tris}\n vtk_io.write(filename, points3, cells)\n with open(filename+'.readme','w') as fid:\n fid.write(metadata)", "def export_to_vtk(xgrid, ygrid, data, data_name):\n\tfrom evtk.vtk import VtkFile, VtkStructuredGrid\n\t\n\t\n\t#stupid reshape data\n\toldshape = data.shape\n\tnewshape = oldshape + (1,)\n\tdata = data.reshape(newshape)\n\txgrid = xgrid.reshape(newshape)\n\tygrid = ygrid.reshape(newshape)\n\t\n\t\n\tpath = './{}'.format(data_name)\n\tw = VtkFile(path, VtkStructuredGrid)\n\t\n\t#Header stuff?\n\tnx, ny = oldshape[0] - 1, oldshape[1] - 1\n\tw.openGrid(start = (0, 0, 0), end = (nx, ny, 0))\n\tw.openPiece(start = (0, 0, 0), end = (nx, ny, 0))\n\t\n\tw.openElement(\"Points\")\n\tw.addData(\"points\", (xgrid, ygrid, data))\n\tw.closeElement(\"Points\")\n\t\n\tw.openData(\"Point\", scalars = data_name)\n\tw.addData(data_name, data)\n\tw.closeData(\"Point\")\n\t\n\tw.closePiece()\n\tw.closeGrid()\n\t\n\t#Now add the actual data?\n\tw.appendData((xgrid, ygrid, data))\n\tw.appendData(data)\n\t\n\t#finished\n\tw.save()" ]
[ "0.62193173", "0.57385737", "0.5709972", "0.5686796", "0.5682545", "0.56288266", "0.5449302", "0.5447128", "0.5324145", "0.5320638", "0.52094007", "0.51937026", "0.51830435", "0.5158508", "0.5067943", "0.5041662", "0.50389045", "0.5018575", "0.49896502", "0.4977766", "0.4974109", "0.49630463", "0.49390405", "0.49234775", "0.49186507", "0.4868619", "0.48642445", "0.48622322", "0.48490366", "0.48480165" ]
0.690809
0
Send out Pokemon to fill out all pkmnInPlay at the start of a battle
def sendOutPkmnAtStart(self):\n    messages = []\n    for i in range(0, len(self.pkmnInPlay)):\n        pkmn = self.trainer.getPokemon(i)\n        if not pkmn:\n            break\n        messages += self.pkmnInPlay[i].sendOutPkmn(pkmn)\n    return messages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_capturing(self):\n self.socket.sendall(pack('B', codes['request_pokemon']))\n self.receive_pokemon_suggestion()", "async def loadpokemon(self, ctx):\n await self.bot.di.new_items(ctx.guild, (ServerItem(**item) for item in self.bot.pokemonitems.values()))\n await ctx.send(await _(ctx, \"Successfully added all Pokemon items!\"))", "def main() -> int:\n\n\t# Theoretically reads in the list of pokemon\n\tavailable_pokemon = set(os.listdir(os.path.join(utils.dataDir, \"pokemon\")))\n\tutils.cls()\n\n\tprint(\"Welcome to the Pokémon Battle Simulator (written in Python3)!\\n\")\n\n\tuserPokemon = chooseAPokemon(available_pokemon)\n\n\tutils.cls()\n\n\topponentPokemon = chooseAPokemon(available_pokemon, True)\n\tutils.cls()\n\n\t# Prints the opposing pokemon\n\tprint(\"Battle Starting!\\n\")\n\tutils.dumpPokemon(userPokemon, opponentPokemon)\n\n\tplayerWon = False\n\n\t# Battle Loop\n\twhile True:\n\t\t_ = input(\"Press Enter to continue.\")\n\n\t\tutils.cls()\n\n\t\t# Player chooses a move\n\t\tchoice = chooseAMove(userPokemon, opponentPokemon)\n\t\tchoiceStr = \"%s used %s!\" % (userPokemon, choice)\n\n\t\t#opponent chooses a move\n\t\tutils.cls()\n\t\topponentChoice = opponentPokemon.moves[random.randrange(4)]\n\t\topponentChoiceStr = \"%s used %s!\" % (opponentPokemon, opponentChoice)\n\n\t\torder = utils.decideOrder(userPokemon, choice, opponentPokemon, choice)\n\n\t\tif order:\n\t\t\topponentEvents = opponentPokemon.useMove(opponentChoice, userPokemon, choice)\n\t\t\tif not userPokemon.HP or not opponentPokemon.HP:\n\t\t\t\tutils.printHealthBars(userPokemon, opponentPokemon)\n\t\t\t\tprint(\"%s\\n%s\" % (opponentChoiceStr, opponentEvents))\n\t\t\t\tplayerWon = not opponentPokemon.HP\n\t\t\t\tbreak\n\n\t\t\tuserEvents = userPokemon.useMove(choice, opponentPokemon, opponentChoice)\n\t\t\tutils.printHealthBars(userPokemon, opponentPokemon)\n\t\t\tprint(\"\\n\".join((opponentChoiceStr, opponentEvents, choiceStr, userEvents)))\n\t\t\tif not userPokemon.HP or not opponentPokemon.HP:\n\t\t\t\tplayerWon = not opponentPokemon.HP\n\t\t\t\tbreak\n\n\t\telse:\n\t\t\tuserEvents = userPokemon.useMove(choice, opponentPokemon, opponentChoice)\n\t\t\tif not userPokemon.HP or not opponentPokemon.HP:\n\t\t\t\tutils.printHealthBars(userPokemon, opponentPokemon)\n\t\t\t\tprint(\"%s\\n%s\" % (choiceStr, userEvents))\n\t\t\t\tplayerWon = not opponentPokemon.HP\n\t\t\t\tbreak\n\n\t\t\topponentEvents = opponentPokemon.useMove(opponentChoice, userPokemon, choice)\n\t\t\tutils.printHealthBars(userPokemon, opponentPokemon)\n\t\t\tprint(\"\\n\".join((choiceStr, userEvents, opponentChoiceStr, opponentEvents)))\n\t\t\tif not userPokemon.HP or not opponentPokemon.HP:\n\t\t\t\tplayerWon = not opponentPokemon.HP\n\t\t\t\tbreak\n\n\tif playerWon:\n\t\tprint(\"%s fainted.\\nYou Win!\" % opponentPokemon)\n\telse:\n\t\tprint(\"%s fainted.\\nYou lose...\" % userPokemon)", "def refill(self, pokemonReplacements):\r\n messages = []\r\n if not self.hasMorePokemon():\r\n return messages\r\n \r\n for pkmn in self.pkmnInPlay:\r\n if pkmn.fainted():\r\n messages += self.sendOutPkmn(pkmn, pokemonReplacements)\r\n \r\n return messages", "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "def addPkmn(self):\n params = []\n toAdd = []\n \n for key in self.vals.keys():\n if self.vals[key] is None:\n continue\n \n params += [key]\n toAdd += [self.vals[key]]\n \n paramStr = self.GetStrFromList(params)\n \n print \"Adding Pkmn:\", self.vals['name']\n 
self.insertIntoDB(\"Pokemon\", paramStr, toAdd)\n \n id = sself.cursor.lastrowid\n \n for attack in self.attacks:\n front = DBAddAttackInUse(id, connection = self.connection, cursor = self.cursor)\n front.execute(attack)", "def on_game_go(self):\n print(\"============game go\")\n # create dict with players - defines the players that are playing\n self.on_data_to_all_clients({\"game\":{\"hide_all_dialogs\":\"\"}})\n\n for pl in self.player_list:\n if pl.client not in self.client_pl_dict :\n self.client_pl_dict[pl.client] = pl\n pl.override_direction(4)\n\n self.add_food_item()\n\n self.is_game_going = True\n #for pl in self.player_list:\n # pl.direction = 4", "def printPokemon():\n print(\" _ \")\n print(\" _ __ ___ | | _____ _ __ ___ ___ _ __ \")\n print(\" | '_ \\ / _ \\| |/ / _ \\ '_ ` _ \\ / _ \\| '_ \\ \")\n print(\" | |_) | (_) | < __/ | | | | | (_) | | | |\")\n print(\" | .__/ \\___/|_|\\_\\___|_| |_| |_|\\___/|_| |_|\")\n print(\" |_| \")", "def start(self):\n with self.players['w'], self.players['b']:\n\n game = 0\n\n while game < self.num_games:\n\n # Print info.\n print \"Game %d - %s [%s] (White) VS: %s [%s] (Black)\" % (game + 1,\n self.players['w'].name,\n type(self.players['w']).__name__,\n self.players['b'].name,\n type(self.players['b']).__name__)\n # Reset board\n self.board.reset()\n\n # Signal to players that a new game is being played.\n [p.new_game() for p in self.players.itervalues()]\n\n curr_player_idx = 'w'\n\n game_pgn = chess.pgn.Game()\n game_pgn.headers[\"White\"] = self.players['w'].name\n game_pgn.headers[\"Black\"] = self.players['b'].name\n game_pgn.headers[\"Date\"] = time.strftime(\"%Y.%m.%d\")\n game_pgn.headers[\"Event\"] = \"Test\"\n game_pgn.headers[\"Round\"] = game\n game_pgn.headers[\"Site\"] = \"My PC\"\n\n _, time_taken = self.play(curr_player_idx, game_pgn=game_pgn)\n\n result = self.board.result(claim_draw=True)\n if result == '1-0':\n winner = self.players['w']\n elif result == '0-1':\n winner = self.players['b']\n else:\n winner = None\n self.data['draws'] += 1\n print \"Draw.\" \n\n if winner is not None:\n self.data['wins'][winner.name] += 1\n print \"%s wins.\" % winner.name\n\n for color, p in self.players.iteritems():\n print \"Player %s took %f seconds in total\" % (p.name, time_taken[color])\n p.time_taken = 0\n\n game_pgn = game_pgn.root()\n game_pgn.headers[\"Result\"] = result\n with open(resource_filename('guerilla', 'data/played_games/') + self.players['w'].name + '_' +\n self.players['b'].name + '_' + str(game) + '.pgn', 'w') as pgn:\n try:\n pgn.write(str(game_pgn))\n except AttributeError as e:\n print \"Error writing pgn file: %s\" % (e)\n\n self.swap_colours()\n game += 1", "def plant_food(self):\n self.phase.set(0)\n #self.broadcast_phase()\n self.players[self.first_player].take_turn()", "def meeting1(self):\n # Mission 3: Get a treasure map.\n for message in messages.oldendrab1:\n paragraph = message.replace(\"Hero\", self.hero.name)\n prompt(paragraph + \"\\n\")\n self.hero.inventory.append(items.t_map)\n self.hero.missions[3] = True\n prompt(\"You get:\\n\\tTreasure Map\")", "def comp10001bo_play(player_no, hand, stockpiles, discard_piles, build_piles,\n play_history):", "def give_all(self, player):\n self.transfer(self, player, self.account.balance)\n return '%s spins \\'gimel\\' and gets everything.' 
% (player,)", "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "async def _new_player_gifter(self, connection):\n await asyncio.sleep(2)\n for item, count in self.gifts.items():\n count = int(count)\n if count > 10000 and item != \"money\":\n count = 10000\n item_base = GiveItem.build(dict(name=item,\n count=count,\n variant_type=7,\n description=\"\"))\n item_packet = pparser.build_packet(packets.packets['give_item'],\n item_base)\n await asyncio.sleep(.1)\n await connection.raw_write(item_packet)\n send_message(connection,\n \"You have been given {} {}\".format(str(count), item),\n mode=ChatReceiveMode.COMMAND_RESULT)\n return", "def bcp_game_start(self, **kargs):\n self.bcp_player_add(number=1)\n self.bcp_player_turn_start(player=1)\n self.events.post('game_started', **kargs)", "def play(self):\n if not self.active:\n return\n game_info = {\n 'partisans': self.partisans,\n 'swing': self.swing,\n 'media': self.media,\n 'news': self.news,\n 'mojo': self.mojo,\n 'hype': self.hype,\n 'money': self.money,\n 'cash': self.cash,\n 'cards': self.hand.cards,\n\n 'opp_partisans': self.opponent.partisans,\n 'opp_swing': self.opponent.swing,\n 'opp_media': self.opponent.media,\n 'opp_news': self.opponent.news,\n 'opp_mojo': self.opponent.mojo,\n 'opp_hype': self.opponent.hype,\n 'opp_money': self.opponent.money,\n 'opp_cash': self.opponent.cash,\n 'opp_cards': self.opponent.hand.cards\n }\n\n # print('################')\n # print('opp cards:')\n # for card in game_info['opp_cards']:\n # print(card)\n # print('----------------')\n # print('bots cards:')\n # for card in game_info['cards']:\n # print(card)\n # print('################')\n card, action = self.analysis(game_info)\n\n if action == TO_PRESS:\n card.use()\n elif action == TO_DROP:\n card.drop()", "def startGame(self):\n\n\t\tfor name in self.players.keys():\n\t\t\tself.startPlayerGame((name, 0))\n\t\tself.setupGuiSignals()", "async def round(self):\n def turn_check(m):\n return ((m.content.lower() == 'stand') or (m.content.lower() == 'hit')) and m.guild == self.ctx.guild\n # Players\n for i, player in enumerate(self.players):\n if not player.out:\n HoS = ''\n while HoS != \"stand\":\n embed_players = discord.Embed(\n title='Players', color=0x0000fd)\n try:\n await self.ctx.send(f\"{self.users[i].name}, Would you like to hit or stand? 
\")\n HoS = await self.client.wait_for('message', timeout=20.0, check=turn_check)\n HoS = HoS.content.lower()\n\n if HoS == \"stand\":\n break\n\n elif HoS == \"hit\":\n # give the player a new card\n self.deck.move_cards(player, 1)\n # reload the embed with player hands\n for j, player2 in enumerate(self.players):\n if not player2.out:\n embed_players.add_field(\n name=f\"{self.users[j].name}\", value=player2, inline=True)\n await self.players_msg.edit(embed=embed_players)\n\n if player.get_value() > 21:\n await self.ctx.send(f\"{self.users[i].name} is bust\")\n break\n elif player.get_value() == 21:\n await self.ctx.send(f\"{self.users[i].name} has BlackJack!\")\n player.has_bj = True\n break\n\n except Exception as e:\n print(e)\n continue\n\n # Dealer\n while self.dealer.get_value() < 17:\n self.deck.move_cards(self.dealer, 1)\n\n embed_dealer = discord.Embed(title='Dealer', color=0x00ff00)\n embed_dealer.add_field(name=\"Hand\", value=self.dealer, inline=False)\n await self.dealer_msg.edit(embed=embed_dealer)\n\n # Checks\n # if dealer is bust and not all players are out\n if self.dealer.get_value() > 21 and self.total_players_out < len(self.players):\n for player in self.players:\n if player.get_value() <= 21 and not player.out: # if player is not bust and is not out\n player.credit(2 * player.bet)\n await self.ctx.send(\"Since Dealer is bust, all players win\")\n\n elif self.dealer.get_value() == 21 and self.total_players_out < len(self.players): # Dealer has blackjack\n await self.ctx.send(\"Dealer has BlackJack!\")\n for player in self.players:\n if player.has_bj and not player.out:\n player.credit(2 * player.bet)\n else:\n # Used to check if any of the if statements are activated.\n if_flag = False\n for i, player in enumerate(self.players):\n # if player has blacjack or beat the dealer and not out\n if player.has_bj or (player.get_value() < 21 and player.get_value() > self.dealer.get_value()) and not player.out:\n if_flag = True\n await self.ctx.send(f\"{self.users[i].name}, Conrats on winning!\")\n player.credit(2 * player.bet)\n # if player not bust and tied with dealer\n elif player.get_value() < 21 and player.get_value() == self.dealer.get_value() and not player.out:\n if_flag = True\n await self.ctx.send(f\"{self.users[i].name}, tied with the dealer!\")\n player.credit(player.bet)\n if not if_flag and self.total_players_out < len(self.players):\n await self.ctx.send(\"House wins\")\n\n # end of round cleanup\n for i, player in enumerate(self.players):\n if not player.out:\n player.has_bj = False\n if player.coins < 1:\n await self.ctx.send(f\"{self.users[i].name}, Min bet is €1, get your cheap ass out of here\")\n player.out = True\n self.total_players_out += 1\n elif player.coins > 10000:\n await self.ctx.send(f\"{self.users[i].name}! 
You\\'re too good, we have to stop you\")\n player.out = True\n self.total_players_out += 1", "def play_nim():\n \n init_piles()\n display_piles()\n while True:\n user_plays()\n display_piles()\n if sum(piles) == 0:\n\n print(\"I let you win this time...\")\n\n break\n computer_plays()\n display_piles()\n if sum(piles) == 0:\n\n print(\"HAHAHAHAHA Lemons like you can never defeat me!!!\")\n\n break", "async def hotpotato(message: discord.Message, participants: int=4):\n await init_game(message, HotPotato, participants)", "def restart(self):\r\n\r\n self.pot = 0\r\n self.actions = 0\r\n self.previous_bet = self.small_blind\r\n self.initiate_blind(self.small_blind + self.big_blind)\r\n\r\n for player in self.players:\r\n player.credits = self.starting_credits\r\n\r\n # Let the first player begin\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n self.players[self.active_player].active = True\r\n\r\n self.players[self.active_player - 1].flip_cards()\r\n self.community_cards.flip_cards()\r\n\r\n self.deck_model = DeckModel()\r\n\r\n for player in self.players:\r\n player.new_cards(self.deck_model)\r\n\r\n output_text = \"Starting game...\\n{} post the big blind [${}]\\n{} post the small blind [${}]\".format(\r\n self.players[(self.active_player + 1) % len(self.players)].name, self.big_blind,\r\n self.players[self.active_player].name, self.small_blind)\r\n\r\n message = \"Player {} won!\".format(self.players[1].name)\r\n self.game_message.emit(message)\r\n\r\n self.new_pot.emit()\r\n self.new_credits.emit()\r\n self.new_output.emit(output_text)", "def Peacekeaper(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def start_game(self):\n\n\t\tpass", "def main():\n number_of_players = get_number_of_players()\n number_of_decks = get_number_of_decks()\n game_data = setup_game(number_of_players)\n\n player_list = game_data[0]\n play_shoe = game_data[2]\n play_dealer = game_data[1]\n play_again = True\n\n while play_again:\n replay = play_game(play_shoe, player_list, play_dealer, number_of_decks)\n if replay:\n play_shoe = replay[1]\n else:\n play_again = False\n \n print(\"Thanks for playing\")", "def doSit(self):\n self.protocol.sendPacket(networkpackets.PacketPokerSit(**self._serial_and_game_id))", "def initial_phase():\n for player in p:\n print(\"\\n\"+player+\" turn:\")\n if player == \"Machine\":\n hands[p[player]].add_card(MY_DECK.deal_cards())\n else:\n hands[p[player]].add_card(MY_DECK.deal_cards())\n hands[p[player]].add_card(MY_DECK.deal_cards())\n print(hands[p[player]])", "def populate_initial_inventory(self):\r\n\r\n weapons_file = open('initial-inventory.json', \"r\")\r\n json_data = json.loads(weapons_file.read())\r\n weapons_file.close()\r\n\r\n weapons = json_data['weapons']\r\n for weapon in weapons:\r\n requests.post(\"http://\" + self.ip_address + \":3000/Weapons\", data=weapon)", "def trainer_party(trainer_id):\n\n db.execute('SELECT * FROM pokemon_party WHERE trainer_id= :trainer_id',{'trainer_id': trainer_id})\n pokemon_trainer_list = db.fetchall() \n\n pokemon_party = []\n #If pokemon party_tranier list is empty prompt them to create their own team\n\n for pokemon in pokemon_trainer_list:\n\n db.execute('SELECT * FROM pokemon WHERE pokedex_id= :pokemon_id',{'pokemon_id':pokemon[2] })\n monster = db.fetchone()\n # print(monster)\n #Pokemon Name\n monster_name = monster[1]\n #Pokemon Level\n monster_level = monster[2]\n #First pokemon type\n monster_type1 = db.execute('SELECT type FROM pokemon_type WHERE id= :id', {'id': 
monster[3]}).fetchone()\n #second pokemon type\n monster_type2 = db.execute('SELECT type FROM pokemon_type WHERE id= :id', {'id': monster[4]}).fetchone()\n #pokemon base hp\n monster_hp = monster[5]\n #pokemon base attack\n monster_atk = monster[6]\n #pokemon base defense\n monster_def = monster[7]\n #pokemon base special attack\n monster_spatk = monster[8] \n #pokemon base special defense\n monster_spdef = monster[9]\n #pokemon base speed\n monster_spe = monster[10]\n\n pkmn = Pokemon(monster_name, monster_level, monster_type1[0], monster_type2[0], monster_hp, monster_atk, monster_def, monster_spatk, monster_spdef, monster_spe)\n #assign all weakness and resistance to pokemon after their creation\n pkmn.pokemon_weak_resist(monster_type1[0],monster_type2[0])\n \n pokemon_party.append(pkmn)\n \n return pokemon_party", "def all():\n lab = test_loading()\n\n for _ in range(1):\n print('🦅🐀🐙')\n\n test_spawn(lab)\n\n pc = test_spawn_player(lab)\n\n while True:\n pc.store_move(PlayerMove(random.choice(['walk left', 'walk up', 'walk down', 'walk right'])))\n test_turn_ai_and_players(lab)\n if input() == '0':\n break" ]
[ "0.67078197", "0.63009113", "0.62193114", "0.6155208", "0.6047284", "0.58103806", "0.5721495", "0.5694783", "0.56641287", "0.5642981", "0.5624466", "0.55981576", "0.55663645", "0.5546739", "0.5529542", "0.55240864", "0.5524027", "0.54824936", "0.5482483", "0.5477886", "0.54761976", "0.5468636", "0.5464636", "0.54586405", "0.5453035", "0.54399365", "0.5436851", "0.5432541", "0.5428713", "0.5427437" ]
0.6804697
0
Returns whether this side has more Pokemon
def hasPokemon(self): return self.trainer.hasPokemon()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasMorePokemon(self):\r\n return self.trainer.hasMorePokemon(self.pkmnInPlay)", "def verify_winner(self):\r\n return self.count_pegs() == 1", "def game_over(self):\n return self.lives() < 0", "def is_game_over(self):\n\n if len(self.next_pieces) == 0:\n return True", "def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)", "def gameOver(self):\n\t\treturn self.lives == 0", "def has_next(self):\n return len(self.pile) > 0", "def no_more_move(self):\n if (self.p_no_move + self.c_no_move == 2):\n return True\n return False", "def has_more_pages(self):\n return self._has_more", "def won(self, vehicles):\n return vehicles[0].x == self.size - 2", "def more(self) -> bool:\n return self.__more", "def has_more(self):\n return bool(self.content and self.preview != self.content)", "def is_over(self):\n return self.game.is_over()", "def is_over(self):\n alive_players = [1 if p.status == \"alive\" else 0 for p in self.players]\n # If only one player is alive, the game is over.\n if sum(alive_players) == 1:\n return True\n\n # If all rounds are finshed\n if self.round_counter >= 2:\n return True\n return False", "def is_hungry(self) -> bool:\n if self.eat_count <= 3:\n return True\n else:\n return False", "def check_pokemon(self, index):\n if index in self._pokemon_location:\n self._num_pokemon -= 1\n return index", "def alive(self):\n\t\treturn any( (ind for ind in self.members if ind.current_hp > 0) )", "def has_won(self):\n return len(self.hand) == 0", "def __game_is_over(self):\n return not (self.__playing and self.__bricks_total > 0 and self.__num_lives > 0)", "def game_over(self):\n\n if self._number_of_moves == 9:\n return True\n\n return self._number_of_moves == 9 or self.winner_found()", "def is_few_remaining(self) -> bool:\n return self.on_hand <= self.warn_limit", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def was_pig_caught(prize):\n if prize > 20:\n return True\n return False", "def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False", "def enough_players():\n return True", "def is_game_over(self):\n return self.state.all_avatars_placed() and self.state.is_game_over()", "def player_has_won(self):\n return len(self._words_guessed) == self._num_words", "def check_game_over(self):\n for piece in self.pieces:\n if not piece.destroyed:\n return False\n print(\"Signal.END\")\n return True", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def has_next(self):\n return self.count < len(self)" ]
[ "0.86962175", "0.66944605", "0.6432077", "0.6378804", "0.62600136", "0.6254641", "0.6234091", "0.6170001", "0.6144419", "0.6124276", "0.6100488", "0.6029803", "0.60084647", "0.6006607", "0.60016185", "0.599479", "0.59779173", "0.5945332", "0.59220695", "0.5919824", "0.58941567", "0.5884506", "0.5875883", "0.58744925", "0.5847023", "0.5826472", "0.581977", "0.57644266", "0.57592446", "0.5739269" ]
0.72014064
1
Returns whether this side has more Pokemon
def hasMorePokemon(self): return self.trainer.hasMorePokemon(self.pkmnInPlay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasPokemon(self):\r\n return self.trainer.hasPokemon()", "def verify_winner(self):\r\n return self.count_pegs() == 1", "def game_over(self):\n return self.lives() < 0", "def is_game_over(self):\n\n if len(self.next_pieces) == 0:\n return True", "def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)", "def gameOver(self):\n\t\treturn self.lives == 0", "def has_next(self):\n return len(self.pile) > 0", "def no_more_move(self):\n if (self.p_no_move + self.c_no_move == 2):\n return True\n return False", "def has_more_pages(self):\n return self._has_more", "def won(self, vehicles):\n return vehicles[0].x == self.size - 2", "def more(self) -> bool:\n return self.__more", "def has_more(self):\n return bool(self.content and self.preview != self.content)", "def is_over(self):\n return self.game.is_over()", "def is_over(self):\n alive_players = [1 if p.status == \"alive\" else 0 for p in self.players]\n # If only one player is alive, the game is over.\n if sum(alive_players) == 1:\n return True\n\n # If all rounds are finshed\n if self.round_counter >= 2:\n return True\n return False", "def is_hungry(self) -> bool:\n if self.eat_count <= 3:\n return True\n else:\n return False", "def check_pokemon(self, index):\n if index in self._pokemon_location:\n self._num_pokemon -= 1\n return index", "def alive(self):\n\t\treturn any( (ind for ind in self.members if ind.current_hp > 0) )", "def has_won(self):\n return len(self.hand) == 0", "def __game_is_over(self):\n return not (self.__playing and self.__bricks_total > 0 and self.__num_lives > 0)", "def game_over(self):\n\n if self._number_of_moves == 9:\n return True\n\n return self._number_of_moves == 9 or self.winner_found()", "def is_few_remaining(self) -> bool:\n return self.on_hand <= self.warn_limit", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def was_pig_caught(prize):\n if prize > 20:\n return True\n return False", "def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False", "def enough_players():\n return True", "def is_game_over(self):\n return self.state.all_avatars_placed() and self.state.is_game_over()", "def player_has_won(self):\n return len(self._words_guessed) == self._num_words", "def check_game_over(self):\n for piece in self.pieces:\n if not piece.destroyed:\n return False\n print(\"Signal.END\")\n return True", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def has_next(self):\n return self.count < len(self)" ]
[ "0.72014064", "0.66944605", "0.6432077", "0.6378804", "0.62600136", "0.6254641", "0.6234091", "0.6170001", "0.6144419", "0.6124276", "0.6100488", "0.6029803", "0.60084647", "0.6006607", "0.60016185", "0.599479", "0.59779173", "0.5945332", "0.59220695", "0.5919824", "0.58941567", "0.5884506", "0.5875883", "0.58744925", "0.5847023", "0.5826472", "0.581977", "0.57644266", "0.57592446", "0.5739269" ]
0.86962175
0
Takes a single column h2o frame and converts it into a numpy array
def _as_numpy(_1d_h2o_frame):\n    f = _check_is_1d_frame(_1d_h2o_frame)\n    nm = str(f.columns[0])\n    return f[nm].as_data_frame(use_pandas=True)[nm].values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_numpy_array(self):\n return self.frame", "def to_numpy(self) -> np.ndarray:\n return self.frame", "def df_to_array(datasample):\r\n return np.array(datasample)", "def get_2Darray_hdf5(file,cols='Null',nrows='Null',verbose=False):\n if verbose:\n print (\"reading data from hdf5 file {} for filters:\".format(file))\n for col in cols:\n print(col)\n df = pd.read_hdf(file,\"df\")\n smalldf = df.loc[:,cols]\n outarray = smalldf.values #if we switch to pandas 0.24 or higher\n #this could be replaced with smalldf.to_numpy()\n return outarray", "def _numpy(content):\n response = _data_frame(content).values\n return response", "def extract_letor_arrays(frame):\n # the last two columns are y and ids, so don't include them\n if len(frame.columns) > 2:\n X = frame[frame.columns[:-2]].values\n else:\n X = None\n y = frame['y'].values\n ids = frame['ids'].values\n return X, y, ids", "def _convert_to_array(stream):\n if not isinstance(stream, Stream):\n raise TypeError('Input object should be an obspy stream.')\n\n nt = len(stream.traces[0].data)\n nr = len(stream)\n output = np.zeros((nt, nr))\n\n #hey, this is pretty much what's in Stream: turple list, (number, trace) \n #trace has the attribute of data. The data can be regarded to a column of data. \n for i, trace in enumerate(stream):\n output[:, i] = trace.data[:]\n\n return output", "def row_to_array(r):\n a = np.ma.array([i for i in r.as_void()])\n return a", "def _to_ndarray(data):\n return np.atleast_1d(getattr(data, 'values', data))", "def _df_meta_to_arr(df):\n\n if len(df.columns):\n if isinstance(df.columns[0], str):\n columns = df.columns.values.astype(\"S\")\n else:\n columns = df.columns.values\n else:\n columns = []\n\n if len(df.index):\n if isinstance(df.index[0], str):\n index = df.index.values.astype(\"S\")\n else:\n index = df.index.values\n else:\n index = []\n\n return columns, index", "def to_array(self):\n return np.array(self.to_image())", "def _to_numpy_ndarray(cls, data):\n if isinstance(data, np.ndarray):\n return data\n arr = np.array(data, dtype=np.float)\n if len(arr.shape) == 1:\n arr = np.reshape(arr, newshape=(1, arr.shape[0]))\n return arr", "def convert_to_numpy(color_frame, aligned_depth_frame):\n depth_image = np.asanyarray(aligned_depth_frame.get_data())\n frame = np.asanyarray(color_frame.get_data())\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n return frame, depth_image", "def convert_dataframe_to_array(df_or_series):\n if isinstance(df_or_series, pd.DataFrame) or isinstance(\n df_or_series, pd.Series\n ):\n dat = np.array(df_or_series)\n if len(dat.shape) == 1:\n return dat[:, np.newaxis]\n else:\n return dat\n if isinstance(df_or_series, np.ndarray):\n return df_or_series\n else:\n raise TypeError(\n f\"InputData error:\\n\"\n f\"type should be of np.ndarray and is currently type: {type(df_or_series)}\"\n )", "def sparse_matrix_to_array(data_frame, sparse_column):\n array = data_frame[[sparse_column]]\n array[sparse_column] = array[sparse_column].apply(lambda x: x.toarray())\n array[sparse_column] = array[sparse_column].apply(lambda x: x[0])\n array = np.stack(array[sparse_column].values, axis=0) # over write array df as an np array\n return array", "def config_to_array(data):\n return np.array(data[\"data\"]).reshape(data[\"rows\"], data[\"cols\"])", "def load_edgl_as_array(fname):\n df = pd.read_csv(fname, sep=\" \", header=None, usecols=[0, 1])\n return df.to_numpy(dtype=np_ncount_t)", "def get_data_matrix(df):\n return df[[\"Open\", \"High\", 'Low', \"Close\"]].to_numpy()", "def 
convert_array(blob):\n out = io.BytesIO(blob)\n out.seek(0)\n\n return np.load(out)", "def toarray(self, order=None, out=None):\n d = self._process_toarray_args(order, out)\n for i, row in enumerate(self.rows):\n for pos, j in enumerate(row):\n d[i, j] = self.data[i][pos]\n return d", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def asc_to_ndarray(fname, path='./'):\r\n header, headlines = get_header(fname, path)\r\n f = file(path+fname,'r')\r\n \r\n for i in xrange(headlines):\r\n f.readline() \r\n \r\n ncols = header['ncols']\r\n nrows = header['nrows']\r\n cellsize = header['cellsize']\r\n \r\n long = header['xllcorner'] + np.arange(ncols) * cellsize\r\n lat = header['yllcorner'] + np.arange(nrows) * cellsize\r\n data = np.zeros((nrows, ncols), dtype=float)\r\n for i in xrange(nrows):\r\n line = f.readline()\r\n data[i,:] = np.fromstring(line, dtype=float, sep=' ')\r\n # print data.shape, nrows, ncols\r\n f.close()\r\n \r\n if header.has_key('NODATA_value'):\r\n data = np.ma.masked_array(data, mask=data==header['NODATA_value'])\r\n \r\n return long, lat, data", "def to_numpy(self) -> np.ndarray:\n log_advice(\n \"`to_numpy` loads all data into the driver's memory. \"\n \"It should only be used if the resulting NumPy ndarray is expected to be small.\"\n )\n return cast(np.ndarray, self._to_pandas().values)", "def to_numpy(data):\n fields = [\n \"x\", \"y\", \"z\",\n \"proximity\"\n ]\n return np.array([[row[field] for field in fields] for row in data])", "def get_array(obj, col=None):\n if isinstance(obj, Series) and (col is None or obj.name == col):\n arr = obj._values\n else:\n assert col is not None\n icol = obj.columns.get_loc(col)\n assert isinstance(icol, int)\n arr = obj._get_column_array(icol)\n if isinstance(arr, BaseMaskedArray):\n return arr._data\n return arr", "def as_2d_array(theta):\n v = theta.view(np.float)\n N = theta.shape[0]\n v.shape = (N, - 1)\n # raise an error if v cannot be reshaped without creating a copy\n return v", "def convert_array(self,blob):\n import io\n import array,numpy\n out = io.BytesIO(blob)\n out.seek(0)\n\n return numpy.load(out)", "def hrsl_to_numpy(ds: xr.Dataset) -> np.ndarray:\n array = ds.transpose(\"lat\", \"lon\").to_array().values\n return np.squeeze(array, 0)", "def convert_to_ndarray(entity):\n if isinstance(entity, np.ndarray) and entity.dtype.kind in set('biufc'):\n # entity is numerical ndarray already\n return entity\n if isinstance(entity, np.ndarray) and isinstance(entity.flat[0], qt.Qobj):\n # entity is output from qt.eigenstates\n return convert_esys_to_ndarray(entity)\n if isinstance(entity, list) and isinstance(entity[0], np.ndarray) and isinstance(entity[0].flat[0], qt.Qobj):\n # entity is a list of qt.eigenstates\n return np.asarray([convert_esys_to_ndarray(entry) for entry in entity])\n # possibly we have a list of numerical values or a list of ndarrays\n converted_entity = np.asarray(entity)\n if converted_entity.dtype.kind not in set('biufc'):\n raise TypeError('Unable to convert data to numerical numpy array: ', entity)\n return converted_entity", "def read_data(path):\n with h5py.File(path, 'r') as hf:\n data = np.array(hf.get('data'))\n label = np.array(hf.get('label'))\n\n data, label=data[:,:,:,0:2], label[:,:,:,0]\n #data=np.expand_dims(data,axis=-1)\n label=np.expand_dims(label,axis=-1)\n\n return data, label" ]
[ "0.64757276", "0.64702165", "0.64014775", "0.6242585", "0.6235455", "0.62248886", "0.6215187", "0.6193573", "0.60600567", "0.60081595", "0.5974779", "0.5886654", "0.5854119", "0.5842756", "0.57685006", "0.5767864", "0.57593876", "0.5755051", "0.5754591", "0.5703501", "0.5701865", "0.5700548", "0.5677197", "0.5673279", "0.56686425", "0.56635106", "0.56416774", "0.56262106", "0.5609914", "0.5606012" ]
0.8304878
0
Construct a firecrest object
def test_firecrest(self):\n    f = firecrest.firecrest(self.firecrest_dir)\n    self.assertEqual(f.software, 'Firecrest')\n    self.assertEqual(f.version, '1.8.28')\n    self.assertEqual(f.start, 1)\n    self.assertEqual(f.stop, 33)\n    self.assertEqual(f.user, 'diane')\n    self.assertEqual(f.date, date(2008,4,12))\n    xml = f.get_elements()\n    # just make sure that element tree can serialize the tree\n    xml_str = ElementTree.tostring(xml)\n    f2 = firecrest.Firecrest(xml=xml)\n    self.assertEqual(f.software, f2.software)\n    self.assertEqual(f.version, f2.version)\n    self.assertEqual(f.start, f2.start)\n    self.assertEqual(f.stop, f2.stop)\n    self.assertEqual(f.user, f2.user)\n    self.assertEqual(f.date, f2.date)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_constructor(self):\n pass", "def test_create(self):\n filter = Bleach()\n self.assertIsInstance(filter, Bleach)", "def test_create_from_pear(self):\n pass", "def test_constructors(self, name, obj):\n assert getattr(forge, name) == obj", "def test_constructors(self, name, obj):\n assert getattr(forge, name) == obj", "def test_init(self):\n # the self.assertEqual() checks fofr an expected result\n # We are checking if the name and description of our new object is what we actually put\n self.assertEqual(self.new_contact.first_name, \"James\")\n self.assertEqual(self.new_contact.last_name, \"Muriuki\")\n self.assertEqual(self.new_contact.phone_number, \"0712345678\")\n self.assertEqual(self.new_contact.email, \"[email protected]\")", "def get_factory():", "def test_instantiation(self):\n occurrence = Occurrence()\n self.assertTrue(occurrence)", "def __init__(self, fire):\n self.scene = fire.scene\n self.params = None\n self.fire = fire\n self.obstacles = self.speed_ref = self.smoke = None\n self.smoke_field = self.sparse_disc_matrix = None\n self.source = None", "def __init__(self, *args):\n self.args = args\n self.matchers = []\n for a in args:\n if a is _:\n a = lambda k: True\n elif isinstance(a, basestring):\n a = a.__eq__\n elif isinstance(a, (list, tuple, set)):\n a = (lambda ary: (lambda k: k in ary))(a)\n elif hasattr(a, 'search'):\n a = a.search\n else:\n a = str(a).__eq__\n self.matchers.append(a)", "def __init__(self, expected, test_func):\n self._f = test_func\n self._exp = expected", "def __init__(self, matcher, generate):\n self.matcher = matcher\n self._generate = generate", "def test_init_with_fire_villan(self):\n pass", "def test_delivery_factory_class():\n # __init__()\n factory = DeliveryFactory()\n order = Order(1)\n file = \"This is a file.\"\n\n expected_uber = UberEatsDelivery(order, file)\n expected_foodora = FoodoraDelivery(order, file)\n expected_delivery = Delivery(order, \"not uber or foodora\")\n\n assert factory.create_delivery(order, UBER_EATS, file).get_deliverer() == \\\n expected_uber.get_deliverer()\n assert factory.create_delivery(order, FOODORA, file).get_deliverer() == \\\n expected_foodora.get_deliverer()\n assert factory.create_delivery(order, \"not uber or foodora\", file).\\\n get_deliverer() == expected_delivery.get_deliverer()", "def test_factory_methods(self):\n\n po = ProjectObject.gen_bounding_box_object(id=\"1\", bounds=self.bounds)\n self.assertEqual(po.project_type, \"bounding_box\")\n self.assertAlmostEqual(po.bounds, self.bounds)\n self.assertEqual(po.id, \"1\")\n\n po = ProjectObject.gen_voxels_object(id=\"2\", voxels=self.voxels)\n self.assertEqual(po.project_type, \"voxels\")\n self.assertAlmostEqual(po.voxels.bounds(), self.voxels.bounds())\n self.assertEqual(po.id, \"2\")\n\n po = ProjectObject.gen_meshes_object(id=\"3\", meshes=self.meshes)\n self.assertEqual(po.project_type, \"meshes\")\n self.assertEqual(\n po.meshes.num_primitive_meshes(), self.meshes.num_primitive_meshes()\n )\n self.assertEqual(po.id, \"3\")", "def __init__(self, op_test, expect_dict):\n self.expects = expect_dict\n self.checker_name = \"checker\"\n self.op_test = op_test # stop the op_test object.\n self.op_type = op_test.op_type", "def test_constructor(self):\n # Args\n name = 'Mathew'\n age = 13\n grade = 14\n\n # Object construction\n obj = models.Student(name=name, age=age, grade=grade)\n # Constructor class works\n self.assertIsNotNone(obj)\n self.assertEqual(obj.name, name)\n self.assertEqual(obj.grade, grade)", "def 
test_create_factory_method():\n date_time = datetime.utcnow()\n build_results = BuildResults.create(\n job_name=\"my_jobname\",\n job_link=\"my_joburl\",\n build_date_time=str(date_time),\n build_id=\"1234\",\n platform=\"Linux-x86_64\",\n product=\"MyProduct\",\n )\n\n assert build_results.br_job_name == \"my_jobname\"\n assert build_results.br_job_url_key == \"my_joburl\"\n assert build_results.br_build_date_time == str(date_time)\n assert build_results.br_build_id_key == \"1234\"\n assert build_results.br_platform == \"Linux-x86_64\"\n assert build_results.br_product == \"MyProduct\"\n assert build_results.br_version_key == ebr_connector.__version__\n\n assert build_results.to_dict() == {\n \"br_build_date_time\": str(date_time),\n \"br_build_id_key\": \"1234\",\n \"br_job_name\": \"my_jobname\",\n \"br_job_url_key\": \"my_joburl\",\n \"br_platform\": \"Linux-x86_64\",\n \"br_product\": \"MyProduct\",\n \"br_version_key\": ebr_connector.__version__,\n }", "def test_instantiation(self):\n rule = Rule()\n self.assertTrue(rule)", "def test_instantiation(self):\n event = Event(\n Guild(12345),\n 'Some title',\n datetime(2020, 10, 10, 10, 10, tzinfo=utc),\n 'Some description')\n self.assertEqual(event.guild.id, 12345)\n self.assertEqual(event.title, 'Some title')\n self.assertEqual(event.date, datetime(2020, 10, 10, 10, 10, tzinfo=utc))\n self.assertEqual(event.description, 'Some description')", "def create_matcher(self):\n self.matcher = None\n if \"matcher\" in self.config:\n self.matcher = matcher.Matcher(self.config[\"matcher\"])\n else:\n self.matcher = matcher.TrueMatcher()\n \n self.use_fields_for_id = []\n if \"matcherfield\" in self.config:\n self.use_fields_for_id = self.config[\"matcherfield\"].split(\",\")\n \n if \"clear\" in self.config:\n self.clear_matcher = matcher.Matcher(self.config[\"clear\"])\n self.autoclear = self.auto_acknowledge\n else:\n self.clear_matcher = matcher.FalseMatcher()\n self.autoclear = False", "def test_init(self):\r\n x = self.FWP({'x': 3})\r\n self.assertEqual(x.Name, 'FWP')\r\n self.assertEqual(x.Params, {'x': 3})", "def create(cls, _):\n return cls", "def test_constructor(self):\n assert self.parser is not None", "def make_test_object(self):\n return self.orm_cls.testing_create()", "def __init__(self, name, contained_by, expected, update):\n self._name = name\n self._contained_by = contained_by\n self._expected = expected\n self._update = update\n self._value = None", "def make_object():\n return object()", "def test_create_faction(self):\n faction = self.faction\n\n self.assertTrue(isinstance(faction, Faction))\n self.assertEqual(faction.name, \"Test Faction\")", "def test_constructor(self, name, num_petals, price):\n with pytest.raises(AssertionError):\n chap2.Flower(name, num_petals, price)", "def construct_persona(x):\n return Persona(x)" ]
[ "0.6093629", "0.5956207", "0.5937157", "0.58960754", "0.58960754", "0.56730235", "0.5631295", "0.5618941", "0.56151974", "0.5607867", "0.5604231", "0.5564176", "0.5553448", "0.5520033", "0.5516942", "0.549883", "0.54632604", "0.545859", "0.54328346", "0.5432628", "0.5422888", "0.5406658", "0.53809786", "0.5374341", "0.53593874", "0.5358258", "0.5355362", "0.5333878", "0.53255725", "0.5324901" ]
0.71082515
0
construct a bustard object
def test_bustard(self):\n    b = bustard.bustard(self.bustard_dir)\n    self.assertEqual(b.software, 'Bustard')\n    self.assertEqual(b.version, '1.8.28')\n    self.assertEqual(b.date, date(2008,4,12))\n    self.assertEqual(b.user, 'diane')\n    self.assertEqual(len(b.phasing), 8)\n    self.assertAlmostEqual(b.phasing[8].phasing, 0.0099)\n    xml = b.get_elements()\n    b2 = bustard.Bustard(xml=xml)\n    self.assertEqual(b.software, b2.software)\n    self.assertEqual(b.version, b2.version)\n    self.assertEqual(b.date, b2.date)\n    self.assertEqual(b.user, b2.user)\n    self.assertEqual(len(b.phasing), len(b2.phasing))\n    for key in b.phasing.keys():\n        self.assertEqual(b.phasing[key].lane, b2.phasing[key].lane)\n        self.assertEqual(b.phasing[key].phasing, b2.phasing[key].phasing)\n        self.assertEqual(b.phasing[key].prephasing, b2.phasing[key].prephasing)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__ (self, pseudo, bag, position, jeu):\n self.pseudo = pseudo\n self.bag = bag\n self.position = position\n self.jeu = jeu", "def __init__(self, length, breadth, height, producingAPI):\n\n\t\tself.length = length\n\t\tself.breadth = breadth\n\t\tself.height = height\n\n\t\tself.producingAPI = producingAPI", "def __init__(self, name, barcode, quantity):\n self.name = name;\n self.barcode = barcode;\n self.quantity = quantity;", "def __init__(\n self,\n name: str,\n yard_id: int,\n garden_id: Optional[int] = None,\n ) -> None:\n logger.debug(\n f'Executing Bed __init__ with name: {name},'\n f'yard_id: {yard_id} and garden_id: {garden_id}',\n )\n self._name = name\n self._yard_id = yard_id\n if garden_id:\n self._garden_id = garden_id", "def __init__(self, brand=\"\", purchase_price=float, length=int):\r\n self.brand = brand\r\n self.purchase_price = purchase_price\r\n self.length = length", "def create(request: BedRequestCreate) -> Bed:\n if request.garden_id:\n bed = Bed(request.name, request.yard_id, request.garden_id)\n else:\n bed = Bed(request.name, request.yard_id)\n return bed", "def __init__(self, rings=False, branches=False):\n self.rings = rings\n self.branches = branches", "def __init__(self):\n this = _libsbml.new_SBO()\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, model):\n aaa\n self.model = model\n\n #: the list of CROD cards\n self._crod = []\n\n #: the list of CONROD cards\n self._conrod = []\n\n self._crod_comment = []\n self._conrod_comment = []\n\n self.crod = CROD(self.model)\n self.conrod = CONROD(self.model)", "def __init__(self, cardname, amount):\n self.cardname = str(cardname)\n self.amount = int(amount)", "def __init__(self, name, type, stash):\n self.name = name\n self.type = type\n self.hand = [stash.deal_domino() for i in range(7)]", "def __init__(self, card_one, from_split=False, player=None, **kwargs):\n if isinstance(card_one, Card):\n self.card_one = card_one\n else:\n raise TypeError(\"'card_one' must be a Card object.\")\n\n if player:\n if isinstance(player, Player):\n self.player = player\n else:\n raise TypeError(\"'player' must be a Player object.\")\n self.wager = self.player.wager(**kwargs)\n self.cards = [card_one]\n self.split = False\n self.soft = card_one.rank == 14\n self.stand = False\n self.bust = False\n self.blackjack = False\n self.from_split = from_split\n self.insurance = False\n self.total = card_one.value\n self.surrender = False\n self.double_down = False\n # this is used to determine whether to add 11 or 1 when delt an ace\n self.non_ace_total = 0\n self.num_aces = 1 * self.soft\n self.num_hard_aces = self.num_aces", "def __init__(self, a_bcc=None, a_fcc=None, symbol=None):\n\n # Set symbol information\n self.symbol = symbol\n \n # Set a_bcc and a_fcc\n if a_bcc is None:\n if a_fcc is not None:\n a_bcc = a_fcc * (2/3)**0.5\n else:\n raise ValueError('At least one of a_bcc and a_fcc is required')\n elif a_fcc is None:\n a_fcc = a_bcc * (3/2)**0.5\n\n self.a_bcc = a_bcc\n self.a_fcc = a_fcc", "def make_bb_object(name, data):\n global BLACKBOARD, TRACE_LEVEL\n bb_obj = BB_object(name, data)\n if TRACE_LEVEL > 2:\n print \"\\tCreating {0} object: {1}\".format( type(data), bb_obj )\n BLACKBOARD[name] = bb_obj\n signal_creation_event(bb_obj)\n return bb_obj", "def __init__(self,make,model,year):\n super().__init__(make,model,year)\n # adicionando atributo especifico dessa classe\n self.batery_size = Batery(100)", "def __init__(self):\n self.name = '{0} {1}'.format(choice(stars), 
choice(self.__class__.planets))\n self.casteOrder = (list(self.__class__.castes))\n shuffle(self.casteOrder)\n self.tech = choice(self.__class__.techTiers)\n self.genesis = choice(self.__class__.genesisReasons)\n self.description = ''\n self.attributes = '{0} ~ ruled by {1} ~ founded to {2}'.format(self.tech, self.casteOrder[0], self.genesis)", "def __init__(self, sid, node, mag, g1, g2, comment=''):\n BaseCard.__init__(self)\n if comment:\n self.comment = comment\n self.sid = sid\n self.node = node\n self.mag = mag\n self.g1 = g1\n self.g2 = g2\n self.node_ref = None\n self.g1_ref = None\n self.g2_ref = None\n self.xyz = None", "def __init__(self, name, address, phone, badge, salary):\r\n\r\n self.name = name\r\n self.address = address\r\n self.phone = phone\r\n self.badge = badge\r\n self.salary = salary", "def __init__(self, bank_name, account_num, balance):\n self._bank_name = bank_name\n self._account_num = account_num\n self._balance = balance", "def __init__(self, color = 'black', name = 'backpack', max_size = 5):\n # Assign the backpack a color.\n self.color = color\n self.equal = False\n # Assign the backpack a name.\n self.name = name\n # Assign the backpack a maximum size\n self.max_size = max_size\n # Create a list to store the contents of the backpack.\n self.contents = []", "def add_card(cls, card, baror=None, comment=''):\n eid = integer(card, 1, 'eid')\n pid_default = eid\n x1_default, x2_default, x3_default = 0., 0., 0.\n offt_default = 'GGG'\n if baror is not None:\n if baror.pid is not None:\n pid_default = baror.pid\n if baror.x is None:\n x1_default = baror.g0\n x2_default = None\n x3_default = None\n else:\n x1_default, x2_default, x3_default = baror.x\n offt_default = baror.offt\n\n pid = integer_or_blank(card, 2, 'pid', pid_default)\n ga = integer(card, 3, 'ga')\n gb = integer(card, 4, 'gb')\n x, g0 = init_x_g0(card, eid, x1_default, x2_default, x3_default)\n\n # doesn't exist in NX nastran\n offt = integer_string_or_blank(card, 8, 'offt', offt_default)\n #print('cls.offt = %r' % (cls.offt))\n\n pa = integer_or_blank(card, 9, 'pa', 0)\n pb = integer_or_blank(card, 10, 'pb', 0)\n\n wa = np.array([double_or_blank(card, 11, 'w1a', 0.0),\n double_or_blank(card, 12, 'w2a', 0.0),\n double_or_blank(card, 13, 'w3a', 0.0)], dtype='float64')\n\n wb = np.array([double_or_blank(card, 14, 'w1b', 0.0),\n double_or_blank(card, 15, 'w2b', 0.0),\n double_or_blank(card, 16, 'w3b', 0.0)], dtype='float64')\n assert len(card) <= 17, f'len(CBAR card) = {len(card):d}\\ncard={card}'\n return CBAR(eid, pid, [ga, gb], x, g0,\n offt, pa, pb, wa, wb, comment=comment)", "def __init__(self):\n # initialize a bird to default values.\n self.set_instance_vars()\n\n # randomize some parameters, such as starting height\n self.pos_y = self.random_height()\n\n # tag each bird\n\n self.identifier = Bird.num_birds\n\n # create ai net for each bird\n self.initialize_ai()\n\n # increment Bird counter\n Bird.num_birds += 1\n\n # remember time of birth\n self.birth_time = 0", "def __init__(self, data, b):\n self.data = data\n self.base = b", "def __init__(self, name, color, age):\n self.name = name\n self.color = color\n self.age = age\n self.breed = \"something\"", "def __init__(self, number: str, suit: str) -> None:\n self.number = number\n self.suit = suit", "def __init__(self, bus: int, /):", "def __init__(self, obj, bone_name, params):\n self.obj = obj\n self.params = params\n\n self.org_bones = list(\n [bone_name] + connected_children_names(obj, bone_name)\n )[:3] # The basic limb is the first 3 
bones\n\n self.segments = params.segments\n self.bbones = params.bbones\n self.limb_type = params.limb_type\n self.rot_axis = params.rotation_axis\n self.auto_align_extremity = params.auto_align_extremity\n\n # Assign values to tweak/FK layers props if opted by user\n if params.tweak_extra_layers:\n self.tweak_layers = list(params.tweak_layers)\n else:\n self.tweak_layers = None\n\n if params.fk_extra_layers:\n self.fk_layers = list(params.fk_layers)\n else:\n self.fk_layers = None", "def __init__(self, brand=\"\", purchase_price=float, length=int, name=\"\", typ=\"\"):\r\n super().__init__(brand, purchase_price, length)\r\n self.name_carver = name\r\n self.typ = typ", "def __init__(self, cards):\n self.cards = cards", "def __init__(self,make,model,year):\r\n\t\tsuper().__init__(make,model,year)\r\n\t\tself.battery = Battery()" ]
[ "0.61758626", "0.6070561", "0.6049549", "0.5977584", "0.59720093", "0.5921451", "0.59111565", "0.5895199", "0.58890486", "0.584525", "0.58363193", "0.5829075", "0.58265615", "0.5792226", "0.57836425", "0.570507", "0.5696389", "0.56958777", "0.56604916", "0.5656449", "0.56563854", "0.5653025", "0.5650236", "0.564843", "0.56428236", "0.5640933", "0.5639214", "0.5632185", "0.5625552", "0.56140727" ]
0.6578043
0
Initialize an instance of BpMainWindow.
def __init__(self, parent=None):\n    # Inherited from QMainWindow\n    if sys.platform == 'darwin':\n        # Workaround for Qt issue on OS X that causes QMainWindow to\n        # hide when adding QToolBar, see\n        # https://bugreports.qt-project.org/browse/QTBUG-4300\n        super(BpMainWindow, self).__init__(parent, Qt.MacWindowToolBarButtonHint)\n    else:\n        super(BpMainWindow, self).__init__(parent)\n    # temporary variable\n    self._temp_dir = None\n    self.is_save_configure = False\n    # pre-define a model variable\n    self.model = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n self.setWindowTitle(\"RPI HMI - pH Debug\") # Title creation", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)", "def __init__(self):\n self.app = qt.QApplication(sys.argv)\n self.window = qt.QMainWindow()\n self.screenSize = qt.QDesktopWidget().screenGeometry(-1)\n self.window.setGeometry(self.getDims()[1]/4, self.getDims()[0]/4, self.getDims()[1]/2, self.getDims()[0]/2)", "def create(self, parent):\n self.widget = _QMainWindow(parent)", "def __init__(self):\n self.window = Tk() # The main window\n self.__initialize_variables__() # Initialize the variables\n self.__initialize_menu__() # Initialize the Menu\n self.__initialize_status_bar__()\n self.__initialize_gui__() # Initialize the GUI widgets", "def _prep_window(self, parent=None):\n self.toolkit.app.initialize()\n if not self.initialized:\n self.setup(parent)\n self.resize_to_initial()\n self.update_minimum_size()\n self.update_maximum_size()", "def __init__(self, **kwargs):\n parent = ArgsUtils.extract('parent', None, kwargs)\n self._application = ArgsUtils.extract('pyGlassApp', None, kwargs)\n self._qApplication = ArgsUtils.extract('qApp', None, kwargs)\n self._isMainWindow = ArgsUtils.extract('isMainWindow', bool(parent is None), kwargs)\n self._mainWindow = ArgsUtils.extract('mainWindow', None, kwargs)\n self._centerWidget = None\n\n self._keyboardCallback = ArgsUtils.extract('keyboardCallback', None, kwargs)\n\n if not self._mainWindow:\n if self._isMainWindow:\n self._mainWindow = self\n elif self._application:\n self._mainWindow = self._application.mainWindow\n\n self._dependentWindows = []\n self._currentWidget = None\n\n QtGui.QMainWindow.__init__(self, parent, ArgsUtils.extract('flags', 0, kwargs))\n\n if self._keyboardCallback is not None:\n self.setFocusPolicy(QtCore.Qt.StrongFocus)\n\n if self._isMainWindow:\n self._log = Logger(self, printOut=True)\n self._config = ApplicationConfig(self)\n self._commonConfig = ApplicationConfig(self, common=True)\n self._resourceFolderParts = PyGlassGuiUtils.getResourceFolderParts(self)\n\n icon = PyGlassGuiUtils.createIcon(\n ArgsUtils.get('iconsPath', self.getAppResourcePath('icons', isDir=True), kwargs) )\n if icon:\n self.setWindowIcon(icon)\n\n elif self._mainWindow:\n icon = self._mainWindow.windowIcon()\n if icon:\n self.setWindowIcon(icon)\n\n # Loads the ui file if it exists\n hasWindowFile = ArgsUtils.get('mainWindowFile', False, kwargs)\n if hasWindowFile:\n if not self._centerWidget:\n self._createCentralWidget()\n UiFileLoader.loadWidgetFile(self, target=self._centerWidget)\n\n self._styleSheet = ArgsUtils.get('styleSheet', None, kwargs)\n if self._styleSheet:\n self.setStyleSheet(self.styleSheetPath)\n\n # Sets a non-standard central widget\n centralWidgetName = ArgsUtils.get('centralWidgetName', None, kwargs)\n if centralWidgetName and hasattr(self, centralWidgetName):\n self._centerWidget = getattr(self, centralWidgetName)\n elif not hasWindowFile:\n self._centerWidget = None\n if ArgsUtils.get('defaultCenterWidget', False, 
kwargs):\n self._createCentralWidget()\n\n self._lastWidgetID = None\n self._widgetParent = None\n self._widgets = None\n self._widgetFlags = None\n\n self._widgetClasses = ArgsUtils.get('widgets', None, kwargs)\n if self._widgetClasses:\n self._initializeWidgetChildren()\n else:\n self._widgetClasses = dict()\n\n self.setWindowTitle(ArgsUtils.get('title', self._createTitleFromClass(), kwargs))\n self.updateStatusBar()", "def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Ciné Club\")\n self.setup_ui() # Ajout des Widgets.\n self.populate_movies()\n self.setup_connexions() # Création des connexion entre widgets.", "def initialize(self):\n self.setWindowTitle(\"Playlist Maker\")\n self.setGeometry(0,0, 800, 494)\n self.mbox = QVBoxLayout()\n self.hbox = QHBoxLayout()\n self.hbtnbox = QHBoxLayout()", "def __init__(self):\n if not QApplication.instance():\n self.app = QApplication([])", "def __init__(self, parent=None):\n self._window = None\n\n self.setup_ui()", "def init(self):\n sg.theme(gui.app_theme)\n self.window = sg.Window(\n gui.app_title,\n gui.create_layout(),\n **gui.window_config,\n )\n gui.after_window_init(self.window)", "def __init__(self, observers):\n super().__init__()\n # make main window\n self.mainWidget = QWidget()\n self.setCentralWidget(self.mainWidget)\n self.setWindowTitle(\"RC GUI\")\n\n # important for setting locations of QWidgets\n self.observers = observers\n\n self.initializeLayout()\n self.mainWidget.setLayout(self.layout)\n print(\"done RC GUI creation\")", "def __init__( self, window_size=QSize( DEFAULT_H_SIZE, DEFAULT_V_SIZE ) ):\n super().__init__()\n\n self.centralWidget = None\n self.window_size = window_size\n\n self.create_models()\n self.create_widgets()\n self.create_layout()\n self.create_menus()\n self.set_state()", "def __init__(self, inWindowTitleStr):\n super(MainWindow, self).__init__()\n self._mainWorkspace = None\n\n self.setWindowTitle(inWindowTitleStr)\n self.setGeometry(500, 100, 700, 900)\n\n self.mainWorkspace = workspace.WorkSpace(parent=self)", "def __init__(self):\n Form, Window = uic.loadUiType(\"Visuals/QtFiles/ConfirmationMenu.ui\")\n self.window = Window()\n self.form = Form()\n self.form.setupUi(self.window)\n self.centre = self.window.findChild(QWidget, \"centralwidget\")", "def initialise_window(self):\n self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)\n self.imageLabel.setScaledContents(True)\n self.scrollArea.setWidget(self.imageLabel)\n self.setCentralWidget(self.scrollArea)\n self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable horizontal scrollbar.\n self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable vertical scrollbar.\n self.setWindowTitle(\"Robot Map\") # Set title.\n self.showFullScreen() # Make fullscreen.", "def init_app(self):\n\n self.setAttribute(PyQt5.QtCore.Qt.WA_DeleteOnClose)\n self.setGeometry(300, 300, 500, 550)\n self.setWindowTitle(\"DICOM Viewer\")", "def initUI(self):\n \n self.setWindowTitle(\"Intecol Flir camera\")\n self.setGeometry(300, 100, 1012, 622)", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def __init__(self,*args, **kwargs):\n # super(FCmbMainWindow, self).__init__(*args, **kwargs)\n # self.setParent(mayaMainWindow) # ���´�������Ϊmaya���Ӽ�\n # 
self.setWindowFlags(Qt.Window)\n # self.setupUi(self) # ���и����ʼ������\n # self.connectSignals()\n\n\n super(FCmbMainWindow, self).__init__(*args, **kwargs)\n self.setParent(mayaMainWindow)\n self.setWindowFlags(Qt.Window)\n self.setupUi(self)", "def initialize(self):\n super(QtMainWindow, self).initialize()\n self.update_menu_bar()", "def initGui(self):\r\n\r\n # Create help action \r\n self.helpAction = QAction( QIcon(\":/plugins/layercombinations/about.png\"), u\"Help\", self.iface.mainWindow())\r\n # connect the action \r\n self.helpAction.triggered.connect( self.showHelp )\r\n # Add menu item\r\n self.iface.addPluginToMenu(u\"&Layer Combinations\", self.helpAction)\r\n\r\n # Create the action that allows to change the widget type\r\n self.changeWidgetAction = QAction(\"Change widget type\", self.iface.mainWindow())\r\n self.changeWidgetAction.triggered.connect( self.changeWidget )\r\n self.iface.addPluginToMenu(u\"&Layer Combinations\", self.changeWidgetAction)\r\n\r\n # Create the action that will toggle the plugin panel\r\n self.action = QAction(QIcon(\":/plugins/layercombinations/icon.png\"), \"Show/hide the Layer Combinations widgets\", self.iface.mainWindow())\r\n self.action.triggered.connect( self.widget.toggle )\r\n # Add toolbar button and menu item\r\n self.iface.addToolBarIcon(self.action)\r\n self.iface.addPluginToMenu(u\"&Layer Combinations\", self.action)\r\n\r\n\r\n # Add the widget to the mainWindow\r\n self.widget.addToiFace(self.iface)", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def init_gui(self):\n # Choose a layout.\n main_vb = QtGui.QVBoxLayout(self)\n\n # Add a list or tree view.\n self.list_view = QtGui.QListWidget()\n\n # Add the buttons.\n load_btn = QtGui.QPushButton('Load Selected')\n cancel_btn = QtGui.QPushButton('Cancel')\n load_btn.clicked.connect(self.update_list_view)\n cancel_btn.clicked.connect(self.close)\n\n # Connect the list/tree view with a method appropriate for user interaction.\n self.list_view.currentItemChanged['QListWidgetItem*', 'QListWidgetItem*'].connect(self.set_current_name)\n self.list_view.itemChanged['QListWidgetItem*'].connect(self.change_name)\n\n # Add the widgets to the layout.\n btn_hb = QtGui.QHBoxLayout()\n btn_hb.addWidget(load_btn)\n btn_hb.addWidget(cancel_btn)\n main_vb.addWidget(self.list_view)\n main_vb.addLayout(btn_hb)\n\n # Show the GUI.\n self.setGeometry(300, 300, 450, 300)\n self.setWindowTitle('Hello World')\n img_icon = 'C:/Users/caj150430/code/so_much_win.png'\n self.setWindowIcon(QtGui.QIcon(img_icon))\n self.show()", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()" ]
[ "0.7311495", "0.73000187", "0.73000187", "0.73000187", "0.73000187", "0.73000187", "0.7089863", "0.6971019", "0.6952852", "0.68420416", "0.6827273", "0.68243486", "0.68117946", "0.6794219", "0.6791741", "0.6789436", "0.6737455", "0.673218", "0.6706583", "0.66974556", "0.6684888", "0.659381", "0.6589742", "0.65674686", "0.65641046", "0.65623546", "0.6537521", "0.65337986", "0.6531066", "0.65255845" ]
0.7458744
0
Get label config file.
def _get_label_config(self, file_path): # Get label config file dir = os.path.dirname(file_path) file = os.path.basename(file_path) split_list = file.split('.') nii_index = split_list.index('nii') file = ''.join(split_list[:nii_index]) config_file = os.path.join(file, 'lbl') if os.path.isfile(config_file): label_config = LabelConfig(config_file, False) else: label_config = self.label_config return label_config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_label_config(self):\n return self.label_config_center", "def get(self, label):\n if label in self.config[self.env]:\n return self.config[self.env][label]\n else:\n logging.warning(f'Config Mgr->get(): label: {label} not configured')\n return None", "def config_file(self):\n return self[CONFIG_FILE_KEY]", "def get_kube_config_file_path(self):\n return self._kube_config", "def config_file(self):\n return join_path(self.prefix.etc.bohrium, \"config.ini\")", "def label_names_file():\n return tfds.core.tfds_path(_LABELS_FNAME)", "def get_label(filename:str) -> str:\n label = filename.split(\"/\")[-2]\n return label", "def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label", "def get_config_file_for_auto_config(self) -> Optional[Text]:\n return self.config_file", "def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")", "def get_config(self, filename):\n if filename in self._path_index:\n return self._configs[self._path_index[filename]]\n elif filename in self._name_index:\n return self._configs[self._name_index[filename]]\n elif filename in self._configs:\n return self._configs[filename]\n else:\n raise KeyError(filename)", "def get_output(path, label_file = None):\n img_id = path.split('/')[-1]\n labels = label_file.loc[img_id].values\n return labels", "def get_config():\n\n _, res = DBX.files_download(c.io.FILE_CONFIG)\n return yaml.load(io.BytesIO(res.content), Loader=yaml.SafeLoader)", "def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath", "def getConfigFileName(self):\n return self._configFileName", "def config_file_address() -> str:\n\n config_files = json_files_from_folder(\"config\")\n config_file = choose_config(config_files) # Choice a config file if there is more then 1 in config folder\n return config_file", "def config_file_name(self):\n return self._config_file_name", "def labelled_config(infile, index):\n with gsd.hoomd.open(infile) as trj:\n snap = HoomdFrame(trj[index])\n\n fig = plot_labelled_config(snap)\n\n fig.output_backend = \"svg\"\n export_svgs(fig, \"figures/labelled_config.png\", height=1600, width=3200)", "def getDefaultFileLocation(self):\n\n label_env = os.getenv('DISPASS_LABELFILE')\n std_env = os.getenv('XDG_DATA_HOME') or os.getenv('APPDATA')\n home_file = '~/.dispass/labels'\n\n if label_env:\n return label_env\n if not exists(home_file) and std_env:\n return std_env + '/dispass/labels'\n else:\n return home_file", "def get_config_file(config_file):\n if os.path.isfile(config_file):\n return config_file\n elif resource_exists('pytorch_lm.conf', config_file):\n return resource_filename('pytorch_lm.conf', config_file)\n else:\n raise ValueError('Could not find configuration file {}'.format(config_file))", "def get_config():\n app = NbConvertApp()\n app.load_config_file()\n return app.config", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... 
Exiting now\"\n sys.exit()", "def get_config(self):\n return ConfigFile.from_file(path.join(self.run_dir, \"os-stdin\"))", "def _get_label(self):\n return self.label", "def get_config(self, name):\n return self.configs[name][0]", "def config(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"config\")", "def get_config_file():\n return deployr_config_repository.get_deployr_config_file()", "def _GetPathFromLabel(self, label):\n\n return os.path.join(self.GetRoot(),\n self._GetRelativeLabelPath(label))", "def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)", "def get_cfg_path(filename):\n return os.path.join(get_cfg_dir(), filename)" ]
[ "0.71112484", "0.6886209", "0.67283285", "0.66105795", "0.655368", "0.6528876", "0.6489148", "0.62944597", "0.62603927", "0.61917907", "0.618969", "0.6151914", "0.6135884", "0.60983557", "0.60794586", "0.6058079", "0.597896", "0.5976467", "0.594677", "0.59456503", "0.5944229", "0.59287125", "0.5923232", "0.5897486", "0.5897235", "0.58915496", "0.5887945", "0.58796203", "0.5877393", "0.58730805" ]
0.8079704
0
Choose winning behavior. Returns
def choose_action(self, active_behaviors): winning_behavior = active_behaviors[0] for behavior in active_behaviors: print('Checking behavior:', behavior, 'with weight:', behavior.weight) if behavior.weight > winning_behavior.weight: winning_behavior = behavior print("Winning behavior is", winning_behavior, "and recommends:", str(winning_behavior.motor_recommendation)) return winning_behavior.motor_recommendation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_win_game(self):\r\n win_game_anims =['anim_speedtap_winround_intensity01_01',\r\n 'anim_speedtap_winround_intensity02_02',\r\n 'anim_speedtap_winround_intensity02_01',\r\n 'anim_speedtap_wingame_intensity02_01',\r\n 'anim_speedtap_wingame_intensity02_02',\r\n 'anim_speedtap_wingame_intensity03_01']\r\n cozmo.logger.info(\"Cozmo win game reacion\")\r\n return win_game_anims[randint(0,5)]", "def choose_action(self, board):\n options = board.empty_cells\n\n # In this game we look for winning possibilities\n for choice in options:\n # For each option play the option,\n # and observe the outcome\n new_board = copy.deepcopy(board)\n new_board.mark_cell(choice[0], choice[1], self._sign)\n # If a winning cell is found, occupy it\n if new_board.has_winner():\n return choice\n\n # In this loop we prevent loosing the game\n for choice in options:\n # For each option play the option,\n # and observe the outcome\n new_board = copy.deepcopy(board)\n new_board.mark_cell(choice[0], choice[1], self._get_opponent_sign())\n # If an opponent has a winning cell occupy it\n if new_board.has_winner():\n return choice\n\n # Otherwise pick randomly\n return random.choice(options)", "def decision():\n return random.choice(['GoToNormal','GoToSleep'])", "def strategy(self, opponent: Player) -> Action:\n return self._random.random_choice(self.p)", "def winner(strategy0, strategy1):\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1", "def winner(strategy0, strategy1):\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1", "def winner(strategy0, strategy1):\n score0, score1 = play(strategy0, strategy1)\n if score0 > score1:\n return 0\n else:\n return 1", "def askWinCondition():\n\n winCondition = Dialog.Dialog(None, {'title': 'Battleship',\n 'text': 'Choose your win condition',\n 'bitmap': 'question',\n 'default': 0,\n 'strings': ('Win by points', 'Win by moves',)}).num\n return winCondition", "def strategy(self, game, args=()):\n return random.choice(list(game.board.open_keys.keys()))", "def rough_outcome(self) -> float:\n # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE\n # pick move based on this may not be optimal but better than random\n # return 1 if win immediately\n # return -1 if all states reachable will result the other player win\n # return 0 if otherwise ??? 
what the fuck does this mean\n # look two states forward\n pass", "def next_choice(self, opponent: 'Player') -> str:\n\n if self.adaptive_ai:\n # this is an adaptive_ai player, so see if it has collected\n # enough stats about the current opponent yet:\n if sum(self.opponent_choices[opponent.name].values()) > 5:\n # has enough samples to start adapting to the opponent\n print(' {} is trying to guess the opponent\\'s choice...'.format(self.name))\n\n # AI algorithm 1:\n # simply find the most-frequent selection by the opponent and\n # choose its killer.\n\n guess = self.opponent_choices[opponent.name].most_common(1)[0][0]\n ai_choice = weapon_to_beat(guess)\n print(' ', opponent.name, 'most often chose', guess, 'so he/she chose', ai_choice)\n return ai_choice\n\n # use the standard tendency distribution to choose a weapon:\n n = randint(1, self.randmax)\n if n <= self.tendency[0]:\n return 'rock'\n elif n <= self.tendency[0] + self.tendency[1]:\n return 'paper'\n else:\n return 'scissors'", "def choose_winner(): \r\n max_health = Titan.max_health()\r\n winners = tuple((titan.name for titan in Titan.titans if titan.health == max_health))\r\n return winners", "def play_strategic_game():\n board, winner = create_board(), 0\n board[1,1] = 1\n while winner == 0:\n for player in [2,1]:\n board = random_place(board, player)\n winner = evaluate(board)\n if winner != 0:\n break\n return winner", "def chooseAnimToRun(self):\n # e.g. if self.curPhase is 2, we have a 4/7 chance of picking 2\n # a 2/7 chance of picking 1\n # and a 1/7 chance of picking 0\n assert self.notify.debugStateCall(self)\n result = self.curPhase\n if base.config.GetBool(\"anim-props-randomized\", True):\n pairs = []\n for i in xrange(self.curPhase +1):\n pairs.append(( math.pow(2,i) , i))\n sum = math.pow(2,self.curPhase+1) - 1\n result = weightedChoice(pairs, sum=sum)\n self.notify.debug(\"chooseAnimToRun curPhase=%s pairs=%s result=%s\" %\n (self.curPhase,pairs,result))\n return result", "def _choose_best_option(self):", "def player_win(self):\n global chips\n global placed_bet\n\n chips = (self.final_bet*2 + chips)\n self.victory = True\n placed_bet = False", "def _choose_action(self):\n return random.randint(0,self.num_bandits-1)", "def take_turn(self):\r\n self._choose_best_option()\r\n self._do_draw()", "def chooseAction(self, gameState):\n if self.getPreviousObservation() is not None:\n lastCapsule = self.getCapsules(self.getPreviousObservation())\n else:\n lastCapsule = None\n x, y = self.getCurrentObservation().getAgentPosition(self.index)\n self.dangerousFood = [i for i in self.deadWidth.keys() if i in self.getFood(gameState).asList()]\n self.safeFood = [i for i in self.getFood(gameState).asList() if i not in self.dangerousFood]\n # this list stores all the actions can be taken according to different goals\n actions = {}\n\n # this list stores the scores of each goal, which indicates the tendency of choose that goal.\n scores = {}\n # find actions to go to boarder\n boarder_goal, disToBoarder = self.getBoarderGoal(gameState)\n action = self.aStarSearch(gameState, boarder_goal, self.simpleHeuristic)\n actions[\"go_to_boarder\"] = action\n\n # # find the actions to another boarder\n # another_boarder_goal, disToBoarder = self.getAnotherBoarderGoal(gameState, boarder_goal)\n # action = self.aStarSearch(gameState, another_boarder_goal, self.nullHeuristic)\n # actions[\"another_boarder\"] = action\n\n # find actions to return border\n boarder_goal, disToBoarder = self.getBoarderGoal(gameState)\n action = 
self.aStarSearch(gameState, boarder_goal, self.GeneralHeuristic)\n actions[\"return_boarder\"] = action\n\n # actions to eat capsules\n capsule_goal, disToCapsule = self.getCapsuleGoal(gameState)\n currentCapsule = self.getCapsules(gameState)\n if currentCapsule is None and lastCapsule is not None:\n self.timer = 20\n elif lastCapsule is not None and currentCapsule is not None:\n if len(lastCapsule) - len(currentCapsule) == 1:\n self.timer = 20\n action = self.aStarSearch(gameState, capsule_goal, self.GeneralHeuristic)\n actions[\"capsule\"] = action\n\n # actions to eat safe food\n safe_food_goal, disToSafeFood = self.getSafeFoodGoal(gameState)\n # if (x, y) == (22, 8):\n # print(1)\n capsule_goal, disToCapsule = self.getCapsuleGoal(gameState)\n self.walls.append(capsule_goal)\n action = self.aStarSearch(gameState, safe_food_goal, self.GeneralHeuristic)\n actions[\"safeFood\"] = action\n\n # actions to eat dangerous food\n dangerous_food_goal, disToDangerousFood = self.getDangerousFoodGoal(gameState)\n action = self.aStarSearch(gameState, dangerous_food_goal, self.GeneralHeuristic)\n self.walls.remove(capsule_goal)\n actions[\"dangerousFood\"] = action\n\n # calculate the scores for each action\n\n ghost_goal, ghostDis = self.getGhostGoal(gameState)\n foodNum = self.numOfFood(gameState)\n safeDis = disToSafeFood\n safeDis = self.isZero(safeDis)\n dangerousDis = disToDangerousFood\n dangerousDis = self.isZero(dangerousDis)\n carried = self.getCurrentObservation().getAgentState(self.index).numCarrying\n disToBoarder = self.isZero(disToBoarder)\n\n # choose actions\n if not self.isRed:\n x = self.Width - x\n\n if x < self.midWidth - 1:\n if self.blocked:\n action = actions[\"another_boarder\"]\n if self.previousActions.full():\n self.previousActions.get()\n self.previousActions.put(action)\n return action\n else:\n if foodNum < 3:\n return actions[\"return_boarder\"]\n else:\n if safe_food_goal is not None:\n scores[\"return_boarder\"] = carried / math.sqrt(disToBoarder)\n scores[\"safeFood\"] = 100 / ((carried + 1) * safeDis)\n if disToCapsule == None:\n scores[\"capsule\"] = 0\n else:\n capsuleDis = self.isZero(disToCapsule)\n if self.timer > 0:\n self.timer -= 1\n scores[\"capsule\"] = 0\n else:\n scores[\"capsule\"] = 30 / capsuleDis\n max = -99999\n key = 0\n for k, v in scores.items():\n if v != 0:\n if v > max:\n max = v\n key = k\n return actions[key]\n else:\n scores[\"return_boarder\"] = carried / math.sqrt(disToBoarder)\n\n if disToCapsule == None:\n scores[\"capsule\"] = 0\n else:\n capsuleDis = self.isZero(disToCapsule)\n if self.timer > 0:\n self.timer -= 1\n scores[\"capsule\"] = 0\n\n else:\n scores[\"capsule\"] = 500 / capsuleDis\n if dangerousDis is not None:\n if ghostDis is not None:\n scaredTimer = self.opponentscaredTime(gameState)\n if scaredTimer is None:\n if ghostDis > 5:\n ghostDis = 5\n if dangerousDis + self.deadWidth[dangerous_food_goal] + 1 < ghostDis:\n scores[\"dangerousFood\"] = 30 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 3 / ((carried + 1) * dangerousDis)\n else:\n if dangerousDis + self.deadWidth[dangerous_food_goal] + 1 < scaredTimer:\n scores[\"dangerousFood\"] = 30 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 8 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 20 / ((carried + 1) * dangerousDis)\n max = -99999\n key = 0\n for k, v in scores.items():\n if v != 0:\n if v > max:\n max = v\n key = k\n return actions[key]\n else:\n if foodNum < 3:\n return 
actions[\"return_boarder\"]\n else:\n if safe_food_goal is not None:\n scores[\"return_boarder\"] = carried / math.sqrt(disToBoarder)\n scores[\"safeFood\"] = 30 / ((carried + 1) * safeDis)\n if disToCapsule == None:\n scores[\"capsule\"] = 0\n else:\n capsuleDis = self.isZero(disToCapsule)\n if self.timer > 0:\n self.timer -= 1\n scores[\"capsule\"] = 0\n scores[\"return_boarder\"] = carried / disToBoarder\n else:\n scores[\"capsule\"] = 10 / capsuleDis\n max = -99999\n key = 0\n for k, v in scores.items():\n if v != 0:\n if v > max:\n max = v\n key = k\n return actions[key]\n else:\n scores[\"return_boarder\"] = carried / math.sqrt(disToBoarder)\n if disToCapsule == None:\n scores[\"capsule\"] = 0\n else:\n capsuleDis = self.isZero(disToCapsule)\n if self.timer > 0:\n self.timer -= 1\n scores[\"capsule\"] = 0\n scores[\"return_boarder\"] = carried / disToBoarder\n else:\n scores[\"capsule\"] = 500 / capsuleDis\n if dangerousDis is not None:\n if ghostDis is not None:\n scaredTimer = self.opponentscaredTime(gameState)\n if scaredTimer is None:\n if ghostDis > 5:\n ghostDis = 5\n if dangerousDis + self.deadWidth[dangerous_food_goal] + 1 < ghostDis:\n scores[\"dangerousFood\"] = 30 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 3 / ((carried + 1) * dangerousDis)\n else:\n if dangerousDis + self.deadWidth[dangerous_food_goal] + 1 < scaredTimer:\n scores[\"dangerousFood\"] = 30 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 8 / ((carried + 1) * dangerousDis)\n else:\n scores[\"dangerousFood\"] = 20 / ((carried + 1) * dangerousDis)\n max = -99999\n key = 0\n for k, v in scores.items():\n if v != 0:\n if v > max:\n max = v\n key = k\n return actions[key]", "def WIN_BONUS() -> int:\n return 2", "def get_winner(state):\n\n if", "def play(self, state):\n if self.exploration and np.random.random() < self.explorer.get_eps():\n return np.random.randint(0, self.num_action)\n else:\n return np.argmax(self.means[state,:])", "def pick_action(self, equity, to_call, pot_odds):\n # action to us: check or bet\n if to_call == 0:\n # lock hands - 1/3 of the time make a small bet instead of a big one\n if equity > 90 and self.r_test(0.33, 'lock_trap'):\n self.make_bet(self.minimum_bet(\"trap1\"))\n elif equity > 65 or (equity > 40 and self.r_test(0.03, 'c1')):\n self.make_bet(self.big_raise(\"R1\"))\n elif equity > 55 or self.r_test(0.02, 'c2'):\n self.make_bet(self.minimum_bet(\"R2\"))\n else:\n self.bot.check()\n # TODO: combine these and make them aware of button\n # use pot odds to call/bet/fold\n else:\n return_ratio = equity / pot_odds\n self.bot.log(\" return ratio={:.3f}\".format(return_ratio))\n if equity > 70 or (equity > 40 and self.r_test(0.03, 'po1')):\n self.make_bet(self.big_raise(\"R3\"))\n elif to_call < self.data.big_blind and \\\n (equity > 55 or self.r_test(0.03, 'po2')):\n # small preflop raise from SB, get more money into the pot\n self.make_bet(self.minimum_bet(\"R4\"))\n elif return_ratio > 1.25:\n self.bot.log(\" return ratio > 1.25, calling {}\".format(to_call))\n self.bot.call(to_call)\n elif return_ratio > 1 \\\n and MathUtils.percentage(to_call, self.our_stack()) < 10:\n self.bot.log(\" return ratio > 1 and bet is small, calling {}\"\n .format(to_call))\n self.bot.call(to_call)\n else:\n self.bot.fold()", "def _select_action(self):\n if self.eval_mode:\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if 
random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state.\n if self._interact == 'stochastic':\n selected_action = self._stochastic_action\n elif self._interact == 'greedy':\n selected_action = self._q_argmax\n else:\n raise ValueError('Undefined interaction')\n return self._sess.run(selected_action,\n {self.state_ph: self.state})", "def play_against_random(self, action):\n state, status, done = self.step(action)\n if not done and self.turn == 2:\n state, s2, done = self.random_step()\n if done:\n if s2 == self.STATUS_WIN:\n status = self.STATUS_LOSE\n elif s2 == self.STATUS_TIE:\n status = self.STATUS_TIE\n else:\n raise ValueError(\"???\")\n return state, status, done", "def play_game(self, p1, p2):\n self.state_manager.init_game()\n state = self.state_manager.get_state()\n players = [p1, p2]\n player = random.choice([1, 2])\n actions = []\n p1_wins = 0\n p2_wins = 0\n while not self.state_manager.is_game_over(state):\n current_agent = players[player - 1]\n actor_chosen_action = current_agent.target_policy(state, player, is_top_policy=True) # is_top_policy = True to ensure that the agents uses the ANET and not the random exploration\n actions.append(actor_chosen_action)\n self.state_manager.perform_action(actor_chosen_action)\n\n state = self.state_manager.get_state()\n player = change_player(player)\n if player == 1:\n p2_wins += 1\n else:\n p1_wins += 1\n winning_agent = players[change_player(player)-1] # Since player is changed in end of while, the winning player at winning state is the previous player\n # print(p1.name + \" vs. \" + p2.name + \", winner: \" + winning_agent.name + \", actions: \" + str(actions))\n self.results[winning_agent.name] += 1\n return p1_wins, p2_wins, actions", "async def evaluate(self):\n if self.players[1].id == bot.user.id:\n self.p2_move = random.choice((\"Rock\", \"Paper\", \"Scissors\"))\n\n if None in self.moves:\n return\n\n if len(self.moves) == 1:\n tie_embed = discord.Embed(title=\"It's a Draw\")\n await self.channel.send(embed=tie_embed)\n return await self.end()\n\n if self.moves == {\"Rock\", \"Paper\"}:\n winner = \"Paper\"\n elif self.moves == {\"Scissors\", \"Paper\"}:\n winner = \"Scissors\"\n elif self.moves == {\"Rock\", \"Scissors\"}:\n winner = \"Rock\"\n\n # P1 Wins\n if self.p1_move == winner:\n embed = discord.Embed(\n title=f\"{self.players[0].name}'s **{winner}** beats {self.players[1].name}'s **{self.p2_move}**\")\n await self.channel.send(embed=embed)\n await self.end(winner=self.players[0])\n\n # P2 Wins\n elif self.p2_move == winner:\n embed = discord.Embed(\n title=f\"{self.players[1].name}'s **{winner}** beats {self.players[0].name}'s **{self.p1_move}**\")\n await self.channel.send(embed=embed)\n await self.end(winner=self.players[1])", "def play(self):\n if self.stats['round'] == 0:\n if self.data['personalities'] and self.data['events']:\n self.choose_opponent()\n self.resolve_conflict()\n else:\n self.stats['round'] += 1\n elif self.stats['round'] == 1:\n if self.data['locations']:\n self.choose_location()\n self.resolve_conflict()\n else:\n self.stats['round'] += 1\n else:\n print(\"You've won\")\n self.game_over = True\n return self.stats", "def make_random_move(self):\n choice = None\n options = []\n #generate full moves list\n for i in range(self.width):\n for j in range(self.height):\n #make sure move has not been made\n if (i,j) not in self.moves_made:\n #make sure move is 
not a mine\n if (i,j) not in self.mines:\n options.append((i,j))\n #if there are no options, return None\n if len(options) == 0:\n return None\n\n #pick a random option from generated list\n choice = random.choice(options)\n return choice\n\n \"\"\"\n For kicks and giggles I wrote this extra bit to determine a\n rough intuitive probability for each option based on the knowledge\n base, so rather than picking a choice randomly the AI can choose\n the option that is, at least intuitively, least likely to blow up.\n Better to take the 1/8 chance than the 1/3 chance, right?\n \"\"\"\n best_chance = 1\n #iterate through generated options\n for option in options:\n #Could set chance to 1/8, but the AI wouldn't actually know that. I\n #only know it because I can read the code...But for the purposes of this\n #drill we'll say the AI doesn't know how many bombs are placed.\n #Better then to pick a square we know nothing about than one that\n #has a 1/8 chance of exploding. Gather more information that way.\n chance = 0\n for sentence in self.knowledge:\n #look to see if current option is in sentences\n if option in sentence.cells:\n #use sentence count and length of cell set to calculate probability\n prob = sentence.count / len(sentence.cells)\n if prob > chance:\n #Looking for the highest explosive probability for this square\n chance = prob\n if chance < best_chance:\n #If this option has lower odds of exploding than current best, it becomes\n #the optimal\n best_chance = chance\n choice = option\n\n #return choice", "def random_play(state, player):\n import random\n actions = YoteRules.get_player_actions(state, player, reward_move=state.rewarding_move)\n choice = random.choice(actions)\n return choice" ]
[ "0.69298387", "0.6927804", "0.67378783", "0.6535386", "0.6518367", "0.6518367", "0.6518367", "0.6459356", "0.6443634", "0.6269449", "0.625524", "0.62198985", "0.6202906", "0.61732984", "0.6137767", "0.613293", "0.61287475", "0.6128412", "0.6127908", "0.61184627", "0.60811716", "0.6077155", "0.6069547", "0.60436434", "0.6030259", "0.6029476", "0.6024889", "0.6020298", "0.60032946", "0.60019726" ]
0.75507355
0
Create the purchase order from the procurement, using the provided field values, after adding the given purchase order line in the purchase order.
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None): line_vals.update({'name': ''}) po_vals.update({'order_line': [(0,0,line_vals)]}) return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _action_procurement_create(self):\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n new_procs = self.env['procurement.order'] #Empty recordset\n for line in self:\n if line.state != 'sale' or not line.product_id._need_procurement():\n continue\n qty = 0.0\n for proc in line.procurement_ids:\n qty += proc.product_qty\n if float_compare(qty, line.product_uom_qty, precision_digits=precision) >= 0:\n continue\n\n if not line.order_id.procurement_group_id:\n vals = line.order_id._prepare_procurement_group()\n line.order_id.procurement_group_id = self.env[\"procurement.group\"].create(vals)\n\n vals = line._prepare_order_line_procurement(\n group_id=line.order_id.procurement_group_id.id)\n vals['product_qty'] = line.product_uom_qty - qty\n new_proc = self.env[\"procurement.order\"].with_context(\n procurement_autorun_defer=True,\n ).create(vals)\n # Do one by one because need pass specific context values\n new_proc.with_context(\n width=line.origin_width,\n height=line.origin_height).run()\n new_procs += new_proc\n return new_procs", "def pl_create_order(self):\n\tprint()\n\tprint('Pl - Create Order')\n\n\n\tpartner = self.env['res.partner'].search([\n\t\t\t\t\t\t\t\t\t\t\t\t\t('name', '=', self.patient.name),\n\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t#order='appointment_date desc',\n\t\t\t\t\t\t\t\t\t\t\t\tlimit=1,)\n\n\n\t# Create Order\n\torder = self.env['sale.order'].create({\n\t\t\t\t\t\t\t\t\t\t\t\t\t'state':'draft',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_doctor': self.physician.id,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'partner_id': self.partner_id.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'partner_id': partner.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_ruc': self.partner_id.x_ruc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_dni': self.partner_id.x_dni,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'patient': self.patient.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc': self.patient.x_id_doc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc_type': self.patient.x_id_doc_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_family': 'procedure',\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'treatment': self.id,\n\t\t\t\t\t\t\t\t\t\t\t\t})\n\t#print(order)\n\n\n\n\t# Create Order Lines\n\tfor cart_line in self.shopping_cart_ids:\n\n\t\tproduct = cart_line.product\n\n\t\t#print(product)\n\t\t#print(product.name)\n\n\t\t# Create Order Line\n\t\tol = order.order_line.create({\n\t\t\t\t\t\t\t\t\t\t'name': \t\tproduct.name,\n\t\t\t\t\t\t\t\t\t\t'product_id': \tproduct.id,\n\t\t\t\t\t\t\t\t\t\t'price_unit': \tcart_line.price,\n\t\t\t\t\t\t\t\t\t\t'product_uom_qty': cart_line.qty,\n\t\t\t\t\t\t\t\t\t\t'order_id': \torder.id,\n\t\t\t\t\t\t\t\t\t})\n\treturn order\n\n\t# pl_create_order", "def create_order(self):\n\tprint()\n\tprint('OH - pl_create_order')\n\n\t# Search Partner\n\tprint()\n\tprint('Search partner')\n\tpartner = self.env['res.partner'].search([\n\t\t\t\t\t\t\t\t\t\t\t\t\t('name', '=', self.patient.name),\n\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t#order='appointment_date desc',\n\t\t\t\t\t\t\t\t\t\t\t\tlimit=1,)\n\n\t# Search Pl\n\tprint()\n\tprint('Search pricelist')\n\tpricelist = self.env['product.pricelist'].search([\n\t\t\t\t\t\t\t\t\t\t\t#('active', 'in', [True]),\n\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t#order='x_serial_nr asc',\n\t\t\t\t\t\t\t\t\t\t\tlimit=1,\n\t\t\t\t\t\t\t\t\t\t)\n\tprint(pricelist)\n\n\t# Create Order\n\torder = self.env['sale.order'].create({\n\t\t\t\t\t\t\t\t\t\t\t\t\t'state':'draft',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_doctor': self.physician.id,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'partner_id': 
self.partner_id.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'partner_id': partner.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_ruc': self.partner_id.x_ruc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t#'x_dni': self.partner_id.x_dni,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'patient': self.patient.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc': self.patient.x_id_doc,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_id_doc_type': self.patient.x_id_doc_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t'x_family': 'procedure',\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'treatment': self.id,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t'pricelist_id': pricelist.id,\n\t\t\t\t\t\t\t\t\t\t\t\t})\n\t#print(order)\n\n\n\n\t# Create Order Lines\n\tfor cart_line in self.shopping_cart_ids:\n\n\t\tproduct = cart_line.product\n\n\t\t#print(product)\n\t\t#print(product.name)\n\n\t\t# Create Order Line\n\t\tol = order.order_line.create({\n\t\t\t\t\t\t\t\t\t\t'name': \t\tproduct.name,\n\t\t\t\t\t\t\t\t\t\t'product_id': \tproduct.id,\n\t\t\t\t\t\t\t\t\t\t'price_unit': \tcart_line.price,\n\t\t\t\t\t\t\t\t\t\t'product_uom_qty': cart_line.qty,\n\t\t\t\t\t\t\t\t\t\t'order_id': \torder.id,\n\t\t\t\t\t\t\t\t\t})\n\treturn order", "def make_po(self, cr, uid, ids, context=None):\n res = super(procurement_order, self).make_po(cr, uid, ids, context=None)\n for procurement in self.browse(cr, uid, ids, context=context):\n # da procurement prendo id ordine x ripassare le righe e vedere il listino for\n pricelist_item = self.pool.get('product.pricelist').price_get(cr, uid, [procurement.purchase_id.pricelist_id.id], procurement.purchase_id.product_id.id, procurement.product_qty or 1.0, procurement.purchase_id.partner_id.id)\n pricelist_item_id = pricelist_item['item_id'][procurement.purchase_id.pricelist_id.id]\n price_item = self.pool.get('product.pricelist.item').browse(cr, uid, pricelist_item_id, context=context)\n \n if price_item:\n for line in procurement.purchase_id.order_line:\n vals = {\n 'discount': price_item.discount_line,\n 'discount2': price_item.discount2_line\n }\n self.pool.get('purchase.order.line').write(cr, uid, [line.id], vals)\n \n return res", "def create_purchase_order(self, cr, uid, ids, context=None):\n sale_obj = self.pool.get('sale.order')\n act_window = self.pool.get('ir.actions.act_window')\n wizard = self.browse(cr, uid, ids[0], context)\n sale_ids = context.get('active_ids', [])\n if wizard.advance_purchase_order == 'all':\n # create the final invoices of the active sales orders\n res = sale_obj.manual_purchase_order(cr, uid, sale_ids, context)\n \n return {'type': 'ir.actions.act_window_close'}\n\n if wizard.advance_purchase_order == 'lines':\n # open the list view of sales order lines to invoice\n res = act_window.for_xml_id(cr, uid, 'sale', 'action_order_line_tree2', context)\n res['context'] = {\n \n 'search_default_order_id': sale_ids and sale_ids[0] or False,\n }\n return res \n\n inv_ids = []\n for sale_id, inv_values in self._prepare_advance_po_vals(cr, uid, ids, context=context):\n inv_ids.append(self._create_purchase_order(cr, uid, inv_values, sale_id, context=context))\n\n \n return {'type': 'ir.actions.act_window_close'}", "def create(self, values):\n res = super(PurchaseOrderLine, self).create(values)\n states = ['purchase', 'done']\n if res.order_id.state in states:\n raise UserError(_('You can not create an additional purchase order line in a confirmed order '))\n return res", "def test_05_purchase_order(self):\n # Create purchase Order and check purchase order was created correctly\n # (without lines)\n order = self.create_po()\n self.assertTrue(order)\n self.assertFalse(order.order_line)\n\n # 
Add one sellable line (first line)\n sellable_product = self.product_obj.browse(self.sellable_product)\n self.create_pol(order, sellable_product)\n self.assertTrue(order.order_line)\n self.assertEquals(len(order.order_line), 1)\n self.assertIn(sellable_product, order.order_line.mapped('product_id'))\n self.assertEquals(order.order_line.product_id.state2, 'sellable')\n\n # Add one draft line (second line)\n draft_product = self.product_obj.browse(self.draft_product)\n self.create_pol(order, draft_product)\n self.assertEquals(len(order.order_line), 2)\n self.assertIn(draft_product, order.order_line.mapped('product_id'))\n self.assertEquals(set(order.order_line.mapped('product_id.state2')),\n set(['sellable', 'draft']))\n\n # Add one obsolete line. This will raise an exception.\n obsolete_product = self.product_obj.browse(self.obsolete_product)\n with self.assertRaises(exceptions.Warning):\n self.create_pol(order, obsolete_product)", "def create_sale_order_line_vals_amazon(self,order_line,qty_price_dict,tax_id,amazon_product=False,odoo_product=False,amazon_order=False,instance=False,title=False):\n sale_order_line = self.env['sale.order.line']\n# new_record=self.env['sale.order.line'].new({'order_id':amazon_order.id,\n# 'company_id':amazon_order.company_id.id,\n# 'product_id':amazon_product and amazon_product.product_id.id or odoo_product and odoo_product.id or False,\n# 'product_uom':amazon_product and amazon_product.product_tmpl_id.uom_id or odoo_product and odoo_product.product_tmpl_id.uom_id,\n# 'name':title\n# })\n# new_record.product_id_change()\n# order_vals=new_record._convert_to_write({name: new_record[name] for name in new_record._cache}) \n# \n# order_qty=qty_price_dict.get('order_qty')\n# order_vals.update({\n# 'product_uom_qty' : order_qty,\n# 'amazon_order_qty':order_line.get('QuantityOrdered',{}).get('value',0.0),\n# 'price_unit' : qty_price_dict.get('amount_per_unit'),\n# 'customer_lead' :amazon_product and amazon_product.sale_delay or False,\n# 'invoice_status' : False,\n# 'state' : 'draft',\n# 'amazon_order_item_id':order_line.get('OrderItemId',{}).get('value'),\n# 'discount':0.0,\n# 'amazon_product_id':amazon_product and amazon_product.id or False,\n# 'product_uom':new_record.product_uom.id,\n# 'producturl':\"%s%s\"%(instance.producturl_prefix or '',order_line.getvalue(\"ASIN\", \"value\"))\n# }) \n\n vals = ({\n 'order_id':amazon_order.id,\n 'product_id':amazon_product and amazon_product.product_id.id or odoo_product and odoo_product.id or False,\n 'company_id':amazon_order.company_id.id,\n 'description':title,\n 'order_qty':qty_price_dict.get('order_qty'),\n 'price_unit':qty_price_dict.get('amount_per_unit'),\n 'discount':0.0,\n 'product_uom':amazon_product and amazon_product.product_tmpl_id.uom_id or odoo_product and odoo_product.product_tmpl_id.uom_id\n }) \n order_vals = sale_order_line.create_sale_order_line_ept(vals)\n \n order_vals.update({\n 'amazon_order_qty':order_line.get('QuantityOrdered',{}).get('value',0.0),\n 'customer_lead' :amazon_product and amazon_product.sale_delay or False,\n 'invoice_status' : False,\n 'amazon_order_item_id':order_line.get('OrderItemId',{}).get('value'),\n 'amazon_product_id':amazon_product and amazon_product.id or False,\n 'producturl':\"%s%s\"%(instance.producturl_prefix or '',order_line.getvalue(\"ASIN\", \"value\"))\n })\n return order_vals", "def create_po(self):\n pricelist_id = 1\n partner_id = self.ref('base.res_partner_1')\n order = self.order_obj.create({\n 'partner_id': partner_id,\n 'location_id': 
self.ref('stock.stock_location_stock'),\n 'pricelist_id': pricelist_id})\n return order", "def create_parcel_order(self,parcel_description,parcel_weight,parcel_source,parcel_destination,receiver_name,receiver_telephone,current_location,status, user_id):\n\n date_created = datetime.datetime.utcnow()\n price_quote = Helpers.gen_price(parcel_weight)\n sql = \"INSERT INTO parcel_orders(parcel_description,parcel_weight,price_quote,parcel_source,parcel_destination,receiver_name,receiver_telephone,date_created,current_location,status,user_id) VALUES('{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}') \".format(parcel_description,parcel_weight,price_quote,parcel_source,parcel_destination,receiver_name,receiver_telephone,date_created,current_location,status,user_id)\n self.db_object.cursor.execute(sql)", "def create_order_line_item(cls, order, invoice_line_item):\n order_line_item = cls(\n order_line_item_id=str(uuid.uuid4().int),\n order=order,\n invoice_line_item=invoice_line_item,\n product=invoice_line_item.product,\n quantity=invoice_line_item.quantity\n ).save()\n\n return order_line_item", "def create_order():", "def create(self, values):\n if values.get('name', _('New')) == _('New'):\n values['name'] = self.env['ir.sequence'].next_by_code('order.reference',\n None) or _('New')\n values['marks'] = values['name']\n customer_code = ''\n if values.get('customer_id'):\n customer = self.env['res.partner'].browse(values.get('customer_id'))\n customer_code = customer.customer_code\n if values.get('marks'):\n marks_field = values.get('marks')\n else:\n marks_field = ' '\n\n values['marks'] = '%s %s %s' % (customer_code, values['name'], marks_field)\n return super(PurchaseOrder, self).create(values)", "def create(self, vals):\n res = super(SaleOrder, self).create(vals)\n if not vals.get('fiscal_position'):\n fiscal_position = self._get_fiscal_position(\n res.partner_shipping_id)\n if fiscal_position:\n res.fiscal_position = fiscal_position\n return res", "def create_pol(self, order, product):\n order.write({\n 'order_line': [(0, 0, {\n 'product_id': product.id,\n 'product_qty': 10.0,\n 'product_uom': product.uom_id.id,\n 'price_unit': product.price,\n 'name': product.name_template,\n 'sequence': len(order.order_line) + 1,\n 'date_planned': time.strftime('%Y-%m-%d')\n })]})", "def create_order(order_type, quantity, action):\n order = Order()\n order.m_orderType = order_type\n order.m_totalQuantity = quantity\n order.m_action = action\n return order", "def createOrder(self, item, units, quantity, delivery_time):\n self.order = Order(item, units, quantity, delivery_time)", "def create_purchase_requestion(self, cr, uid, ids, context=None):\n #TODO change the state of the purchase requestion to quotes and let the wizard in specefic state \n purchase_requestion_obj = self.pool.get('ireq.m')\n exchange = self.pool.get('exchange.order').browse(cr, uid, context['active_id'])\n requestion_lines_obj = self.pool.get('ireq.products')\n prod = self.pool.get('product.product')\n wf_service = netsvc.LocalService(\"workflow\")\n if exchange.purchase_requestion_id:\n raise osv.except_osv(_('Warning'), _('You allredy create a purchase requestion for this exchange order '))\n for wizard in self.browse(cr, uid, ids):\n requestion_id = purchase_requestion_obj.create(cr, uid, {'company_id': exchange.company_id.id,\n 'user': context['uid'],\n 'cat_id':exchange.category_id.id or False,\n 'ir_ref': exchange.name, \n 'department_id' : exchange.department_id.id,\n 'exchane_order_id':[(4, exchange.id)],})\n for 
wizard_lines in wizard.products_ids:\n product = prod.browse(cr, uid,wizard_lines.product_id.id)\n requestion_lines_obj.create(cr, uid, {'pr_rq_id':requestion_id,\n 'product_id': wizard_lines.product_id.id,\n 'name': product.name,\n 'product_qty': wizard_lines.product_qty,\n 'product_uom': product.uom_po_id.id, \n 'desc': wizard_lines.description,})\n \n exchange.write({'purchase_requestion_id':requestion_id , 'state' : 'wait_purchase' }) \n wf_service.trg_validate(uid, 'ireq.m', requestion_id, 'draft', cr)\n return requestion_id", "def create(self, validated_data):\n orderlines = validated_data.pop('orderlines', None)\n if not (orderlines and len(orderlines)):\n raise EmptyOrderException\n\n # Create order and associated orderlines\n order = models.Order.objects.create(**validated_data)\n for orderline in orderlines:\n order.orderlines.create(**orderline)\n\n return order", "def createOrders(self):\n self.ordersDict = {}\n for pstep in self.processingSteps:\n if pstep.orderid not in self.ordersDict:\n self.ordersDict[pstep.orderid] = Order()\n self.ordersDict[pstep.orderid].addProcessingStep(pstep)", "def create_order(self, date, agreement_lines):\n self.ensure_one()\n order_line_obj = self.env['sale.order.line'].with_context(\n company_id=self.company_id.id)\n order_vals = self._prepare_sale_order_vals(self, date)\n order = self.env['sale.order'].create(order_vals)\n # Create order lines objects\n for agreement_line in agreement_lines:\n order_line_vals = self._prepare_sale_order_line_vals(\n agreement_line, order)\n order_line_obj.create(order_line_vals)\n # Update last order date for lines\n agreement_lines.write({'last_order_date': fields.Date.today()})\n # Update agreement state\n if self.state != 'orders':\n self.state = 'orders'\n return order", "def create_unfulfilled_order(validated_basket, affiliate_id=None):\n with transaction.atomic():\n total_price_paid = get_product_version_price_with_discount(\n coupon_version=validated_basket.coupon_version,\n product_version=validated_basket.product_version,\n )\n order = Order.objects.create(\n status=Order.CREATED,\n purchaser=validated_basket.basket.user,\n total_price_paid=total_price_paid,\n )\n line = Line.objects.create(\n order=order,\n product_version=validated_basket.product_version,\n quantity=validated_basket.basket_item.quantity,\n )\n if validated_basket.basket_item.program_run:\n ProgramRunLine.objects.create(\n line=line, program_run=validated_basket.basket_item.program_run\n )\n if validated_basket.run_selection_ids:\n LineRunSelection.objects.bulk_create(\n LineRunSelection(line=line, run_id=run_id)\n for run_id in validated_basket.run_selection_ids\n )\n if validated_basket.coupon_version:\n redeem_coupon(coupon_version=validated_basket.coupon_version, order=order)\n if affiliate_id is not None:\n AffiliateReferralAction.objects.create(\n affiliate_id=affiliate_id, created_order=order\n )\n sync_hubspot_deal(order)\n return order", "def create_order(cls, invoice):\n order = cls(\n order_id=str(uuid.uuid4().int),\n invoice=invoice\n ).save()\n\n invoice_line_items = InvoiceLineItem.objects.filter(invoice=invoice, type=\"item\").all()\n\n for invoice_line_item in invoice_line_items:\n OrderLineItem.create_order_line_item(order=order, invoice_line_item=invoice_line_item)\n\n return order", "def create_order(order_type, quantity, action, price = None):\n order = Order()\n order.m_orderType = order_type\n order.m_totalQuantity = quantity\n order.m_action = action\n order.m_account = 
ConfigMgr.get_ib_config()['account_code']\n if order_type == 'LMT':\n order.m_lmtPrice = price\n elif order_type == 'STP':\n order.m_auxPrice = price\n return order", "def create_order(self, row):\n\n order_dict = {}\n #error = False\n #error_list = []\n\n #break_err= False\n #break_err_list = []\n\n order_number = row[\"Order Number\"]\n order_dict[\"order_number\"] = order_number\n\n # convert date string to datetime obj\n try:\n order_datetime = datetime.datetime.strptime(row[\"Order Date\"], \"%Y-%m-%d %H:%M:%S\" )\n order_date = order_datetime.date()\n order_time = order_datetime.time()\n except:\n order_date, order_time = None # assign none to date and time \n \n #error = True\n #datetime_conversion_err_str = \"Couldn't convert for order -> %s\" % order_dict[\"order_number\"] # detailerror in error list\n #error_list.append(datetime_conversion_err_str)\n \n # assign date,time to dict \n order_dict[\"order_date\"] = order_date \n order_dict[\"order_time\"] = order_time\n\n order_dict[\"customer_uid\"] = row[\"Customer Uid\"]\n order_dict[\"customer_name\"] = row[\"Customer Name\"]\n order_dict[\"customer_email\"] = row[\"Customer Email\"]\n\n order_dict[\"billing_name\"] = row[\"Bill To Name\"]\n order_dict[\"billing_address\"] = row[\"Bill To Address\"]\n order_dict[\"billing_district\"] = row[\"Bill To District\"]\n order_dict[\"billing_state\"] = row[\"Bill To State\"]\n order_dict[\"billing_zip_code\"] = row[\"Bill To Zip\"]\n order_dict[\"billing_country\"] = row[\"Bill To Country\"]\n order_dict[\"billing_phone_No\"] = row[\"Bill To Mobile\"]\n\n order_dict[\"shipping_name\"] = row[\"Ship To Name\"]\n order_dict[\"shipping_address\"] = row[\"Ship To Address\"]\n order_dict[\"shipping_district\"] = row[\"Ship To District\"]\n order_dict[\"shipping_state\"] = row[\"Ship To State\"]\n order_dict[\"shipping_zip_code\"] = row[\"Ship To Zip\"]\n order_dict[\"shipping_country\"] = row[\"Ship To Country\"]\n order_dict[\"shipping_phone_No\"] = row[\"Ship To Mobile\"]\n\n order_dict[\"order_currency\"] = row[\"Order Currency\"]\n\n # convert order total to decimal\n order_total = self.str_to_decimal( row[\"Order Total\"], \n \"Order Total\", True)\n order_dict[\"order_total\"] = order_total \n \n #convert order_taxes to decimal \n order_taxes = self.str_to_decimal(\n row[\"Order Taxes\"], \n \"Order Taxes\")\n order_dict[\"order_taxes\"] = order_taxes\n\n # convert order_discount to decimal\n order_discounts = self.str_to_decimal( \n row[\"Order Discounts\"], \n \"Order Discounts\") \n order_dict[\"order_discounts\"] = order_discounts\n\n # convert order_taxes to decimal\n order_subtotal = self.str_to_decimal(\n row[\"Order Subtotal\"], \n \"Order Subtotal\", True) \n order_dict[\"order_subtotal\"] = order_subtotal \n\n # convert shipping cost to decimal\n order_shipping_cost = self.str_to_decimal(\n row[\"Order Shipping\"], \n \"Order Shipping\", True) \n order_dict[\"order_shipping_cost\"] = order_shipping_cost \n\n # convert shipping_TBD to decimal\n order_shipping_TBD = self.str_to_decimal( \n row[\"Order Ship Tbd\"], \n \"Order Ship Tbd\") \n order_dict[\"order_ship_TBD\"] = order_shipping_TBD\n \n # convert cart_total\n order_cart_total = self.str_to_decimal(\n row[\"Order Cart Total\"], \n \"Order Cart Total\", True) \n order_dict[\"order_cart_total\"] = order_cart_total \n\n # convert Cart Taxes to decimal\n order_cart_taxes = self.str_to_decimal( \n row[\"Order Cart Taxes\"], \n \"Order Cart Taxes\") \n order_dict[\"order_cart_taxes\"] = order_cart_taxes\n \n # convert Cart 
Discount to decimal\n order_cart_discount = self.str_to_decimal(\n row[\"Order Cart Discounts\"], \n \"Order Cart Discounts\") \n order_dict[\"order_cart_discount\"] = order_cart_discount\n\n # convert Grand Total\n order_grand_total = self.str_to_decimal( \n row[\"Order Grand Total\"], \n \"Order Grand Total\", True) \n order_dict[\"order_grand_total\"] = order_grand_total\n\n # convert coupon value to decimal\n order_coupon_value = self.str_to_decimal(\n row[\"Order Coupon Value\"], \n \"Order Coupon Value\") \n order_dict[\"order_coupon_value\"] = order_coupon_value\n\n # convert Payment fee to decimal\n order_payment_fee = self.str_to_decimal( \n row[\"Payment Fee\"], \n \"Payment Fee\") \n order_dict[\"payment_fee\"] = order_payment_fee\n\n # convert payment amt to decimal\n order_payment_amt = self.str_to_decimal( \n row[\"Payment Amount\"], \n \"Payment Amount\") \n order_dict[\"payment_amount\"] = order_payment_amt \n\n\n order_dict[\"order_coupon_code\"] = row[\"Order Coupon Code\"] \n order_dict[\"order_status\"] = row[\"Order Status\"]\n\n order_dict[\"payment_method\"] = row[\"Payment Method\"]\n order_dict[\"payment_response\"] = row[\"Payment Response\"]\n\n # Translate Yes/NO to True/false\n if row[\"Payment Is Live\"] == \"No\":\n order_dict[\"payment_live\"] = False\n else:\n order_dict[\"payment_live\"] = True\n\n # translate {Yes/No} to True/False\n if row[\"Payment Successful\"] == \"Yes\":\n order_dict[\"payment_successful\"] = True\n else:\n order_dict[\"payment_successful\"] = False\n \n return order_dict #", "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if 
rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = 
self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def prepare(self):\n # Create a purchase order from a supplier\n Company = self.old_state.apps.get_model('company', 'company')\n PurchaseOrder = self.old_state.apps.get_model('order', 'purchaseorder')\n Part = self.old_state.apps.get_model('part', 'part')\n Supplierpart = self.old_state.apps.get_model('company', 'supplierpart')\n # TODO @matmair fix this test!!!\n # SalesOrder = self.old_state.apps.get_model('order', 'salesorder')\n\n supplier = Company.objects.create(\n name='Supplier A',\n description='A great supplier!',\n is_supplier=True,\n is_customer=True,\n )\n\n part = Part.objects.create(\n name='Bob',\n description='Can we build it?',\n assembly=True,\n salable=True,\n purchaseable=False,\n tree_id=0,\n level=0,\n lft=0,\n rght=0,\n )\n supplierpart = Supplierpart.objects.create(\n part=part,\n supplier=supplier\n )\n\n # Create some orders\n for ii in range(10):\n\n order = PurchaseOrder.objects.create(\n supplier=supplier,\n reference=f\"{ii}-abcde\",\n description=\"Just a test order\"\n )\n order.lines.create(\n part=supplierpart,\n quantity=12,\n received=1\n )\n order.lines.create(\n quantity=12,\n received=1\n )\n\n # TODO @matmair fix this test!!!\n # sales_order = SalesOrder.objects.create(\n # customer=supplier,\n # reference=f\"{ii}-xyz\",\n # description=\"A test sales order\",\n # )\n # sales_order.lines.create(\n # part=part,\n # quantity=12,\n # received=1\n # )", "def _prepare_add_missing_fields(self, values):\n res = {}\n onchange_fields = ['name', 'price_unit', 'product_uom', 'tax_id']\n if values.get('order_id') and values.get('product_id') and any(f not in values for f in onchange_fields):\n line = self.new(values)\n line.product_id_change()\n for field in onchange_fields:\n if field not in values:\n res[field] = line._fields[field].convert_to_write(line[field], line)\n res['init_qty'] = values.get('product_uom_qty')\n _logger.debug(\"********************* dropship_portal\\sale_order res **********************: %r\", res)\n return res", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for 
this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def prepare_order(request):\n distributer_id = request.GET.get('distributer')\n billing_address_id = request.GET.get('bill_addr')\n pickup_method = 2 # this is left in place if we ever decide to have door-to-door deliveries - otherwise it should be deleted\n cart = Cart.objects.get_or_create(user=request.user, processed_to_order=False)[0]\n user_bill_addr = UserBillingAddress.objects.get_or_create(pk=billing_address_id, user=request.user)[0]\n distributer = Distributer.objects.get(pk=distributer_id)\n\n # Create order\n order = Order()\n order.user = request.user\n order.distributer = distributer\n order.subtotal = cart.subtotal\n order.tax_total = cart.tax_total\n order.total = cart.total\n order.discount_for_returned_package = 0 #TODO implement returned packaging\n order.to_pay = 0 #TODO implement returned packaging\n order.delivery_method = pickup_method\n order.save()\n\n # create all order items\n for item in cart.cartitem_set.all():\n order_item = OrderItem()\n order_item.order = order\n order_item.item_name = str(item.item)\n order_item.item_price = item.item.price\n order_item.item_quantity = item.quantity\n 
order_item.item_decimal_quantity = 0 #TODO implement decimal quantity\n order_item.item_unit_of_measure = \"kom\" #TODO implement decimal quantity\n order_item.item_tax_bracket = item.item.tax_bracket\n order_item.item_subtotal = item.line_subtotal\n order_item.item_tax_total = item.line_tax_total\n order_item.item_total = item.line_tax_total\n if item.item.package_type == None:\n order_item.item_package = None\n order_item.item_package_price = 0\n else:\n order_item.item_package = item.item.package_type.type\n order_item.item_package_price = item.item.package_type.price\n order_item.item_package_subtotal = item.line_package_subtotal\n order_item.item_package_tax_total = item.line_package_tax_total\n order_item.item_package_total = item.line_package_total\n order_item.save()\n\n billing_address = OrderBillingAddress()\n billing_address.order = order\n billing_address.name = user_bill_addr.name\n billing_address.surname = user_bill_addr.surname\n billing_address.street_name = user_bill_addr.street_name\n billing_address.street_nr = user_bill_addr.street_nr\n billing_address.zip_code = user_bill_addr.zip_code\n billing_address.city = user_bill_addr.city\n billing_address.country = user_bill_addr.country\n billing_address.vat_nr = user_bill_addr.vat_nr\n billing_address.vat_taxpayer = user_bill_addr.vat_taxpayer\n billing_address.save()\n\n return redirect(reverse('orders_overview', kwargs={'pk': str(order.pk)}))" ]
[ "0.6888037", "0.65397227", "0.63925135", "0.6348369", "0.6280681", "0.6279953", "0.6240536", "0.619458", "0.6017149", "0.5950889", "0.59003526", "0.58743674", "0.5850543", "0.58399665", "0.5826049", "0.57910156", "0.5758798", "0.57562804", "0.57397044", "0.57277566", "0.5726977", "0.5708241", "0.5700561", "0.5682838", "0.5663719", "0.5646196", "0.56043154", "0.5580754", "0.54989475", "0.54949695" ]
0.7962728
0
Print out instructions for users to initialize shell support.
def shell_init_instructions(cmd, equivalent): shell_specific = "{sh_arg}" in equivalent msg = [ "`%s` requires Spack's shell support." % cmd, "", "To set up shell support, run the command below for your shell.", "", color.colorize("@*c{For bash/zsh/sh:}"), " . %s/setup-env.sh" % spack.paths.share_path, "", color.colorize("@*c{For csh/tcsh:}"), " source %s/setup-env.csh" % spack.paths.share_path, "", color.colorize("@*c{For fish:}"), " source %s/setup-env.fish" % spack.paths.share_path, "", color.colorize("@*c{For Windows batch:}"), " source %s/spack_cmd.bat" % spack.paths.share_path, "", "Or, if you do not want to use shell support, run " + ("one of these" if shell_specific else "this") + " instead:", "", ] if shell_specific: msg += [ equivalent.format(sh_arg="--sh ") + " # bash/zsh/sh", equivalent.format(sh_arg="--csh ") + " # csh/tcsh", equivalent.format(sh_arg="--fish") + " # fish", equivalent.format(sh_arg="--bat ") + " # batch", ] else: msg += [" " + equivalent] msg += [ "", "If you have already set up Spack's shell support but still receive", "this message, please make sure to call Spack via the `spack` command", "without any path components (such as `bin/spack`).", ] msg += [""] tty.error(*msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def help_shell(self):\n help_str = \"\"\"Execute a command as if at the OS prompt.\n\n Usage: shell cmd\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))", "def shell():\n pass", "def main_menu_for_testing():\n print(PROMPT_TEXT)", "def shell(self, **options):\n pass", "def user_help():\n author = 'Will Garside'\n description = 'Toggles the laptop screen display source. Requires the circuit created in Schematic_src.fzz'\n expected_args = {}\n env_list = {}\n\n return author, description, expected_args, env_list", "def init():\n print(\"Installed everything under {0} \"\n \"virtual environment\".format(package_name()))", "def available_shells(self):", "def init_shell(self):\n self.shell = PlayerTerminalInteractiveShell.instance(\n commands=self.commands,\n speed=self.speed,\n parent=self,\n display_banner=False,\n profile_dir=self.profile_dir,\n ipython_dir=self.ipython_dir,\n user_ns=self.user_ns,\n )\n self.shell.configurables.append(self)", "def help():\n print(UI.HELP)", "def setup_completion(shell, show_code):\n click.echo('Setup completion for shell {!r}'.format(shell))\n\n if show_code:\n code = click_completion.get_code(shell=shell)\n click.echo('Installing code: \\n{}'.format(code))\n\n shell_, path = click_completion.install(shell=shell)\n click.secho('Installed completion in path {!r}'.format(path))", "def init():\n click.secho(\"[+] Initialize permissions\", fg=\"cyan\")\n init_permissions()\n click.secho(\"[+] Initialize permissions successfully\", fg=\"green\")", "def print_usage():\n print(helptxt)\n sys.exit(2)", "def test_cli_help(self):\n output = self.update_command('-h')", "def quick_test():\n do_command('Help: Command=Help')\n do_command('Help: Command=\"GetInfo\"')\n #do_command('SetPreference: Name=GUI/Theme Value=classic Reload=1')", "def startup():\n menuhook.register(\n \"quickpreview\",\n \"howtos\",\n quickpreview,\n menu=[\"&Scripting\", \"Python3 Development\", \"How To\"],\n text=\"Create a quick preview\",\n tooltip=\"Create a quick preview\")", "def syntax():\n\tversion()\n print \"Goal: Install NodeJS and NPM on a Debian Stable system\"\n print \"Syntax: \"\n print \" -h: Display the help message and exit\"\n print \" -v: Display the version and exit\"\n print \" -d: Run the script in debug mode (log in the \"+_LOG_FILE+\" file)\"\n print \" -o PATH: Set the installation PATH (default is \"+_DEFAULT_PATH+\")\"", "def exec_init_cmd(self):\n\n sys.argv = ['-c']\n self.push(self.rc.c)", "def shell_cmd(ctx, extra_flags):\n ctx.load_plugins(extra_flags=extra_flags)\n import code\n from lektor.db import F, Tree\n from lektor.builder import Builder\n\n banner = \"Python %s on %s\\nLektor Project: %s\" % (\n sys.version,\n sys.platform,\n ctx.get_env().root_path,\n )\n ns = {}\n startup = os.environ.get(\"PYTHONSTARTUP\")\n if startup and os.path.isfile(startup):\n with open(startup, \"r\", encoding=\"utf-8\") as f:\n eval(compile(f.read(), startup, \"exec\"), ns) # pylint: disable=eval-used\n pad = ctx.get_env().new_pad()\n ns.update(\n project=ctx.get_project(),\n env=ctx.get_env(),\n pad=pad,\n tree=Tree(pad),\n config=ctx.get_env().load_config(),\n make_builder=lambda: Builder(\n ctx.get_env().new_pad(), ctx.get_default_output_path()\n ),\n F=F,\n )\n try:\n c = Config()\n c.TerminalInteractiveShell.banner2 = banner\n embed(config=c, user_ns=ns)\n except NameError: # No IPython\n code.interact(banner=banner, local=ns)", "def init():\n\n @click.group(cls=cli.make_commands(__name__))\n def run():\n \"\"\"Cross-cell supervision tools.\"\"\"\n 
cli.init_logger('daemon.conf')\n\n return run", "def cli_help(self):\n self._generate_cli_version()\n self._generate_cli_help()\n sys.exit(0)", "def setup():\n\n generators = {\"man\": gen_manpage, \"cpl\": gen_completions}\n\n prsr = argparse.ArgumentParser(\n description=\"xNVMe CLI Bash-completions and man page generator\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n prsr.add_argument(\n \"generator\",\n help=\"Generator to run\",\n default=sorted(generators.keys())[0],\n choices=sorted(generators.keys()),\n )\n prsr.add_argument(\n \"--tools\",\n nargs=\"*\",\n help=\"Name of tools to generate bash-completions for\",\n )\n prsr.add_argument(\n \"--output\",\n help=\"Path to directory in which to emit completion scripts\",\n default=os.sep.join([\".\"]),\n )\n prsr.add_argument(\n \"--log-level\",\n help=\"log-devel\",\n default=\"INFO\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n )\n\n args = prsr.parse_args()\n args.output = expand_path(args.output)\n args.gen = generators[args.generator]\n\n if not args.tools:\n args.tools = find_binaries()\n\n logging.basicConfig(\n format=\"%(asctime)s %(message)s\",\n level=getattr(logging, args.log_level.upper(), None),\n )\n\n return args", "def start_shell(self):\n cmd = 'shell'\n end_strs = ['>']\n self.run_with_output(cmd, end_strs)\n return True", "def cli():\n return", "def terminal_init(self):\n pass", "def cli():\n pass", "def cli():\r\n pass", "def shell():\n from flask.globals import _app_ctx_stack\n banner = 'Welcome to Opsy!'\n app = _app_ctx_stack.top.app\n shell_ctx = {'create_app': create_app,\n 'db': db,\n 'User': User,\n 'Role': Role,\n 'Permission': Permission,\n 'Zone': Zone,\n 'Host': Host,\n 'Group': Group,\n 'HostGroupMapping': HostGroupMapping}\n shell_ctx.update(app.make_shell_context())\n try:\n from IPython import embed\n embed(user_ns=shell_ctx, banner1=banner)\n return\n except ImportError:\n import code\n code.interact(banner, local=shell_ctx)", "async def os(self):\n\n await self.bot.say(box(system() + \"\\n\" + release(), 'Bash'))", "def when_start():\n\n print(f\"{'=' * shutil.get_terminal_size()[0]}\")\n print(Info.name)\n print(f\"Version: {Info.version}\")\n print(f\"Started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\")\n print(f\"{'=' * shutil.get_terminal_size()[0]}\")", "def cli():" ]
[ "0.69160414", "0.6561335", "0.63099104", "0.61669785", "0.6131323", "0.6127535", "0.60904056", "0.6050623", "0.6006464", "0.5993894", "0.5986725", "0.594844", "0.5937397", "0.59171444", "0.5916378", "0.5912071", "0.59116024", "0.5891829", "0.5866645", "0.5843955", "0.5842719", "0.5821698", "0.58144057", "0.57940763", "0.5765703", "0.5763284", "0.5760836", "0.5756908", "0.5753512", "0.5747537" ]
0.7073582
0
Get all the media files from the timeline.
def get_media(api, num_tweets=25, profile="@hakeemangulu", admin=False): # Store the media urls in a list media_files = [] # Create cursor object for the timeline if admin: # If the admin is using the application, return his timeline tl = tweepy.Cursor(api.home_timeline).items(num_tweets) else: # If the admin is not using the application, return the specified # user's timeline tl = tweepy.Cursor(api.user_timeline, screen_name=profile).items(num_tweets) # Iterate through the timeline and extract images for status in tl: # Get all media from a tweet media = status.entities.get('media', []) # Add non-empty media to the set for image in media: # Only add the image if it is a photo or GIF (as opposed to a # video) if image['type'] == 'photo' or image['type'] == 'animated_gif': media_files.append(image['media_url']) return media_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMediaFiles(path):\n fileList = getMediaFileList(path)\n # dirList = getDirectoryList(path)\n\n # results = map(getMediaFiles, dirList)\n\n # for result in results:\n # fileList = fileList + result\n\n return fileList", "def get_list(self ):\n headers = { 'Authorization' : self.client.authorization_header }\n response = requests.get(\n self.client.url + '/media', \n headers = headers\n )\n\n return json.loads(response.text)", "def list_of_medias(args, sourcedir, recursive):\n files = list_of_files(sourcedir, recursive)\n return [_ for _ in files if is_media_within_dates(_, args.dates)]", "def listall(self):\n list_query = \"\"\"SELECT * FROM %s\"\"\" % MediaCollection.COLLECTIONS_TABLE\n self.cursor.execute(list_query)\n return [Media.fromtuple(media) for media in self.cursor.fetchall()]", "def get_media_files(tweets, today, hour, output_folder):\n media_file = \"\"\n tweet_id = \"\"\n create_picture_folder(output_folder)\n\n for tweet in tweets:\n if tweet.get('delete') != None:\n continue\n if not tweet['retweeted'] and 'RT @' not in tweet['text'] and not tweet['in_reply_to_status_id']:\n media = tweet.get('entities').get('media', [])\n if len(media) > 0:\n # media_files.append(media[0]['media_url'])\n media_file += media[0]['media_url']\n # tweet_ids.append(tweet['id'])\n tweet_id += tweet['id_str']\n return media_file, tweet_id", "def list_of_medias_ext(args, sourcedir):\n result = list()\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n fullname = os.path.join(sourcedir, basename)\n if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):\n result.append(fullname)\n else:\n if is_media_within_dates(fullname, args.dates):\n result.append(fullname)\n return result", "def read_all_files():\n paths = get_all_recording_paths()\n\n return read_by_paths(paths)", "def filter(self):\n for f in FileHelper.ALL_PATHS:\n media_obj = MediaObject(FileHelper.get_url(f), FileHelper.get_title(f), FileHelper.get_media_type(f), FileHelper.get_icon(f), FileHelper.get_duration(f), FileHelper.get_ctype(f))\n _id = media_obj.uuid\n if media_obj.media_type == \"image\":\n DB.IMAGES[_id] = media_obj\n elif media_obj.media_type == \"audio\":\n DB.MUSIC[_id] = media_obj\n elif media_obj.media_type == \"video\":\n DB.VIDEOS[_id] = media_obj\n else:\n print \"File '%s' doesn't play nice.\" % (f)", "def getMediaFileList(path):\n\n fileTypes = (\"jpg\", \"mov\", \"mp4\")\n fileList = []\n for base_dir, dirs, files in os.walk(path):\n fileList.extend([os.path.join(base_dir, f) for f in files if f.split(\".\")[1].lower() in fileTypes])\n\n # for the new canon camera, ther are some .Trash and trashinfo files, want to ignore them\n fileList = [file for file in fileList if \"trash\" not in file and \"Trash\" not in file]\n return fileList", "def download_media_from_bandwidth(media_urls):\n downloaded_media_files = []\n for media_url in media_urls:\n media_id = get_media_id(media_url)\n filename = get_media_filename(media_url)\n with open(filename, \"wb\") as f:\n try:\n downloaded_media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id)\n f.write(downloaded_media.body)\n except Exception as e:\n print(e)\n downloaded_media_files.append(filename)\n return downloaded_media_files", "def media(self, path):\n path = \"/media/%s%s\" % (self.session.root, format_path(path))\n\n url, params, headers = self.request(path, method='GET')\n\n return self.rest_client.GET(url, headers)", "def get_files(self):\r\n return 
self._filelist", "def files(self):\r\n url = '{0}/files'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_real_media(self, provider_name):\n return [Media(f, provider_name) for f in self.videos]", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def listFiles(self):\n pass", "def get_list_twitter_files(self):\n list_twitter_files = []\n for site in self.sites:\n for news_type in self.news_types:\n\n # accessing files through directories\n site_folder = join(self.dirname, site)\n news_path = join(site_folder, news_type)\n\n # only obtaining the tweets/retweets at this time\n exclude = [\"news\", \"user_profile\", \"user_timeline_tweets\", \"user_followers\",\n \"user_following\"]\n\n # iterating through directories only focusing on ones containing the news content\n for root, dirs, files in walk(news_path, topdown=True):\n dirs[:] = [d for d in dirs if d not in exclude]\n\n # collecting all articles\n for f in files:\n if f.endswith(\".json\") and len(dirs) == 0:\n yield join(root, f)\n list_twitter_files.append(join(root, f))\n print(len(list_twitter_files))\n # return list_news_files", "def self_media(self):\n\n url = \"https://api.instagram.com/v1/users/self/media/recent/?access_token={0}\".format(self.access_token)\n request = requests.get(url)\n return request.json()", "def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e", "def get_all(self) -> Generator:\n\n for filename in self.list_files():\n yield self.get(filename)", "def content_list(self):\n return self.face.FACES.files.find({})", "def location_medias_recent_v1(\n self, location_pk: int, amount: int = 63\n ) -> List[Media]:\n return self.location_medias_v1(location_pk, amount, tab_key=\"recent\")", "def show_medias():\n t0 = time.time()\n print(f\"--- {request}\")\n print(f\"--- {user_session}\")\n # Set context by owner and the data selections\n u_context = UserContext(user_session, current_user, request)\n # Which range of data is shown\n u_context.set_scope_from_request(request, \"media_scope\")\n u_context.count = 20\n\n with MediaReader(\"read\", u_context) as service:\n # datareader = MediaReader(readservice, u_context)\n res = service.read_my_media_list()\n\n if Status.has_failed(res, False):\n flash(f'{res.get(\"statustext\",\"error\")}', \"error\")\n medias = res.get(\"items\", [])\n\n stk_logger(u_context, f\"-> bp.scene.media.show_medias fw n={len(medias)}\")\n return render_template(\n \"/scene/medias.html\",\n medias=medias,\n user_context=u_context,\n elapsed=time.time() - t0,\n )", "def _get_parsed_files(self):\n\n parsed = []\n with Historical_ROAs_Parsed_Table() as t:\n for row in t.execute(f'SELECT * FROM {t.name}'):\n parsed.append(row['file'])\n return parsed", "def get_media():\n\n error_on_unauthorized()\n\n media = Upload.query.order_by(Upload.id)\n total_num = media.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query parameters out of range\")\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n\n return jsonify(total=total_num, uploads=[upload_to_dict(u) for u in media.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")", "def getExternalFiles(self):\n return []", "def 
get_list_news_files(self):\n # list_news_files = []\n for site in self.sites:\n for news_type in self.news_types:\n\n # accessing files through directories\n site_folder = join(self.dirname, site)\n news_path = join(site_folder, news_type)\n\n # only obtaining the news articles at this time\n exclude = [\"tweets\", \"retweets\", \"user_profile\", \"user_timeline_tweets\", \"user_followers\",\n \"user_following\"]\n\n # iterating through directories only focusing on ones containing the news content\n for root, dirs, files in walk(news_path, topdown=True):\n dirs[:] = [d for d in dirs if d not in exclude]\n\n # collecting all articles\n for f in files:\n if f.endswith(\".json\") and len(dirs) == 0:\n yield join(root, f)\n # list_news_files.append(join(root, f))\n # print(len(list_news_files))\n # return list_news_files\n\n def get_list_twitter_files(self):\n \"\"\"Return files path iterator of news\"\"\"\n list_twitter_files = []\n for site in self.sites:\n for news_type in self.news_types:\n\n # accessing files through directories\n site_folder = join(self.dirname, site)\n news_path = join(site_folder, news_type)\n\n # only obtaining the tweets/retweets at this time\n exclude = [\"news\", \"user_profile\", \"user_timeline_tweets\", \"user_followers\",\n \"user_following\"]\n\n # iterating through directories only focusing on ones containing the news content\n for root, dirs, files in walk(news_path, topdown=True):\n dirs[:] = [d for d in dirs if d not in exclude]\n\n # collecting all articles\n for f in files:\n if f.endswith(\".json\") and len(dirs) == 0:\n yield join(root, f)\n list_twitter_files.append(join(root, f))\n print(len(list_twitter_files))\n # return list_news_files", "def list_media(self,\n series_id: str,\n sort: Optional[SortOption] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n locale: Optional[Any] = None) -> list:\n params: Dict[str, Any] = {\n \"series_id\": series_id,\n }\n\n if sort:\n params[\"sort\"] = sort.value\n if limit:\n params[\"limit\"] = limit\n if offset:\n params[\"offset\"] = offset\n if locale:\n params[\"locale\"] = locale\n\n return self._api._api_call(\"list_media\", params)", "def files(self, **kwargs) -> \"FileMetadataList\":\n return self._cognite_client.files.list(asset_ids=[self.id], **kwargs)", "def files(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/files'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json" ]
[ "0.7197177", "0.6829401", "0.6617265", "0.65482754", "0.6439673", "0.6379388", "0.6306178", "0.6199665", "0.6124367", "0.6110009", "0.6092357", "0.5991812", "0.5875019", "0.5816229", "0.5801913", "0.5788004", "0.5765499", "0.57631904", "0.57608795", "0.5754463", "0.57203853", "0.57131183", "0.5704394", "0.5679188", "0.5676249", "0.5672966", "0.5663464", "0.56632614", "0.56623024", "0.5659518" ]
0.70954514
1
A combination of the get_media and downloader functions: it finds the media and then downloads it in one call.
def get_and_download(api, path, num_tweets=25, profile="@hakeemangulu", admin=False): return downloader(get_media(api, num_tweets, profile, admin), path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _get_remote_media_impl(\n self, server_name: str, media_id: str\n ) -> Tuple[Optional[Responder], dict]:\n media_info = await self.store.get_cached_remote_media(server_name, media_id)\n\n # file_id is the ID we use to track the file locally. If we've already\n # seen the file then reuse the existing ID, otherwise generate a new\n # one.\n\n # If we have an entry in the DB, try and look for it\n if media_info:\n file_id = media_info[\"filesystem_id\"]\n file_info = FileInfo(server_name, file_id)\n\n if media_info[\"quarantined_by\"]:\n logger.info(\"Media is quarantined\")\n raise NotFoundError()\n\n if not media_info[\"media_type\"]:\n media_info[\"media_type\"] = \"application/octet-stream\"\n\n responder = await self.media_storage.fetch_media(file_info)\n if responder:\n return responder, media_info\n\n # Failed to find the file anywhere, lets download it.\n\n try:\n media_info = await self._download_remote_file(\n server_name,\n media_id,\n )\n except SynapseError:\n raise\n except Exception as e:\n # An exception may be because we downloaded media in another\n # process, so let's check if we magically have the media.\n media_info = await self.store.get_cached_remote_media(server_name, media_id)\n if not media_info:\n raise e\n\n file_id = media_info[\"filesystem_id\"]\n if not media_info[\"media_type\"]:\n media_info[\"media_type\"] = \"application/octet-stream\"\n file_info = FileInfo(server_name, file_id)\n\n # We generate thumbnails even if another process downloaded the media\n # as a) it's conceivable that the other download request dies before it\n # generates thumbnails, but mainly b) we want to be sure the thumbnails\n # have finished being generated before responding to the client,\n # otherwise they'll request thumbnails and get a 404 if they're not\n # ready yet.\n await self._generate_thumbnails(\n server_name, media_id, file_id, media_info[\"media_type\"]\n )\n\n responder = await self.media_storage.fetch_media(file_info)\n return responder, media_info", "def get_media(self, url, out_filename=None, raw_data=False):\n if not raw_data:\n if not out_filename:\n out_filename = os.path.join(settings.BW_MMS_DIRECTORY,\n url.split('/')[-1])\n\n if not os.path.isdir(os.path.dirname(out_filename)):\n raise ValueError('Invalid output directory: {} - '\n 'unable to download MMS'.\n format(os.path.dirname(out_filename)))\n\n if os.path.isfile(out_filename):\n logging.info('filename {}, already exists - will be '\n 'overwritten.....'.format(out_filename))\n\n try:\n resp = requests.get(url, auth=(self.token, self.secret))\n except requests.exceptions.RequestException as e:\n logging.info('Error while fetching media: {}'.format(e))\n return\n\n if resp.status_code == requests.codes.ok:\n try:\n if raw_data:\n return resp.content\n else:\n with open(out_filename, 'wb') as fd:\n fd.write(resp.content)\n\n return out_filename\n except Exception as e:\n logging.info('Error: {} while writing file: {}'.\n format(e, out_filename))\n return\n\n logging.info('Invalid URI or an error occured, response: {}, '\n 'response content: {}'.format(resp.status_code,\n resp.text))", "def download(self, output_dir=None, chunk_size=1024):\n def download_content(content_link, output_dir):\n \"\"\"Download the content of a media and save it in a existing\n directory.\n\n Args:\n content_link (str):\n output_dir (str):\n Returns:\n dict: local version of the media object\n \"\"\"\n if content_link is None: return None\n res = requests.get(content_link, stream=True)\n try:\n res.raise_for_status()\n 
except requests.exceptions.HTTPError:\n return None\n img_name, img_format = parse_image_url(res.url)\n filepath = '{}/{}.{}'.format(output_dir, img_name, img_format)\n\n with open(filepath, mode='wb') as image_file:\n for chunk in res.iter_content(chunk_size=chunk_size):\n image_file.write(chunk)\n\n return abspath(filepath)\n\n output_dir = output_dir or getcwd()\n\n media_links = dict(\n image=[],\n video=[]\n )\n if self['media'] and self['media']['image']:\n downloaded_images = [\n download_content(item, output_dir) for item in self['media']['image']\n ]\n media_links['image'].extend(list(filter(None, downloaded_images)))\n if self['media'] and self['media']['video']:\n downloaded_videos = [\n {\n 'url': download_content(item['url'], output_dir),\n 'thumbnail': download_content(item['thumbnail'], output_dir)\n } for item in self['media']['video']\n ]\n media_links['video'].extend(\n filter(lambda x: x['url'] and x['thumbnail'], downloaded_videos)\n )\n\n return media_links", "def download(self):\n\n # os.open *should* give a thread-safe way to exlusivly open files\n filepath = self.film\n try:\n # os.O_BINARY is only avilable and needed on windows\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY | os.O_BINARY\n except:\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY\n try:\n fd = os.open(filepath, flags)\n except:\n return\n\n try:\n response = self.session.get(self.filmurl, stream=True)\n if response.status_code == 200:\n for chunk in response.iter_content(1024):\n os.write(fd, chunk)\n except:\n # Remove partial img file if request or stream fails\n os.close(fd)\n os.remove(filepath)", "async def get_remote_media(\n self,\n request: SynapseRequest,\n server_name: str,\n media_id: str,\n name: Optional[str],\n ) -> None:\n if (\n self.federation_domain_whitelist is not None\n and server_name not in self.federation_domain_whitelist\n ):\n raise FederationDeniedError(server_name)\n\n # Don't let users download media from domains listed in the config, even\n # if we might have the media to serve. 
This is Trust & Safety tooling to\n # block some servers' media from being accessible to local users.\n # See `prevent_media_downloads_from` config docs for more info.\n if server_name in self.prevent_media_downloads_from:\n respond_404(request)\n return\n\n self.mark_recently_accessed(server_name, media_id)\n\n # We linearize here to ensure that we don't try and download remote\n # media multiple times concurrently\n key = (server_name, media_id)\n async with self.remote_media_linearizer.queue(key):\n responder, media_info = await self._get_remote_media_impl(\n server_name, media_id\n )\n\n # We deliberately stream the file outside the lock\n if responder:\n media_type = media_info[\"media_type\"]\n media_length = media_info[\"media_length\"]\n upload_name = name if name else media_info[\"upload_name\"]\n await respond_with_responder(\n request, responder, media_type, media_length, upload_name\n )\n else:\n respond_404(request)", "def download_media_from_bandwidth(media_urls):\n downloaded_media_files = []\n for media_url in media_urls:\n media_id = get_media_id(media_url)\n filename = get_media_filename(media_url)\n with open(filename, \"wb\") as f:\n try:\n downloaded_media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id)\n f.write(downloaded_media.body)\n except Exception as e:\n print(e)\n downloaded_media_files.append(filename)\n return downloaded_media_files", "def download(self, account, code):\n\n url = Spider.BASE_URL + \"/p/%s/?taken-by=%s\" % (code, account)\n r = self.session.get(url)\n content_match = re.search(r\"<script.*?>\\s*?window._sharedData\\s*?=\\s*?({.*}).*?</script>\", r.text,\n re.MULTILINE)\n data = json.loads(content_match.group(1))\n media = data['entry_data']['PostPage'][0]['graphql']['shortcode_media']\n download_urls = []\n if media['__typename'] == 'GraphVideo': # video\n download_urls.append(media[\"video_url\"])\n if media['__typename'] == 'GraphImage': # image\n download_urls.append(media[\"display_url\"])\n if media['__typename'] == 'GraphSidecar': # slide\n nodes = media['edge_sidecar_to_children']['edges']\n for node in nodes:\n node = node['node']\n if node['is_video']:\n download_urls.append(node['video_url'])\n else:\n download_urls.append(node['display_url'])\n\n actual_download_dir = os.path.join(download_dir, account)\n if not os.path.isdir(actual_download_dir):\n os.mkdir(actual_download_dir)\n for url in download_urls:\n filename = os.path.join(actual_download_dir, url.split('/')[-1].split('?')[0])\n temp_name = filename + '.tmp'\n if os.path.isfile(filename):\n if self.spider.auto_stop:\n print('file', filename, \"already exists, exiting......\")\n sys.exit()\n print('file', filename, \"already exists, skipping\")\n else:\n print('downloading %s:' % filename)\n r = self.session.get(url, stream=True)\n content_length = int(r.headers['content-length'])\n curr = 0\n with open(temp_name, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n f.write(chunk)\n curr += 1024\n progress(curr, content_length)\n os.rename(temp_name, filename)\n self.spider.item_count += 1", "def download(self):\n #the link has some meta data in it that we need to get a hold of so we cant use metaData.getLink()\n data = None\n\n for link in self.metaData.jsonObj['links']:\n if link.get('rel') == \"content\":\n data = link\n\n assert data is not None\n\n response = self._adapter.getRequest(data['href'], self._baseHeader)\n return {\"filename\": data['title'], \"mime\": data['type'], \"binary\": response['Body'] }", "async def download_media(\n self,\n 
message: Union[\"types.Message\", str],\n file_name: str = DEFAULT_DOWNLOAD_DIR,\n block: bool = True,\n progress: callable = None,\n progress_args: tuple = ()\n ) -> Optional[str]:\n available_media = (\"audio\", \"document\", \"photo\", \"sticker\", \"animation\", \"video\", \"voice\", \"video_note\",\n \"new_chat_photo\")\n\n if isinstance(message, types.Message):\n for kind in available_media:\n media = getattr(message, kind, None)\n\n if media is not None:\n break\n else:\n raise ValueError(\"This message doesn't contain any downloadable media\")\n else:\n media = message\n\n if isinstance(media, str):\n file_id_str = media\n else:\n file_id_str = media.file_id\n\n file_id_obj = FileId.decode(file_id_str)\n\n file_type = file_id_obj.file_type\n media_file_name = getattr(media, \"file_name\", \"\")\n file_size = getattr(media, \"file_size\", 0)\n mime_type = getattr(media, \"mime_type\", \"\")\n date = getattr(media, \"date\", 0)\n\n directory, file_name = os.path.split(file_name)\n file_name = file_name or media_file_name or \"\"\n\n if not os.path.isabs(file_name):\n directory = self.PARENT_DIR / (directory or DEFAULT_DOWNLOAD_DIR)\n\n if not file_name:\n guessed_extension = self.guess_extension(mime_type)\n\n if file_type in PHOTO_TYPES:\n extension = \".jpg\"\n elif file_type == FileType.VOICE:\n extension = guessed_extension or \".ogg\"\n elif file_type in (FileType.VIDEO, FileType.ANIMATION, FileType.VIDEO_NOTE):\n extension = guessed_extension or \".mp4\"\n elif file_type == FileType.DOCUMENT:\n extension = guessed_extension or \".zip\"\n elif file_type == FileType.STICKER:\n extension = guessed_extension or \".webp\"\n elif file_type == FileType.AUDIO:\n extension = guessed_extension or \".mp3\"\n else:\n extension = \".unknown\"\n\n file_name = \"{}_{}_{}{}\".format(\n FileType(file_id_obj.file_type).name.lower(),\n datetime.fromtimestamp(date or time.time()).strftime(\"%Y-%m-%d_%H-%M-%S\"),\n self.rnd_id(),\n extension\n )\n\n downloader = self.handle_download((file_id_obj, directory, file_name, file_size, progress, progress_args))\n\n if block:\n return await downloader\n else:\n asyncio.get_event_loop().create_task(downloader)", "def _getURL(self, params):\n qs = Media.objects.filter(pk=params['id'], deleted=False)\n if not qs.exists():\n raise Http404\n response_data = list(qs.values(*MEDIA_PROPERTIES))\n # Use 24-hour URLS\n _presign(24*3600, response_data)\n\n element = params['element']\n if element == 'auto':\n if qs[0].meta.dtype == 'video':\n element = 'streaming'\n elif qs[0].meta.dtype == 'image':\n element = 'image'\n elif qs[0].meta.dtype == 'multi':\n return None\n if element == 'audio':\n return response_data[0].get('media_files',{}).get('audio',[])[0]['path']\n elif element == 'thumbnail':\n search_in = response_data[0].get('media_files',{}).get('thumbnail',[])\n elif element == 'thumbnail_gif':\n search_in = response_data[0].get('media_files',{}).get('thumbnail_gif',[])\n elif element == 'image':\n search_in = response_data[0].get('media_files',{}).get('image',[])\n elif element == 'streaming':\n search_in = response_data[0].get('media_files',{}).get('streaming',[])\n elif element == 'archival':\n search_in = response_data[0].get('media_files',{}).get('archival',[])\n elif element == 'attachment':\n search_in = response_data[0].get('media_files',{}).get('attachment',[])\n\n if not search_in:\n return None\n quality = params['quality']\n max_delta = sys.maxsize\n quality_idx = 0\n for idx, info in enumerate(search_in):\n delta = 
abs(quality-info['resolution'][0])\n if delta < max_delta:\n quality_idx = idx\n max_delta = delta\n return search_in[quality_idx]['path']", "def download_files(self):", "def download_and_prepare(self):\n self._download_and_prepare()", "async def get_file(self, link, name, md5, session):\n if os.path.exists(name) or md5 in opts.archived_md5:\n self.count += 1\n return\n\n async with session.get(link) as media:\n # Open file initially with .part suffix\n with open(f\"{name}.part\", \"wb\") as f:\n while True:\n chunk = await media.content.read(1024)\n if not chunk:\n break\n f.write(chunk)\n\n # Remove .part suffix once complete\n # After this point file won't get removed if script gets interrupted\n os.rename(f\"{name}.part\", name)\n\n if opts.archive:\n log_hash(md5)\n self.count += 1\n msg(f\"{self.fetch_progress()} {self.board}/{self.dir}/{name}\")", "async def _download_remote_file(\n self,\n server_name: str,\n media_id: str,\n ) -> dict:\n\n file_id = random_string(24)\n\n file_info = FileInfo(server_name=server_name, file_id=file_id)\n\n with self.media_storage.store_into_file(file_info) as (f, fname, finish):\n request_path = \"/\".join(\n (\"/_matrix/media/r0/download\", server_name, media_id)\n )\n try:\n length, headers = await self.client.get_file(\n server_name,\n request_path,\n output_stream=f,\n max_size=self.max_upload_size,\n args={\n # tell the remote server to 404 if it doesn't\n # recognise the server_name, to make sure we don't\n # end up with a routing loop.\n \"allow_remote\": \"false\"\n },\n )\n except RequestSendFailed as e:\n logger.warning(\n \"Request failed fetching remote media %s/%s: %r\",\n server_name,\n media_id,\n e,\n )\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n except HttpResponseException as e:\n logger.warning(\n \"HTTP error fetching remote media %s/%s: %s\",\n server_name,\n media_id,\n e.response,\n )\n if e.code == twisted.web.http.NOT_FOUND:\n raise e.to_synapse_error()\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n except SynapseError:\n logger.warning(\n \"Failed to fetch remote media %s/%s\", server_name, media_id\n )\n raise\n except NotRetryingDestination:\n logger.warning(\"Not retrying destination %r\", server_name)\n raise SynapseError(502, \"Failed to fetch remote media\")\n except Exception:\n logger.exception(\n \"Failed to fetch remote media %s/%s\", server_name, media_id\n )\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n await finish()\n\n if b\"Content-Type\" in headers:\n media_type = headers[b\"Content-Type\"][0].decode(\"ascii\")\n else:\n media_type = \"application/octet-stream\"\n upload_name = get_filename_from_headers(headers)\n time_now_ms = self.clock.time_msec()\n\n # Multiple remote media download requests can race (when using\n # multiple media repos), so this may throw a violation constraint\n # exception. If it does we'll delete the newly downloaded file from\n # disk (as we're in the ctx manager).\n #\n # However: we've already called `finish()` so we may have also\n # written to the storage providers. 
This is preferable to the\n # alternative where we call `finish()` *after* this, where we could\n # end up having an entry in the DB but fail to write the files to\n # the storage providers.\n await self.store.store_cached_remote_media(\n origin=server_name,\n media_id=media_id,\n media_type=media_type,\n time_now_ms=self.clock.time_msec(),\n upload_name=upload_name,\n media_length=length,\n filesystem_id=file_id,\n )\n\n logger.info(\"Stored remote media in file %r\", fname)\n\n media_info = {\n \"media_type\": media_type,\n \"media_length\": length,\n \"upload_name\": upload_name,\n \"created_ts\": time_now_ms,\n \"filesystem_id\": file_id,\n }\n\n return media_info", "def _download_file(self, video_objects):\n downloaded_video = []\n path=\"media/\"\n for video_object in video_objects:\n if 'contentUrl' in video_object.keys() and video_object['contentUrl']!='':\n \n url = video_object['contentUrl']\n filename = url.split('/')[-1]\n r = requests.get(url, stream=True)\n \n with open(filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk:\n f.write(chunk)\n\n path+=filename\n return path", "def downloader(thread_num):\n tid = 'Thread ' + numprefix.format(thread_num) + ': '\n for i in range(thread_num, len(self.titles), thread_count):\n title, link = self.titles[i], self.download_urls[i]\n name = vidprefix.format(i) + ' ' + title + '.mp4'\n tries = 0\n while (not os.path.exists(name) or os.path.getsize(name) == 0) \\\n and tries <= trycount:\n if os.path.exists(name): os.remove(name)\n self.log(tid + 'Calling wget for ' + name)\n subprocess.call(['wget', '--output-document=' + name, link])\n tries += 1\n if (not os.path.exists(name) or os.path.getsize(name) == 0):\n self.log(tid + 'wget failed for ' + name)\n else:\n self.log(tid + 'wget successfully downloaded ' + name)", "def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()", "def download(self, url_match):\n pass", "async def _download(self) -> None:\n\n # do request\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url, auth=self._auth, timeout=self._timeout) as response:\n # check response\n if response.status == 200:\n # get data and return it\n self._buffer = await response.read()\n elif response.status == 401:\n log.error(\"Wrong credentials for downloading file.\")\n raise FileNotFoundError\n else:\n log.error(\"Could not download file from filecache.\")\n raise FileNotFoundError", "def _getDeferred(self, size='thumb'):\n url = getattr(self, \"%sUrl\" % size)\n print \"fetching\", size, url\n if url.startswith(\"file://\"):\n d = localLoad(url, delaySecs=2 * random.random())\n else:\n d = getPage(url)\n d.addErrback(lambda e: [sys.stderr.write(str(e)),\n sys.stderr.flush()])\n @d.addCallback\n def prn(r):\n print \"done\", url\n return r\n return d", "async def fetch_media(self, community_id, media_id) -> Optional[Media]:\n media_url = self._api_communities_url + str(community_id) + \"/medias/\" + str(media_id)\n async with self.web_session.get(media_url, headers=self._headers) as resp:\n if self.check_status(resp.status, media_url):\n data = await resp.json()\n return create_media_object(data.get('media'))", "def run(self):\n download(self.attempt)", "def getDownload(self, html, episode_number):\n soup = BeautifulSoup(html, \"html.parser\")\n download = soup.find_all('source')\n if download:\n self.downloads[\"Episode %s.mp4\" % str(episode_number)] = download[0]['src']\n return\n\n print(\"[!] 
Download link not found for episode %s\" % str(episode_number))", "def __getFile_httplib(self, _src, _dst):\n\n #-------------------- \n # Pre-download callbacks\n #-------------------- \n self.runEventCallbacks('downloadStarted', _src, -1)\n self.runEventCallbacks('downloading', _src, 0)\n\n\n\n #-------------------- \n # Download\n #-------------------- \n response = self.__httpsRequest('GET', _src)\n data = response.read() \n with open(_dst, 'wb') as f:\n f.write(data) \n\n\n\n #-------------------- \n # Post-download callbacks\n #-------------------- \n self.removeFromDownloadQueue(_src)\n self.runEventCallbacks('downloadFinished', _src)", "def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()", "def media(self, path):\n path = \"/media/%s%s\" % (self.session.root, format_path(path))\n\n url, params, headers = self.request(path, method='GET')\n\n return self.rest_client.GET(url, headers)", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def download(self, url):\n url = URL(url)\n downloader = getattr(self, 'download_%s' % url.scheme, None)\n if downloader is None:\n msg = \"We haven't implemented the '%s' protocol yet.\" % url.scheme\n raise NotImplementedError(msg)\n fp = None\n else:\n fp = downloader(url)\n return fp", "def download():\n raise NotImplementedError" ]
[ "0.6771369", "0.67361933", "0.6502162", "0.63834363", "0.63795763", "0.6263145", "0.6207519", "0.61159295", "0.60781926", "0.606364", "0.60588175", "0.59851134", "0.5968101", "0.59372675", "0.5937135", "0.59367", "0.5936326", "0.59268355", "0.59197426", "0.5913188", "0.5871099", "0.5833943", "0.58095235", "0.5796673", "0.57934415", "0.5793367", "0.5771133", "0.5771133", "0.57696676", "0.5759123" ]
0.68603694
0
Given a unique string 'job', connect to Redis and look at the Sorted Set which holds the results for that job. Take the top 5 and report them into the report key store.
def report_to_redis(job, count=5): # it's important that these main python methods # don't call the Singleton - _connection needs to be None to be # properly serialized. r = StrictRedis.from_url("redis://10.0.0.10:6379") for i in range(count): res = r.zpopmax('temp0') print(res) title = r.hget(res[0][0][:-1],res[0][0][-1:]+':t') r.set('success:'+str(job)+'|'+str(i), res[0][0]+'|%1.3f'%res[0][1]) r.delete('temp0') return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getjobsbydatabase(self):\n select_jobsbydb = (\n \"SELECT count(*), database FROM jobs \"\n \"WHERE latestjobversion=True AND insertdate BETWEEN %s AND %s \"\n \"AND (username NOT IN (%s)) GROUP BY database\"\n )\n\n self.pgcursor.execute(select_jobsbydb, (self.startdate, self.enddate, self.adminusers))\n\n\n databasejobs = {}\n\n rows = self.pgcursor.fetchall()\n for (jobs, database) in rows:\n if database != '': #discard jobs without database\n # Multidark dataset\n if 'multidark' in database:\n if 'multidark' in databasejobs:\n # add to jobs count if already exists\n databasejobs['Multidark'] += jobs\n else:\n # add new key if it doesn't exist\n databasejobs['Multidark'] = jobs\n # Vishnu Bolshoi dataset\n elif 'vishnu_bolshoi' in database:\n if 'Vishnu Bolshoi' in databasejobs:\n databasejobs['Vishnu Bolshoi'] += jobs\n else:\n databasejobs['Vishnu Bolshoi'] = jobs\n # Bolshoi Planck dataset\n elif 'bolshoi_planck' in database:\n if 'Bolshoi Planck' in databasejobs:\n databasejobs['Bolshoi Planck'] += jobs\n else:\n databasejobs['Bolshoi Planck'] = jobs\n # Bolshoi dataset\n elif 'bolshoi' in database:\n if 'Bolshoi' in databasejobs:\n databasejobs['Bolshoi'] += jobs\n else:\n databasejobs['Bolshoi'] = jobs\n # Millenium dataset, including mini Millenium\n elif 'millennium' in database:\n if 'Millennium' in databasejobs:\n databasejobs['Millennium'] += jobs\n else:\n databasejobs['Millennium'] = jobs\n # All pre-made datasets\n else:\n if 'Ready-Made' in databasejobs:\n databasejobs['Ready-Made'] += jobs\n else:\n databasejobs['Ready-Made'] = jobs\n\n # print(databasejobs)\n # print(\"Database: {0}, Jobs: {1}\".format(database, jobs))\n return databasejobs", "def run(self):\n\n host_jobs_reported = { }\n host_users_reported = { }\n\n while True:\n\n now = int(time.time())\n sock = None\n if self.server:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n (addr, port) = self.server.split(':', 1)\n try:\n sock.connect((addr, int(port)))\n except Exception, x:\n self.log('%s' % x)\n\n for line in commands.getoutput('qstat -x').split(\"</Job>\"):\n\n if line.find('<job_state>R</job_state>') < 0:\n # Not a running job\n continue\n\n match_info = re.search('<Job_Id>(\\d+)', line)\n if not match_info:\n self.log(\"Bad qstat line format '%s'\" % line)\n continue\n job = \"%s-%s\" % (match_info.group(1), self.resource)\n\n hosts = ''\n match_info = re.search('<exec_host>(.*)</exec_host>', line)\n if match_info:\n hosts = match_info.group(1)\n user = ''\n match_info = re.search('<Job_Owner>(.*)</Job_Owner>', line)\n if match_info:\n user = match_info.group(1).split('@')[0]\n else:\n self.log('job owner pattern not found in \"%s\"' % line)\n continue\n\n for host in re.split('\\+', hosts):\n host = host.split('/', 1)[0]\n if not host_jobs_reported.has_key(host) or \\\n host_jobs_reported[host] != job:\n message = \"SET %s %s %s job %s\" % (self.resource, host, now, job)\n self.log(message)\n if sock:\n try:\n sock.send(message + \"\\n\")\n # Receive and discard ack\n sock.recv(256)\n except Exception, x:\n self.log('%s' % x)\n host_jobs_reported[host] = job\n if not host_users_reported.has_key(host) or \\\n host_users_reported[host] != user:\n message = \"SET %s %s %s user %s\" % \\\n (self.resource, host, now, user)\n self.log(message)\n if sock:\n try:\n sock.send(message + \"\\n\")\n # Receive and discard ack\n sock.recv(256)\n except Exception, x:\n self.log('%s' % x)\n host_users_reported[host] = user\n \n if sock:\n sock.close()\n time.sleep(self.pause)", "def 
process_job():\n r = redis.StrictRedis()\n while True:\n curr_job = r.blpop('job_queue', 0)[1]\n r.hset('status', curr_job, 'processing')\n print('current job ID:', curr_job)\n # convert byte to string\n url = r.hget('urls', curr_job).decode(\"utf-8\")\n print('Current URL:', url)\n\n # if this url has not been requested before/is not in the db\n if Site.query.filter_by(url=url).first():\n r.hset('status', curr_job, 'complete')\n print('Job', curr_job, 'Completed')\n else:\n # fetches url page source\n try:\n html = str(get_html(url))\n print('Successfully retrieved HTML')\n # add results to database\n db.session.add(Site(url=url, html=html))\n db.session.commit()\n print('Added to database')\n r.hset('status', curr_job, 'complete')\n print('Job', curr_job, 'Completed')\n except ValueError:\n r.hset('status', curr_job, 'abort')\n print('Job', curr_job, 'Aborted')\n except TimeoutError:\n r.hset('status', curr_job, 'timeout')\n print('Job', curr_job, 'Timed Out')\n return", "def _get_njobs_in_queue(self, username):", "def get_results(job, limit):\n reader = results.ResultsReader(job.results(count=limit))\n return {\"results\": [row for row in reader]}", "def test_get_job_executions(self):\n url = '/%s/jobs/%d/executions/' % (self.api, self.job_1.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n job_exe_count = results['count']\n self.assertEqual(job_exe_count, 4)\n #check that we order by descending exe_num\n self.assertEqual(results['results'][0]['exe_num'], 4)", "def genJobList():\n nit=10\n reply=[]\n while len(reply)<10: #assume qstat fails if less that 10 jobs on cluster\n reply=chomp(os.popen('qstat|expand|tr -s \\' \\'|cut -d\\' \\' -f 1,2,5').readlines())\n nit+=1\n if nit>10: break\n return reply", "def job_sorter(self, job):\n key = natsort.natsort_keygen(key=self.job_title, alg=natsort.REAL)\n return key(job)", "def find_best_match_jobs(self,candidate_matrix):\n\n \"\"\"\n Example format for input:\n candidate_matrix =\n {\n 0: {\"This is a skill description\": 10, \"This is another skill description\": 7},\n 1: {\"This is a knowledge description\" : 8, \"This is another knowledge description\": 6}\n }\n \"\"\"\n\n\n\n\n\n\n # Get a list of all job codes to begin importing details for\n cursor = self.conn.execute(\"SELECT job_code FROM job\")\n\n # Get python list of all job codes\n job_codes = cursor.fetchall()\n\n\n job_score = {}\n\n for job_code in job_codes:\n job_code = job_code[0]\n job_importance_matrix = self.generate_job_importance_matrix(job_code)\n\n\n job_score[job_code] = self.calculate_job_score(candidate_matrix, job_importance_matrix)\n\n\n # Sort by job scores!\n sorted_jobs = sorted(job_score.iteritems(), key=operator.itemgetter(1))\n\n # Get in descending order\n sorted_jobs.reverse()\n\n # Return the jobs with the top 5 job scores for the candidate\n return sorted_jobs[:5]", "def rq_worker():\n setup_experiment(log)\n with Connection(db.redis_conn):\n # right now we care about low queue for bots\n worker = Worker(\"low\")\n worker.work()", "def filter_scrape_jobs(self,\n scrape_jobs,\n session,\n scraper_search):\n files = self._get_all_cache_files()\n num_cached = num_total = 0\n mapping = {}\n for job in scrape_jobs:\n cache_name = self.cached_file_name(\n job['query'],\n job['search_engine'],\n job['scrape_method'],\n job['page_number']\n )\n mapping[cache_name] = job\n num_total += 1\n\n for path in files:\n file_name = 
os.path.split(path)[1]\n job = mapping.get(file_name, None)\n\n if job:\n try:\n serp = self.get_serp_from_database(\n session,\n job['query'],\n job['search_engine'],\n job['scrape_method'],\n job['page_number']\n )\n except Exception:\n pass\n\n if not serp:\n serp = self.parse_again(\n file_name,\n job['search_engine'],\n job['query']\n )\n\n serp.scraper_searches.append(scraper_search)\n session.add(serp)\n\n if num_cached % 200 == 0:\n session.commit()\n\n self.result_writer.store_serp_result(serp, self.config)\n num_cached += 1\n scrape_jobs.remove(job)\n\n # self.logger.info('{} cache files found in {}'.format(\n # len(files),\n # self.config.get('cachedir'))\n # )\n # self.logger.info('''{}/{} objects have been read from the cache.\n # {} remain to get scraped.'''.format(\n # num_cached,\n # num_total,\n # num_total - num_cached)\n # )\n\n session.add(scraper_search)\n session.commit()\n\n return scrape_jobs", "def do_processinq(self, job):\n p = web.input(max=500)\n maxn = int(p.max)\n result = dict(job=job, inq=0, processed=0, scheduled=0, max=maxn,\n td=0.0, ts=0.0)\n start = time.time()\n\n result.update(self.get_job(job).processinq(maxn))\n \n result.update(t=(time.time() - start))\n return result", "def run_redis_example():\n\n try:\n print('\\nStep 1: Connect to Redis')\n r = login_redis_cloud()\n print('\\nStep 2: Cache some data in Redis and read it back')\n r.set('andy', '[email protected]')\n email = r.get('andy')\n print(f\"r.get('andy'): {email}\")\n\n print('\\nStep 3: Cache more data in Redis')\n r.set('pam', '[email protected]')\n r.set('fred', '[email protected]')\n\n print(\"\\nStep 4: Delete 'andy' from cache\")\n r.delete('andy')\n\n print('\\nStep 5: Make a unique ID and use it to count.')\n r.set('user_count', 21)\n r.incr('user_count')\n r.incr('user_count')\n r.decr('user_count')\n result = r.get('user_count')\n print(f'user_count=21+1+1-1={result}')\n\n print('\\nStep 6: Make richer data for a SKU')\n r.rpush('186675', 'chair')\n r.rpush('186675', 'red')\n r.rpush('186675', 'leather')\n r.rpush('186675', '5.99')\n\n print('\\nStep 7: Pull some data from the SKU structure')\n cover_type = r.lindex('186675', 2)\n print(f'Type of cover = {cover_type}')\n\n print('\\nStep 8: Add customer data for 6 customers')\n PHONE_IDX = 0\n ZIP_IDX = 1\n customer_data = {\n 'apple': {\n 'phone': '012-345-6789',\n 'zip': '01234'\n },\n 'lucky': {\n 'phone': '503-832-2833',\n 'zip': '53098'\n },\n 'zeke': {\n 'phone': '555-555-5555',\n 'zip': '98000'\n },\n 'blake': {\n 'phone': '838-608-0199',\n 'zip': '12011'\n },\n 'naomi': {\n 'phone': '721-608-8223',\n 'zip': '24587'\n },\n 'kale': {\n 'phone': '444-385-9115',\n 'zip': '62214'\n },\n }\n for customer, data in customer_data.items():\n print(f\"Inserting {customer}: [phone: {data['phone']}\"\n f\", zip: {data['zip']}]\")\n r.rpush(customer, data['phone'])\n r.rpush(customer, data['zip'])\n\n print('\\nStep 9. 
Retrieve zip and phone for blake')\n blake_phone = r.lindex('blake', PHONE_IDX)\n blake_zip = r.lindex('blake', ZIP_IDX)\n print(f\"Blake's info: [phone: {blake_phone}, zip: {blake_zip}]\")\n\n print('\\nFinally: Delete all data so we can start over.')\n r.flushdb()\n\n except Exception as e:\n print(f'Redis error: {e}')", "def get_jobs():\n \n rate_limit()\n command = [\"bjobs\", \"-o\", \"\\\"JOBID\", \"USER\", \"STAT\", \"QUEUE\", \"JOB_NAME\", \\\n \"delimiter=';'\\\"\"]\n command = \" \".join(command)\n jobs = subprocess.check_output(command, shell=True, stderr=open(os.devnull))\n \n # if there aren't any currently running or pending jobs, then the output\n if jobs == \"\":\n return set([])\n \n jobs = jobs.decode().strip().split(\"\\n\")\n \n current_jobs = set([])\n for line in jobs:\n if line.startswith(\"JOBID\"): # ignore the header line\n continue\n \n line = line.split(\";\")\n job_name = line[4]\n current_jobs.add(job_name)\n \n return current_jobs", "def query_queue(self, job_name=None, user=None, qformat=None,\n skip_rows=None):", "async def get_result(request):\n job_id = request.match_info['job_id']\n r = redis.Redis(\n host=os.environ['REDIS_HOST'],\n port=6379,\n decode_responses=True,\n )\n if not r.exists(job_id):\n return web.HTTPNotFound(text='Results are unavailable.')\n output_id = r.get(job_id)\n filename = output_id + '.json'\n try:\n with open(os.path.join(CACHE_DIR, filename), 'r') as f:\n response = json.load(f)\n except FileNotFoundError:\n # Redis is out-of-sync with file system. Remove the offending key.\n r.delete(job_id)\n return web.HTTPNotFound(text='Results are unavailable.')\n return web.json_response(response, dumps=functools.partial(json.dumps, indent=4))", "def list_jobs():\n\n name_to_job_details = redis_controller.get_name_to_job_details()\n return list(name_to_job_details.values())", "def main():\n ap = argparse.ArgumentParser()\n apg = ap.add_mutually_exclusive_group()\n apg.add_argument('--job-id', dest='jobId', default='', help='Cuckoo job id to query.')\n apg.add_argument('--md5', dest='md5', default='', help='File md5 hash to query.')\n apg.add_argument('--sha1', dest='sha1', default='', help='File sha1 hash to query.')\n apg.add_argument('--sha256', dest='sha256', default='', help='File sha256 hash to query.')\n apg.add_argument('--sha512', dest='sha512', default='', help='File sha512 hash to query.')\n args = ap.parse_args()\n config = ConfigParser.ConfigParser()\n config.read('app.conf')\n conn = pymongo.MongoClient(config.get('mongo','dbUrl'))\n with open(config.get('filterOut','fIpv4Addresses'), 'r+') as fIpv4AddressesFH:\n fIpv4Addresses = [line.rstrip('\\n') for line in fIpv4AddressesFH]\n fIpv4AddressesFH.closed\n with open(config.get('filterOut','fHostNames'), 'r+') as fHostNamesFH:\n fHostNames = [line.rstrip('\\n') for line in fHostNamesFH]\n fHostNamesFH.closed\n with open(config.get('filterOut','fSeenEntries'), 'w+') as fSeenEntriesFH:\n fSeenEntries = [line.rstrip('\\n') for line in fSeenEntriesFH]\n fSeenEntriesFH.closed\n\n networkItems = []\n ipv4Addresses = []\n hostNames = []\n _l.info('Starting...')\n\n fSeenEntriesFH = open(config.get('filterOut','fSeenEntries'), 'a', 0)\n \n cfg_collections = config.get('mongo','dbCollectionNames')\n if ',' in cfg_collections:\n db_collection_names = cfg_collections.split(',')\n else:\n db_collection_names = [cfg_collections]\n \n cuckoo_names = config.get('dbsList','cuckoo')\n if ',' in cuckoo_names:\n cuckoo_servers = cuckoo_names.split(',')\n else:\n cuckoo_servers = 
[cuckoo_names]\n\n for dbkey, dbs in enumerate(cuckoo_servers):\n db = conn[dbs]\n mongo_collection = getattr(db, db_collection_names[dbkey])\n _l.debug('Connected to data source.')\n\n # Get a list of file names and hashes from db\n if args.jobId:\n cs = mongo_collection.aggregate([{\"$match\": {\"info.id\": int(args.jobId)}},\n {\"$group\": {\"_id\": {\"targetFileSha1\": \"$target.file.sha1\",\n \"targetFileSha256\": \"$target.file.sha256\",\n \"targetFileSha512\": \"$target.file.sha512\",\n \"targetFileSsdeep\": \"$target.file.ssdeep\",\n \"targetFileMd5\": \"$target.file.md5\",\n \"targetFileSize\": \"$target.file.size\",\n \"targetFileName\": \"$target.file.name\"}}}])\n elif args.md5:\n cs = mongo_collection.aggregate([{\"$match\": {\"target.file.md5\": args.md5}},\n {\"$group\": {\"_id\": {\"targetFileSha1\": \"$target.file.sha1\",\n \"targetFileSha256\": \"$target.file.sha256\",\n \"targetFileSha512\": \"$target.file.sha512\",\n \"targetFileSsdeep\": \"$target.file.ssdeep\",\n \"targetFileMd5\": \"$target.file.md5\",\n \"targetFileSize\": \"$target.file.size\",\n \"targetFileName\": \"$target.file.name\"}}}])\n elif args.sha1:\n cs = mongo_collection.aggregate([{\"$match\": {\"target.file.sha1\": args.sha1}},\n {\"$group\": {\"_id\": {\"targetFileSha1\": \"$target.file.sha1\",\n \"targetFileSha256\": \"$target.file.sha256\",\n \"targetFileSha512\": \"$target.file.sha512\",\n \"targetFileSsdeep\": \"$target.file.ssdeep\",\n \"targetFileMd5\": \"$target.file.md5\",\n \"targetFileSize\": \"$target.file.size\",\n \"targetFileName\": \"$target.file.name\"}}}])\n elif args.sha256:\n cs = mongo_collection.aggregate([{\"$match\": {\"target.file.sha256\": args.sha256}},\n {\"$group\": {\"_id\": {\"targetFileSha1\": \"$target.file.sha1\",\n \"targetFileSha256\": \"$target.file.sha256\",\n \"targetFileSha512\": \"$target.file.sha512\",\n \"targetFileSsdeep\": \"$target.file.ssdeep\",\n \"targetFileMd5\": \"$target.file.md5\",\n \"targetFileSize\": \"$target.file.size\",\n \"targetFileName\": \"$target.file.name\"}}}])\n elif args.sha512:\n cs = mongo_collection.aggregate([{\"$match\": {\"target.file.sha512\": args.sha512}},\n {\"$group\": {\"_id\": {\"targetFileSha1\": \"$target.file.sha1\",\n \"targetFileSha256\": \"$target.file.sha256\",\n \"targetFileSha512\": \"$target.file.sha512\",\n \"targetFileSsdeep\": \"$target.file.ssdeep\",\n \"targetFileMd5\": \"$target.file.md5\",\n \"targetFileSize\": \"$target.file.size\",\n \"targetFileName\": \"$target.file.name\"}}}])\n else:\n cs = mongo_collection.aggregate([{\"$group\": {\"_id\": {\"targetFileSha1\": \"$target.file.sha1\",\n \"targetFileSha256\": \"$target.file.sha256\",\n \"targetFileSha512\": \"$target.file.sha512\",\n \"targetFileSsdeep\": \"$target.file.ssdeep\",\n \"targetFileMd5\": \"$target.file.md5\",\n \"targetFileSize\": \"$target.file.size\",\n \"targetFileName\": \"$target.file.name\"}}}])\n _l.debug('Executed initial aggregation query.')\n for i in cs['result']:\n try:\n # Get all network indicators: addresses and names\n networkItems[:] = []\n ipv4Addresses[:] = []\n hostNames[:] = []\n networkUdpSrc = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": 
i['_id']['targetFileName']}).distinct('network.udp.src')\n networkUdpDst = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.udp.dst')\n networkIcmpSrc = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.icmp.src')\n networkIcmpDst = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.icmp.dst')\n networkTcpSrc = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.tcp.src')\n networkTcpDst = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.tcp.dst')\n networkDnsAnswersData = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.dns.answers.data')\n networkDomainsIp = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.domains.ip')\n networkHttpHost = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n 
\"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.http.host')\n networkHosts = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.hosts')\n networkDnsRequest = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.dns.request')\n networkDomainsDomain = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('network.domains.domain')\n \n # Aggregate all found items and remove duplicates and empty\n networkItems += networkUdpSrc + networkUdpDst + networkIcmpSrc + \\\n networkIcmpDst + networkTcpSrc + networkTcpDst + \\\n networkDnsAnswersData + networkDomainsIp + networkHttpHost + \\\n networkHosts + networkDnsRequest + networkDomainsDomain\n networkItems = list(set(networkItems))\n networkItems = filter(None, networkItems)\n \n # Split into one list for addresses and one for host names\n ipv4Addresses = keepAddresses(networkItems[:])\n hostNames = keepHostNames(networkItems[:])\n \n # Delete addresses and host names if in whitelist files\n ipv4Addresses = delIfMatchedAddr(ipv4Addresses, fIpv4Addresses)\n hostNames = delIfMatchedHostName(hostNames, fHostNames)\n \n # Get file names\n targetFileName = mongo_collection.find(\n {\n \"target.file.sha1\": i['_id']['targetFileSha1'],\n \"target.file.sha256\": i['_id']['targetFileSha256'],\n \"target.file.sha512\": i['_id']['targetFileSha512'],\n \"target.file.ssdeep\": i['_id']['targetFileSsdeep'],\n \"target.file.md5\": i['_id']['targetFileMd5'],\n \"target.file.size\": i['_id']['targetFileSize'],\n \"target.file.name\": i['_id']['targetFileName']}).distinct('target.file.name')\n \n #BEGIN DEBUG SECTION\n '''\n pp = pprint.PrettyPrinter(indent=4)\n print \"IPv4 Addresses\"\n pp.pprint(ipv4Addresses)\n print \"HOSTNAMES\"\n pp.pprint(hostNames)\n #pp.pprint(str(i['_id']['targetFileSha1']))\n print \"ANALYSIS\"\n #pp.pprint(i['_id']['targetFileSha1'])\n pp.pprint(i)\n '''\n \n #####EXAMPLE OUTPUT#####\n '''\n IPv4 Addresses\n\t\t\t\t[ '23.207.9.92',\n\t\t\t\t\t'157.56.141.102',\n\t\t\t\t\t'165.254.57.27',\n\t\t\t\t\t'192.168.1.117',\n\t\t\t\t\t'192.168.1.119',\n\t\t\t\t\t'192.168.1.121',\n\t\t\t\t\t'192.168.1.124',\n\t\t\t\t\t'192.168.1.201',\n\t\t\t\t\t'192.168.1.255',\n\t\t\t\t\t'209.8.115.63',\n\t\t\t\t\t'239.255.255.250']\n\t\t\t\t\t\n\t\t\t\tHOSTNAMES\n\t\t\t\t[u'watson.microsoft.com', u'www.msftncsi.com', u'armmf.adobe.com']\n\t\t\t\t\n\t\t\t\tANALYSIS\n\t\t\t\t{ u'_id': { u'targetFileMd5': 
u'f6a390105740b99a211e51a5b9f018ea',\n\t\t\t\t\t\t\t\tu'targetFileName': u'requirements.pdf.scr',\n\t\t\t\t\t\t\t\tu'targetFileSha1': u'e284ba4a06d1dc2e211aceaf9bce14803f094cb6',\n\t\t\t\t\t\t\t\tu'targetFileSha256': u'd0419cd6f455c56ae77657e3590508a13b298395fce50609daf7e09ce28de63c',\n\t\t\t\t\t\t\t\tu'targetFileSha512': u'8957af673a1272cc2eb34fb5f699d475df7c6af84f77c8d169b22225ad8b6d64c36ee150eb3982fb73aafd3c8fb5047fe752a7f01976a519e861353495ddb445',\n\t\t\t\t\t\t\t\tu'targetFileSize': 105984,\n\t\t\t\t\t\t\t\tu'targetFileSsdeep': u'1536:OVfPLtI38adjA3wkJUOoMiqcy8hnzkqUJC2NjIFwBSjh36kJJ4cq:OVfPLtkqUNhby8hnz/cDNjlSjh33q'}}\n\n '''\n #Taking out for debugging b/c we don't want to make any xml's nor write to the \n #seen-entries log yet, just print to screen\n \n #'''\n \n # Call the function to create the output, check if seen before first\n if str(i['_id']['targetFileSha1']) + ',' + \\\n str(i['_id']['targetFileSha256']) + ',' + \\\n str(i['_id']['targetFileSha512']) + ',' + \\\n str(i['_id']['targetFileSsdeep']) + ',' + \\\n str(i['_id']['targetFileMd5']) + ',' + \\\n str(i['_id']['targetFileSize']) not in str(fSeenEntries): \n if ipv4Addresses or hostNames:\t#Does this need to be here? What if there were no callouts?\n genStixDoc(config.get('output','outputDir'),\n str(i['_id']['targetFileSha1']),\n str(i['_id']['targetFileSha256']),\n str(i['_id']['targetFileSha512']),\n str(i['_id']['targetFileSsdeep']),\n str(i['_id']['targetFileMd5']),\n str(i['_id']['targetFileSize']),\n i['_id']['targetFileName'],\n ipv4Addresses,\n hostNames)\n # Write to file so that we can read back in as filter later\n fSeenEntriesFH.write(str(i['_id']['targetFileSha1']) + ',' + \\\n str(i['_id']['targetFileSha256']) + ',' + \\\n str(i['_id']['targetFileSha512']) + ',' + \\\n str(i['_id']['targetFileSsdeep']) + ',' + \\\n str(i['_id']['targetFileMd5']) + ',' + \\\n str(i['_id']['targetFileSize']) + '\\n')\n _l.debug('Updated SeenEntries file with: ' + \\\n str(i['_id']['targetFileSha256']) + ',' + \\\n str(i['_id']['targetFileSha512']) + ',' + \\\n str(i['_id']['targetFileSsdeep']) + ',' + \\\n str(i['_id']['targetFileMd5']) + ',' + \\\n str(i['_id']['targetFileSize']) + \\\n ' since content has been written to stix file.\\n')\n \n #''' \n \n #END DEBUG SECTION\n \n \n except Exception as e:\n import traceback\n tb = traceback.format_exc()\n _l.error('Row failed due to: ' + str(e) + \"\\n\\n\" + str(tb) + \"\\n\\n\" + str(repr(i)))\n conn.disconnect()\n fSeenEntriesFH.closed\n _l.info('Ended.')", "def app_index_job(cls):\n import time\n s = time.time()\n print('init--redis')\n news = json.dumps(DB.index_news(), ensure_ascii=False)\n mvs = json.dumps(DB.index_mvs('mv'), ensure_ascii=False)\n dsjs = json.dumps(DB.index_mvs('dsj'), ensure_ascii=False)\n dms = json.dumps(DB.index_mvs('dm'), ensure_ascii=False)\n zys = json.dumps(DB.index_mvs('zy'), ensure_ascii=False)\n mv_top = json.dumps(DB.index_tops('mv')[0:6], ensure_ascii=False)\n dsj_top = json.dumps(DB.index_tops('dsj')[0:6], ensure_ascii=False)\n zy_top = json.dumps(DB.index_tops('zy')[0:6], ensure_ascii=False)\n dm_top = json.dumps(DB.index_tops('dm')[0:6], ensure_ascii=False)\n # 今日更新和总视频数量\n today, total = DB.today_total(None)\n # 淘宝广告\n ads = json.dumps(TBApi.get_tb_goods(), ensure_ascii=False)\n cls.r.set('news', news)\n cls.r.set('mvs', mvs)\n cls.r.set('dsjs', dsjs)\n cls.r.set('dms', dms)\n cls.r.set('zys', zys)\n cls.r.set('mv_top', mv_top)\n cls.r.set('dsj_top', dsj_top)\n cls.r.set('zy_top', zy_top)\n cls.r.set('dm_top', dm_top)\n 
cls.r.set('today', today)\n cls.r.set('total', total)\n cls.r.set('ads', ads)\n del news, mvs, dsjs, dms, zys, mv_top, dsj_top, zy_top, dm_top, ads\n print(f'{time.time() - s}')", "def probe_worker(ip, thread):\n fileno = 0\n r = redis.Redis(ip)\n while True:\n url = r.lpop('blogs') # pull URL off queue.\n if not url:\n break # means we are done.\n try:\n content = probe(eval(url), fileno, ip) # does the work.\n r.incr(\"count_%d\" % thread)\n if content:\n r.incr('successes')\n else:\n r.incr('errors')\n except Exception, e:\n # TODO(ryan): What kind of error occurred?\n r.incr('errors')\n logging.info(\"THREAD %d; Error occured: %s, URL: %s\" % (thread, e, url))\n ERR = open(prefix + \"meta/errors-%s.log\" % str(os.getpid()), \"a\")\n print >> ERR, url, e\n ERR.close()\n r.rpush('error_messages', '|'.join([url, str(e)]))\n fileno += 1", "def running_jobs_sherlock():\n user = os.environ['USER']\n\n return subprocess.check_output(['squeue', '-u',user,'-o','%Z']).split()[1:]", "def thingspeak_job():\n try:\n # init thingspeak data dict\n data_d = dict()\n # populate it with valid redis values\n try:\n r_value = int(rdb.get('cvm16:good'))\n if r_value not in [0, 1]:\n raise ValueError\n data_d['field1'] = r_value\n except (TypeError, ValueError):\n logging.warning(f'unable to process redis key \"cvm16:good\" value must be 0 or 1')\n try:\n data_d['field2'] = round(float(rdb.get('cvm16:wobbe')), 2)\n except (TypeError, ValueError):\n logging.warning(f'unable to process redis key \"cvm16:wobbe\" value must be a valid float')\n # add API key\n data_d['api_key'] = API_KEY\n # do thingspeak request\n resp = urlopen(f'https://api.thingspeak.com/update?{urlencode(data_d)}', timeout=5.0)\n # print request status\n try:\n # HTTP request return current entry ID or 0 on error\n entry_id = int(resp.read())\n if entry_id < 1:\n raise ValueError\n logging.info(f'successful data update to entry ID: {entry_id}')\n except ValueError:\n logging.warning(f'unable to update data')\n except redis.RedisError as e:\n logging.error(f'redis error occur: {e!r}')\n except urllib.error.URLError as e:\n logging.error(f'network error occur: {e!r}')", "def get(self):\n server = self.get_argument(\"server\")\n redis_info = self.stats_provider.get_info(server)\n databases=[]\n\n for key in sorted(redis_info.keys()):\n if key.startswith(\"db\"):\n database = redis_info[key]\n database['name']=key\n databases.append(database)\n\n total_keys=0\n for database in databases:\n total_keys+=database.get(\"keys\")\n\n if(total_keys==0):\n databases=[{\"name\" : \"db0\", \"keys\" : \"0\", \"expires\" : \"0\"}]\n\n redis_info['databases'] = databases\n redis_info['total_keys']= self.shorten_number(total_keys)\n\n uptime_seconds = redis_info['uptime_in_seconds']\n redis_info['uptime'] = self.shorten_time(uptime_seconds)\n\n commands_processed = redis_info['total_commands_processed']\n commands_processed = self.shorten_number(commands_processed)\n redis_info['total_commands_processed_human'] = commands_processed\n\n self.write(redis_info)", "def get_next_query(redis_client):\n return redis_client.sort('search_terms', by='*->score')[0].decode('utf-8')", "def get_summary_job(job):\n if job is None:\n return None\n url = conf.WIKIPEDIA_URL.format(\n urllib.parse.quote(job))\n\n r = requests.get(url).content\n data_loaded = json.loads(r)\n if type(data_loaded) == dict and data_loaded['title'] != 'Not found.':\n summary = data_loaded['extract']\n if len(summary) < 65535:\n return summary\n else:\n logger.info('Summary for job is too long to 
fit into the table',\n job)\n return None\n else:\n logger.info('Description not found for the following job', job)\n return None", "def get_many_memes(subs, reddit, upper_b):\n top_memes = {}\n for s in subs:\n tmp = reddit.subreddit(s)\n top_memes[s] = tmp.new(limit=upper_b)\n return top_memes", "def crawl_queue(q, result_set):\n _log = logging.getLogger(crawl_queue.__name__)\n while not q.empty():\n worker = q.get() #get an itme from the queue\n\n try:\n req = requests.get(worker[1], verify = False, timeout = (30,30), headers = create_fakeheader(ua,browser))\n cont = req.content\n result_set[worker[0]] = cont\n except:\n _log.warning(f' couldnt find a request for index {worker[0]}')\n result_set[worker[0]] = ''\n if q.qsize() % 100 == 0:\n _log.info(f'things left to process {q.qsize()}')\n q.task_done()\n return True", "def oq_run_classical_psha(job_ini, exports='csv', oq_version=default_oq_version, dir_info=None): \n # the run() method has been turned into private since v3.11\n # the get_last_calc_id() and get_datadir() have been moved to commonlib.logs since v3.12\n # the datastore has been moved to commonlib since v3.12\n # Note: the extracting realizations method was kindly shared by Dr. Anne Husley\n vtag = int(oq_version.split('.')[1])\n if vtag <= 10:\n try:\n print('FetchOpenQuake: running Version {}.'.format(oq_version))\n # reloading \n from openquake.commands.run import run\n from openquake.calculators.export.hazard import export_realizations\n\n #run.main([job_ini], exports=exports)\n # invoke/modify deeper openquake commands here to make it compatible with \n # the pylauncher on stampede2 for parallel runs... \n from openquake.baselib import datastore, performance, general\n from openquake.server import dbserver\n from openquake.calculators import base\n from openquake.commonlib import readinput, logs\n dbserver.ensure_on()\n global calc_path\n loglevel = 'info'\n params = {}\n reuse_input = False\n concurrent_tasks = None\n pdb = None\n hc_id = None\n for i in range(1000):\n try:\n calc_id = logs.init('nojob', getattr(logging, loglevel.upper()))\n except:\n time.sleep(0.01)\n continue\n else:\n print('FetchOpenQuake: log created.')\n break\n # disable gzip_input\n base.BaseCalculator.gzip_inputs = lambda self: None\n with performance.Monitor('total runtime', measuremem=True) as monitor:\n if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):\n os.environ['OQ_DISTRIBUTE'] = 'processpool'\n oqparam = readinput.get_oqparam(job_ini, hc_id=hc_id)\n if hc_id and hc_id < 0: # interpret negative calculation ids\n calc_ids = datastore.get_calc_ids()\n try:\n hc_id = calc_ids[hc_id]\n except IndexError:\n raise SystemExit(\n 'There are %d old calculations, cannot '\n 'retrieve the %s' % (len(calc_ids), hc_id))\n calc = base.calculators(oqparam, calc_id)\n calc.run(concurrent_tasks=concurrent_tasks, pdb=pdb,\n exports=exports, hazard_calculation_id=hc_id,\n rlz_ids=())\n\n calc_id = datastore.get_last_calc_id()\n path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id)\n dstore = datastore.read(path)\n export_realizations('realizations', dstore)\n except:\n print('FetchOpenQuake: Classical PSHA failed.')\n return 1\n elif vtag == 11:\n try:\n print('FetchOpenQuake: running Version {}.'.format(oq_version))\n # reloading \n from openquake.commands import run\n from openquake.calculators.export.hazard import export_realizations\n\n #run.main([job_ini], exports=exports)\n # invoke/modify deeper openquake commands here to make it compatible with \n # the 
pylauncher on stampede2 for parallel runs... \n from openquake.baselib import datastore, performance, general\n from openquake.server import dbserver\n from openquake.calculators import base\n from openquake.commonlib import readinput, logs\n dbserver.ensure_on()\n global calc_path\n loglevel = 'info'\n params = {}\n reuse_input = False\n concurrent_tasks = None\n pdb = False\n for i in range(1000):\n try:\n calc_id = logs.init('nojob', getattr(logging, loglevel.upper()))\n except:\n time.sleep(0.01)\n continue\n else:\n print('FetchOpenQuake: log created.')\n break\n # disable gzip_input\n base.BaseCalculator.gzip_inputs = lambda self: None\n with performance.Monitor('total runtime', measuremem=True) as monitor:\n if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):\n os.environ['OQ_DISTRIBUTE'] = 'processpool'\n if 'hazard_calculation_id' in params:\n hc_id = int(params['hazard_calculation_id'])\n else:\n hc_id = None\n if hc_id and hc_id < 0: # interpret negative calculation ids\n calc_ids = datastore.get_calc_ids()\n try:\n params['hazard_calculation_id'] = str(calc_ids[hc_id])\n except IndexError:\n raise SystemExit(\n 'There are %d old calculations, cannot '\n 'retrieve the %s' % (len(calc_ids), hc_id))\n oqparam = readinput.get_oqparam(job_ini, kw=params)\n calc = base.calculators(oqparam, calc_id)\n if reuse_input: # enable caching\n oqparam.cachedir = datastore.get_datadir()\n calc.run(concurrent_tasks=concurrent_tasks, pdb=pdb,exports=exports)\n \n calc_id = datastore.get_last_calc_id()\n path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id)\n dstore = datastore.read(path)\n export_realizations('realizations', dstore)\n except:\n print('FetchOpenQuake: Classical PSHA failed.')\n return 1 \n else:\n try:\n print('FetchOpenQuake: running Version {}.'.format(oq_version))\n # reloading \n from openquake.commands import run\n from openquake.commonlib import logs, datastore\n from openquake.calculators.export.hazard import export_realizations\n\n #run.main([job_ini], exports=exports)\n # invoke/modify deeper openquake commands here to make it compatible with \n # the pylauncher on stampede2 for parallel runs... 
\n from openquake.baselib import performance, general\n from openquake.server import dbserver\n from openquake.calculators import base\n dbserver.ensure_on()\n global calc_path\n loglevel = 'info'\n params = {}\n reuse_input = False\n concurrent_tasks = None\n pdb = False\n for i in range(1000):\n try:\n log = logs.init(\"job\", job_ini, getattr(logging, loglevel.upper()))\n except:\n time.sleep(0.01)\n continue\n else:\n print('FetchOpenQuake: log created.')\n break\n log.params.update(params)\n base.BaseCalculator.gzip_inputs = lambda self: None\n with log, performance.Monitor('total runtime', measuremem=True) as monitor:\n calc = base.calculators(log.get_oqparam(), log.calc_id)\n if reuse_input: # enable caching\n calc.oqparam.cachedir = datastore.get_datadir()\n calc.run(concurrent_tasks=concurrent_tasks, pdb=pdb, exports=exports)\n\n logging.info('Total time spent: %s s', monitor.duration)\n logging.info('Memory allocated: %s', general.humansize(monitor.mem))\n print('See the output with silx view %s' % calc.datastore.filename)\n\n calc_id = logs.get_last_calc_id()\n path = os.path.join(logs.get_datadir(), 'calc_%d.hdf5' % calc_id)\n dstore = datastore.read(path)\n export_realizations('realizations', dstore)\n except:\n print('FetchOpenQuake: Classical PSHA failed.')\n return 1 \n\n # h5 clear for stampede2 (this is somewhat inelegant...)\n if 'stampede2' in socket.gethostname():\n # h5clear\n if oq_h5clear(path) == 0:\n print('FetchOpenQuake.oq_run_classical_psha: h5clear completed')\n else:\n print('FetchOpenQuake.oq_run_classical_psha: h5clear failed')\n \n # copy the calc file to output directory\n if dir_info:\n dir_output = dir_info['Output']\n try:\n shutil.copy2(path, dir_output)\n print('FetchOpenQuake: calc hdf file saved.')\n except:\n print('FetchOpenQuake: failed to copy calc hdf file.')\n \n return 0", "def ListJobs(cls):\n return [key.parent().string_id() for key in cls.query().fetch(\n 100, keys_only=True)]", "def get_job(job_name: str):\n\n job_details = redis_controller.get_job_details(job_name=job_name)\n return job_details" ]
[ "0.56763387", "0.5650112", "0.55427843", "0.55423814", "0.537743", "0.52602905", "0.52289313", "0.52130634", "0.52130085", "0.5157604", "0.50683767", "0.5059203", "0.50033915", "0.49904054", "0.49861166", "0.4960205", "0.4941266", "0.49406096", "0.49334633", "0.48996183", "0.48991328", "0.48917097", "0.48207712", "0.4814539", "0.48047164", "0.4787908", "0.47856256", "0.47855747", "0.47767824", "0.4773182" ]
0.7382193
0
Given a list of tags, return the set of keys common to all the tags, if common is set to true. Return the Union if it is set to false.
def retrieve_keys(tags, common=True): r = StrictRedis.from_url('redis://10.0.0.10:6379') # if tags exist, filter them (later) # print(tags) if tags == []: return [] else: print('FILTERING') if common: available_keys = set([]) else: available_keys = [set([]) for tag in tags] # implement union of sets for count, tag in enumerate(tags): try: keys_list = r.get(tag.strip()).split(',')[1:] for key in keys_list: if common: available_keys.add(key) else: available_keys[count].add(key) except: print('Tag %s not found - check spelling' % tag) if not common: available_keys = set().intersection(*available_keys) return list(available_keys)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_intersection(intersection_list):\n intersection_keys = set()\n for ij in intersection_list:\n if len(intersection_keys) == 0:\n intersection_keys = set(ij)\n else:\n intersection_keys = intersection_keys.union(set(ij))\n return intersection_keys", "def common_elements(s1, s2):\n\n return set(s1 & s2)", "def common_entries(*dcts):\n if not dcts:\n return\n for i in set(dcts[0]).intersection(*dcts[1:]):\n yield (i,) + tuple(d[i] for d in dcts)", "def get_keys_for_tag(self,tag):\r\n\r\n #using database\r\n if self.using_database:\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {tag[0] for tag in fetched}\r\n\r\n return set()\r\n #using shelf\r\n if self.using_shelf:\r\n if self.tag_dict_contains(tag):\r\n return self.tag_dict[tag]\r\n return set()", "def get_shared_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tkeys = set(param_list[0].keys())\n\tfor i in range(1, len(param_list)):\n\t\tkeys = keys.intersection(param_list[i].keys())\n\tkeys = list(keys)\n\tkeys.sort()\n\treturn keys", "def intersect_set(list1, list2):\n return (set(list1) & set(list2))", "def intersection(sets):\n return functools.reduce(set.intersection, [s for s in sets])", "def intersection(sets):\n return reduce(set.intersection, [s for s in sets])", "def get_similarities(tags):\n similar_tags = set()\n for tag_a, tag_b in itertools.combinations(tags, 2):\n if tag_a[0] != tag_b[0]: # ~12x faster\n continue\n ratio = difflib.SequenceMatcher(None, tag_a, tag_b).ratio()\n if ratio > SIMILAR:\n similar_tags.add((tag_a, tag_b))\n\n return similar_tags", "def variant_key_set_union(k, v_left, v_right, ordering=None):\n out_v = set(v_left) | set(v_right)\n return sorted(out_v, key=partial(_version_order, ordering=ordering))", "def union(set1, set2):", "def intersect(a,b):\n\treturn list(set(a) & set(b))", "def commonSetElementPredicate(field_set: Sequence[Any]) -> FrozenSet[str]:\n\n return frozenset(str(item) for item in field_set)", "def FilterToKnownTags(self, tags: Iterable[str]) -> Set[str]:\n return self._GetKnownTags() & set(tags)", "def intersection_signature(*sets):\n return reduce(operator.and_, sets)", "def _exclusive_intersect(self, keys):\n #inc_s = reduce(lambda x, y: x.intersection(y), \n # (self[x] for x in keys))\n inc_s = self[keys[0]].copy()\n for other_key in self:\n if other_key in keys:\n inc_s.intersection_update(self[other_key])\n else:\n inc_s.difference_update(self[other_key])\n\n return inc_s", "def get_jurisdiction_common_members(a: List[int], b: List[int]) -> Set[int]:\n a_set = set(a)\n b_set = set(b)\n\n if a_set & b_set:\n return a_set & b_set\n else:\n return set()", "def union_signature(*args):\n *sets, n = args\n if len(sets) == 2:\n return set(sorted(sets[0] | sets[1])[:n])\n else:\n return set(sorted(reduce(operator.or_, list(sets)))[:n])", "def union(a, b):\r\n return list(set(a) | set(b))", "def union(a, b):\n return list(set(a) | set(b))", "def union(a, b):\n return list(set(a) | set(b))", "def intersect(a, b):\r\n return list(set(a) & set(b))", "def intersect(a, b):\n return(list(set(a) & set(b)))", "def intersect(a, b):\n return list(set(a) & set(b))", "def find_unique_common_items(list1, list2):\n\n # convert each list to set; will remove duplicates\n set1 = set(list1)\n set2 = set(list2)\n\n # use & operator to find common values between set1 and set2\n unique_set = set1 & 
set2\n\n return unique_set", "def common(input1, input2):\r\n return set(input1)&set(input2) # write a single line of code\r", "def union_sets(S):\n res = set()\n for s in S:\n res |= s\n return res", "def find_intersection(wire_one_map, wire_two_map):\n return set(wire_one_map.keys()) & set(wire_two_map.keys())", "def union(self, key: str, skip_duplicates=False) -> list:\n result = []\n for items in self.get(key):\n for item in items:\n if skip_duplicates and item in result:\n continue\n result.append(item)\n return result", "def tagkeyindex(self,tag):\r\n\r\n returnset = set()\r\n if self.tag_dict_contains(tag):\r\n\r\n for x_temp in self.get_keys_for_tag(tag):\r\n if self.key_dict_contains(x_temp+'/'+tag):\r\n for y_temp in self.get_indexes_for_key(x_temp+'/'+tag):\r\n returnset.add(y_temp)\r\n return returnset" ]
[ "0.633568", "0.61055833", "0.6078032", "0.607099", "0.6065052", "0.60457534", "0.5982037", "0.59574974", "0.5949828", "0.59448105", "0.59397596", "0.5928033", "0.591519", "0.59143674", "0.58971894", "0.5877742", "0.5863845", "0.5802865", "0.5800691", "0.5798681", "0.5798681", "0.57896626", "0.5783053", "0.57707787", "0.57500494", "0.5731991", "0.57261735", "0.57194257", "0.571406", "0.5683467" ]
0.691533
0
Send the new colors for each vertex to the GPU. This method must set the contents of `self._vertex_list.colors` using a list or tuple that contains the RGBA color components for each vertex in the shape. This is usually done by repeating `self._rgba` for each vertex.
def _update_color(self): self._vertex_list.colors[:] = self._rgba * self._num_verts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def color_vertices(obj, vert_ind, colors):\n thisfunc = thisfile + '->color_vertices()'\n\n # Validate inputs\n if isinstance(vert_ind, int):\n vert_ind = [vert_ind]\n if isinstance(colors, tuple):\n colors = [colors] * len(vert_ind)\n assert (len(colors) == len(vert_ind)), \\\n \"'colors' and 'vert_ind' must be of the same length, or 'colors' is a single tuple\"\n\n scene = bpy.context.scene\n scene.objects.active = obj\n obj.select = True\n bpy.ops.object.mode_set(mode='OBJECT')\n\n mesh = obj.data\n\n if mesh.vertex_colors:\n vcol_layer = mesh.vertex_colors.active\n else:\n vcol_layer = mesh.vertex_colors.new()\n\n # A vertex and one of its edges combined are called a loop, which has a color\n # So if a vertex has four outgoing edges, it has four colors for the four loops\n for poly in mesh.polygons:\n for loop_idx in poly.loop_indices:\n loop_vert_idx = mesh.loops[loop_idx].vertex_index\n try:\n # In the list\n color_idx = vert_ind.index(loop_vert_idx)\n vcol_layer.data[loop_idx].color = colors[color_idx]\n except ValueError:\n # Not found\n pass\n\n # Set up nodes for vertex colors\n node_tree, nodes = _clear_nodetree_for_active_material(obj)\n nodes.new('ShaderNodeAttribute')\n nodes.new('ShaderNodeBsdfDiffuse')\n nodes.new('ShaderNodeOutputMaterial')\n nodes['Attribute'].attribute_name = vcol_layer.name\n node_tree.links.new(nodes['Attribute'].outputs[0], nodes['Diffuse BSDF'].inputs[0])\n node_tree.links.new(nodes['Diffuse BSDF'].outputs[0], nodes['Material Output'].inputs[0])\n\n # Scene update necessary, as matrix_world is updated lazily\n scene.update()\n\n logging.info(\"%s: Vertex color(s) added to '%s'\", thisfunc, obj.name)\n logging.warning(\"%s: ..., so node tree of '%s' has changed\", thisfunc, obj.name)", "def set_color_list(self, new_list):\n self.__clr_list = itertools.cycle(new_list)", "def show_vertex_colors():\n if bpy.app.version > (2, 80, 0):\n for area in bpy.context.screen.areas:\n if area.type == 'VIEW_3D':\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n space.shading.type = 'SOLID'\n space.shading.color_type = 'VERTEX'", "def color_vertex(obj, color):\r\n\t\r\n\tmesh = obj.data\r\n\tscn = bpy.context.scene\r\n\t\r\n\t#we need to make sure it's the active object\r\n\tscn.objects.active = obj\r\n\tobj.select = True\r\n\tvcol_layer = mesh.vertex_colors.active\r\n\tfor poly in mesh.polygons:\r\n\t\tfor loop_index in poly.loop_indices:\r\n\t\t\tloop_vert_index = mesh.loops[loop_index].vertex_index\r\n\t\t\tvcol_layer.data[loop_index].color = color\r\n\t\r\n\treturn 0", "def setColors(self, colors, indexes=None):\n colors = np.array(colors, np.float32)\n if indexes is None:\n # Change colors to the whole string\n self.allVertices['rgba'][:] = glm.vec4(colors)\n for item in self.colors:\n item[-1] = colors\n else:\n indexes = np.array(indexes, np.int32)\n assert len(colors) == len(indexes)\n # Adjust indexes\n off = 0\n j = 0\n for i, c in enumerate(self.text):\n if c in self.NO_GLYPH_CHARS:\n off += 1\n if i == indexes[j]:\n if j < len(indexes) - 1:\n j += 1\n break\n continue\n elif i < indexes[j]:\n continue\n else:\n self.allVertices['rgba'][\n 4 * (i - off):4 * (i - off + 1)] = colors[j]\n self.colors[i][-1] = colors[j]\n if j < len(indexes) - 1:\n j += 1\n else:\n break\n self.mesh.update()", "def createVertexColorData(self, vertex_colors, fv_indices_flattened):\n return vertex_colors[fv_indices_flattened]", "def _update_vertices(self):\n raise NotImplementedError(\"_update_vertices must be defined\"\n \"for every ShapeBase subclass\")", "def 
draw(self):\n self._vertex_list.draw(self._draw_mode)", "def add_vertices(self, vertices_list):\n for vertex in vertices_list:\n self.add_vertex(vertex)", "def color(self, verify=False, sort=\"id\"):\n\t\t\n\t\tcolored = 0\n\t\tround = 0\n\t\t\n\t\t# Sort\n\t\tif sort == \"id\":\n\t\t\tsorted_vertices = sorted(self.vertices, key = lambda x : x)\n\t\telif sort == \"ascending\":\n\t\t\tsorted_vertices = sorted(self.vertices, key = lambda x : len(self.vertices[x].adjacent_to))\n\t\telif sort == \"descending\":\n\t\t\tsorted_vertices = sorted(self.vertices, key = lambda x : -len(self.vertices[x].adjacent_to))\n\t\t\n\t\t# Assign numbers\n\t\tfor i in range(0, len(sorted_vertices)):\n\t\t\tself.graph.set_vertex_value(sorted_vertices[i], {'number': i})\n\t\t\n\t\t# Color all vertices\n\t\twhile colored < len(self.graph.vertices):\n\t\t\t\n\t\t\t# Remove colored vertices\n\t\t\tsorted_vertices = [x for x in sorted_vertices if not 'color' in self.graph.get_vertex_value(x)]\n\t\t\t\n\t\t\t# Test if vertex needs to be colored this round\n\t\t\tfor vertex in sorted_vertices:\n\t\t\t\tif 'color' in self.graph.get_vertex_value(vertex):\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tvertex_number = self.graph.get_vertex_value(vertex)['number']\n\t\t\t\tneighbors = self.graph.neighbors(vertex)\n\t\t\t\tlocal_max = True\n\t\t\t\t\n\t\t\t\t# Check if local max\n\t\t\t\tfor neighbor in neighbors:\n\t\t\t\t\tdata_neighbor = self.graph.get_vertex_value(neighbor)\n\t\t\t\t\tif 'color' in data_neighbor and data_neighbor['color'] != round:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif data_neighbor['number'] > vertex_number:\n\t\t\t\t\t\tlocal_max = False\n\t\t\t\t\n\t\t\t\t# Color if local max\n\t\t\t\tif local_max:\n\t\t\t\t\tself.graph.set_vertex_value(vertex, {u'color': round})\n\t\t\t\t\tcolored += 1\n\t\t\t\t\t\t\t\t\n\t\t\t# Increment round\n\t\t\tround += 1\n\t\t\n\t\t# Verify\n\t\tcorrect = True\n\t\tif verify:\n\t\t\tfor vertex in self.graph.vertices:\n\t\t\t\tneighbors = self.graph.neighbors(vertex)\n\t\t\t\tfor neighbor in neighbors:\n\t\t\t\t\tif self.graph.get_vertex_value(vertex)['color'] == self.graph.get_vertex_value(neighbor)['color']:\n\t\t\t\t\t\treturn -1\n\t\t\n\t\treturn round", "def populate_vertices(self, vertices_list):\n vertices = []\n for vertex in vertices_list:\n vertex_id = vertex[0]\n vertices.append(Vertex(vertex_id))\n self.vertices = vertices", "def _create_vertex_list(self):\n raise NotImplementedError('_create_vertex_list must be defined in '\n 'order to use group or batch properties')", "def setReference(self, updatedIndices):\n # self.colors[:] = [self.colors[i] for i in updatedIndices]\n self.cellData[:] = [self.cellData[i] for i in updatedIndices]", "def update_color(self):\r\n \r\n \r\n colorset = self.colorset\r\n \r\n self.grfx[0].colorset = colorset\r\n pass", "def render(self):\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY);\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY);\n gl.glVertexPointer(3, gl.GL_DOUBLE, 0, self._vertices);\n gl.glNormalPointer(gl.GL_DOUBLE, 0, self._normals);\n for patch in self._patches:\n patch.render()\n gl.glDisableClientState(gl.GL_VERTEX_ARRAY);\n gl.glDisableClientState(gl.GL_NORMAL_ARRAY);", "def draw(self):\n glColor3f(1.0, 0.0, 0.0)\n glBegin(GL_LINES)\n for vertex in self.edges[0]:\n glVertex3fv(self.vertices[vertex])\n glColor3f(0.0, 1.0, 0.0)\n for vertex in self.edges[1]:\n glVertex3fv(self.vertices[vertex])\n glColor3f(0.0, 0.0, 1.0)\n for vertex in self.edges[2]:\n glVertex3fv(self.vertices[vertex])\n glEnd()", "def assign_some_vertex_colors(mesh, 
triangle_indices, triangle_colors, mask=None):\n split_mesh = split_triangles(mesh)\n vertex_colors = np.asarray(split_mesh.vertex_colors)\n triangles = np.asarray(split_mesh.triangles)\n if mask is not None:\n triangles = triangles[mask, :]\n\n if isinstance(triangle_indices, list):\n for triangle_set, color in zip(triangle_indices, triangle_colors):\n triangle_set = np.asarray(triangle_set)\n for i in range(np.asarray(triangle_set).shape[0]):\n # import ipdb; ipdb.set_trace()\n t_idx = triangle_set[i]\n p_idx = triangles[t_idx, :]\n vertex_colors[p_idx] = color\n else:\n for i in range(triangle_indices.shape[0]):\n # import ipdb; ipdb.set_trace()\n t_idx = triangle_indices[i]\n color = triangle_colors[i, :]\n p_idx = triangles[t_idx, :]\n vertex_colors[p_idx] = color\n if not split_mesh.has_triangle_normals():\n split_mesh.compute_triangle_normals()\n split_mesh.compute_vertex_normals()\n\n return split_mesh", "def registerVertices(self,vl):\n self.set('patchmesh.vertices',FuzzList(vl))", "def vertices(self, v):\n self._vertices = v", "def setlistdata_f3xyzf3nf4rgba(self, key, vertex_data, normal_data, color_data):\n self._dentsvertsdata[key].setlistdata_f3xyzf3nf4rgba(vertex_data, normal_data, color_data)", "def draw(self):\n\n if self.support != \"tablette\":\n for user in self.parent.group.users:\n if user.identifier == 1:\n self.color_user1 = [user.color[0], user.color[1], user.color[2]]\n elif user.identifier == 2:\n self.color_user2 = [user.color[0], user.color[1], user.color[2]]\n if user.identifier == 3:\n self.color_user3 = [user.color[0], user.color[1], user.color[2]]\n else:\n self.color_user4 = [user.color[0], user.color[1], user.color[2]]\n else:\n for user in self.parent.group.users:\n if user.identifier == 1:\n self.color_user1 = [user.color[0], user.color[1], user.color[2]]\n elif user.identifier == 2:\n self.color_user2 = [user.color[0], user.color[1], user.color[2]]\n if user.identifier == 3:\n self.color_user3 = [user.color[0], user.color[1], user.color[2]]\n else:\n self.color_user4 = [user.color[0], user.color[1], user.color[2]]", "def setColors(self):\r\n # productive\r\n profprint()\r\n self.color = [[0, 0, 0] for i in range(MAXCOL)]\r\n self.color255 = self.setColors255()\r\n for i in range(MAXCOL):\r\n for j in range(3):\r\n self.color[i][j] = self.color255[i][j] / float(255)\r\n\r\n return self.color", "def _onEdit(self, event):\n index = self.colorlist.GetSelection()\n icol = self._indexTupleToColor(index)\n icd = wx.ColourData()\n icd.SetColour(icol)\n dialog = wx.ColourDialog(self, icd)\n\n if dialog.ShowModal() == wx.ID_OK:\n tup = _colorDataToTuple(dialog.GetColourData())\n self.graphColors[index] = tup\n self._tupleListToStrings()\n self._updateButtons(None)", "def send_colors(colors: np.ndarray):\n send(\"colors\", json.dumps(np.ravel(colors).tolist()))", "def set_color():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The new RGB color vector of the particle\")\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n function.must_handle_array = True\n return function", "def update_rgba(self, index=None):\n # return a rendering factory\n f = self._rendering_factory()\n\n if index == None:\n if self.is_4d():\n layer_list = [self._data[..., i, self._time_point] for i in \n 
range(self.get_data_shape()[2])]\n else:\n layer_list = [self._data[..., i] for i in \n range(self.get_data_shape()[2])]\n self._rgba_list = map(f, layer_list)\n else:\n if self.is_4d():\n self._rgba_list[index] = f(self._data[..., index,\n self._time_point])\n else:\n self._rgba_list[index] = f(self._data[..., index])", "def _onAdd(self, event):\n dialog = wx.ColourDialog(self)\n\n if dialog.ShowModal() == wx.ID_OK:\n tup = _colorDataToTuple(dialog.GetColourData())\n self.graphColors.append(tup)\n self._tupleListToStrings()\n self._updateButtons(None)", "def add_poly_mesh_arrays_data_to_gl(self, key, fv_indices, points, face_normals, cstype, c, face_colors, vertex_colors):\n n_vertices_max = len(fv_indices[0])\n\n data_mesh_points_list = np.array([])\n data_mesh_normals_list = np.array([])\n data_mesh_colors_list = np.array([])\n n_all_vertices = 0\n\n max_iter = n_vertices_max - 1\n\n for corner_idx in range(1, max_iter):\n if n_vertices_max > 3:\n existing_triangles = fv_indices[:, corner_idx + 1] != -1\n\n if True not in existing_triangles:\n continue\n\n fv_indices_to_draw_all_vertices = fv_indices[existing_triangles]\n fv_indices_to_draw = fv_indices_to_draw_all_vertices[:, [0, corner_idx, corner_idx + 1]]\n face_normals_to_draw = face_normals[existing_triangles]\n else:\n fv_indices_to_draw = fv_indices\n face_normals_to_draw = face_normals\n\n fv_indices_flattened = fv_indices_to_draw.flatten()\n n_all_vertices += len(fv_indices_flattened)\n\n n_faces = len(fv_indices_to_draw)\n\n vertexData = self.createVertexData(fv_indices_flattened, points)\n\n normalData = self.createNormaldata(face_normals_to_draw)\n\n if cstype == 0:\n colorData = self.createConstantColorData(c, n_faces)\n elif cstype == 1:\n colorData = self.createFaceColorData(face_colors)\n elif cstype == 2:\n colorData = self.createVertexColorData(vertex_colors, fv_indices_flattened)\n\n if self._showBack:\n fv_indices_flattened_reversed = fv_indices_flattened[::-1]\n n_all_vertices += len(fv_indices_flattened_reversed)\n\n reversed_mesh_points = self.createVertexData(fv_indices_flattened_reversed, points)\n\n reversed_normals = self.createNormaldata(-face_normals_to_draw[::-1])\n\n if cstype == 0:\n reversed_colors = colorData\n elif cstype == 1:\n reversed_colors = self.createFaceColorData(face_colors[::-1])\n elif cstype == 2:\n reversed_colors = self.createVertexColorData(vertex_colors, fv_indices_flattened_reversed)\n\n data_mesh_points_list = np.concatenate([data_mesh_points_list, vertexData, reversed_mesh_points])\n data_mesh_normals_list = np.concatenate([data_mesh_normals_list, normalData, reversed_normals])\n data_mesh_colors_list = np.concatenate([data_mesh_colors_list, colorData, reversed_colors])\n else:\n data_mesh_points_list = np.concatenate([data_mesh_points_list, vertexData])\n data_mesh_normals_list = np.concatenate([data_mesh_normals_list, normalData])\n data_mesh_colors_list = np.concatenate([data_mesh_colors_list, colorData])\n\n vertex_data = np.array(data_mesh_points_list, dtype=GLHelpFun.numpydatatype(GLDataType.FLOAT))\n normal_data = np.array(data_mesh_normals_list, dtype=GLHelpFun.numpydatatype(GLDataType.FLOAT))\n color_data = np.array(data_mesh_colors_list, dtype=GLHelpFun.numpydatatype(GLDataType.FLOAT))\n\n self.setlistdata_f3xyzf3nf4rgba(key, vertex_data, normal_data, color_data)\n self.setVertexCounter_byNum(key, n_all_vertices)\n return", "def vertex_coloring(self, display = False):\r\n stack = self.SL_algorithm()\r\n color_of_vertex = self.greedily_coloring(stack)\r\n if(display):\r\n 
self.display_graph(color_of_vertex)\r\n return color_of_vertex\r\n else: \r\n return color_of_vertex", "def polyColorPerVertex(*args, alpha: Union[float, bool]=0.0, clamped: bool=True, colorB:\n Union[float, bool]=0.0, colorDisplayOption: bool=True, colorG:\n Union[float, bool]=0.0, colorR: Union[float, bool]=0.0, colorRGB:\n Union[List[float, float, float], bool]=None, notUndoable: bool=True,\n relative: bool=True, remove: bool=True, representation: Union[int,\n bool]=0, q=True, query=True, e=True, edit=True, **kwargs)->Union[bool,\n Any]:\n pass" ]
[ "0.66109", "0.63820016", "0.61168706", "0.59581137", "0.5888359", "0.577895", "0.57686615", "0.57098585", "0.5686346", "0.56688666", "0.56354624", "0.5607529", "0.5606135", "0.5555189", "0.5537485", "0.5527043", "0.5501716", "0.5500356", "0.5498688", "0.54671127", "0.543129", "0.5425047", "0.5391202", "0.53818864", "0.53810245", "0.5353221", "0.53426915", "0.5338775", "0.5317864", "0.53167933" ]
0.79452085
0
Build internal vertex list. This method must create a vertex list and assign it to `self._vertex_list`. It is advisable to use it during `__init__` and to then update the vertices accordingly with `self._update_vertices`. While it is not mandatory to implement it, some properties ( namely `batch` and `group`) rely on this method to properly recreate the vertex list.
def _create_vertex_list(self): raise NotImplementedError('_create_vertex_list must be defined in ' 'order to use group or batch properties')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_vertices(self):\n self.vertices = []\n for key in self.graph:\n self.vertices.append(self.Vertex(key, self.graph[key]))", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def populate_vertices(self, vertices_list):\n vertices = []\n for vertex in vertices_list:\n vertex_id = vertex[0]\n vertices.append(Vertex(vertex_id))\n self.vertices = vertices", "def __init__(self):\n self.vert_list = {}\n self.num_vertices = 0", "def __init__(self,vertices):\n self._vertices = vertices\n self._edges = []\n for i in range(len(self._vertices)-1)\n self._edges.append( [i,i+1] )", "def __init__(self, numvertices, directed=False):\n self._numvertices = numvertices\n self._directed = directed\n self._numedges = 0\n self._adjacents = [list() for _ in range(0, numvertices)]", "def initialize_vertices(self, objects_: Union[Dict[str, List[Union[str, int]]], NamedTuple],\n root_name: str, independent: bool, group: int,\n vertices_list: list = None) -> list:\n if vertices_list is None:\n vertices_list = []\n if not isinstance(vertices_list, list):\n raise TypeError(f\"Expected List, got {type(vertices_list)}\")\n\n group_store = {}\n for key, values in objects_.items():\n if key == root_name:\n if type(values) == int or type(values) == str:\n node = Graph_Node(subtype_name=key, value=values,\n independent=True, level=\"root\")\n node.group.add(group)\n group_store[key] = node\n\n # add root nodes to the class node list\n self._add_node(node)\n else:\n raise TypeError(\"Expected value of 'root_name' key to be a str or int\")\n else:\n node = Graph_Node(subtype_name=key, value=values,\n independent=independent)\n node.group.add(group)\n group_store[key] = node\n vertices_list.append(group_store)\n\n return vertices_list", "def vertices(self):\n try:\n return self._vertices\n except:\n self._vertices = [list(x) for x in self.vertex_generator()]\n return self._vertices", "def __init__(self) -> None:\n self._vertices = {}", "def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()", "def __init__(self, vertices):\n self.vertices = vertices", "def add_vertex(self):\n self.visited_node += [False]\n self.V = self.V + 1\n self.adjacency_list.append(list())", "def __init__(self):\n self.vertices = {}", "def initialize_graph(self, V, edge_list):\n # ---------- INSERT CODE BELOW ----------\n for _ in range(V):\n self.add_vertex()\n \n for node in edge_list:\n self.add_edge(node[0],node[1],node[2])\n\n # ---------- INSERT CODE ABOVE ----------", "def __init__(self):\n self.vert_dict = {}\n # self.vert_dict = []\n self.num_vertices = 0", "def vertices(self, v):\n self._vertices = v", "def add_vertex(self, key):\n # increment the number of vertices\n self.num_vertices += 1\n # create a new vertex\n vertex = Vertex(key)\n # add the new vertex to the vertex dictionary with a list as the value\n # self.vert_dict[vertex] = []\n # add the new vertex to the vertex list\n self.vert_dict[key] = vertex\n # return the new vertex\n return vertex", "def __init__(self):\n self.vertices = ((0, 0, 0),(1, 0, 0),(0, 1, 0),(0, 0, 1))\n self.edges=(0,1),(0,2),(0,3)", "def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it 
is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")", "def _update_vertices(self):\n raise NotImplementedError(\"_update_vertices must be defined\"\n \"for every ShapeBase subclass\")", "def __init__(self, verts=None, frags=None, geoms=None, count=0):\n\n GLObject.__init__(self)\n self._count = count\n self._buffer = None\n\n # Make sure shaders are shaders\n self._verts = self._get_shaders(verts, VertexShader)\n self._frags = self._get_shaders(frags, FragmentShader)\n self._geoms = self._get_shaders(geoms, GeometryShader)\n\n self._uniforms = {}\n self._attributes = {}\n\n # Build hooks, uniforms and attributes\n self._build_hooks()\n self._build_uniforms()\n self._build_attributes()\n\n # Build associated structured vertex buffer if count is given\n if self._count > 0:\n dtype = []\n for attribute in self._attributes.values():\n dtype.append(attribute.dtype)\n self._buffer = np.zeros(self._count, dtype=dtype).view(VertexBuffer)\n self.bind(self._buffer)", "def _init_vertex_adjacency_matrix(self, verbose=False):\n self._init_from_cdd_input(self.cdd_Vrepresentation(),\n '--adjacency', verbose)", "def __init__(self, N, vertices):\n\n self._parent = {i: i for i in vertices}\n self._count = N\n self._rank = {i: 0 for i in vertices}", "def __init__(self):\n self.vertList = {}\n self.vertCount = 0", "def __init__(self, vertices=None):\n\n self._vertices = dict() \n if vertices is not None:\n for label in vertices.keys():\n self.add_vertex(label)\n for label, heads in vertices.items():\n for head in heads:\n self.add_edge(label, head)", "def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.gamma*self.cluster_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()", "def vertices(self):\n return list(self._graph)", "def get_vertices(self):\n vertices = []\n V = [[-self.base_vectors[:,n], self.base_vectors[:,n]] for n in range(self.base_vectors.shape[1])]\n combs = list(itertools.product(*V))\n for cb in combs:\n cb = np.sum(np.array(cb).T, axis=1, keepdims=True)\n vertices.append(self.base_vertices + cb)\n\n vertices = np.concatenate(vertices,axis=1)\n return vertices", "def build_graph(self):\n pass", "def copy(self):\n vList = GeneralVertexList(len(self.V))\n vList.setVertices(list(self.V.values()))\n return vList" ]
[ "0.701082", "0.66579324", "0.6513389", "0.64290905", "0.61587995", "0.6149416", "0.6126646", "0.60945904", "0.6051399", "0.5948048", "0.59308505", "0.5899427", "0.585141", "0.5845312", "0.5817854", "0.5809941", "0.58075523", "0.57637733", "0.57617563", "0.5758524", "0.5758283", "0.5721913", "0.5666414", "0.56546026", "0.5652261", "0.56518346", "0.5626314", "0.56212395", "0.56071925", "0.56059676" ]
0.8401217
0
Generate up-to-date vertex positions & send them to the GPU. This method must set the contents of `self._vertex_list.vertices` using a list or tuple that contains the new vertex coordinates for each vertex in the shape. See the `ShapeBase` subclasses in this module for examples of how to do this.
def _update_vertices(self): raise NotImplementedError("_update_vertices must be defined " "for every ShapeBase subclass")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_vertex_list(self):\n raise NotImplementedError('_create_vertex_list must be defined in '\n 'order to use group or batch properties')", "def populate_vertices(self, vertices_list):\n vertices = []\n for vertex in vertices_list:\n vertex_id = vertex[0]\n vertices.append(Vertex(vertex_id))\n self.vertices = vertices", "def vertices(self, v):\n self._vertices = v", "def update_rect(self):\n if self.vertex_list:\n self.vertex_list.vertices = (\n self._rect.bottom_left.tuple +\n self._rect.bottom_right.tuple +\n self._rect.top_right.tuple +\n self._rect.top_left.tuple\n )", "def draw(self):\n self._vertex_list.draw(self._draw_mode)", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def init_vertices(self):\n self.vertices = []\n for key in self.graph:\n self.vertices.append(self.Vertex(key, self.graph[key]))", "def redefinir_vertices(self):\n self.nueva_posicion_posible_parte_inferior = [0,0]\n self.nueva_posicion_posible_parte_superior = [0,0]\n self.vertice_1 = self.posicion\n self.vertice_2 = [self.posicion[0] + self.medidas, self.posicion[1]]\n self.vertice_3 = [self.posicion[0], self.posicion[1] + self.medidas]\n self.vertice_4 = [self.posicion[0] + self.medidas, self.posicion[1] + self.medidas]", "def update_shape_vaos(self, instance, show):\n shape = self._shape(instance)\n\n shape_object_id = id(shape)\n if not shape_object_id in self._shape_vaos:\n self._shape_vaos[shape_object_id] = VertexArray({\n 'vertex_position': VertexBuffer.from_numpy(shape.verticies),\n 'texture_coords': VertexBuffer.from_numpy(shape.texture_coords),\n }, self.program.attributes)", "def add_vertices(self, vertices_list):\n for vertex in vertices_list:\n self.add_vertex(vertex)", "def add_vertices(self, vertices: Iterable[\"Vertex\"]) -> Sequence[int]:\n start_index = len(self.vertices)\n self.vertices.extend(Vec3.generate(vertices))\n return tuple(range(start_index, len(self.vertices)))", "def __init__(self):\n self.vert_list = {}\n self.num_vertices = 0", "def _update(self):\n self.parametrize_beam()\n self.update_ranks()\n self._points = tf.reshape(self._endpoint, (1, 2)) * tf.reshape(self._ranks, (-1, 1))", "def update_position(\n self, front_left_vertex, front_right_vertex, back_left_vertex, back_right_vertex\n ):\n\n self.front_left_vertex = front_left_vertex\n self.front_right_vertex = front_right_vertex\n self.back_left_vertex = back_left_vertex\n self.back_right_vertex = back_right_vertex\n\n # Initialize the line vortices that make up the ring vortex.\n self.front_leg = LineVortex(\n origin=self.front_right_vertex,\n termination=self.front_left_vertex,\n strength=self.strength,\n )\n self.left_leg = LineVortex(\n origin=self.front_left_vertex,\n termination=self.back_left_vertex,\n strength=self.strength,\n )\n self.back_leg = LineVortex(\n origin=self.back_left_vertex,\n termination=self.back_right_vertex,\n strength=self.strength,\n )\n self.right_leg = LineVortex(\n origin=self.back_right_vertex,\n termination=self.front_right_vertex,\n strength=self.strength,\n )\n\n # Initialize a variable to hold the centroid of the ring vortex.\n self.center = ps.geometry.centroid_of_quadrilateral(\n self.front_left_vertex,\n self.front_right_vertex,\n self.back_left_vertex,\n self.back_right_vertex,\n )", "def vertices(self):\n try:\n return self._vertices\n except:\n self._vertices = [list(x) for x in self.vertex_generator()]\n return self._vertices", "def registerVertices(self,vl):\n 
self.set('patchmesh.vertices',FuzzList(vl))", "def vertices(self, *args, **kwargs) -> Any:\n pass", "def _update_positions(self):\n self._velocities += self._accelerations * self.time_step\n self._positions += self._velocities * self.time_step", "def __init__(self, vertices):\n self.vertices = vertices", "def update_rect(self):\n self._update_vertex_list()", "def __init__(self,vertices):\n self._vertices = vertices\n self._edges = []\n for i in range(len(self._vertices)-1)\n self._edges.append( [i,i+1] )", "def generate_positions(self):\n raise NotImplementedError(\"Should implement generate_positions()!\")", "def update_rect(self):\n if self.vertex_list:\n # Shrink the rectangle by half-a-pixel so there's no ambiguity \n # about where the line should be drawn. (The problem is that the \n # widget rect is always rounded to the nearest pixel, but OpenGL \n # doesn't seem deterministic about which side of the pixel it draws \n # the line on.)\n rect = self._rect.get_shrunk(0.5)\n self.vertex_list.vertices = (\n rect.bottom_left.tuple +\n # Don't know why this offset is necessary, but without it \n # the bottom-right pixel doesn't get filled in...\n (rect.bottom_right + (1,0)).tuple +\n rect.bottom_right.tuple +\n rect.top_right.tuple +\n rect.top_right.tuple +\n rect.top_left.tuple +\n rect.top_left.tuple +\n rect.bottom_left.tuple\n )", "def set_vertices(self):\n if self.rotation == 0:\n self.v0, self.v1, self.v2, self.v3 = self.non_rotated_vertices()\n else:\n self.v0, self.v1, self.v2, self.v3 = self.rotated_vertices()", "def _onmove(self, event):\n # Move the active vertex (ToolHandle).\n if self._active_handle_idx >= 0:\n idx = self._active_handle_idx\n self._xys[idx] = event.xdata, event.ydata\n # Also update the end of the polygon line if the first vertex is\n # the active handle and the polygon is completed.\n if idx == 0 and self._selection_completed:\n self._xys[-1] = event.xdata, event.ydata\n\n # Move all vertices.\n elif 'move_all' in self._state and self._eventpress:\n dx = event.xdata - self._eventpress.xdata\n dy = event.ydata - self._eventpress.ydata\n for k in range(len(self._xys)):\n x_at_press, y_at_press = self._xys_at_press[k]\n self._xys[k] = x_at_press + dx, y_at_press + dy\n\n # Do nothing if completed or waiting for a move.\n elif (self._selection_completed\n or 'move_vertex' in self._state or 'move_all' in self._state):\n return\n\n # Position pending vertex.\n else:\n # Calculate distance to the start vertex.\n x0, y0 = \\\n self._selection_artist.get_transform().transform(self._xys[0])\n v0_dist = np.hypot(x0 - event.x, y0 - event.y)\n # Lock on to the start vertex if near it and ready to complete.\n if len(self._xys) > 3 and v0_dist < self.grab_range:\n self._xys[-1] = self._xys[0]\n else:\n self._xys[-1] = event.xdata, event.ydata\n\n self._draw_polygon()", "def __init__(self) -> None:\n self._vertices = {}", "def vertices(self):\n d = self.space_dimension()\n v = vector(ZZ, d)\n points = []\n for g in self.minimized_generators():\n for i in range(0,d):\n v[i] = g.coefficient(Variable(i))\n v_copy = copy.copy(v)\n v_copy.set_immutable()\n points.append(v_copy)\n return tuple(points)", "def update(self):\r\n self.g = self.create_graph()", "def glVertex(self, x, y):\n print(\"pointSize\")\n print(self.pointSize)\n if self.vpHeight != 0 and self.vpWidth != 0:\n xx = x * ((self.vpWidth - self.pointSize) / 2)\n yy = y * ((self.vpHeight - self.pointSize) / 2)\n localX = self.vpX + int((self.vpWidth - self.pointSize) / 2) + int(xx)\n localY = self.vpY + 
int((self.vpHeight - self.pointSize) / 2) + int(yy)\n print(x, y, localX, localY)\n for x in range(self.pointSize):\n for y in range(self.pointSize):\n self.point(localX + x, localY + y, color(self.vr, self.vb, self.vg))\n else:\n print('Initialize glViewPort')\n sys.exit()", "def update_verts(self, dx, dy):\n self._x += dx\n self._y += dy\n if self._vertex_list:\n vertices = self._vertex_list.vertices[:]\n vertices[0::2] = [x + dx for x in vertices[0::2]]\n vertices[1::2] = [y + dy for y in vertices[1::2]]\n self._vertex_list.vertices[:] = vertices\n if self._label:\n self._label.x += dx\n self._label.y += dy" ]
[ "0.685211", "0.66294765", "0.63282067", "0.6211691", "0.61160797", "0.5978315", "0.59519964", "0.59514546", "0.58506083", "0.583711", "0.582701", "0.58230495", "0.57949495", "0.5790536", "0.57774246", "0.5764628", "0.5744858", "0.573755", "0.57267916", "0.5708336", "0.5698905", "0.56399405", "0.5627479", "0.5620589", "0.5619297", "0.5609809", "0.5579726", "0.5558493", "0.5547469", "0.55311316" ]
0.74700004
0
The X coordinate of the anchor point
def anchor_x(self): return self._anchor_x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_x(self):\n return self.posX", "def _get_x(self):\n return self.position.x", "def get_x_position(self):\n return self.rect.x", "def get_x_position(self):\n return self.actual_coordinates[0]", "def get_x(self):\n return self.coords[0]", "def get_pos_x(self):\n return self.__pos_x", "def x(self):\n return _libsbml.Point_x(self)", "def x(self):\n return self.coords[0]", "def x(self):\n return self._coords[0]", "def x(self):\r\n return self.position.x", "def origin_x(self):\n return self._origin[0]", "def anchor_position(self):\n return self._anchor_x, self._anchor_y", "def centerx(self):\n return self.left + self.width / 2", "def reflect_x(self):\n\n return Point(self.x, - self.y)", "def get_x(self):\n\t\treturn self._collision_rect.x + 14", "def get_alien_x(self):\n return self.x", "def getXCoordinate(self) -> float:\n return self.x_coord", "def get_origin_x_position(self):\n return self.origin_coordinates[0]", "def Getxcoord(self):\n return self.x_coord", "def border_box_x(self):\n return self.position_x + self.margin_left", "def x_origin(self):\n return self._x_origin", "def __get_x__(self):\n return self.Direction['x']", "def x(self):\n if self._x is None:\n self.compute_coordinates()\n return self._x", "def reflect_x(self):\n r_x = self.x\n r_y = self.y *-1\n\n return (Point(r_x,r_y))", "def x_coord(self):\n\n return self.x0 + np.arange(self.nx) * self.dx", "def getX(self):\n return self.position[0]", "def getX(self):\n return self.position.getX()", "def getXOffset(self):\n return _libsbml.Point_getXOffset(self)", "def content_box_x(self):\n return self.position_x + self.margin_left + self.padding_left + \\\n self.border_left_width", "def get_ship_x(self):\n return self.x" ]
[ "0.7760154", "0.7716742", "0.7668874", "0.7646122", "0.7621986", "0.7559974", "0.7516883", "0.75067914", "0.74623626", "0.7436206", "0.74320745", "0.73874915", "0.7360508", "0.7344753", "0.7308688", "0.73033357", "0.724241", "0.7212631", "0.71839815", "0.7164864", "0.7118556", "0.7091466", "0.70843357", "0.707584", "0.705123", "0.69932777", "0.69317585", "0.6904573", "0.6829178", "0.68021375" ]
0.90373385
0
The Y coordinate of the anchor point
def anchor_y(self): return self._anchor_y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_y(self):\n return self.position.y", "def get_y_position(self):\n return self.actual_coordinates[1]", "def y(self):\r\n return self.position.y", "def get_y_position(self): \n return self.rect.y", "def get_alien_y(self):\n return self.y", "def get_y(self):\n\t\treturn self._collision_rect.y + 25", "def get_y(self):\n return self.posY", "def get_y(self):\n return self.coords[1]", "def __get_y__(self):\n return self.Direction['y']", "def y(self):\n return self.top", "def getY(self):\n return self.position[1]", "def y(self):\n return self.coords[1]", "def bottom_y(self):\r\n return self.position.y - self.size.y - self.bulk", "def top_y(self):\r\n return self.position.y + self.size.y + self.bulk", "def border_box_y(self):\n return self.position_y + self.margin_top", "def y_coord(self):\n\n return self.y0 + np.arange(self.ny) * self.dy", "def get_pos_y(self):\n return self.__pos_y", "def origin_y(self):\n return self._origin[1]", "def getY(self):\n return self.position.getY()", "def y(self):\n return self._coords[1]", "def get_virtual_y_position(self):\n x_real = (\n - 1 * (self.get_x_position() - self.get_origin_x_position()) * cos(\n self.get_origin_direction() * pi / 180\n )\n )\n y_real = (\n (self.get_y_position() - self.get_origin_y_position()) *\n sin(self.get_origin_direction() * pi / 180)\n )\n return x_real + y_real", "def y(self):\n return _libsbml.Point_y(self)", "def anchor_position(self):\n return self._anchor_x, self._anchor_y", "def getYpos(self):\n return self.y", "def getY(self):\n return self.y", "def getY(self):\n return self.y", "def getYCoordinate(self) -> float:\n return self.y_coord", "def get_origin_y_position(self):\n return self.origin_coordinates[1]", "def getY(self):\n return self.__y", "def getY(self):\r\n\t\treturn self._y" ]
[ "0.7840486", "0.775186", "0.775122", "0.77391493", "0.7671916", "0.7608013", "0.7590078", "0.75695264", "0.74748814", "0.7469654", "0.74505246", "0.7402634", "0.74024355", "0.737545", "0.73700285", "0.73483944", "0.7334472", "0.7331511", "0.73242426", "0.72643715", "0.72592527", "0.72424185", "0.7227245", "0.7221457", "0.71503574", "0.71503574", "0.71240526", "0.7107469", "0.7080425", "0.7077132" ]
0.8872084
0
The (x, y) coordinates of the anchor point, as a tuple.
def anchor_position(self): return self._anchor_x, self._anchor_y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)", "def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y", "def coordinates(self) -> Tuple[float, float, float, float, float]:\n return (self.x, self.y, self.x + self.width, self.y + self.height)", "def xy(self) -> Tuple[int, int]:\n return self._x, self._y", "def get_pos(self):\n return (self.x, self.y)", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def get(self):\n return (self.x,self.y);", "def coordinate(self):\n\t\tif self.boldness_coord is None and self.price_coord is None and self.hold_coord is None:\n\t\t\treturn None\n\n\t\treturn (self.boldness_coord, self.price_coord, self.hold_coord)", "def position(self):\n return self.x, self.y", "def get(self):\r\n return ((self.x, self.y), self.dir)", "def getXY(self):\n return (self.X,self.Y)", "def getBallPos(self) -> (int,int):\n return self.x, self.y", "def get_point(self):\n return self._x, self._y", "def get_location(self):\r\n return self.__x, self.__y", "def position(self):\n return self._x, self._y", "def coords(self) -> Tuple[float, float]:\n return self.lat, self.lng", "def center(self) -> Tuple[int, int]:\n center_x = int((self.x1 + self.x2) // 2)\n center_y = int((self.y1 + self.y2) // 2)\n return (center_x, center_y)", "def coordinate(self) -> Tuple[float, float]:\n return self.lat, self.lon", "def coords(self):\n return (self.x, self.y, self.z)", "def coords(self):\n return (self.x, self.y, self.z)", "def get_anchor_points(self):\n rows, cols = np.where(self.overlap_mask)\n self.anchor_points = tuple(zip(rows, cols))[:: self.sampling_int]\n print(\"# of anchors: {}\".format(len(self.anchor_points)))", "def get_anchor_pos(self, anchor) -> Vec:\n x, y = self.pos\n w, h = self.size\n\n # faster and prettier than if/elif chains\n rct = {\n Anchor.top_left: Vec(x, y),\n Anchor.top: Vec(x + (w / 2), y),\n Anchor.top_right: Vec(x + (w / 2), y + h),\n Anchor.middle_left: Vec(x, y + (h / 2)),\n Anchor.middle: Vec(x + (w / 2), y + (h / 2)),\n Anchor.middle_right: Vec(x + w, y + (h / 2)),\n Anchor.bottom_left: Vec(x, y + h),\n Anchor.bottom: Vec(x + (w / 2), y + h),\n Anchor.bottom_right: Vec(x + w, y + h)\n }\n\n if anchor in rct:\n return rct[anchor]\n return rct[Anchor.top_left]", "def tuple(self) -> Tuple[float, float]:\n return (self.latitude, self.longitude)", "def getPosition(self):\n\tleft = self.getLeft()\n\ttop = self.getTop()\n\treturn (left,top)", "def coords2D(self):\n return (self.x, self.y)", "def getMachineCoordinates(self):\n return (self.x, self.y, self.z)", "def get(self):\n return self.x, self.y", "def anchor_x(self):\n return self._anchor_x", "def getMousePosition(self):\n return (self.mouseData.x, self.mouseData.y)", "def origin(self):\n return (self._x_origin, self._y_origin)" ]
[ "0.76912844", "0.76704925", "0.75844866", "0.757345", "0.75154847", "0.73809236", "0.7226224", "0.70824313", "0.7065255", "0.70325917", "0.70245636", "0.69454324", "0.69359416", "0.6896294", "0.68918604", "0.68812925", "0.68750954", "0.6868717", "0.6834308", "0.6834308", "0.67683214", "0.6710933", "0.6693798", "0.6686162", "0.66853756", "0.6628984", "0.6627311", "0.66089976", "0.6584024", "0.6559359" ]
0.7830221
0
Blend opacity. This property sets the alpha component of the color of the shape. With the default blend mode (see the constructor), this allows the shape to be drawn with fractional opacity, blending with the background. An opacity of 255 (the default) has no effect. An opacity of 128 will make the shape appear translucent.
def opacity(self): return self._rgba[3]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _opacity(self):\n item = self.item()\n return 255 if item is None else item.getColor().alpha()", "def opacity(im,alpha):\n if im.mode != 'RGBA':\n im = im.convert('RGBA')\n else:\n im = im.copy()\n alphachannel = im.split()[3]\n alphachannel = ImageEnhance.Brightness(alphachannel).enhance(alpha)\n im.putalpha(alphachannel)\n return im", "def SetFillAlpha(self, alpha):\n self._attalpha[\"fill\"] = alpha\n self.SetFillColorAlpha(self.GetFillColor(), alpha)", "def setTransparency(self, transparency):\n self.render_context.alpha_mode = transparency", "def putalpha(self, alpha):\r\n channels, depth = self._get_channels_and_depth(self._mode)\r\n\r\n if isinstance(alpha, np.ndarray): \r\n paste_image = True\r\n else:\r\n paste_image = False\r\n\r\n if channels==4:\r\n r, g, b, a = self.split()\r\n if not paste_image:\r\n a[:] = alpha\r\n else:\r\n a = alpha.copy()\r\n colorband = (r, g, b, a)\r\n self._instance = merge(\"RGBA\", colorband, image=True)\r\n elif channels == 3:\r\n if not paste_image:\r\n sh = self._instance.shape\r\n sh = (sh[0], sh[1], 1)\r\n a = np.zeros(sh, dtype=depth)\r\n a[:] = alpha\r\n else:\r\n a = alpha.copy()\r\n r, g, b = self.split()\r\n colorband = (r, g, b, a)\r\n self._instance = merge(\"RGBA\", colorband, image=True)\r\n elif channels < 2: # \"L\" or \"LA\"\r\n if not paste_image:\r\n sh = self._instance.shape\r\n sh = (sh[0], sh[1], 1)\r\n a = np.zeros(sh, dtype=depth)\r\n a[:] = alpha\r\n else:\r\n a = alpha.copy()\r\n if channels == 2:\r\n l, a_old = self.split()\r\n colorband = (l, a)\r\n else:\r\n colorband = (self._instance, a)\r\n self._instance = merge(\"LA\", colorband, image=True)", "def opacity(self) -> int:\n\n return self._opacity", "def BlendColour(fg, bg, alpha):\r\n \r\n result = bg + (alpha*(fg - bg))\r\n \r\n if result < 0.0:\r\n result = 0.0\r\n if result > 255:\r\n result = 255\r\n \r\n return result", "def alpha_blend(input_image, segmentation_mask, alpha=0.5):\n blended = np.zeros(input_image.size, dtype=np.float32)\n blended = input_image * alpha + segmentation_mask * (1 - alpha)\n return blended", "def layer_blend(foreground, background, foreground_alpha=.6):\n cv2.addWeighted(foreground, foreground_alpha,\n background, 1 - foreground_alpha, 0, background)\n\n return background", "def set_alpha(self, value: Optional[int], flags: int = 0) -> 'BaseImage':\n if value is None:\n self._surface.set_alpha(None)\n return self\n assert isinstance(value, int)\n assert 0 <= value <= 255, 'alpha value must be an integer between 0 and 255'\n self._surface.set_alpha(value, flags)\n return self", "def opacity(self, opacity=None):\n\n if opacity is None:\n return self._opacity\n else:\n if not isinstance(opacity, int) and not isinstance(opacity, float):\n raise TypeError(\"opacity must be numeric, not '%s'\" % opacity)\n if not 0 <= opacity <= 1:\n raise ValueError(\n \"opacity must be between 0 and 1, not %s\" % (str(opacity))\n )\n self._opacity = opacity", "def opacity(self):\n # type: () -> float\n return self._opacity", "def apply_opacity(im, opacity):\n if im.mode == 'RGB':\n im.putalpha(opacity)\n return im\n elif im.mode == 'RGBA':\n r, g, b, a = im.split()\n opacity_scale = opacity / 255\n a = a.point(lambda i: i*opacity_scale)\n return Image.merge('RGBA', [r, g, b, a])\n else:\n raise NotImplementedError()", "def Alpha(*args, **kwargs):\n return _gdi_.Colour_Alpha(*args, **kwargs)", "def blend(self, color, alpha):\n return Color(rgb=lerp(self.rgb, color.rgb, alpha))", "def draw_alpha(self):\n if self.alpha == 255:\n 
self.current_sprite_alpha = self.current_sprite\n else:\n mask = pygame.Surface(self.current_sprite.get_size(), flags=pygame.SRCALPHA)\n mask.fill((255, 255, 255, self.alpha))\n self.current_sprite_alpha = self.current_sprite.copy()\n self.current_sprite_alpha.blit(mask, (0, 0), special_flags=pygame.BLEND_RGBA_MULT)", "def opacity( self ):\n if ( self.isIsolateHidden() ):\n return 0.1\n \n opacity = super(XNode, self).opacity()\n layer = self.layer()\n if ( layer ):\n return layer.opacity() * opacity\n \n return opacity", "def opacity(self, opacity):\n # type: (float) -> None\n\n if opacity is not None:\n if not isinstance(opacity, (float, int)):\n raise TypeError(\"Invalid type for `opacity`, type has to be `float`\")\n\n self._opacity = opacity", "def alpha(cls, rgb_color, transparency):\n\n if transparency > 1:\n transparency = 1\n elif transparency < 0:\n transparency = 0\n return rgb_color + str(hex(int(254 * transparency)))[2:]", "def apply_alpha(self, background=\"#000000FF\"):\n\n def tx_alpha(cf, af, cb, ab):\n \"\"\"Translate the color channel with the alpha channel and background channel color.\"\"\"\n\n return round_int(\n abs(\n cf * (af * RGB_CHANNEL_SCALE) + cb * (ab * RGB_CHANNEL_SCALE) * (1 - (af * RGB_CHANNEL_SCALE))\n )\n ) & 0xFF\n\n if self.a < 0xFF:\n r, g, b, a = self._split_channels(background)\n\n self.r = tx_alpha(self.r, self.a, r, a)\n self.g = tx_alpha(self.g, self.a, g, a)\n self.b = tx_alpha(self.b, self.a, b, a)\n\n return self.get_rgb()", "def _setOpacity(self, opacity):\n item = self.item()\n if item is not None:\n color = item.getColor()\n color.setAlpha(opacity)\n item.setColor(color)", "def set_alpha(self, alpha):\n if alpha < 0 or alpha > 255:\n raise ValueError(\"alpha must be betweeen 0 and 255\")\n\n self.alpha = alpha\n self.draw_alpha()", "def overlay_image_alpha(self,img, img_overlay, pos, alpha_mask):\n\n x, y = pos\n\n # Image ranges\n y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])\n x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])\n\n # Overlay ranges\n y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)\n x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)\n\n # Exit if nothing to do\n if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:\n return\n\n channels = img.shape[2]\n\n alpha = alpha_mask[y1o:y2o, x1o:x2o]\n alpha_inv = 1.0 - alpha\n\n for c in range(channels):\n img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] +\n alpha_inv * img[y1:y2, x1:x2, c])", "def setOpacity(self, opa):\n if self._displayPjt:\n self._displayPjt.setOpacityPalette(opa)\n if self._displayUsr:\n self._displayUsr.setOpacityPalette(opa)\n if self._displayVtk:\n self._displayVtk.setOpacityPalette(opa)", "def set_blend_mode_over(self):\n self.image_item.setCompositionMode(QtGui.QPainter.CompositionMode_SourceOver)", "def opacity(self, opacity: int) -> None:\n\n for sprite in self._sprites:\n sprite.opacity = opacity", "def Transparent(self, alpha):\r\n\r\n if alpha < 0 or alpha > 255:\r\n raise Exception(\"Invalid transparency value (%s)\"%repr(alpha))\r\n \r\n self.transparent = alpha\r\n self.needsTransparency = True", "def _blend(img1, img2, alpha):\n return img1.mul(alpha).add(1 - alpha, img2)", "def alpha_extend(color: C3I, alpha: int = 255) -> C4I:\n return (*color, alpha)", "def GetFillAlpha(self):\n return self._attalpha[\"fill\"]" ]
[ "0.5919304", "0.58347225", "0.5765835", "0.56959087", "0.56722987", "0.56599414", "0.5651657", "0.5604866", "0.56035215", "0.5593729", "0.5592929", "0.55849886", "0.55826104", "0.5575257", "0.5568694", "0.5568251", "0.5501839", "0.5460512", "0.5455902", "0.5431659", "0.5364404", "0.5353129", "0.5338576", "0.53104806", "0.52914137", "0.52463347", "0.51302826", "0.5113368", "0.5081985", "0.50682545" ]
0.5972214
0
The start angle of the arc.
def start_angle(self): return self._start_angle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))", "def angle(self):\n return 0", "def angle(self) -> float:\n ...", "def angle(self) -> float:\n return self._angle", "def angle(self) -> float:\n return self._angle", "def angle(self):\n return self._angle", "def angle(self):\n return self._angle", "def angle(self):\n return self._angle", "def getAngle(self):\n return self._angle", "def angle(self):\n return math.degrees(math.atan2(self[1], self[0]))", "def angle(self) -> int:", "def getAngle(self):\n return self.articulateEncoder.getDistance()+self.angleOffset", "def getAngle(self):\n x, y = self.components\n return math.atan2(y, x)", "def get_angle(self):\n return self.__angle", "def getAngle(self):\n return self.vector.angle", "def angle(self):\n self.convert_window(\"Angle\", \"degree\", [\"arcminute\", \"arcsecond\", \"circle\", \"degree\", \"gon\", \"gradian\", \"mil(Nato)\", \"mil(Soviet Union)\", \"mil(Sweden)\", \"octant\", \"quadrant\", \"radian\", \"revolution\", \"sextant\", \"sign\", \"turn\"])", "def angle(self):\n v = self.p1 - self.p0\n return atan2(v.y, v.x)", "def angle(self):\n return atan2(self.v.y, self.v.x)", "def convergence_angle(self):\n return np.arctan2(self.radius, self.focal_length)", "def angle(self):\n return angle(self.force, self.forceXYZ, self.excited_axis,\n self.distance, self.distanceXYZ)", "def test_start_angle(self):\n\n assert self.test_shape.azimuth_placement_angle == [\n 0,\n 45,\n 90,\n 135,\n 180,\n 225,\n 270,\n 315,\n ]\n self.test_shape.start_angle = 10\n assert self.test_shape.azimuth_placement_angle == [\n 10,\n 55,\n 100,\n 145,\n 190,\n 235,\n 280,\n 325,\n ]", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def calculate_attitude_angle(self):\n return np.arctan(np.pi * (1 - self.eccentricity_ratio ** 2) / (4 * self.eccentricity_ratio))", "def phase_angle_arcsec(self):\n if hasattr(self, '_m_phase_angle_arcsec'):\n return self._m_phase_angle_arcsec\n\n self._m_phase_angle_arcsec = (self.sphase / 1500)\n return getattr(self, '_m_phase_angle_arcsec', None)", "def _rad_center(self):\n return ((self.rad_hi + self.rad_lo) / 2).to(\"deg\")", "def gona(self):\n return GONAngle(self.gon())", "def gona(self):\n return GONAngle(self.gon())", "def gona(self):\n return GONAngle(self.gon())", "def start_point(self) -> Vec3:\n v = list(self.vertices([self.dxf.start_angle]))\n return v[0]", "def angle(self, factor):\n n1 = self.getNormalizedVector()\n n2 = factor.getNormalizedVector()\n\n # Determine angle between the two vectors.\n cos_angle = n1.scalarProduct(n2)\n angle = np.arccos(cos_angle)\n # Edoardo: numpy.arccos() always returns an angle in radians in [0, pi].\n\n # Mark's version:\n # By convention always return the smaller angle.\n # while angle > 2.0 * np.pi:\n # angle -= 2.0 * np.pi\n\n # if angle > np.pi:\n # angle = 2.0 * np.pi - angle\n\n return angle" ]
[ "0.7364975", "0.7243839", "0.7123329", "0.7072685", "0.7072685", "0.7015025", "0.7015025", "0.7015025", "0.69397306", "0.6842029", "0.6822126", "0.6776148", "0.66612154", "0.6619934", "0.66096723", "0.6608768", "0.6604382", "0.6560013", "0.6542135", "0.6500179", "0.64864135", "0.64290327", "0.64041084", "0.63944983", "0.6308608", "0.6290011", "0.6290011", "0.6290011", "0.6278759", "0.6266986" ]
0.8060585
0
The radius of the circle.
def radius(self): return self._radius
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def radius(self) -> float:\n return self._radius", "def getRadius(self):\n return self.radius", "def getRadius(self):\n return self.__radius", "def get_radius(self):\n return self.R", "def radius(self) -> Union[int, float]:\n return self.proto.radius", "def get_radius(self):\n return self.r", "def radius(self) -> int:\n pass", "def radius(self):\n return sqrt(self.radius_square())", "def get_radius(self):\n return self.__radius", "def get_radius(self):\n return self.radius", "def get_radius(self):\n return self.radius", "def get_radius(self):\n if self.no_dist is False:\n dist = self.distance\n radius = (dist * self.ang_size / 60. *\n np.pi/180. * ct._kpc_over_pc_)/2.\n self.radius = radius\n else:\n self.radius = -1 # use -1 to indicate unknown diameter\n\n return self.radius", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def radius(self) -> float:\n return get_radius_from_element(self.element)", "def polar_radius(self):\n return self.r * (1 - self.f)", "def radius(self):\n if self._radius is None:\n translated_xyz = translate_to_center_of_mass(self.get_xyz())\n _, symbols, x, y, z = get_xyz_matrix(translated_xyz)\n border_elements = list() # a list of the farthest element/s\n r = 0\n for si, xi, yi, zi in zip(symbols, x, y, z):\n ri = xi ** 2 + yi ** 2 + zi ** 2\n if ri == r:\n border_elements.append(si)\n elif ri > r:\n r = ri\n border_elements = [si]\n atom_r = max([get_atom_radius(si) if get_atom_radius(si) is not None else 1.50 for si in border_elements])\n self._radius = r ** 0.5 + atom_r\n logger.info('Determined a radius of {0:.2f} Angstrom for {1}'.format(self._radius, self.label))\n return self._radius", "def radius(self) -> float:\n return math.hypot(self.x, self.y)", "def circumference(self):\n return (2 * math.pi * self.__radius)", "def radius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, self.vertices[0])", "def circumference(self):\n return math.pi * self.radius * 2", "def get_radius(self):\r\n return self._handler.get_radius()", "def get_radius(self):\r\n return 1", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return 2 * self.radius", "def inner_radius(self):\n return self._inner_radius", "def radius(self):\n if self._radius is None:\n self._radius = self.stem / 2\n if self._radius * 2 > self.stem:\n raise Exception('Invalid radius. Maximum radius = 2 * stem.')\n return self._radius", "def circle_area(self):\n return np.pi * self.ring_radius ** 2", "def scatteringRadius(self):\n\n return self.__scatteringRadius", "def radius_square(self):\n try: \n return self._radius_2\n except AttributeError:\n center = self.center()\n self._radius_2 = max( (v.vector() - center).dot_product(\n v.vector() - center) for v in\n self.vertex_generator() )\n return self._radius_2" ]
[ "0.88121676", "0.8728803", "0.8715825", "0.8557909", "0.8500211", "0.8490493", "0.8431619", "0.83906394", "0.8388057", "0.8369628", "0.8369628", "0.8269019", "0.82527936", "0.8229431", "0.8172032", "0.80954605", "0.80517036", "0.79516727", "0.7894692", "0.78869784", "0.7884495", "0.7785552", "0.7652257", "0.7652257", "0.76414365", "0.76335317", "0.75829643", "0.7569757", "0.75140023", "0.74749327" ]
0.8809348
1
The start angle of the sector.
def start_angle(self): return self._start_angle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle(self):\n return 0", "def angle(self) -> float:\n ...", "def angle(self):\n return self._angle", "def angle(self):\n return self._angle", "def angle(self):\n return self._angle", "def angle(self) -> float:\n return self._angle", "def angle(self) -> float:\n return self._angle", "def angle(self) -> int:", "def getAngle(self):\n return self.articulateEncoder.getDistance()+self.angleOffset", "def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))", "def angle(self):\n return math.degrees(math.atan2(self[1], self[0]))", "def getAngle(self):\n return self._angle", "def get_angle(self):\n return self.__angle", "def start_point(self) -> Vec3:\n v = list(self.vertices([self.dxf.start_angle]))\n return v[0]", "def getAngle(self):\n return self.vector.angle", "def first_slice_angle(self):\n return self.container['first_slice_angle']", "def getAngle(self):\n x, y = self.components\n return math.atan2(y, x)", "def heading(self):\n x, y = self._orient\n result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0\n result /= self._degreesPerAU\n return (self._angleOffset + self._angleOrient*result) % self._fullcircle", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def angle(self):\n return atan2(self.v.y, self.v.x)", "def angle(self):\n v = self.p1 - self.p0\n return atan2(v.y, v.x)", "def angle(self):\n return angle(self.force, self.forceXYZ, self.excited_axis,\n self.distance, self.distanceXYZ)", "def getFinalLarmorAngle(self):\n return np.degrees(self.theta_L_array[-1])", "def angle(self):\r\n return self.model.angle", "def sector_angles(self) -> np.ndarray:\n return self._sector_angles", "def phase_angle_arcsec(self):\n if hasattr(self, '_m_phase_angle_arcsec'):\n return self._m_phase_angle_arcsec\n\n self._m_phase_angle_arcsec = (self.sphase / 1500)\n return getattr(self, '_m_phase_angle_arcsec', None)", "def test_start_angle(self):\n\n assert self.test_shape.azimuth_placement_angle == [\n 0,\n 45,\n 90,\n 135,\n 180,\n 225,\n 270,\n 315,\n ]\n self.test_shape.start_angle = 10\n assert self.test_shape.azimuth_placement_angle == [\n 10,\n 55,\n 100,\n 145,\n 190,\n 235,\n 280,\n 325,\n ]", "def radians(self) -> float:\n return math.atan2(self.y, self.x)", "def sector(self):\n return self.sub_sector.sector", "def rotation_angle(self):\n return self.container['rotation_angle']" ]
[ "0.732036", "0.70946956", "0.7063976", "0.7063976", "0.7063976", "0.6993282", "0.6993282", "0.6937274", "0.679846", "0.67892903", "0.6756747", "0.673555", "0.6734926", "0.6668017", "0.66629857", "0.6628418", "0.65350014", "0.6508508", "0.6504248", "0.64290774", "0.64239025", "0.635417", "0.6307982", "0.6304002", "0.6292133", "0.62689537", "0.622592", "0.6167886", "0.61630577", "0.61493015" ]
0.79691404
1
The rectangle's border color. This property sets the color of the border of a bordered rectangle. The color is specified as an RGB tuple of integers '(red, green, blue)' or an RGBA tuple of integers '(red, green, blue, alpha)'. Setting the alpha on this property will change the alpha of the entire shape, including both the fill and the border. Each color component must be in the range 0 (dark) to 255 (saturated).
def border_color(self): return self._border_rgba
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def border_color(self):\n if self.has_border:\n return Color(self._border_actor.GetProperty().GetColor())\n return None", "def border_color(self) -> int:\n return self._border_color", "def borderColor( self ):\n return self._borderColor", "def set_border(self, color: tuple = (0, 0, 0, 255), width: int = 1):\n self.border_color = color\n self.border = width", "def border_color(self, color: Union[int, Tuple[int, int, int]]) -> None:\n\n if not (isinstance(color, int) or color is None):\n raise TypeError(\"A color must be represented by a integer value\")\n\n self._border_color = color\n\n if color is None:\n self._palette[1] = 0x00\n self._palette.make_transparent(1)\n else:\n self._palette[1] = color\n self._palette.make_opaque(1)", "def bordercolor(self):\n return self[\"bordercolor\"]", "def drawRectWithBorder(screen, bColor, fColor, posX, posY, height, width, bWidth):\n \n #draw outline rect \n pygame.draw.rect(screen, bColor, (posX, posY, height, width))\n #draw fill rect\n pygame.draw.rect(screen, fColor, (posX + bWidth, posY + bWidth, height - bWidth * 2, width - bWidth * 2))", "def set_border(self, colour):\n if colour in (BLACK, WHITE, GREEN, BLUE, RED, YELLOW, ORANGE, CLEAN):\n self.border_colour = colour", "def add_border(self, color='white', width=2.0):\n points = np.array([[1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])\n\n lines = np.array([[2, 0, 1], [2, 1, 2], [2, 2, 3], [2, 3, 0]]).ravel()\n\n poly = pyvista.PolyData()\n poly.points = points\n poly.lines = lines\n\n coordinate = _vtk.vtkCoordinate()\n coordinate.SetCoordinateSystemToNormalizedViewport()\n\n mapper = _vtk.vtkPolyDataMapper2D()\n mapper.SetInputData(poly)\n mapper.SetTransformCoordinate(coordinate)\n\n actor = _vtk.vtkActor2D()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(Color(color).float_rgb)\n actor.GetProperty().SetLineWidth(width)\n\n self.AddViewProp(actor)\n self.Modified()\n\n self._border_actor = actor\n return actor", "def __init__(self, x, y, width, height, border=1, color=(255, 255, 255),\n border_color=(100, 100, 100), batch=None, group=None):\n self._x = x\n self._y = y\n self._width = width\n self._height = height\n self._rotation = 0\n self._border = border\n self._num_verts = 8\n\n fill_r, fill_g, fill_b, *fill_a = color\n border_r, border_g, border_b, *border_a = border_color\n\n # Start with a default alpha value of 255.\n alpha = 255\n # Raise Exception if we have conflicting alpha values\n if fill_a and border_a and fill_a[0] != border_a[0]:\n raise ValueError(\"When color and border_color are both RGBA values,\"\n \"they must both have the same opacity\")\n\n # Choose a value to use if there is no conflict\n elif fill_a:\n alpha = fill_a[0]\n elif border_a:\n alpha = border_a[0]\n\n # Although the shape is only allowed one opacity, the alpha is\n # stored twice to keep other code concise and reduce cpu usage\n # from stitching together sequences.\n self._rgba = fill_r, fill_g, fill_b, alpha\n self._border_rgba = border_r, border_g, border_b, alpha\n\n program = get_default_shader()\n self._batch = batch or Batch()\n self._group = self.group_class(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, program, group)\n\n self._create_vertex_list()\n self._update_vertices()", "def get_border(self):\n return self._border", "def border(self):\n return self._border", "def border(self):\n return self._border", "def set_border_color(self, color):\n self._border.set_background_color(color)", "def draw_rect(surface, fill_color, outline_color, rect, border=1):\n 
surface.fill(outline_color, rect)\n surface.fill(fill_color, rect.inflate(-border * 2, -border * 2))\n return rect", "def border(self):\r\n\t\treturn self._border", "def set_borders(self, val):\n self.rborder = val\n self.lborder = val\n self.tborder = val\n self.bborder = val", "def draw_lrtb_rectangle_outline(left, right, top, bottom, color,\n border_width=1):\n if left > right:\n raise AttributeError(\"Left coordinate must be less than or equal to \"\n \"the right coordinate\")\n\n if bottom > right:\n raise AttributeError(\"Bottom coordinate must be less than or equal to \"\n \"the top coordinate\")\n\n center_x = (left + right) / 2\n center_y = (top + bottom) / 2\n width = right - left\n height = top - bottom\n draw_rectangle_outline(center_x, center_y, width, height, color,\n border_width)", "def getBorder(self):\n return self.__border", "def tile_border(draw, r_s, r_e, c_s, c_e, color, border_size=TILE_BORDER_SIZE):\n for x in range(0, border_size):\n draw.rectangle([(c_s + x, r_s + x), (c_e - 1 - x, r_e - 1 - x)], outline=color)", "def _draw_border(self, grid):\n # Left and Right border\n for i, x in enumerate(grid):\n x[0] = x[len(grid) - 1] = self._wall_color\n grid[i] = x\n\n # Top and Bottom border\n grid[0] = grid[len(grid) - 1] = [self._wall_color for _ in range(len(grid))]\n return grid", "def drawBorder(self,color,x1,y1,x2,y2,thick):\n self.drawRect(color,x1,y1,x2,y1+thick)\n self.drawRect(color,x1,y1,x1+thick,y2)\n self.drawRect(color,x2-thick,y1,x2,y2)\n self.drawRect(color,x1,y2-thick,x2,y2)", "def drawBorder(self):\n\t\t# horizontal lines\n\t\tself.wts(0, 0, '╭' + '─' * (self.width - 2) + '╮', self._borderColor)\t\t\t\t\t\t# Top\n\t\tself.wts(self.height - 2, 0, '└' + '─' * (self.width - 2) + '╯', self._borderColor)\t\t\t# Bottom\n\t\t# vertical lines\n\t\tfor yPos in range(1, self.height - 2):\n\t\t\tself.wts(yPos, 0, '│', self._borderColor)\n\t\t\tself.wts(yPos, self.width - 1, '│', self._borderColor)", "def draw_border():\n \n length = len(BORDER_COORDS)\n \n # Constants for sine wave\n b = 2 * math.pi / length\n speed = 2\n \n # Draw sinusoid red/green design\n for i in range(length):\n # Sine function\n t = perf_counter()\n sine = math.sin(b*i + speed*t) # Wave with period 28\n \n # Map sine value from [-1, 1] to [0, 4)\n red = min(math.floor(2 * sine + 2), 3)\n \n # Fade red and green colors\n lp.led_ctrl_xy(*BORDER_COORDS[i], red, 3 - red)", "def GetBorderPen(self):\r\n\r\n return self._borderPen", "def draw_xywh_rectangle_outline(top_left_x, top_left_y, width, height, color,\n border_width=1):\n center_x = top_left_x + (width / 2)\n center_y = top_left_y + (height / 2)\n draw_rectangle_outline(center_x, center_y, width, height, color,\n border_width)", "def _render_borders(self):\n\n # XXX\n # - read the old glBlendFunc value and restore it if neccessary.\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n self.border_program.use()\n for shape_object_id, instances in self._instances.items():\n self._shape_vaos[shape_object_id].bind()\n for instance in instances:\n border_size = instance.border['size']\n if len(instance.border) > 0:\n glEnable(GL_BLEND)\n # XXX\n # - cache the modelview matrix\n modelview = ModelView()\n modelview.set_scaling(instance.size[0]+2*border_size, instance.size[1]+2*border_size)\n modelview.set_position(instance.position[0]-border_size, instance.position[1]-border_size)\n self.border_program.uniform('mat_modelview', modelview.mat4)\n self.border_program.uniform('color', instance.border['color'])\n glDrawArrays(GL_TRIANGLES, 0, 
6)\n\n glDisable(GL_BLEND)\n # XXX\n # - cache the modelview matrix\n modelview = ModelView()\n modelview.set_scaling(*instance.size)\n modelview.set_position(*instance.position)\n self.border_program.uniform('color', [0,0,0,0])\n self.border_program.uniform('mat_modelview', modelview.mat4)\n glDrawArrays(GL_TRIANGLES, 0, 6)\n\n self._shape_vaos[shape_object_id].unbind()\n self.border_program.unuse()\n\n glEnable(GL_BLEND)", "def draw_rectangle_outline(center_x, center_y, width, height, color,\n border_width=1, tilt_angle=0):\n\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n if tilt_angle:\n GL.glRotatef(tilt_angle, 0, 0, 1)\n\n # Set line width\n GL.glLineWidth(border_width)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINE_LOOP)\n GL.glVertex3f(-width // 2, -height // 2, 0.5)\n GL.glVertex3f(width // 2, -height // 2, 0.5)\n GL.glVertex3f(width // 2, height // 2, 0.5)\n GL.glVertex3f(-width // 2, height // 2, 0.5)\n GL.glEnd()", "def set_bottom_border(self, val):\n self.bborder = val", "def borders(self):\n border_left = pm.Segment(self.space.static_body, (-5, 0), (-5, self.screen_height), 10)\n border_right = pm.Segment(self.space.static_body, (self.screen_width + 5, 0),\n (self.screen_width + 5, self.screen_height), 10)\n border_top = pm.Segment(self.space.static_body, (0, self.screen_height + 5),\n (self.screen_width, self.screen_height + 5), 10)\n border_bottom = pm.Segment(self.space.static_body, (0, 0), (self.screen_width, 0),\n self.screen_height * 0.1)\n border_bottom.friction = TERRAIN_FRICTION # Set the bottom border friction\n border_bottom.color = DARK_GREY # Set the bottom border color\n\n # Set the collision types so that the collision handlers check for them\n border_top.collision_type = 4\n border_left.collision_type = 4\n border_right.collision_type = 4\n border_bottom.collision_type = 4\n self.space.add(border_left, border_right, border_top, border_bottom) # Add the borders to the Pymunk space" ]
[ "0.70855075", "0.6881495", "0.65604347", "0.6481082", "0.63925225", "0.62421936", "0.5909977", "0.58470607", "0.58169484", "0.58093", "0.5666582", "0.5656656", "0.5656656", "0.56459826", "0.5612554", "0.54604846", "0.54521537", "0.54230624", "0.54169893", "0.5317806", "0.5285453", "0.5202133", "0.52009016", "0.51865816", "0.51679486", "0.51314175", "0.51013756", "0.50748897", "0.50656855", "0.5046801" ]
0.72605103
0
Third X coordinate of the shape.
def x3(self): return self._x + self._x3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def x(self):\n return self.coords[0]", "def get_x(self):\n return self.coords[0]", "def x(self):\n return self._coords[0]", "def get_x(self):\n return self.posX", "def _get_x(self):\n return self.position.x", "def x(self):\n return _libsbml.Point_x(self)", "def x(self) -> int:\n return self.data.x_centre >> 4", "def get_xshape(self):\n return self.__xshape", "def get_x_position(self):\n return self.actual_coordinates[0]", "def getX(self):\n return self.x", "def get_x(self):\n\t\treturn self._collision_rect.x + 14", "def getX(self):\n return self.position[0]", "def x(self):\r\n return self.position.x", "def x(self):\n if self._x is None:\n self.compute_coordinates()\n return self._x", "def GetX(self):\r\n\r\n return self._x", "def get_x_position(self):\n return self.rect.x", "def x(self):\n return self._turtle.xcor()", "def x(self):\n return self._turtle.xcor()", "def x(self):\n return self._kml['x']", "def get_x(self) -> int:\n return self.__x", "def getX(self):\r\n\t\treturn self._x", "def origin_x(self):\n return self._origin[0]", "def getXCoordinate(self) -> float:\n return self.x_coord", "def __get_x__(self):\n return self.Direction['x']", "def Getxcoord(self):\n return self.x_coord", "def xy(self):\n return self.to_xyah()[0:2]", "def getX(self):\n return self.components[0]", "def getX(self):\n return self.components[0]", "def get_pos_x(self):\n return self.__pos_x", "def xx(self):\n if self._xx is None:\n self._create_meshgrid(self.x, self.y)\n return self._xx" ]
[ "0.70563585", "0.6986707", "0.6912074", "0.67736113", "0.66708046", "0.6601498", "0.6595005", "0.65564144", "0.654688", "0.646505", "0.64605474", "0.64541453", "0.6450418", "0.6446166", "0.64437145", "0.64261997", "0.64108616", "0.64108616", "0.6409632", "0.63983107", "0.6397202", "0.6387337", "0.6380784", "0.6366484", "0.6362068", "0.63450193", "0.6339659", "0.6339659", "0.6335167", "0.6306961" ]
0.70080376
1
Third Y coordinate of the shape.
def y3(self): return self._y + self._y3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def y(self):\n return self.coords[1]", "def get_y(self):\n return self.coords[1]", "def y(self):\n return self._coords[1]", "def getY(self):\n return self.position[1]", "def getY(self):\n return self.components[1]", "def getY(self):\n return self.components[1]", "def y(self) -> int:\n return self.data.y_centre >> 4", "def get_y_position(self):\n return self.actual_coordinates[1]", "def y_coord(self):\n\n return self.y0 + np.arange(self.ny) * self.dy", "def get_ly(self):\r\n return self.dy * self.ny - self.oy", "def GetY(self):\r\n\r\n return self._y", "def findY(self):\n return self.y", "def y(self):\n return self[1]", "def _get_y(self):\n return self.position.y", "def get_y(self):\n return self.__y", "def getY(self):\r\n\t\treturn self._y", "def y(self):\r\n return self.position.y", "def y ( self ) :\n return self.yvar", "def __get_y__(self):\n return self.Direction['y']", "def y(self):\n return self.y", "def y(self):\n return self._data[1]", "def y(self):\n return _libsbml.Point_y(self)", "def y(self):\n return self._arr[1]", "def y(self):\n return self._turtle.ycor()", "def y(self):\n return self._turtle.ycor()", "def getY(self):\n return self.y", "def getY(self):\n return self.y", "def Y(self):\n return self.y\n pass", "def y(self):\n return self._translation[1, 0]", "def y(self,) -> int:\n return self._y" ]
[ "0.7237422", "0.7184859", "0.7050024", "0.7018006", "0.69789183", "0.69789183", "0.6907676", "0.68496335", "0.6797792", "0.67078674", "0.6696344", "0.6619756", "0.66165763", "0.6595526", "0.6586894", "0.65845793", "0.6581554", "0.6558877", "0.6552422", "0.65516716", "0.6541403", "0.6539874", "0.65352905", "0.65221226", "0.65221226", "0.6522067", "0.6522067", "0.64829576", "0.6482312", "0.648177" ]
0.80560815
0
The outer radius of the star.
def outer_radius(self): return self._outer_radius
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inner_radius(self):\n return self._inner_radius", "def get_radius(self):\n return self.R", "def get_radius(self):\r\n return 1", "def get_radius(self):\n return self.r", "def radius(self):\n return sqrt(self.radius_square())", "def radius(self) -> int:\n pass", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def radius(self):\n if self._radius is None:\n translated_xyz = translate_to_center_of_mass(self.get_xyz())\n _, symbols, x, y, z = get_xyz_matrix(translated_xyz)\n border_elements = list() # a list of the farthest element/s\n r = 0\n for si, xi, yi, zi in zip(symbols, x, y, z):\n ri = xi ** 2 + yi ** 2 + zi ** 2\n if ri == r:\n border_elements.append(si)\n elif ri > r:\n r = ri\n border_elements = [si]\n atom_r = max([get_atom_radius(si) if get_atom_radius(si) is not None else 1.50 for si in border_elements])\n self._radius = r ** 0.5 + atom_r\n logger.info('Determined a radius of {0:.2f} Angstrom for {1}'.format(self._radius, self.label))\n return self._radius", "def outer_rad(self):\n return self._outer_rad", "def radius(self) -> float:\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def get_radius(self):\n return self.radius", "def get_radius(self):\n return self.radius", "def radius_square(self):\n try: \n return self._radius_2\n except AttributeError:\n center = self.center()\n self._radius_2 = max( (v.vector() - center).dot_product(\n v.vector() - center) for v in\n self.vertex_generator() )\n return self._radius_2", "def getRadius(self):\n return self.radius", "def get_radius(self):\n return self.__radius", "def radius(self) -> float:\n return get_radius_from_element(self.element)", "def radius(self) -> float:\n return math.hypot(self.x, self.y)", "def getRadius(self):\n return self.__radius", "def circle_area(self):\n return np.pi * self.ring_radius ** 2", "def polar_radius(self):\n return self.r * (1 - self.f)", "def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()", "def get_radius(self):", "def circumference(self):\n return math.pi * self.radius * 2", "def radius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, self.vertices[0])", "def radius(self) -> Union[int, float]:\n return self.proto.radius" ]
[ "0.7974437", "0.7683473", "0.76440513", "0.75512946", "0.7493376", "0.7478774", "0.74299973", "0.7410966", "0.7409471", "0.7294475", "0.7293487", "0.7293487", "0.7293487", "0.7293487", "0.7293487", "0.72553426", "0.72553426", "0.7249025", "0.72409123", "0.7231385", "0.722837", "0.7204818", "0.7138238", "0.71230686", "0.70896125", "0.70602804", "0.7037267", "0.7036358", "0.7035043", "0.70342666" ]
0.8559289
0
The inner radius of the star.
def inner_radius(self): return self._inner_radius
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outer_radius(self):\n return self._outer_radius", "def get_radius(self):\n return self.R", "def get_radius(self):\r\n return 1", "def get_radius(self):\n return self.r", "def radius(self):\n return sqrt(self.radius_square())", "def radius_square(self):\n try: \n return self._radius_2\n except AttributeError:\n center = self.center()\n self._radius_2 = max( (v.vector() - center).dot_product(\n v.vector() - center) for v in\n self.vertex_generator() )\n return self._radius_2", "def radius(self) -> int:\n pass", "def radius(self):\n if self._radius is None:\n translated_xyz = translate_to_center_of_mass(self.get_xyz())\n _, symbols, x, y, z = get_xyz_matrix(translated_xyz)\n border_elements = list() # a list of the farthest element/s\n r = 0\n for si, xi, yi, zi in zip(symbols, x, y, z):\n ri = xi ** 2 + yi ** 2 + zi ** 2\n if ri == r:\n border_elements.append(si)\n elif ri > r:\n r = ri\n border_elements = [si]\n atom_r = max([get_atom_radius(si) if get_atom_radius(si) is not None else 1.50 for si in border_elements])\n self._radius = r ** 0.5 + atom_r\n logger.info('Determined a radius of {0:.2f} Angstrom for {1}'.format(self._radius, self.label))\n return self._radius", "def getRadius(self):\n return self.radius", "def radius(self) -> float:\n return self._radius", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def radius(self):\n return self._radius", "def diameter(self):\n return 2 * self.radius", "def get_radius(self):\n return self.radius", "def get_radius(self):\n return self.radius", "def radius(self) -> float:\n return get_radius_from_element(self.element)", "def get_radius(self):\n return self.__radius", "def getRadius(self):\n return self.__radius", "def diameter(self):\n return self.radius * 2", "def diameter(self):\n return self.radius * 2", "def radius(self) -> float:\n return math.hypot(self.x, self.y)", "def get_radius(self):", "def circumference(self):\n return math.pi * self.radius * 2", "def circumference(self):\n return (2 * math.pi * self.__radius)", "def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()", "def radius(self) -> Union[int, float]:\n return self.proto.radius" ]
[ "0.80226016", "0.7799017", "0.77007794", "0.7670528", "0.7602944", "0.75848025", "0.7573816", "0.75393444", "0.74344003", "0.7425558", "0.7412033", "0.73999923", "0.73999923", "0.73999923", "0.73999923", "0.73999923", "0.73865736", "0.73805606", "0.73805606", "0.7353282", "0.73377705", "0.7312581", "0.7275428", "0.7275428", "0.7273058", "0.72145325", "0.7201996", "0.71896183", "0.71795756", "0.7160313" ]
0.8454345
0
Number of spikes of the star.
def num_spikes(self):
    return self._num_spikes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_spikes(self):\n return self.model.n_spikes", "def numberOfPoints(self):\n return 20000", "def count_spikes(spk):\n shspk = numpy.shape(spk)\n if len(shspk) == 0:\n nspk = 0\n elif shspk[0] == 0:\n nspk = 0\n else:\n nspk = shspk[0]\n return(nspk)", "def flaky_count(self) -> int:\n return pulumi.get(self, \"flaky_count\")", "def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number", "def number_of_bells(self) -> int:\n return self._tower.number_of_bells", "def total_KE(particles):\r\n return sum([particle.kinetic_energy() for particle in particles])", "def n_rays(self):\n try: \n return self._n_rays\n except AttributeError:\n self._n_rays = 0\n for r in self.rays(): self._n_rays += 1\n return self._n_rays", "def vlass_stars(duration, n_beams):\n n_pointings = duration//4.2\n n_observed = n_pointings*n_beams\n return n_observed", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def number_of_iterations(self) -> int:\n pass", "def num_quadrature_points(self) -> int:", "def num_wet(self):\n return np.sum(self.array == 5)", "def num_polys(self):\n ret_val = self._num_polys()\n return ret_val", "def count(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"count\")", "def count(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"count\")", "def number_of_electrodes(self):\n return self._pre_kernel.shape[1]", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def count_star(self, tokens):\n return self.counts[tokens] - self.beta", "def numberOfSamples (self) :\n S = self.mdp.S\n A = self.mdp.A\n gamma = self.mdp.gamma\n\n factor = 1 / (self.epsilon ** 2 * (1 - gamma) ** 4)\n term2 = np.log((S * A) / (self.epsilon * (1 - gamma) ** self.delta))\n return (S + term2) * factor", "def num_wires(self):", "def num_sigmas(self):\n return 2*self.n + 1", "def num_particles(self) -> int:\n return len(self.particles)", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def well_count(self):\n return(len(self.wells))", "def n_timesteps(self) -> int:\n return len(self.time)", "def n(self):\n if not self.table:\n return 0\n return max(self.omega) + 1" ]
[ "0.68215555", "0.6694392", "0.6427395", "0.62704337", "0.6255422", "0.6169472", "0.6124636", "0.6118094", "0.61015534", "0.6085608", "0.6085608", "0.6085608", "0.6085608", "0.6058355", "0.60444564", "0.60277", "0.60111445", "0.59934705", "0.59934705", "0.59739375", "0.5970636", "0.5966701", "0.59590226", "0.5949868", "0.5948", "0.5920909", "0.58825165", "0.58788973", "0.5874239", "0.58708984" ]
0.71719515
0
Returns the workspace container as determined by the settings.
def getTargetContainer(self):
    settings = zope.component.getUtility(IPMR2GlobalSettings)
    if settings.create_user_workspace:
        uwc = settings.getCurrentUserWorkspaceContainer()
        if uwc is not None:
            return uwc
    # Otherwise return the global workspace container.
    target = settings.getWorkspaceContainer()
    if target is None:
        raise NotFound(self.context, settings.default_workspace_subpath)
    return target
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_workspace(client):\n return client._creoson_post(\"windchill\", \"get_workspace\", key_data=\"workspace\")", "def get_workspace(client):\n workspace = getattr(settings, 'ASANA_WORKSPACE', None)\n if not workspace:\n workspaces = [\n workspace for workspace in client.workspaces.find_all(item_limit=1)\n ]\n if not workspaces:\n logger.error('Any workspaces was not found')\n return\n workspace = workspaces[0]['gid']\n return workspace", "def get_workspace(self) -> Workspace:\n if self.ws:\n return self.ws\n self.ws = Workspace.from_config()\n return self.ws", "def get_workspace(self):\n wid = self._config[\"workspace\"]\n return sim_consts.workspace_origin[wid], sim_consts.workspace_size[wid]", "def workspace(self):\n return self.rpc.call(MsfRpcMethod.DbCurrentWorkspace)['workspace']", "def workspace(self) -> str:\n return self._workspace", "def workspace(self):\n\n # get workspace specified for Vehicle or from its driver\n if self._workspace is not None:\n return self._workspace\n if self._control is not None:\n return self._control._workspace", "def mainWorkspace(self):\n return self._mainWorkspace", "def Workspace(self):\n return self._module.workspace", "def get_workspace(self):\n\n # Our AML config file\n with open(\"/usr/src/api/config.json\", \"r\") as json_file:\n config_data = json.load(json_file)\n\n # Let's connect to our workspace\n sp = ServicePrincipalAuthentication(tenant_id=config_data['tenant_id'], # tenantID\n service_principal_id=config_data['service_principal_id'], # clientId\n service_principal_password=config_data[\n 'service_principal_password']) # clientSecret\n\n ws = Workspace.get(name=config_data['workspace_name'],\n auth=sp,\n subscription_id=config_data['subscription_id'],\n resource_group=config_data['resource_group'])\n\n return ws", "def get(self, name):\n res = self.rpc.call(MsfRpcMethod.DbGetWorkspace, [name])\n if 'workspace' in res:\n return res['workspace']\n else:\n return", "def current_container(self):\n return self.layout.container", "def workspace(self, name='default'):\n w = self.list\n if name not in w:\n self.add(name)\n return Workspace(self.rpc, name)", "def get_container(self, profile, exec_cmd):\n container = None\n if self._value.has_option(profile, 'container'):\n container = self._value.get(profile, 'container')\n elif exec_cmd is not None:\n self.logger.error(\n \"No container parameter found\"\n )\n exit(1)\n\n self.logger.info(\"%s is selected as container\" % container)\n return container", "def _get_container(self) -> Container:\n obj = self.get_container()\n return to_container(obj)", "def getGSWorkspace(self, desired=None):\n role = self.getRole(desired)\n ws = role[\"roleName\"]\n return ws", "def get_container(self) -> CNT:\n raise NotImplementedError()", "def get_ws():\n tenant = os.environ.get('TENANT')\n if tenant:\n auth = InteractiveLoginAuthentication(tenant_id = tenant)\n ws = Workspace.from_config(auth = auth)\n else:\n ws = Workspace.from_config()\n return ws", "def CurrentWorkspace():\n return _C.CurrentWorkspace()", "def workspace_id(self) -> Optional[str]:\n return pulumi.get(self, \"workspace_id\")", "def getContainer(self):\n\n workerPorts = self.hz.get_map(self.container_map)\n unused: List[Container] = workerPorts.values(sql(\"active = 'no'\")).result()\n if len(unused) == 0:\n return \"max containers reached\"\n else:\n port = unused[0].port\n try:\n browser: Browser = self.client.containers.get('worker-{port}'.format(port=port))\n workerPorts.replace(key=port, value=Container(port=port, active=\"yes\", 
status=browser.status)).result()\n browser.restart()\n except APIError as e:\n if e.status_code == 404:\n browser = self.client.containers.run(self.client.images.get(browser_params['image']),\n detach=True,\n name='worker-{}'.format(port),\n ports={'4444/tcp': port}, network=os.getenv(\"NETWORK\", \"car_default\"))\n workerPorts.replace(key=port,\n value=Container(port=port, active=\"yes\", status=browser.status)).result()\n else:\n return str(port)\n logging.info(msg='started browser named worker-{port}'.format(port=port))\n self.wait_for_log(browser, BrowserConstants().CONTAINER_SUCCESS)\n return str(port)", "def getMainContainer(self, port):\n map = self.hz.get_map(self.container_map)\n try:\n browser: Browser = self.client.containers.run(self.client.images.get(browser_params['image']),\n detach=True,\n name='worker-{}'.format(port),\n ports={'4444/tcp': port},\n restart_policy={\"Name\": 'always'}, network=os.getenv(\"NETWORK\", \"car_default\"))\n self.wait_for_log(browser, BrowserConstants().CONTAINER_SUCCESS)\n except APIError as e:\n if e.status_code == 409:\n browser = self.client.containers.get('worker-{port}'.format(port=port))\n browser.restart()\n self.wait_for_log(browser, BrowserConstants().CONTAINER_SUCCESS)\n else:\n raise e\n map.replace(key=port, value=Container(port, active=\"yes\", status=browser.status))\n return str(port)", "def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")", "def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")", "def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")", "def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")", "def get_workspace(self, user_id=None, alias=None, unique_id=None, include_deleted=False):\n # Get UUID for workspace\n if alias == 'default_workspace':\n unique_id = 'default_workspace'\n else:\n uuid_mapping = self._get_uuid_mapping_object(user_id)\n status = self.include_status[:]\n if include_deleted:\n status.append('deleted')\n if not unique_id:\n unique_id = uuid_mapping.get_uuid(alias, user_id, status=status)\n if not unique_id:\n return False\n # return matching workspace \n self._logger.debug('Getting workspace \"{}\" with alias \"{}\"'.format(unique_id, alias)) \n \n return self.workspaces.get(unique_id, None)", "def container (self):\n return self.__container", "def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")", "def workspace_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"workspace_name\")" ]
[ "0.7000218", "0.683348", "0.6810205", "0.6738207", "0.67233706", "0.6647305", "0.6563112", "0.6479329", "0.6458167", "0.6424094", "0.6359544", "0.63457716", "0.63282126", "0.6298409", "0.6236829", "0.6234185", "0.622422", "0.61569285", "0.6118726", "0.60800993", "0.60078186", "0.59498364", "0.5922682", "0.5922682", "0.5920013", "0.5920013", "0.58624345", "0.5861396", "0.58456707", "0.58456707" ]
0.7890182
0
If the QuoteItems has nothing joined to it, then I'll go through and actually delete the record from the system, otherwise, I'll just mark the record deleted.
def QuoteItemsDel(Id, id='', **kw):
    if id != '':
        Id = id
    if Id != '':
        try:
            int_id = int(Id)
            record = model.InvQuoteItems.get(int_id)
        except (ValueError, SQLObjectNotFound):
            int_id = -1
    else:
        int_id = -1
    try:
        if int_id > 0:
            #No references for these objects, so I'll delete completely
            record.destroySelf()
            result=1
            result_msg = "Record deleted"
        else:
            result=0
            result_msg="Couldn't find the record"
    except:
        result=0
        result_msg = "Failed to modify the record"
        raise
    return dict(result=result, result_msg=result_msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete(self):\n\n\t\titem_id = mock_item()[0]\n\t\tmodels.delete(item_id)\n\n\t\titem = models.item(item_id)\n\t\tself.assertIsNone(item)", "def clear(self):\r\n self.orderitem_set.all().delete()", "def delete(self):\n return self.items.delete(item_id=self.id)", "def test_delete_item_using_delete(self):\n pass", "def delete_item(self):\n\n\t\tdb.session.delete(self)\n\t\tdb.session.commit()", "def delete(self):\n request = self.request\n raise_operation_error(\n request,\n \"Can't {} bid in Price Quotation tender\".format(\n OPERATIONS.get(request.method),\n ),\n )", "def delete(self):\r\n self.domain.delete_item(self)", "def delete(self):\n items = ShopcartItem.find_by_shopcartid(self.id)\n\n for item in items:\n item.delete()\n\n db.session.delete(self)\n db.session.commit()", "def _delete(self, table, _id, return_item=False):\n data = {\"Key\": _id, \"ReturnValues\": \"ALL_OLD\" if return_item else \"NONE\"}\n\n return self._response_handler(table, \"delete_item\", data)", "def delete(self, item):\r\n self.fetch()\r\n t = self.make_item_tuple(item)\r\n changed = False\r\n while t in self.data:\r\n self.data.remove(t)\r\n changed = True\r\n \r\n if changed:\r\n query_cache.set(self.iden, self.data)", "def check_and_delete(self, inventory): # used in a transaction block only so dont initiate a transaction here\n try:\n lines = inventory.lines\n for i in lines:\n if i.quantity == 0:\n i.delete((i,))\n # inventory.reload()\n inventory.save()\n chk = inventory.lines\n if len(chk) == 0:\n inventory.state = 'cancel'\n inventory.save()\n inventory.delete((inventory,))\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def item_delete(request):\n if request.method == 'POST':\n item_to_delete = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_delete.active = False\n item_to_delete.save()\n return HttpResponse(status=200)", "def delete(self):\n\n def _delete(result):\n oldid = self.id\n self.id = None\n self._deleted = True\n if self._transaction:\n return self.__class__.deleteAll(where=[\"id = ?\", oldid], transaction=self._transaction)\n else:\n return self.__class__.deleteAll(where=[\"id = ?\", oldid])\n\n def _deleteOnSuccess(result):\n if result == False:\n return defer.succeed(self)\n else:\n ds = []\n for relation in self.HABTM:\n name = relation['name'] if isinstance(relation, dict) else relation\n ds.append(getattr(self, name).clear(transaction=self._transaction))\n return defer.DeferredList(ds).addCallback(_delete)\n\n return defer.maybeDeferred(self.beforeDelete).addCallback(_deleteOnSuccess)", "def delete(self):\n try:\n from_table = self.get_from_table(self.content_type.name)\n\n if from_table is not None:\n combined_obj = CombinedTeledata.objects.get(id=self.object_id, from_table=from_table)\n combined_obj.keywords_combined.remove(self)\n except:\n logger.warn('Cannot remove keywords_combined record for {0} - {1}. 
Record may not exist.'.format(self.phrase, self.content_object.name))\n combined_obj = None\n\n super(Keyword, self).delete()", "def delete_item(self):\n for item in self.selection():\n origin_name = self.set(item)[\"1\"]\n origin_url = self.set(item)[\"2\"]\n for row in s.item:\n if row[\"item\"] == origin_name and row[\"url\"] == origin_url:\n s.item.remove(row)\n self.delete(item)", "def lineitem_post_delete(**kwargs):\n lineitem = kwargs['instance']\n try:\n bill = lineitem.bill\n bill.update_cached_totals()\n except ObjectDoesNotExist:\n logger.warn(\"Deleting a BillLineItem that does not have a Bill!\")", "def delete(self):\n if not self.is_deleted:\n self.is_deleted = True\n self.save()", "def delete_order():", "def delete(self):\n if not self.isNew:\n #We do not check the hasData property, so we can use this function to delete records\n #without reading them first.\n #TODO: this is stupid and unclean, change it\n try:\n CFG.CX.delete ( CFG.DB.SCHEMA + \".object\", { 'objectid' : self._objectid } )\n self.clearRecord()\n self.raiseEvent ( \"record_deleted\", self )\n except pg.DatabaseError, e:\n raise Record.DataManipulationError ( \"Deleting record {1} of '{0}'\".format(self._table.name, self._objectid),\n \"\",\n e)", "def done(self):\n\t\tdef txn():\n\t\t\tother = db.get(self.key())\n\t\t\tif other and other.eta == self.eta:\n\t\t\t\tother.delete()\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\treturn db.run_in_transaction(txn)", "def test_delete_order(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.deletes_order(1), \"Order successfully trashed\")\n self.assertEqual(len(self.orders_list.get_orders()), 1)", "def test_delete_bucketlist_item(self):\r\n email = \"[email protected]\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=\"test bucketlist\").first()\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertTrue(item)\r\n\r\n response = self.delete_bucketlist_item(email, _pword, bucketlist.id, item.id)\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(response.status, '200 OK')\r\n self.assertEqual(\r\n result['message'],\r\n 'Bucketlist Item with ID {} deleted'.format(item.id)\r\n )\r\n item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()\r\n self.assertFalse(item)", "def post_delete(self, *args, **kw):\n #obtenemos el id de la fase para hacer el filtrado despues de la redireccion\n item_to_del = DBSession.query(Item).filter_by(id_item=args[0]).one()\n fid = item_to_del.id_fase_fk\n pks = self.provider.get_primary_fields(self.model)\n d = {}\n for i, arg in enumerate(args):\n d[pks[i]] = arg\n self.provider.delete(self.model, d)\n\n path = './' + '../' * (len(pks) - 1) + '?fid=' + str(fid)\n\n redirect(path)", "def has_delete(self) -> bool:\n return \"delete_item\" not in self.__abstractmethods__", "def delete_item(self, id: str, user: User) -> bool:", "def test_deleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO3\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo in event.todoitem_set.all()\n\n self.client.get(reverse('todo_delete', args=[todo.pk]))\n\n assert event.todoitem_set.all().count() == 0", "def test_delete_non_existing_order(self):\n self.orders_list.place_order(self.order)\n 
self.orders_list.place_order(self.order)\n self.assertEqual(self.orders_list.deletes_order(2),\n \"order not found\")\n self.assertEqual(len(self.orders_list.get_orders()), 2)", "def delete(self):\n ...", "def __chore_delete(self, db):\n delete_chore = {\"id\": 1,\n \"worker_id\": 2}\n\n response = None\n while response is None:\n response = self.remove_api(body=json.dumps(delete_chore)).body\n\n # Test that response is success\n expected = {\"success\": True}\n self.assertEqual(response, json.dumps(expected))\n\n # Test that database contains updated chore info\n # chore_in_db = db.query(Chore).filter(Chore.id == delete_chore[\"id\"]).one()\n # self.assertIsNone(chore_in_db)", "def delete_bots(self, quote:str):\n\n\t\tif quote:\n\t\t\tconn \t\t\t = sqlite3.connect(self.name, detect_types=sqlite3.PARSE_DECLTYPES)\n\t\t\tconn.row_factory = sqlite3.Row\n\t\t\tc \t\t\t\t = conn.cursor()\n\n\t\t\t# Check if bots exist on the quote\n\t\t\tc.execute('SELECT * FROM bots WHERE quote=?', (quote, ))\n\t\t\tdetails = c.fetchall()\n\n\t\t\tif details:\n\t\t\t\tfor item in details:\n\t\t\t\t\tif 'Looking to exit' in dict(item)['status']:\n\t\t\t\t\t\tanswer = input(f\"You have open position(s) on {quote}, are you sure you want to delete its records ?\")\n\t\t\t\t\t\tif answer == 'n':\n\t\t\t\t\t\t\treturn\n\t\t\t\tc.execute('DELETE FROM bots WHERE quote=?', (quote, ))\n\t\t\t\tconn.commit()\n\t\t\t\tprint(f\"Deleted all records of {quote}.\")" ]
[ "0.6471906", "0.6318868", "0.6103051", "0.61027896", "0.6094735", "0.60656655", "0.5921691", "0.5912371", "0.59087306", "0.5865899", "0.5856185", "0.58085054", "0.5731289", "0.57115304", "0.57032573", "0.5684151", "0.56772625", "0.56723213", "0.56496936", "0.5649413", "0.5625069", "0.56100786", "0.5602828", "0.5588255", "0.55709964", "0.5569442", "0.55554855", "0.5544427", "0.5544287", "0.5538997" ]
0.6537003
0
Return a dict where keys are setup wizard step names, and the values the form class matching the step.
def get_setup_forms(self, wizard):
    return {} # pragma: no cover
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_form_classes(self):\n return {\n **self.form_classes\n }", "def _collect_kwargs(step):\n dicts = {}\n for s in _expand_inputs(step):\n name = s.name if s.name is not None else s.__class__.__name__\n if name in dicts.keys():\n raise ValueError(\"Duplicate step names: %s\" % name)\n\n d = dict(s._kwargs)\n d.pop('inputs', None)\n dicts[name] = d\n\n return dicts", "def get_form_initial(self, step):\n if step == self.DETAILS_STEP and step == self.steps.next:\n return self.get_details_form_initial()\n else:\n return super().get_form_initial(step)", "def generation_step_to_dict(generation_step: GenerationStep) -> Dict[str, Any]:\n return {\n \"__type\": generation_step.__class__.__name__,\n \"model\": generation_step.model,\n \"num_trials\": generation_step.num_trials,\n \"min_trials_observed\": generation_step.min_trials_observed,\n \"completion_criteria\": generation_step.completion_criteria,\n \"max_parallelism\": generation_step.max_parallelism,\n \"use_update\": generation_step.use_update,\n \"enforce_num_trials\": generation_step.enforce_num_trials,\n \"model_kwargs\": _encode_callables_as_references(\n generation_step.model_kwargs or {}\n ),\n \"model_gen_kwargs\": _encode_callables_as_references(\n generation_step.model_gen_kwargs or {}\n ),\n \"index\": generation_step.index,\n \"should_deduplicate\": generation_step.should_deduplicate,\n }", "def get_form_initial(self, step):\n initial_data = self.initial_dict.get(step, {}) # initial data of current step\n\n # SimulationForm0_LoadPdb\n step_0_prev_data = self.storage.get_step_data('0')\n step_0_prev_data = {} if step_0_prev_data is None \\\n else {'num_of_proteins': step_0_prev_data.get('0-num_of_proteins'),\n 'first_pdb_type': step_0_prev_data.get('0-first_pdb_type'),\n 'first_pdb_id': step_0_prev_data.get('0-first_pdb_id'),\n 'first_pdb_file': step_0_prev_data.get('0-first_pdb_file'),\n 'second_pdb_type': step_0_prev_data.get('0-second_pdb_type'),\n 'second_pdb_id': step_0_prev_data.get('0-second_pdb_id'),\n 'second_pdb_file': step_0_prev_data.get('0-second_pdb_file'),\n 'user_rand': step_0_prev_data.get('0-user_rand')}\n\n # SimulationForm1_DetermineRelativePosition\n step_1_prev_data = self.storage.get_step_data('1')\n step_1_prev_data = {} if step_1_prev_data is None \\\n else {'x1': step_1_prev_data.get('1-x1'), 'x2': step_1_prev_data.get('1-x2'),\n 'y1': step_1_prev_data.get('1-y1'), 'y2': step_1_prev_data.get('1-y2'),\n 'z1': step_1_prev_data.get('1-z1'), 'z2': step_1_prev_data.get('1-z2'),\n 'degXY_1': step_1_prev_data.get('1-degXY_1'), 'degYZ_1': step_1_prev_data.get('1-degYZ_1'),\n 'degXY_2': step_1_prev_data.get('1-degXY_2'), 'degYZ_2': step_1_prev_data.get('1-degYZ_2')}\n\n # SimulationForm2_SimulationParameters\n step_2_prev_data = self.storage.get_step_data('2')\n step_2_prev_data = {} if step_2_prev_data is None \\\n else {'temperature_scale': step_2_prev_data.get('2-temperature_scale'),\n 'temperature': step_2_prev_data.get('2-temperature'),\n 'time_step_number': step_2_prev_data.get('2-time_step_number')}\n\n update_data = {**step_0_prev_data, **step_1_prev_data, **step_2_prev_data, **initial_data}\n return self.initial_dict.get(step, update_data)", "def get_processed_form_wizard_data(form_wizard, form_list,\n form_element_entries):\n field_name_to_label_map = {}\n cleaned_data = {}\n for form in form_list:\n _field_name_to_label_map, _cleaned_data = get_processed_form_data(\n form,\n form_element_entries\n )\n field_name_to_label_map.update(_field_name_to_label_map)\n 
cleaned_data.update(_cleaned_data)\n\n return (\n field_name_to_label_map,\n cleaned_data\n )", "def get_template(self, request, step, form):\n return 'forms/wizard.html'", "def process_step(self, request, step, form):\n pass", "def default_steps(self):\n steps = {}\n for code, color in (\n ('Client', '255,255,255'),\n ('Online', '254,92,255'),\n ('Roto', '253,254,152'),\n ('MM', '254,151,152'),\n ('Anm', '254,173,146'),\n ('FX', '255,218,137'),\n ('Light', '254,205,138'),\n ('Comp', '161,236,154'),\n ('Art', '0,230,254'),\n ('Model', '254,205,138'),\n ('Rig', '253,254,152'),\n ('Surface', '231,251,154'),\n ):\n steps[code] = self.find_or_create('Step', dict(\n code=code,\n short_name=code,\n color=color,\n ))\n return steps", "def abc_transfer_steps(self):\n return [\n (20, 'abc_transfer_wizard'),\n (40, 'abc_create_invoice'),\n (60, 'abc_confirm_invoice')]", "def _wrap_form(self, parent_form_class):\n steptitle = pd_mf(u'Add ${name}',\n mapping={'name': self.fti.Title()})\n\n form_class = self._create_form_class(parent_form_class, steptitle)\n\n form_class.__name__ = 'WizardForm: %s' % parent_form_class.__name__\n return form_class", "def _get_bulk_change_form_class(self):\n return BulkChangeFormWizardHandlerPluginsForm", "def get_cleaned_data(self, request, step):\n return self._get_state(request).form_data.get(step.slug, None)", "def save(self, *args, **kwargs):\n step_numeral, step_name = kwargs.pop('step', (None, None))\n\n if step_numeral == 1:\n \"\"\"\n Basic Form: Application & File Uploader\n \"\"\"\n return self.cleaned_data\n if step_numeral == 2:\n \"\"\"\n Basic Form + Mapping Fields\n \"\"\"\n return self.cleaned_data\n\n if step_numeral == 3:\n pass # end-user is previewing", "def process_show_form(self, request, step, form):\n pass", "def _get_plugin_form_data(self, fields):\n form_data = {}\n for field, default_value in fields:\n try:\n form_data.update(\n {field: self.plugin_data.get(field, default_value)}\n )\n except Exception as err:\n logger.debug(\n \"Error in class %s. 
Details: %s\",\n self.__class__.__name__,\n str(err)\n )\n return form_data", "def _get_tab_definitions(self) -> Dict[str, CETabControlDef]:\n return {\n name: (cls, [self.mp_controls])\n for name, cls in self._TAB_DEFINITIONS.items()\n }", "def get_next_step(self, step=None):\n if step is None:\n step = self.steps.current\n form_list = self.get_form_list() \n key = form_list.keys().index(step) + 1 \n if len(form_list.keys()) > key:\n return form_list.keys()[key]\n return None", "def extract_step_settings(self,\n step_class: Type[Step],\n step_label: str,\n step_settings=None,\n lookups=None,\n sublookups=None):\n if not issubclass(step_class, Step):\n raise NotImplementedError(\"Steps have to inherit from class Step.\")\n\n if step_settings is None:\n step_settings = {}\n # No 'General' lookup since at this point we are only interested\n # in retrieving the settings of each step of a routine, not the settings\n # of the routine itself\n if lookups is None:\n lookups = [self.step_label, self.get_lookup_class().__name__]\n if sublookups is None:\n sublookups = [step_label, step_class.get_lookup_class().__name__]\n\n autocalib_settings = self.settings.copy({\n step_class.get_lookup_class().__name__:\n self.merge_settings(lookups, sublookups)\n })\n update_nested_dictionary(autocalib_settings,\n step_settings.get('settings', {}))\n return autocalib_settings", "def addForm(self, formDict):\n if self._track_nav:\n last_url = self._spider_path.pop_step()\n form_data = []\n for elem_name in formDict:\n elem_value = formDict[\"value\"]\n elem_type = formDict[\"type\"]\n elem_type = elem_type.lower()\n if elem_type == \"select\":\n elem_type = FormElemInfo.INPUT_SELECT\n elif elem_type == \"text\":\n elem_type = FormElemInfo.INPUT_TEXT\n form_data.append(FormElemInfo(elem_name, elem_value, elem_type))\n form = Form(last_url, form_data)\n self._spider_path.add_step(form)", "def generate_steplist(my_factory):\n steps = []\n stepnames = {}\n\n for factory, cmdargs in my_factory.steps:\n cmdargs = cmdargs.copy()\n try:\n step = factory(**cmdargs)\n except:\n print >>sys.stderr, ('error while creating step, factory=%s, args=%s'\n % (factory, cmdargs))\n raise\n name = step.name\n if name in stepnames:\n count = stepnames[name]\n count += 1\n stepnames[name] = count\n name = step.name + ('_%d' % count)\n else:\n stepnames[name] = 0\n step.name = name\n\n #TODO: is this a bug in FileUpload?\n if not hasattr(step, 'description') or not step.description:\n step.description = [step.name]\n if not hasattr(step, 'descriptionDone') or not step.descriptionDone:\n step.descriptionDone = [step.name]\n\n step.locks = []\n steps.append(step)\n\n return steps", "def get_registered_form_wizard_handler_plugins(as_instances=False):\n return get_registered_plugins(form_wizard_handler_plugin_registry,\n as_instances=as_instances)", "def get_ordered_form_wizard_handler_plugins():\n form_wizard_handler_plugins = SortableDict()\n\n # Priority goes to the ones specified as first in the settings\n for uid in FORM_WIZARD_HANDLER_PLUGINS_EXECUTION_ORDER:\n form_wizard_handler_plugins[uid] = []\n\n # Adding all the rest\n for uid in form_wizard_handler_plugin_registry._registry.keys():\n if uid not in form_wizard_handler_plugins:\n form_wizard_handler_plugins[uid] = []\n\n return form_wizard_handler_plugins", "def get_step_class_at_index(self, index):\n return self.routine_template.get_step_class_at_index(index)", "def initial_form_data(self, request, step, form):\n return None", "def process_step(self, form):\n 
#print(form.data)\n\n #print(form.data)\n #print(self)\n \n institution = {}\n inst_list = []\n if self.steps.current == '1':\n \n institution['institution'] = form.data['1-0-institution']\n institution['date_from'] = form.data['1-0-date_from']\n institution['date_to'] = form.data['1-0-date_to']\n inst_list.append(institution)\n inst_keys = dict(form.data.lists())\n \n #Create dictionary dynamically for the other institutions incase more than two institutions are entered\n if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is list:\n inst_list2 = []\n #Add institutions \n for i,insti in enumerate(inst_keys.get('1-NaN-institution')):\n inst_i = {}\n #print(i)\n date_from = inst_keys['1-NaN-date_from'][i]\n date_to = inst_keys['1-NaN-date_to'][i]\n course_duration = inst_keys['1-NaN-course_duration'][i]\n inst_i['institution'] = insti\n inst_i['date_from'] = date_from\n inst_i['date_to'] = date_to\n \n inst_list2.append(inst_i)\n #print(inst_list2)\n inst_list.extend(inst_list2)\n #Create dictionary dynamically for the other institutions incase more than two institutions are entered\n if inst_keys.get('1-NaN-institution') and type(inst_keys.get('1-NaN-institution')) is not list:\n inst_0 = {}\n inst_0['institution'] = form.data['1-NaN-institution']\n inst_0['date_from'] = form.data['1-NaN-date_from']\n inst_0['date_to'] = form.data['1-NaN-date_to']\n inst_0['course_duration'] = form.data['1-NaN-course_duration']\n #inst_0['achievements'] = ''\n inst_list.append(inst_0)\n \n #Add the entered information to a session object\n self.request.session['institution'] = inst_list", "def fields(self, required=False):\n form = h.simplsale_form(self._index_xml)\n if required:\n required = '.required'\n else:\n required = ''\n elements = CSSSelector('input[type!=\"submit\"]%s, select%s'\n % (required, required))(form)\n names = []\n for e in elements:\n name = e.attrib.get('name', None)\n if name is not None:\n names.append(name)\n if 'billing_amount' in names and not required:\n names.extend(['billing_amount_price', 'billing_amount_name'])\n d = dict((key, '') for key in names)\n return d", "def set_cleaned_data(self, request, step, data):\n self._get_state(request).form_data[step.slug] = data", "def get_form_class(self, request):\n\t\treturn RegistrationForm", "def wizard_process_received_form(form):\n lines = {key.split('_')[1]: value.split('_')[1] for key, value in form.items() if key.startswith(\"line\")}\n # print(lines)\n times = {key.split('_')[1]: value for key, value in form.items() if key.startswith(\"time\")}\n # print(times)\n return {int(value): times[key] for key, value in lines.items()}" ]
[ "0.6392801", "0.5942996", "0.5747267", "0.5710753", "0.5690151", "0.56838536", "0.5620147", "0.5595128", "0.5472937", "0.54096055", "0.540147", "0.5370474", "0.5283803", "0.5282305", "0.5278821", "0.518117", "0.51735497", "0.51548064", "0.50842416", "0.50651014", "0.50636625", "0.5038836", "0.50145113", "0.5013076", "0.50012517", "0.499474", "0.49903917", "0.49839303", "0.49717614", "0.49260694" ]
0.78980774
0
Obtain device instance from 2fa setup wizard data.
def get_device_from_setup_data(self, request, setup_data):
    return None # pragma: no cover
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_device():\n return context.get_context('device_target')", "def get_device(self):\n self.connect_button = 1\n self.device_name = self.deviceEntry.text()", "async def device_fixture(hass: HomeAssistant, ufp: MockUFPFixture):\n\n await init_entry(hass, ufp, [])\n\n device_registry = dr.async_get(hass)\n\n return list(device_registry.devices.values())[0]", "def device(self):\n return self._vars[0].device", "def get_device(arn=None):\n pass", "def get_device(l):\n if not l.device:\n l.device = find_device()\n setup_device(l.device)\n return l.device", "def device(self):\n hw = self.hw()\n if hw: return hw.device()", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def get_device(self, field):\n return self._devices[field]", "def get_device(self):\n raise NotImplementedError()", "def _get_device(self, dev_id):\n tuya = self.hass.data[DOMAIN][TUYA_DATA]\n return tuya.get_device_by_id(dev_id)", "def device():\n return G.DEVICE", "def device(self):\n return self.broker.device(**{\"DeviceRouteID\": self.DeviceRouteID})", "def getDevice(driver):\n devices = list(listDevices(driver))\n if not devices:\n print('No devices found. Ensure your camera is connected.')\n elif len(devices) != 1:\n print('Too many devices found. Only one camera is supported')\n else:\n return devices[0]", "def get_device(model):\n p = next(model.parameters())\n return p.device", "def load_device():", "def find_device():\n device = usb.core.find(\n idVendor=LuxaforFlag.DEVICE_VENDOR_ID,\n idProduct=LuxaforFlag.DEVICE_PRODUCT_ID\n )\n return device", "def get_device(self):\n return self.parent.get_device()", "def device(self):\n return self.broker.device(**{\"JobDetailID\": self.JobDetailID})", "def test_get_device(self):\n pass", "def test_get_device(self):\n pass", "def get_dev(self, data_file):\n return self.read_data(data_file)", "def finddevice():\n\n return next((device for device in [\"xpu\"] if hasattr(torch, device) and getattr(torch, device).is_available()), None)", "def get_dev(self, data_file):\r\n return self.read_data(data_file)", "def device(self, primary_name=\"\", secondary_name=\"\"):\n logging.debug(\"In device() for FTDDeviceHAPairs class.\")\n primary = Device(fmc=self.fmc)\n primary.get(name=primary_name)\n secondary = Device(fmc=self.fmc)\n secondary.get(name=secondary_name)\n if \"id\" in primary.__dict__:\n self.primary_id = primary.id\n else:\n logging.warning(\n f\"Device {primary_name} not found. Cannot set up device for FTDDeviceHAPairs.\"\n )\n if \"id\" in secondary.__dict__:\n self.secondary_id = secondary.id\n else:\n logging.warning(\n f\"Device {secondary_name} not found. Cannot set up device for FTDDeviceHAPairs.\"\n )", "def device(self):\n return self.broker.device(**{\"VirtualNetworkMemberID\": self.VirtualNetworkMemberID})" ]
[ "0.63822013", "0.62747025", "0.6120741", "0.60991645", "0.6043958", "0.60063523", "0.5944667", "0.5940644", "0.5940644", "0.5940644", "0.5940644", "0.5940644", "0.5913711", "0.58581173", "0.5853114", "0.5792714", "0.57867616", "0.57287025", "0.56949663", "0.5657453", "0.56494844", "0.5648378", "0.563577", "0.56317425", "0.56317425", "0.55904716", "0.558795", "0.5563322", "0.5552104", "0.5543984" ]
0.6423652
0
Return the authentication token form class.
def get_token_form_class(self):
    from two_factor.forms import AuthenticationTokenForm
    return AuthenticationTokenForm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_form(self, form_class=None):\n if form_class is None:\n form_class = self.get_form_class()\n return form_class(\n token=self.request.session.get('token', False),\n aiid=self.kwargs['aiid'],\n **self.get_form_kwargs()\n )", "def get_form_class(self):\n return self.form_class", "def UserToken(self) -> object:", "def cls_token(self):\r\n if self._cls_token is None:\r\n logger.error(\"Using cls_token, but it is not set yet.\")\r\n return self._cls_token", "def get_form(self, form_class=None):\n\t\tif form_class is None:\n\t\t\tform_class = self.get_form_class()\n\t\treturn form_class(self.request.user, **self.get_form_kwargs())", "def get_form_class(self, form_key):\n return self.get_form_classes()[form_key]", "def auth_token(self):", "def _get_form_token(self, req):\n if req.incookie.has_key('trac_form_token'):\n return req.incookie['trac_form_token'].value\n else:\n req.outcookie['trac_form_token'] = hex_entropy(24)\n req.outcookie['trac_form_token']['path'] = req.base_path or '/'\n if self.env.secure_cookies:\n req.outcookie['trac_form_token']['secure'] = True\n if sys.version_info >= (2, 6):\n req.outcookie['trac_form_token']['httponly'] = True\n return req.outcookie['trac_form_token'].value", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def __repr__(self) -> str:\n\n return \"<Twilio.Oauth.V1.TokenInstance>\"", "def get_token(self):\n if not self.is_valid():\n logger.warn(\"TokenWall form data is not valid.\")\n return None\n \n tt = self.cleaned_data['token']\n logger.debug(\"Looking for token '%s'\"%tt)\n return Token.objects.get(value=tt)", "def get_form_class():\n return RazorPaymentForm", "def get_form_class(self, request):\n\t\treturn RegistrationForm", "def __get_authentication_token(self):\n cache = load_json(self._tokenPath)\n return cache[\"authentication_token\"]", "def token(self):\n if not self._token:\n self._token = self.authenicate().token\n\n return self._token", "def getToken(self):\n tokens=self._CFG.CRED_TYPE.split(\":\")\n CredType=tokens[0].lower()\n if len(tokens) > 1 :\n CredArgs=self._CFG.CRED_TYPE[len(CredType)+1:]\n else :\n CredArgs = \"\"\n # acquire token, if required \n if CredType == \"pag\" :\n pass\n elif CredType == \"krb5_keytab\" :\n KRB5CCNAME=self._krb5DAO.getTicketbyKeytab(CredArgs, self._CFG.KRB5_PRINC,self._CFG.KRB5_REALM)\n self._pagDAO.obtainTokenFromTicket(KRB5CCNAME, self._CFG.KRB5_REALM, self._CFG.CELL_NAME)\n self._krb5DAO.destroyTicket(KRB5CCNAME)\n elif CredType == \"krb5_password\" :\n if CredArgs != \"\" :\n passwd=CredArgs\n else :\n passwd = getpass.getpass(\"Password for %s@%s: \" % (self._CFG.KRB5_PRINC,self._CFG.KRB5_REALM))\n KRB5CCNAME=self._krb5DAO.getTicketbyPassword(passwd, self._CFG.KRB5_PRINC,self._CFG.KRB5_REALM)\n self._pagDAO.obtainTokenFromTicket(KRB5CCNAME, self._CFG.KRB5_REALM, self._CFG.CELL_NAME)\n self._krb5DAO.destroyTicket(KRB5CCNAME)\n # get token-info from pag\n AFSID, Cellname = self._pagDAO.getTokeninPAG(cellname=self._CFG.CELL_NAME)\n Cellname=Cellname.lower()\n token=afs.model.Token.Token(AFSID, Cellname)\n return token", "def get_form_class(self, request):\n return RegistrationForm", "def get_form_class(self):\n return get_review_form(review=self.get_object(), user=self.request.user)", "def get_form():\n global form_class\n from fluent_comments import appsettings\n\n if form_class is None:\n if appsettings.FLUENT_COMMENTS_FORM_CLASS:\n from django.utils.module_loading import import_string\n\n form_class = 
import_string(appsettings.FLUENT_COMMENTS_FORM_CLASS)\n else:\n from fluent_comments.forms import FluentCommentForm\n\n form_class = FluentCommentForm\n\n return form_class", "def get_form_class(self):\n login_try_count = self.request.session.get('login_try_count', 0)\n\n # If the form has been submitted...\n if self.request.method == \"POST\":\n self.request.session['login_try_count'] = login_try_count + 1\n\n if login_try_count >= 20:\n return CaptchaAuthenticationForm\n\n return super(LoginView, self).get_form_class()", "def get_form(self, form_class):\n return form_class(**self.get_form_kwargs())", "def create(self, request):\n token = AuthTokenClass().post(request)\n return token", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")", "def auth_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_type\")" ]
[ "0.69181645", "0.6369291", "0.62895435", "0.62263733", "0.6211552", "0.61699533", "0.6150896", "0.60946137", "0.6051999", "0.6045131", "0.60052425", "0.59485215", "0.59444714", "0.5924026", "0.5838784", "0.5834265", "0.58061504", "0.57755065", "0.5772766", "0.57478577", "0.5724854", "0.56984085", "0.56930494", "0.56930494", "0.56930494", "0.56930494", "0.56930494", "0.56930494", "0.56930494", "0.56930494" ]
0.86554384
0
Test that math is outputting TeX code.
def test_math_extension_outputs_tex():
    sample = r":math:`e^{ix} = \cos x + i\sin x`"
    html = get_html_from_rst(sample)
    assert_html_contains(
        html,
        "span",
        attributes={"class": "math"},
        text=r"\(e^{ix} = \cos x + i\sin x\)",
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_simple(self):\r\n math = 'x^2'\r\n obj = preview.LatexRendered(math, tall=True)\r\n self.assertEquals(obj.latex, math)\r\n self.assertEquals(obj.sans_parens, math)\r\n self.assertEquals(obj.tall, True)", "def test_power_simple(self):\r\n self.assertEquals(preview.latex_preview('2^3^4'), '2^{3^{4}}')", "def test_sum_tall(self):\r\n self.assertEquals(\r\n preview.latex_preview('(2+3^2)'),\r\n r'\\left(2+3^{2}\\right)'\r\n )", "def test_custom_decorator_displaytex_no_maths(self):\n self.assertEqual(\n DOM.render(\n DOM.create_element(\n ashley_render_children,\n {\n \"block\": {\n \"key\": \"a215p\",\n \"text\": \"\",\n \"type\": \"atomic\",\n \"data\": {\"tex\": \"a common string\", \"type\": \"TEXBLOCK\"},\n }\n },\n )\n ),\n '<span class=\"ashley-latex-display\">a common string</span>',\n )", "def test_function_tall(self):\r\n self.assertEquals(\r\n preview.latex_preview('f(3^2)', functions=['f']),\r\n r'\\text{f}\\left(3^{2}\\right)'\r\n )", "def test_custom_decorator_displaytex_ok(self):\n\n tex = \"\\left.\\frac{x^3}{3}\\right|_0^1\" # noqa: W605\n\n self.assertEqual(\n DOM.render(\n DOM.create_element(\n ashley_render_children,\n {\n \"block\": {\n \"key\": \"a215p\",\n \"text\": \"\",\n \"type\": \"atomic\",\n \"data\": {\"tex\": tex, \"type\": \"TEXBLOCK\"},\n }\n },\n )\n ),\n f'<span class=\"ashley-latex-display\">{tex}</span>',\n )", "def test_function_sqrt(self):\r\n self.assertEquals(preview.latex_preview('sqrt(3)'), r'\\sqrt{3}')", "def test_product_mult_only(self):\r\n self.assertEquals(preview.latex_preview('2*3'), r'2\\cdot 3')", "def test_power_parens(self):\r\n self.assertEquals(preview.latex_preview('2^3^(4+5)'), '2^{3^{4+5}}')", "def test_number_simple(self):\r\n self.assertEquals(preview.latex_preview('3.1415'), '3.1415')", "def test_complicated(self):\r\n self.assertEquals(\r\n preview.latex_preview('11*f(x)+x^2*(3||4)/sqrt(pi)'),\r\n r'11\\cdot \\text{f}(x)+\\frac{x^{2}\\cdot (3\\|4)}{\\sqrt{\\pi}}'\r\n )\r\n\r\n self.assertEquals(\r\n preview.latex_preview('log10(1+3/4/Cos(x^2)*(x+1))',\r\n case_sensitive=True),\r\n (r'\\log_{10}\\left(1+\\frac{3}{4\\cdot \\text{Cos}\\left(x^{2}\\right)}'\r\n r'\\cdot (x+1)\\right)')\r\n )", "def test_greek(self):\r\n self.assertEquals(preview.latex_preview('pi'), r'\\pi')", "def test_sum(self):\r\n # Use 'x' as the first term (instead of, say, '1'), so it can't be\r\n # interpreted as a negative number.\r\n self.assertEquals(\r\n preview.latex_preview('-x+2-3+4', variables=['x']),\r\n '-x+2-3+4'\r\n )", "def test01_math_operators(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n assert (number(20) + number(10)) == number(30)\n assert (number(20) + 10 ) == number(30)\n assert (number(20) - number(10)) == number(10)\n assert (number(20) - 10 ) == number(10)\n assert (number(20) / number(10)) == number(2)\n assert (number(20) / 10 ) == number(2)\n assert (number(20) * number(10)) == number(200)\n assert (number(20) * 10 ) == number(200)\n assert (number(20) % 10 ) == number(0)\n assert (number(20) % number(10)) == number(0)\n assert (number(5) & number(14)) == number(4)\n assert (number(5) | number(14)) == number(15)\n assert (number(5) ^ number(14)) == number(11)\n assert (number(5) << 2) == number(20)\n assert (number(20) >> 2) == number(5)", "def test_custom_decorator_displaytex_no_malformed(self):\n self.assertEqual(\n DOM.render(\n DOM.create_element(\n ashley_render_children,\n {\n \"block\": {\n \"key\": \"a215p\",\n \"text\": \"\",\n \"type\": \"atomic\",\n \"data\": {\"type\": \"TEXBLOCK\"},\n }\n },\n )\n 
),\n '<span class=\"ashley-latex-display\"></span>',\n )", "def test_function_simple(self):\r\n self.assertEquals(\r\n preview.latex_preview('f(3)', functions=['f']),\r\n r'\\text{f}(3)'\r\n )", "def test_transE_display():\n testing_function('transe', display=True)", "def test_trigonometry(doctest):", "def test_mathjax_content(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS, PANDOC_ARGS=PANDOC_ARGS\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"mathjax_content.md\")\n output, metadata = pandoc_reader.read(source_path)\n\n self.assertEqual(\n (\n '<p><span class=\"math display\">\\\\[\\ne^{i\\\\theta} = '\n \"\\\\cos\\\\theta + i \\\\sin\\\\theta.\\n\\\\]</span></p>\\n\"\n ),\n output,\n )\n\n self.assertEqual(\"MathJax Content\", str(metadata[\"title\"]))\n self.assertEqual(\"My Author\", str(metadata[\"author\"]))\n self.assertEqual(\"2020-10-16 00:00:00\", str(metadata[\"date\"]))", "def test_parallel(self):\r\n self.assertEquals(preview.latex_preview('2||3'), r'2\\|3')", "def evaluateText(compiled_expression):", "def test_product_single_frac(self):\r\n self.assertEquals(\r\n preview.latex_preview('(2+3)/(4+5)'),\r\n r'\\frac{2+3}{4+5}'\r\n )", "def test_number_sci_notation(self):\r\n self.assertEquals(\r\n preview.latex_preview('6.0221413E+23'),\r\n r'6.0221413\\!\\times\\!10^{+23}'\r\n )\r\n self.assertEquals(\r\n preview.latex_preview('-6.0221413E+23'),\r\n r'-6.0221413\\!\\times\\!10^{+23}'\r\n )", "def test_failing_rendering(self):\n with self.assertRaisesMessage(\n LatexConversionException, \"Couldn't compile LaTeX document\"\n ):\n render_latex_to_image(r\"invalid $ LaTeX\")", "def test_variable_simple(self):\r\n self.assertEquals(preview.latex_preview('x', variables=['x']), 'x')", "def test_function_log10(self):\r\n self.assertEquals(preview.latex_preview('log10(3)'), r'\\log_{10}(3)')", "def test_product_keep_going(self):\r\n self.assertEquals(\r\n preview.latex_preview('2/3*4/5*6'),\r\n r'\\frac{2}{3}\\cdot \\frac{4}{5}\\cdot 6'\r\n )", "def test_expression_sanitizer(self):\n\n self.assertFalse(_is_math_expr_safe('INSERT INTO students VALUES (?,?)'))\n self.assertFalse(_is_math_expr_safe('import math'))\n self.assertFalse(_is_math_expr_safe('complex'))\n self.assertFalse(_is_math_expr_safe('__import__(\"os\").system(\"clear\")'))\n self.assertFalse(_is_math_expr_safe('eval(\"()._\" + \"_class_\" + \"_._\" +'\n ' \"_bases_\" + \"_[0]\")'))\n self.assertFalse(_is_math_expr_safe('2***2'))\n self.assertFalse(_is_math_expr_safe('avdfd*3'))\n self.assertFalse(_is_math_expr_safe('Cos(1+2)'))\n self.assertFalse(_is_math_expr_safe('hello'))\n self.assertFalse(_is_math_expr_safe('hello_world'))\n self.assertFalse(_is_math_expr_safe('1_2'))\n self.assertFalse(_is_math_expr_safe('2+-2'))\n self.assertFalse(_is_math_expr_safe('print(1.0)'))\n self.assertFalse(_is_math_expr_safe('1.1.1.1'))\n self.assertFalse(_is_math_expr_safe('abc.1'))\n\n self.assertTrue(_is_math_expr_safe('1+1*2*3.2+8*cos(1)**2'))\n self.assertTrue(_is_math_expr_safe('pi*2'))\n self.assertTrue(_is_math_expr_safe('-P1*cos(P2)'))\n self.assertTrue(_is_math_expr_safe('-P1*P2*P3'))\n self.assertTrue(_is_math_expr_safe('-P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1'))\n self.assertTrue(_is_math_expr_safe('-1.*P1*P2'))\n self.assertTrue(_is_math_expr_safe('-(P1)'))", "def render_to_html(raw):\r\n if not raw:\r\n return ''\r\n\r\n reg = re.finditer(r\"(^|(?<!\\\\))\\$(([^\\$]|\\\\\\$)*[^\\\\])\\$\", raw)\r\n\r\n # generate_html.js must be passed all 
the math text ask command line args. \r\n # The dollar signs get stripped in advanced because the shell will interpret \r\n # those as variables. The program will return each math object separated by\r\n # newlines. KaTeX doesn't understand actual dollar signs if they are\r\n # followed by another character (like x=\\$2), so add a space after those\r\n results = [(mat.start(2), \r\n mat.end(2), \r\n mat.group(2).strip().replace('\\\\$', '\\\\$ ')\r\n ) for mat in reg if mat]\r\n\r\n if results == []:\r\n return raw\r\n\r\n math_start_positions, math_end_positions, raw_math = zip(*results)\r\n\r\n # prepare the shell to get the LaTeX via a call to Node.js\r\n # the shell is not explicitly called so there's no danger of shell injection\r\n # The command `node` must be on the system path\r\n env = dict(os.environ)\r\n env['LC_ALL'] = 'en_US.UTF-8' # accept unicode characters as output\r\n try:\r\n p = subprocess.Popen([\r\n 'node', \r\n os.path.join(os.path.dirname(__file__), 'generate_html.js')] \r\n + list(raw_math),\r\n env=env, \r\n stdout=subprocess.PIPE, \r\n stderr=subprocess.PIPE)\r\n except (WindowsError, OSError):\r\n raise NodeError(\"Node.js is not on your system path.\")\r\n else:\r\n node_output, node_error = p.communicate()\r\n \r\n if node_error:\r\n raise NodeError(node_error)\r\n\r\n if six.PY3:\r\n node_output = node_output.decode('UTF-8')\r\n \r\n html_bits = node_output.strip('\\n').split('\\n')\r\n\r\n final = []\r\n loc = 0\r\n for index, code in enumerate(html_bits):\r\n # measurements are one off from the index of the math to eliminate the\r\n # dollar sign specifiers\r\n # KaTeX will handle HTML encoding for the math text, but regular text\r\n # must have HTML stripped out for security reasons.\r\n final.append(cgi.escape(raw[loc:math_start_positions[index]]\r\n .strip('$').replace('\\\\$', '$')))\r\n final.append(smart_unicode(code))\r\n loc = math_end_positions[index] + 1\r\n\r\n final.append(cgi.escape(raw[loc:].replace('\\\\$', '$')))\r\n return u''.join(final)", "def test_base(self):\n output_filename = get_resource_filename(\"rendered_simple_latex.svg\")\n with open(output_filename, \"r\", encoding=\"utf-8\") as expected_output:\n self.assertSvgEquals(\n render_latex_to_image(r\"I = \\int \\rho R^{2} dV\"), expected_output.read()\n )" ]
[ "0.7507089", "0.71978104", "0.7131753", "0.6940951", "0.69192207", "0.6900085", "0.6803327", "0.6689187", "0.66572785", "0.66396725", "0.65305763", "0.64991444", "0.6460122", "0.6416397", "0.64075565", "0.637307", "0.63704425", "0.6288456", "0.62506396", "0.62028116", "0.6167357", "0.6147835", "0.6139216", "0.60917246", "0.6087151", "0.6076427", "0.60659117", "0.6052566", "0.60278684", "0.6026345" ]
0.7372334
1
Test SoundCloud iframe tag generation
def test_soundcloud_iframe():
    sample = ".. soundcloud:: SID\n :height: 400\n :width: 600"
    html = get_html_from_rst(sample)
    assert_html_contains(
        html,
        "iframe",
        attributes={
            "src": (
                "https://w.soundcloud.com/player/"
                "?url=http://api.soundcloud.com/"
                "tracks/SID"
            ),
            "height": "400",
            "width": "600",
        },
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_youtube_iframe():\n\n sample = \".. youtube:: YID\\n :height: 400\\n :width: 600\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\n \"https://www.youtube-nocookie.com\"\n \"/embed/YID?rel=0&\"\n \"wmode=transparent\"\n ),\n \"height\": \"400\",\n \"width\": \"600\",\n \"frameborder\": \"0\",\n \"allowfullscreen\": \"\",\n \"allow\": \"encrypted-media\",\n },\n )", "def test_embed_ok(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_URL) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"youtube_video\")\n self.find(\"<object width\")\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', NOTAGS_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_EMBED) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"<object width\")", "def test_vimeo(disable_vimeo_api_query):\n\n sample = \".. vimeo:: VID\\n :height: 400\\n :width: 600\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\"https://player.vimeo.com/\" \"video/VID\"),\n \"height\": \"400\",\n \"width\": \"600\",\n },\n )", "def test_embed(self):\r\n self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.embed_dirty), self.embed_clean)", "def _handle_iframe_tag(self, attrs):\n\n def _get_source_from_attrs(attrs):\n for key, value in attrs:\n if key == \"src\":\n return value\n\n url = _get_source_from_attrs(attrs)\n video_url = \"\"\n video_id = url.split(\"?\")[0].rsplit(\"/\", 1)[1]\n if \"vimeo\" in url:\n # We need to get the video id from the url attribute and create a new URL.\n video_url = f\"https://vimeo.com/{video_id}\"\n elif \"youtube\" in url:\n # Not sure if youtube has been removed or not, but adding youtube videos seems impossible\n video_url = f\"https://youtu.be/{video_id}\"\n else:\n # Unclear what providers are supported or notself.\n # Full list https://github.com/wagtail/wagtail/blob/main/wagtail/embeds/oembed_providers.py\n return\n\n return f\"<embed embedtype='media' url='{video_url}'/>\"", "def test_iframe(self):\r\n self.assertRegexpMatches(OpenEndedChild.sanitize_html(self.iframe_dirty), self.iframe_clean)", "def iframe(src, width=400, height=300):\n return TAG.iframe(_src=src, _width=width, _height=height)", "def test_691(self):\r\n test_id = 691\r\n sel = self.selenium\r\n testpage = \"/pagedemo/nytimes_youtube_embed\"\r\n subtextfile = os.path.join(testvars.MSTestVariables[\"DataDirectory\"],\"OctopusGarden.txt\")\r\n sel.open(testpage)\r\n sel.wait_for_page_to_load(testvars.timeout)\r\n sel.window_maximize()\r\n mslib.wait_for_element_present(self,sel,testvars.WebsiteUI[\"SubtitleMe_menu\"])\r\n time.sleep(5)\r\n sel.get_eval('this.browserbot.getUserWindow().unisubs.widget.Widget.getAllWidgets()[0].openMenu()')\r\n widget.open_starter_dialog(self,sel)\r\n widget.starter_dialog_fork(self,sel,to_lang='hr')\r\n widget.transcribe_video(self, sel, subtextfile)\r\n widget.sync_video(self, sel, subtextfile)\r\n widget.site_login_from_widget_link(self,sel)\r\n #verify subs still present\r\n print \"verifying subtitles are still present\"\r\n sel.select_window(\"null\")\r\n mslib.wait_for_element_present(self,sel,\"css=.unisubs-titlesList\")\r\n widget.verify_sub_text(self,sel,subtextfile)\r\n if sel.is_element_present(\"css=.unisubs-modal-login\"): #Login\r\n sel.click(\"css=.unisubs-log\")\r\n 
widget.site_login_auth(self,sel)\r\n sel.select_window(\"null\")\r\n widget.submit_sub_edits(self,sel,offsite=True)", "def test_youtube_iframe_start_at():\n\n sample = \".. youtube:: YID\\n :height: 400\\n :width: 600\\n :start_at: 60\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\n \"https://www.youtube-nocookie.com\"\n \"/embed/YID?rel=0&\"\n \"wmode=transparent&start=60\"\n ),\n \"height\": \"400\",\n \"width\": \"600\",\n \"frameborder\": \"0\",\n \"allowfullscreen\": \"\",\n \"allow\": \"encrypted-media\",\n },\n )", "def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n if self.url:\n iframe_html = '<iframe src=\"{}\" frameborder=\"0\" title=\"{}\" allowfullscreen></iframe>'\n self.html = iframe_html.format(\n self.get_embed_url(),\n self.title\n )\n return super().save(force_insert, force_update, using, update_fields)", "def preview(self, obj):\n return format_html(\n '<iframe width=\"640\" height=\"360\" src=\"{}\"></iframe>',\n 'https://sms.cam.ac.uk/media/{}/embed'.format(obj.id)\n )", "def _render_no_tracking(self, video_id):\n you_tube_url = (\n 'https://www.youtube.com/embed/%s'\n '?feature=player_embedded&amp;rel=0') % video_id\n iframe = cElementTree.XML(\"\"\"\n<div class=\"gcb-video-container\">\n <iframe class=\"youtube-player\" title=\"YouTube Video Player\"\n type=\"text/html\" frameborder=\"0\" allowfullscreen=\"allowfullscreen\">\n </iframe>\n</div>\"\"\")\n iframe[0].set('src', you_tube_url)\n return iframe", "def embed():", "def as_iframe(self, html_data):\n\n srcdoc = html_data.replace('\"', \"'\")\n return ('<iframe id=\"{div_id}\", srcdoc=\"{srcdoc}\" style=\"width: {width}; '\n 'height: {height};\"></iframe>'.format(\n div_id=self.div_id,\n srcdoc=srcdoc,\n width=self.width,\n height=self.height))", "def display_iframe_url(target, **kwargs):\n\n txt = iframe_url(target, **kwargs)\n display(HTML(txt))", "def test_public_component_preview_html(self):\r\n self.validate_preview_html(self.video, 'student_view',\r\n can_edit=True, can_reorder=True, can_add=False)", "def mediaplayer(src,width=400,height=250):\n return XML('<embed allowfullscreen=\"true\" allowscriptaccess=\"always\" flashvars=\"height=%(height)s&width=%(width)s&file=%(src)s\" height=\"%(height)spx\" src=\"%(url)s\" width=\"%(width)spx\"></embed>'%dict(url=URL('static','plugin_wiki/mediaplayer.swf'),src=src,width=width,height=height))", "def test_library_page_preview_html(self):\n # Add some content to library.\n self._add_simple_content_block()\n self.validate_preview_html(self.library, self.container_view, can_reorder=False, can_move=False)", "def video_stream_demo():\n return render_template('video_stream_demo.html')", "def test_690(self):\r\n test_id = 690\r\n sel = self.selenium\r\n testpage = \"/pagedemo/nytimes_youtube_embed\"\r\n subtextfile = os.path.join(testvars.MSTestVariables[\"DataDirectory\"],\"OctopusGarden.txt\")\r\n sel.open(testpage)\r\n sel.wait_for_page_to_load(testvars.timeout)\r\n sel.window_maximize()\r\n mslib.wait_for_element_present(self,sel,testvars.WebsiteUI[\"SubtitleMe_menu\"])\r\n sel.get_eval('this.browserbot.getUserWindow().unisubs.widget.Widget.getAllWidgets()[0].openMenu()')\r\n time.sleep(5)\r\n widget.open_starter_dialog(self,sel)\r\n widget.starter_dialog_translate_from_orig(self,sel,to_lang='pl')\r\n widget.edit_translation(self,sel,subtextfile)", "def test_video_embed(\n logged_in_client, settings\n): # pylint: disable=redefined-outer-name\n client, user = 
logged_in_client\n settings.GA_DIMENSION_CAMERA = \"camera1\"\n settings.GA_TRACKING_ID = \"UA-xyz-1\"\n settings.ENVIRONMENT = \"test\"\n settings.VERSION = \"1.2.3\"\n settings.ENABLE_VIDEO_PERMISSIONS = False\n settings.USE_WEBPACK_DEV_SERVER = False\n\n videofileHLS = VideoFileFactory(\n hls=True,\n video__collection__owner=user,\n video__multiangle=True,\n video__status=\"Complete\",\n )\n video = videofileHLS.video\n url = reverse(\"video-embed\", kwargs={\"video_key\": video.hexkey})\n response = client.get(url)\n js_settings_json = json.loads(response.context_data[\"js_settings_json\"])\n assert js_settings_json == {\n \"video\": VideoSerializer(video).data,\n \"gaTrackingID\": settings.GA_TRACKING_ID,\n \"release_version\": settings.VERSION,\n \"environment\": settings.ENVIRONMENT,\n \"sentry_dsn\": \"\",\n \"ga_dimension_camera\": settings.GA_DIMENSION_CAMERA,\n \"public_path\": \"/static/bundles/\",\n \"cloudfront_base_url\": settings.VIDEO_CLOUDFRONT_BASE_URL,\n \"user\": user.username,\n \"email\": user.email,\n \"is_app_admin\": False,\n \"support_email_address\": settings.EMAIL_SUPPORT,\n \"FEATURES\": {\n \"ENABLE_VIDEO_PERMISSIONS\": False,\n \"VIDEOJS_ANNOTATIONS\": False,\n },\n }", "def test_construct_frame_tag(attributes):\n frame_ = Frame(**attributes)\n assert frame_.construct() == frame.render(attributes)", "def testPlayback(self):\n \n pass", "def test_si_sample_html_partial(self):\n sample = load_sample('si-game.sample.html')\n doc = Document('http://sportsillustrated.cnn.com/baseball/mlb/gameflash/2012/04/16/40630_preview.html',\n sample)\n res = doc.get_clean_article()\n self.assertEqual('<div><div class=\"', res[0:17])", "def get_embed_url(self):\n if not self._oembed:\n return ''\n \n if not self.original_url:\n return ''\n \n return 'https://w.soundcloud.com/player/?url=%s' % (self.original_url)", "def get_iframes(site, save_dir, **kwargs):\n driver = kwargs['driver']\n iframe_elements = driver.find_elements_by_tag_name('iframe')\n ad_contents = []\n for i, e in enumerate(iframe_elements):\n driver.switch_to.frame(e)\n el = driver.find_element_by_xpath('html/body')\n data = el.get_attribute('innerHTML')\n with open(save_dir + '/' + site.split('/')[2] + '-iframes.txt', 'a') as f:\n f.write('<potential-ad-iframe>\\n\\n')\n f.write(data)\n f.write('</potential-ad-iframe>\\n\\n')\n ad_contents.append(data)\n driver.switch_to.default_content()\n #sock = clientsocket()\n #table_name = 'iframes'\n #manager_params = kwargs['manager_params']\n #print manager_params['aggregator_address']\n #sock.connect(manager_params['aggregator_address'][0], manager_params['aggregator_address'][1])\n #query = (\"CREATE TABLE IF NOT EXISTS %s (\"\n # \"top_url TEXT, link TEXT);\" % table_name)\n #sock.send((query, ()))\n #current_url = driver.current_url\n #for content in ad_contents:\n # query = (\"INSERT INTO %s (top_url, link) \"\n # \"VALUES (?, ?)\" % table_name)\n # sock.send((query, (current_url, content)))\n # sock.close()", "def create_frame(session):\n def create_frame():\n append = \"\"\"\n var frame = document.createElement('iframe');\n document.body.appendChild(frame);\n return frame;\n \"\"\"\n response = session.execute_script(append)\n\n return create_frame", "def ShowHTML(pTitle, href):\n\n oc = ObjectContainer(title2=pTitle)\n\n href = href if href else ''\n html = HTML.ElementFromURL(BASE_URL + href)\n\n if '/pornstars-click/' in href:\n href = '/profiles/' + href.rsplit('/', 1)[1]\n url = BASE_URL + href\n\n xvideosBest = \"thumb-block \"\n if 
(len(html.xpath('//div[@class=\"thumbBlock\"]')) > 0):\n xvideosBest = \"thumbBlock\"\n\n if (len(html.xpath('//title//text()')) > 0):\n if 'Pornstar page' in html.xpath('//title//text()')[0]:\n url = url + '/pornstar_videos/0/0'\n html = HTML.ElementFromURL(url)\n elif 'Channel page' in html.xpath('//title//text()')[0]:\n url = url + '/uploads/0/0'\n html = HTML.ElementFromURL(url)\n\n for video in html.xpath('//div[@class=\"%s\"]' %xvideosBest):\n try:\n if '/profiles/' not in url and '/pornstars-click' not in url:\n if (len(video.xpath('./div/div/a//@href')) == 0):\n oc.add(VideoClipObject(\n url=BASE_URL + video.xpath('./p/a//@href')[0],\n title=video.xpath('./p/a//text()')[0],\n thumb=THUMB_REG.search(video.xpath('./div/div/script//text()')[0]).group(1)\n ))\n else:\n vhref = video.xpath('./p/a//@href')[0]\n vtitle = video.xpath('./p/a//text()')[0]\n oc.add(DirectoryObject(\n key=Callback(ShowHTML, href=vhref, pTitle=vtitle),\n title=vtitle, thumb=THUMB_REG.search(video.xpath('./div/div/a/script//text()')[0]).group(1)\n ))\n else:\n oc.add(VideoClipObject(\n url=BASE_URL + video.xpath('./div/p/a//@href')[0],\n title=video.xpath('./div/p/a//text()')[0],\n thumb=video.xpath('./div/div/a/img//@src')[0]\n ))\n except:\n Log.Warn('nothing')\n\n # setup nextURL\n try:\n nextURL = None\n if html.xpath('//li/a[@data-page][text()=\"Next\"]'):\n next_page = int(html.xpath('//li/a[text()=\"Next\"]/@data-page')[0])\n nextURL = '/{}/{}'.format(url.split('/', 3)[3].rsplit('/', 1)[0], next_page)\n elif html.xpath('//li/a[@class=\"no-page\"][text()=\"Next\"]'):\n nextURL = html.xpath('//li/a[@class=\"no-page\"][text()=\"Next\"]/@href')[0]\n elif html.xpath('//div[contains(@class,\"pagination\")]//a[@class=\"active\"]/../following-sibling::li/a/@href'):\n nextURL = html.xpath(\"//div[contains(@class,'pagination')]/ul/li/a[@class='active']/../following-sibling::li/a/@href\")[0]\n\n if nextURL:\n next_page_num = nextURL.split('=')[-1] if '&' in nextURL else nextURL.split('/')[-1]\n next_page_num = next_page_num if next_page_num else nextURL.split('/')[-2]\n #Log(u\"next page number = '{}'\".format(next_page_num))\n oc.add(NextPageObject(\n key=Callback(ShowHTML, href=nextURL, pTitle='Page ' + next_page_num),\n title=\"More ...\"))\n except:\n Log.Exception(\"Cannot find next page\")\n # it will loop through and return the values for all items in the page\n return oc", "def test_689(self):\r\n test_id = 689\r\n sel = self.selenium\r\n testpage = \"/pagedemo/nytimes_youtube_embed\"\r\n subtextfile = os.path.join(testvars.MSTestVariables[\"DataDirectory\"],\"OctopusGarden.txt\")\r\n sel.open(testpage)\r\n sel.wait_for_page_to_load(testvars.timeout)\r\n sel.window_maximize()\r\n mslib.wait_for_element_present(self,sel,testvars.WebsiteUI[\"SubtitleMe_menu\"])\r\n time.sleep(5)\r\n sel.get_eval('this.browserbot.getUserWindow().unisubs.widget.Widget.getAllWidgets()[0].openMenu()')\r\n widget.starter_dialog_edit_orig(self,sel)\r\n widget.goto_step(self,sel,step=\"2\")\r\n widget.edit_text(self,sel,subtextfile)\r\n #Login\r\n\r\n if sel.is_element_present(\"css=div div.unisubs-needLogin a\"):\r\n sel.click(\"css=div div.unisubs-needLogin a\")\r\n mslib.wait_for_element_present(self,sel,\"css=.unisubs-modal-login\")\r\n sel.click(\"css=.unisubs-log\")\r\n widget.site_login_auth(self,sel)\r\n sel.select_window(\"null\")\r\n widget.submit_sub_edits(self,sel,offsite=True)", "def use_chart_frame(self, chart_id: str) -> Generator:\n frame = 
self.driver.find_element_by_css_selector(f'iframe[src*=\"//datawrapper.dwcdn.net/{chart_id}/\"]')\n self.driver.switch_to.frame(frame)\n try:\n yield frame\n finally:\n self.driver.switch_to.default_content()" ]
[ "0.65098196", "0.64758885", "0.6079338", "0.60508907", "0.60338223", "0.58340585", "0.5785092", "0.577833", "0.5698256", "0.56800616", "0.56670403", "0.56104773", "0.5603128", "0.5570797", "0.5514298", "0.5487627", "0.5458812", "0.5428872", "0.54101866", "0.5364591", "0.53508866", "0.5347059", "0.5314895", "0.5276496", "0.52630067", "0.52504194", "0.5238708", "0.5238319", "0.5221453", "0.5192366" ]
0.80010784
0
Test Youtube iframe tag generation
def test_youtube_iframe(): sample = ".. youtube:: YID\n :height: 400\n :width: 600" html = get_html_from_rst(sample) assert_html_contains( html, "iframe", attributes={ "src": ( "https://www.youtube-nocookie.com" "/embed/YID?rel=0&" "wmode=transparent" ), "height": "400", "width": "600", "frameborder": "0", "allowfullscreen": "", "allow": "encrypted-media", }, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bb_youtube(hit):\n video = hit.group(1)\n return '<object width=\"425\" height=\"350\"><param name=\"movie\" value=\"http://www.youtube.com/v/%s\"></param><param name=\"wmode\" value=\"transparent\"></param><embed src=\"http://www.youtube.com/v/%s\" type=\"application/x-shockwave-flash\" wmode=\"transparent\" width=\"425\" height=\"350\"></embed></object>' % (video, video)", "def test_embed_ok(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_URL) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"youtube_video\")\n self.find(\"<object width\")\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', NOTAGS_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_EMBED) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"<object width\")", "def youtube(code,width=400,height=250):\n\n return XML(\"\"\"<object width=\"%(width)s\" height=\"%(height)s\"><param name=\"movie\" value=\"http://www.youtube.com/v/%(code)s&hl=en_US&fs=1&\"></param><param name=\"allowFullScreen\" value=\"true\"></param><param name=\"allowscriptaccess\" value=\"always\"></param><embed src=\"http://www.youtube.com/v/%(code)s&hl=en_US&fs=1&\" type=\"application/x-shockwave-flash\" allowscriptaccess=\"always\" allowfullscreen=\"true\" width=\"%(width)s\" height=\"%(height)s\"></embed></object>\"\"\" % dict(code=code, width=width, height=height))", "def test_youtube_iframe_start_at():\n\n sample = \".. youtube:: YID\\n :height: 400\\n :width: 600\\n :start_at: 60\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\n \"https://www.youtube-nocookie.com\"\n \"/embed/YID?rel=0&\"\n \"wmode=transparent&start=60\"\n ),\n \"height\": \"400\",\n \"width\": \"600\",\n \"frameborder\": \"0\",\n \"allowfullscreen\": \"\",\n \"allow\": \"encrypted-media\",\n },\n )", "def _render_no_tracking(self, video_id):\n you_tube_url = (\n 'https://www.youtube.com/embed/%s'\n '?feature=player_embedded&amp;rel=0') % video_id\n iframe = cElementTree.XML(\"\"\"\n<div class=\"gcb-video-container\">\n <iframe class=\"youtube-player\" title=\"YouTube Video Player\"\n type=\"text/html\" frameborder=\"0\" allowfullscreen=\"allowfullscreen\">\n </iframe>\n</div>\"\"\")\n iframe[0].set('src', you_tube_url)\n return iframe", "def test_691(self):\r\n test_id = 691\r\n sel = self.selenium\r\n testpage = \"/pagedemo/nytimes_youtube_embed\"\r\n subtextfile = os.path.join(testvars.MSTestVariables[\"DataDirectory\"],\"OctopusGarden.txt\")\r\n sel.open(testpage)\r\n sel.wait_for_page_to_load(testvars.timeout)\r\n sel.window_maximize()\r\n mslib.wait_for_element_present(self,sel,testvars.WebsiteUI[\"SubtitleMe_menu\"])\r\n time.sleep(5)\r\n sel.get_eval('this.browserbot.getUserWindow().unisubs.widget.Widget.getAllWidgets()[0].openMenu()')\r\n widget.open_starter_dialog(self,sel)\r\n widget.starter_dialog_fork(self,sel,to_lang='hr')\r\n widget.transcribe_video(self, sel, subtextfile)\r\n widget.sync_video(self, sel, subtextfile)\r\n widget.site_login_from_widget_link(self,sel)\r\n #verify subs still present\r\n print \"verifying subtitles are still present\"\r\n sel.select_window(\"null\")\r\n mslib.wait_for_element_present(self,sel,\"css=.unisubs-titlesList\")\r\n widget.verify_sub_text(self,sel,subtextfile)\r\n if sel.is_element_present(\"css=.unisubs-modal-login\"): #Login\r\n sel.click(\"css=.unisubs-log\")\r\n 
widget.site_login_auth(self,sel)\r\n sel.select_window(\"null\")\r\n widget.submit_sub_edits(self,sel,offsite=True)", "async def youtube(self, ctx, *, query):\r\n\r\n utub = 'https://youtube.com/results?search_query='\r\n url = utub + query.replace(\" \", \"+\")\r\n r = requests.get(url).text\r\n num1 = r.find('{\"videoRenderer')\r\n num2 = r.find('{\"videoRenderer', num1+1)\r\n # print (num1)\r\n # print (num2)\r\n videoRenderer = (json.loads(r[num1:num2-1])[\"videoRenderer\"])\r\n vid = (videoRenderer[\"videoId\"])\r\n page = (\"https://youtube.com/watch?v=\" + vid)\r\n await ctx.send(page)", "def test_video_constructor(self):\r\n context = self.item_descriptor.render('student_view').content\r\n\r\n sources = {\r\n 'main': u'example.mp4',\r\n u'mp4': u'example.mp4',\r\n u'webm': u'example.webm',\r\n }\r\n\r\n expected_context = {\r\n 'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',\r\n 'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),\r\n 'data_dir': getattr(self, 'data_dir', None),\r\n 'display_name': u'A Name',\r\n 'end': 3610.0,\r\n 'id': self.item_descriptor.location.html_id(),\r\n 'show_captions': 'true',\r\n 'handout': None,\r\n 'sources': sources,\r\n 'speed': 'null',\r\n 'general_speed': 1.0,\r\n 'start': 3603.0,\r\n 'saved_video_position': 0.0,\r\n 'sub': u'a_sub_file.srt.sjson',\r\n 'track': None,\r\n 'youtube_streams': create_youtube_string(self.item_descriptor),\r\n 'yt_test_timeout': 1500,\r\n 'yt_api_url': 'www.youtube.com/iframe_api',\r\n 'yt_test_url': 'gdata.youtube.com/feeds/api/videos/',\r\n 'transcript_download_format': 'srt',\r\n 'transcript_download_formats_list': [{'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'}],\r\n 'transcript_language': u'en',\r\n 'transcript_languages': json.dumps(OrderedDict({\"en\": \"English\", \"uk\": u\"Українська\"})),\r\n 'transcript_translation_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'translation'\r\n ).rstrip('/?'),\r\n 'transcript_available_translations_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'available_translations'\r\n ).rstrip('/?'),\r\n }\r\n\r\n self.assertEqual(\r\n context,\r\n self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),\r\n )", "def test_youtube(self):\n\n test = Unfurl()\n test.add_to_queue(\n data_type='url', key=None,\n value='https://www.youtube.com/watch?v=LnhSTZgzKuY&list=PLlFGZ98XmfGfV6RAY9fQSeRfyIuhVGSdm&index=2&t=42s')\n test.parse_queue()\n\n # test number of nodes\n self.assertEqual(len(test.nodes.keys()), 16)\n self.assertEqual(test.total_nodes, 16)\n\n # Test query parsing\n self.assertEqual('Video will start playing at 42 seconds', test.nodes[16].label)\n\n # is processing finished empty\n self.assertTrue(test.queue.empty())\n self.assertEqual(len(test.edges), 0)", "def compose_embed_youtube(video_id = None):\n assert(video_id != None)\n return \"http://www.youtube.com/embed/{0}?enablejsapi=1&wmode=opaque\".format(\n video_id\n )", "def test_vimeo(disable_vimeo_api_query):\n\n sample = \".. 
vimeo:: VID\\n :height: 400\\n :width: 600\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\"https://player.vimeo.com/\" \"video/VID\"),\n \"height\": \"400\",\n \"width\": \"600\",\n },\n )", "def test_create_youtube_string(self):\r\n system = DummySystem(load_error_modules=True)\r\n location = Location(\"edX\", 'course', 'run', \"video\", 'SampleProblem1', None)\r\n field_data = DictFieldData({'location': location})\r\n descriptor = VideoDescriptor(system, field_data, Mock())\r\n descriptor.youtube_id_0_75 = 'izygArpw-Qo'\r\n descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'\r\n descriptor.youtube_id_1_25 = '1EeWXzPdhSA'\r\n descriptor.youtube_id_1_5 = 'rABDYkeK0x8'\r\n expected = \"0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8\"\r\n self.assertEqual(create_youtube_string(descriptor), expected)", "def bb_youtube_ol(hit):\n video = hit.group(1)\n return '<a href=\"http://www.youtube.com/watch?v=%s\" target=\"_new\"><img src=\"/static/youtube_icon.png\" title=\"YouTube\" alt=\"YouTube\" border=\"0\" /> YouTube Link</a>' % (video)", "def _render_with_tracking(self, video_id):\n video_id = jinja_utils.js_string_raw(video_id)\n uid = common_utils.generate_instance_id()\n dom = cElementTree.XML(\"\"\"\n<p>\n <script></script>\n <script></script>\n</p>\"\"\")\n dom.attrib['id'] = uid\n dom[0].attrib['src'] = os.path.join(RESOURCE_FOLDER, 'youtube_video.js')\n dom[1].text = 'gcbTagYoutubeEnqueueVideo(\"%s\", \"%s\");' % (video_id, uid)\n return dom", "def _handle_iframe_tag(self, attrs):\n\n def _get_source_from_attrs(attrs):\n for key, value in attrs:\n if key == \"src\":\n return value\n\n url = _get_source_from_attrs(attrs)\n video_url = \"\"\n video_id = url.split(\"?\")[0].rsplit(\"/\", 1)[1]\n if \"vimeo\" in url:\n # We need to get the video id from the url attribute and create a new URL.\n video_url = f\"https://vimeo.com/{video_id}\"\n elif \"youtube\" in url:\n # Not sure if youtube has been removed or not, but adding youtube videos seems impossible\n video_url = f\"https://youtu.be/{video_id}\"\n else:\n # Unclear what providers are supported or notself.\n # Full list https://github.com/wagtail/wagtail/blob/main/wagtail/embeds/oembed_providers.py\n return\n\n return f\"<embed embedtype='media' url='{video_url}'/>\"", "def test_690(self):\r\n test_id = 690\r\n sel = self.selenium\r\n testpage = \"/pagedemo/nytimes_youtube_embed\"\r\n subtextfile = os.path.join(testvars.MSTestVariables[\"DataDirectory\"],\"OctopusGarden.txt\")\r\n sel.open(testpage)\r\n sel.wait_for_page_to_load(testvars.timeout)\r\n sel.window_maximize()\r\n mslib.wait_for_element_present(self,sel,testvars.WebsiteUI[\"SubtitleMe_menu\"])\r\n sel.get_eval('this.browserbot.getUserWindow().unisubs.widget.Widget.getAllWidgets()[0].openMenu()')\r\n time.sleep(5)\r\n widget.open_starter_dialog(self,sel)\r\n widget.starter_dialog_translate_from_orig(self,sel,to_lang='pl')\r\n widget.edit_translation(self,sel,subtextfile)", "def test_get_video_id_from_url(self):\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/embed/DqGwxR_0d1M'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://youtu.be/DqGwxR_0d1M'), 'DqGwxR_0d1M')\n self.assertEqual(\n get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M&feature=youtu.be'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M'),\n 'DqGwxR_0d1M')", "def test_video_constructor(self):\r\n sources = {\r\n 'main': 
u'example.mp4',\r\n u'mp4': u'example.mp4',\r\n u'webm': u'example.webm',\r\n }\r\n\r\n context = self.item_descriptor.render('student_view').content\r\n\r\n expected_context = {\r\n 'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',\r\n 'data_dir': getattr(self, 'data_dir', None),\r\n 'show_captions': 'true',\r\n 'handout': None,\r\n 'display_name': u'A Name',\r\n 'end': 3610.0,\r\n 'id': self.item_descriptor.location.html_id(),\r\n 'sources': sources,\r\n 'speed': 'null',\r\n 'general_speed': 1.0,\r\n 'start': 3603.0,\r\n 'saved_video_position': 0.0,\r\n 'sub': u'a_sub_file.srt.sjson',\r\n 'track': None,\r\n 'youtube_streams': '1.00:OEoXaMPEzfM',\r\n 'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', True),\r\n 'yt_test_timeout': 1500,\r\n 'yt_api_url': 'www.youtube.com/iframe_api',\r\n 'yt_test_url': 'gdata.youtube.com/feeds/api/videos/',\r\n 'transcript_download_format': 'srt',\r\n 'transcript_download_formats_list': [{'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'}],\r\n 'transcript_language': u'en',\r\n 'transcript_languages': '{\"en\": \"English\"}',\r\n 'transcript_translation_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'translation'\r\n ).rstrip('/?'),\r\n 'transcript_available_translations_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'available_translations'\r\n ).rstrip('/?')\r\n }\r\n\r\n self.assertEqual(\r\n context,\r\n self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),\r\n )", "def test_soundcloud_iframe():\n\n sample = \".. soundcloud:: SID\\n :height: 400\\n :width: 600\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\n \"https://w.soundcloud.com/player/\"\n \"?url=http://api.soundcloud.com/\"\n \"tracks/SID\"\n ),\n \"height\": \"400\",\n \"width\": \"600\",\n },\n )", "def test_create_youtube_string_missing(self):\r\n system = DummySystem(load_error_modules=True)\r\n location = Location(\"edX\", 'course', 'run', \"video\", \"SampleProblem1\", None)\r\n field_data = DictFieldData({'location': location})\r\n descriptor = VideoDescriptor(system, field_data, Mock())\r\n descriptor.youtube_id_0_75 = 'izygArpw-Qo'\r\n descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'\r\n descriptor.youtube_id_1_25 = '1EeWXzPdhSA'\r\n expected = \"0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA\"\r\n self.assertEqual(create_youtube_string(descriptor), expected)", "def __ext_embed_id(self, youtube_url):\n youtube_id_match = re.search(r'(?<=v=)[^&#]+', youtube_url)\n youtube_id_match = youtube_id_match or re.search(\n r'(?<=be/)[^&#]+', youtube_url)\n trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match\n else None)\n return trailer_youtube_id", "def test_get_extension(self):\r\n expectedyoutube = 'video/youtube'\r\n expectednotyoutube = 'video/mp4'\r\n result1 = get_extension(self.sample_sourceurl)\r\n result2 = get_extension(self.sample_youtubeurl)\r\n self.assertEqual(expectedyoutube, result2)\r\n self.assertEqual(expectednotyoutube, result1)", "def bb_youtube_name_ol(hit):\n video = hit.group(1)\n title = hit.group(2)\n\n return '<a href=\"http://www.youtube.com/watch?v=%s\" target=\"_new\"><img src=\"/static/youtube_icon.png\" title=\"YouTube\" alt=\"YouTube\" border=\"0\"> %s</a>' % (title, video)", "def test_689(self):\r\n test_id = 689\r\n sel = self.selenium\r\n testpage = 
\"/pagedemo/nytimes_youtube_embed\"\r\n subtextfile = os.path.join(testvars.MSTestVariables[\"DataDirectory\"],\"OctopusGarden.txt\")\r\n sel.open(testpage)\r\n sel.wait_for_page_to_load(testvars.timeout)\r\n sel.window_maximize()\r\n mslib.wait_for_element_present(self,sel,testvars.WebsiteUI[\"SubtitleMe_menu\"])\r\n time.sleep(5)\r\n sel.get_eval('this.browserbot.getUserWindow().unisubs.widget.Widget.getAllWidgets()[0].openMenu()')\r\n widget.starter_dialog_edit_orig(self,sel)\r\n widget.goto_step(self,sel,step=\"2\")\r\n widget.edit_text(self,sel,subtextfile)\r\n #Login\r\n\r\n if sel.is_element_present(\"css=div div.unisubs-needLogin a\"):\r\n sel.click(\"css=div div.unisubs-needLogin a\")\r\n mslib.wait_for_element_present(self,sel,\"css=.unisubs-modal-login\")\r\n sel.click(\"css=.unisubs-log\")\r\n widget.site_login_auth(self,sel)\r\n sel.select_window(\"null\")\r\n widget.submit_sub_edits(self,sel,offsite=True)", "def validate_youtube(fragment):\n request=urllib.urlopen('https://www.youtube.com/watch?v=' + fragment)\n return request.getcode() == 200", "def url(yt_id: str) -> str:\n return \"https://www.youtube.com/watch?v={}\".format(yt_id)", "def playback_youtube(self, playtime):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Playback YouTube',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.wait_in_minute(playtime)\n\n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n else:\n print(\"stopping test\")", "def scrape_yt(soup) -> BaseProviderInput:\n # Check if video page has a \"Music in this video\" section\n if len(soup.find_all(\"li\", class_=\"watch-meta-item yt-uix-expander-body\")) > 1:\n output = scrape_embedded_yt_metadata(soup)\n if output.song_name is not None and output.artist_name is not None:\n return output\n\n raw_title = soup.find(\"meta\", {\"property\": \"og:title\"}).get(\"content\").strip()\n artist, title = None, None\n\n # In case the YouTube Title is in the commonly used format <Artist> - <Song name>\n if \"-\" in raw_title:\n raw_title = get_artist_title(raw_title)\n artist, title = raw_title.split(\" - \")\n\n # In case the YouTube Title only contains song name\n else:\n title = get_artist_title(raw_title)\n try:\n artist = soup.find(\n \"a\", class_=\"yt-uix-sessionlink spf-link\"\n ).text # Scrapes \"Artist\" from the YouTube Channel name\n artist = clean_channel(artist)\n except AttributeError:\n artist = None\n\n return DictInput(title, artist)", "def show_trailer(self):\r\n webbrowser.open(self.trailer_youtube_url)", "def tekstowo_youtube_url(source):\n reg = re.compile(r\"var videoID = \\\"(.*)\\\";\")\n try:\n video_id = reg.search(source).group(1)\n except Exception:\n raise Exception(ERROR_STR + '[crawler] cannot find videoID')\n if not video_id:\n raise Exception(ERROR_STR + '[crawler] empty videoID')\n\n return \"https://www.youtube.com/watch?v=\" + video_id" ]
[ "0.68769556", "0.6847462", "0.66609895", "0.6659137", "0.65431464", "0.6414613", "0.63702035", "0.6349991", "0.6284504", "0.62808007", "0.6231663", "0.61990386", "0.6182694", "0.6163565", "0.6138838", "0.61302125", "0.6112555", "0.61094904", "0.6041209", "0.6037975", "0.6005972", "0.59742296", "0.59395725", "0.588791", "0.58869535", "0.58738256", "0.5842756", "0.58388597", "0.5836565", "0.58295834" ]
0.78585184
0
Test Youtube iframe tag generation with start_at attribute
def test_youtube_iframe_start_at(): sample = ".. youtube:: YID\n :height: 400\n :width: 600\n :start_at: 60" html = get_html_from_rst(sample) assert_html_contains( html, "iframe", attributes={ "src": ( "https://www.youtube-nocookie.com" "/embed/YID?rel=0&" "wmode=transparent&start=60" ), "height": "400", "width": "600", "frameborder": "0", "allowfullscreen": "", "allow": "encrypted-media", }, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_youtube_iframe():\n\n sample = \".. youtube:: YID\\n :height: 400\\n :width: 600\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\n \"https://www.youtube-nocookie.com\"\n \"/embed/YID?rel=0&\"\n \"wmode=transparent\"\n ),\n \"height\": \"400\",\n \"width\": \"600\",\n \"frameborder\": \"0\",\n \"allowfullscreen\": \"\",\n \"allow\": \"encrypted-media\",\n },\n )", "def bb_youtube(hit):\n video = hit.group(1)\n return '<object width=\"425\" height=\"350\"><param name=\"movie\" value=\"http://www.youtube.com/v/%s\"></param><param name=\"wmode\" value=\"transparent\"></param><embed src=\"http://www.youtube.com/v/%s\" type=\"application/x-shockwave-flash\" wmode=\"transparent\" width=\"425\" height=\"350\"></embed></object>' % (video, video)", "def test_soundcloud_iframe():\n\n sample = \".. soundcloud:: SID\\n :height: 400\\n :width: 600\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\n \"https://w.soundcloud.com/player/\"\n \"?url=http://api.soundcloud.com/\"\n \"tracks/SID\"\n ),\n \"height\": \"400\",\n \"width\": \"600\",\n },\n )", "def test_youtube_videos(dates):\n test = pycmc.charts.youtube.videos(dates[\"start\"])\n assert isinstance(test, list)\n assert len(test) > 90\n assert test[0][\"name\"] != \"\"\n assert test[0][\"id\"] != \"\"", "def test_video_constructor(self):\r\n context = self.item_descriptor.render('student_view').content\r\n\r\n sources = {\r\n 'main': u'example.mp4',\r\n u'mp4': u'example.mp4',\r\n u'webm': u'example.webm',\r\n }\r\n\r\n expected_context = {\r\n 'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',\r\n 'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),\r\n 'data_dir': getattr(self, 'data_dir', None),\r\n 'display_name': u'A Name',\r\n 'end': 3610.0,\r\n 'id': self.item_descriptor.location.html_id(),\r\n 'show_captions': 'true',\r\n 'handout': None,\r\n 'sources': sources,\r\n 'speed': 'null',\r\n 'general_speed': 1.0,\r\n 'start': 3603.0,\r\n 'saved_video_position': 0.0,\r\n 'sub': u'a_sub_file.srt.sjson',\r\n 'track': None,\r\n 'youtube_streams': create_youtube_string(self.item_descriptor),\r\n 'yt_test_timeout': 1500,\r\n 'yt_api_url': 'www.youtube.com/iframe_api',\r\n 'yt_test_url': 'gdata.youtube.com/feeds/api/videos/',\r\n 'transcript_download_format': 'srt',\r\n 'transcript_download_formats_list': [{'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'}],\r\n 'transcript_language': u'en',\r\n 'transcript_languages': json.dumps(OrderedDict({\"en\": \"English\", \"uk\": u\"Українська\"})),\r\n 'transcript_translation_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'translation'\r\n ).rstrip('/?'),\r\n 'transcript_available_translations_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'available_translations'\r\n ).rstrip('/?'),\r\n }\r\n\r\n self.assertEqual(\r\n context,\r\n self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),\r\n )", "def test_export_to_xml_empty_end_time(self):\r\n module_system = DummySystem(load_error_modules=True)\r\n desc = VideoDescriptor(module_system, DictFieldData({}), ScopeIds(None, None, self.location, self.location))\r\n\r\n desc.youtube_id_0_75 = 'izygArpw-Qo'\r\n desc.youtube_id_1_0 = 'p2Q6BrNhdh8'\r\n desc.youtube_id_1_25 = '1EeWXzPdhSA'\r\n desc.youtube_id_1_5 = 
'rABDYkeK0x8'\r\n desc.show_captions = False\r\n desc.start_time = datetime.timedelta(seconds=5.0)\r\n desc.end_time = datetime.timedelta(seconds=0.0)\r\n desc.track = 'http://www.example.com/track'\r\n desc.download_track = True\r\n desc.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg']\r\n desc.download_video = True\r\n\r\n xml = desc.definition_to_xml(None) # We don't use the `resource_fs` parameter\r\n expected = etree.fromstring('''\\\r\n <video url_name=\"SampleProblem1\" start_time=\"0:00:05\" youtube=\"0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8\" show_captions=\"false\" download_video=\"true\" download_track=\"true\">\r\n <source src=\"http://www.example.com/source.mp4\"/>\r\n <source src=\"http://www.example.com/source.ogg\"/>\r\n <track src=\"http://www.example.com/track\"/>\r\n </video>\r\n ''')\r\n\r\n self.assertXmlEqual(expected, xml)", "def test_embed_ok(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_URL) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"youtube_video\")\n self.find(\"<object width\")\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', NOTAGS_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_EMBED) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"<object width\")", "def test_vimeo(disable_vimeo_api_query):\n\n sample = \".. vimeo:: VID\\n :height: 400\\n :width: 600\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\"https://player.vimeo.com/\" \"video/VID\"),\n \"height\": \"400\",\n \"width\": \"600\",\n },\n )", "def _render_no_tracking(self, video_id):\n you_tube_url = (\n 'https://www.youtube.com/embed/%s'\n '?feature=player_embedded&amp;rel=0') % video_id\n iframe = cElementTree.XML(\"\"\"\n<div class=\"gcb-video-container\">\n <iframe class=\"youtube-player\" title=\"YouTube Video Player\"\n type=\"text/html\" frameborder=\"0\" allowfullscreen=\"allowfullscreen\">\n </iframe>\n</div>\"\"\")\n iframe[0].set('src', you_tube_url)\n return iframe", "def extraire(self, url, prefix):\n # Recuperer le code html de la page youtube\n print(url)\n code = urlopen(url).read().decode('utf8').split('\"')\n\n for elmt in code:\n if prefix in elmt:\n return elmt\n \n # Valeur par defaut\n return '/watch?v=jNQXAC9IVRw'", "def test_video_constructor(self):\r\n sources = {\r\n 'main': u'example.mp4',\r\n u'mp4': u'example.mp4',\r\n u'webm': u'example.webm',\r\n }\r\n\r\n context = self.item_descriptor.render('student_view').content\r\n\r\n expected_context = {\r\n 'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',\r\n 'data_dir': getattr(self, 'data_dir', None),\r\n 'show_captions': 'true',\r\n 'handout': None,\r\n 'display_name': u'A Name',\r\n 'end': 3610.0,\r\n 'id': self.item_descriptor.location.html_id(),\r\n 'sources': sources,\r\n 'speed': 'null',\r\n 'general_speed': 1.0,\r\n 'start': 3603.0,\r\n 'saved_video_position': 0.0,\r\n 'sub': u'a_sub_file.srt.sjson',\r\n 'track': None,\r\n 'youtube_streams': '1.00:OEoXaMPEzfM',\r\n 'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', True),\r\n 'yt_test_timeout': 1500,\r\n 'yt_api_url': 'www.youtube.com/iframe_api',\r\n 'yt_test_url': 'gdata.youtube.com/feeds/api/videos/',\r\n 'transcript_download_format': 'srt',\r\n 'transcript_download_formats_list': [{'display_name': 'SubRip (.srt) file', 
'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'}],\r\n 'transcript_language': u'en',\r\n 'transcript_languages': '{\"en\": \"English\"}',\r\n 'transcript_translation_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'translation'\r\n ).rstrip('/?'),\r\n 'transcript_available_translations_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'available_translations'\r\n ).rstrip('/?')\r\n }\r\n\r\n self.assertEqual(\r\n context,\r\n self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),\r\n )", "def bb_youtube_ol(hit):\n video = hit.group(1)\n return '<a href=\"http://www.youtube.com/watch?v=%s\" target=\"_new\"><img src=\"/static/youtube_icon.png\" title=\"YouTube\" alt=\"YouTube\" border=\"0\" /> YouTube Link</a>' % (video)", "def __ext_embed_id(self, youtube_url):\n youtube_id_match = re.search(r'(?<=v=)[^&#]+', youtube_url)\n youtube_id_match = youtube_id_match or re.search(\n r'(?<=be/)[^&#]+', youtube_url)\n trailer_youtube_id = (youtube_id_match.group(0) if youtube_id_match\n else None)\n return trailer_youtube_id", "def test_subs_for_html5_vid_with_periods(self):\r\n html5_ids = transcripts_utils.get_html5_ids(['foo.mp4', 'foo.1.bar.mp4', 'foo/bar/baz.1.4.mp4', 'foo'])\r\n self.assertEqual(4, len(html5_ids))\r\n self.assertEqual(html5_ids[0], 'foo')\r\n self.assertEqual(html5_ids[1], 'foo.1.bar')\r\n self.assertEqual(html5_ids[2], 'baz.1.4')\r\n self.assertEqual(html5_ids[3], 'foo')", "def youtube(code,width=400,height=250):\n\n return XML(\"\"\"<object width=\"%(width)s\" height=\"%(height)s\"><param name=\"movie\" value=\"http://www.youtube.com/v/%(code)s&hl=en_US&fs=1&\"></param><param name=\"allowFullScreen\" value=\"true\"></param><param name=\"allowscriptaccess\" value=\"always\"></param><embed src=\"http://www.youtube.com/v/%(code)s&hl=en_US&fs=1&\" type=\"application/x-shockwave-flash\" allowscriptaccess=\"always\" allowfullscreen=\"true\" width=\"%(width)s\" height=\"%(height)s\"></embed></object>\"\"\" % dict(code=code, width=width, height=height))", "def test_old_video_format(self):\r\n module_system = DummySystem(load_error_modules=True)\r\n xml_data = \"\"\"\r\n <video display_name=\"Test Video\"\r\n youtube=\"1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8\"\r\n show_captions=\"false\"\r\n source=\"http://www.example.com/source.mp4\"\r\n from=\"00:00:01\"\r\n to=\"00:01:00\">\r\n <source src=\"http://www.example.com/source.mp4\"/>\r\n <track src=\"http://www.example.com/track\"/>\r\n </video>\r\n \"\"\"\r\n output = VideoDescriptor.from_xml(xml_data, module_system, Mock())\r\n self.assert_attributes_equal(output, {\r\n 'youtube_id_0_75': 'izygArpw-Qo',\r\n 'youtube_id_1_0': 'p2Q6BrNhdh8',\r\n 'youtube_id_1_25': '1EeWXzPdhSA',\r\n 'youtube_id_1_5': 'rABDYkeK0x8',\r\n 'show_captions': False,\r\n 'start_time': datetime.timedelta(seconds=1),\r\n 'end_time': datetime.timedelta(seconds=60),\r\n 'track': 'http://www.example.com/track',\r\n # 'download_track': True,\r\n 'html5_sources': ['http://www.example.com/source.mp4'],\r\n 'data': '',\r\n })", "def test_parse_youtube_one_video(self):\r\n youtube_str = '0.75:jNCf2gIqpeE'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "def test_parse_youtube_one_video(self):\r\n youtube_str = '0.75:jNCf2gIqpeE'\r\n output = VideoDescriptor._parse_youtube(youtube_str)\r\n 
self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',\r\n '1.00': '',\r\n '1.25': '',\r\n '1.50': ''})", "def _render_with_tracking(self, video_id):\n video_id = jinja_utils.js_string_raw(video_id)\n uid = common_utils.generate_instance_id()\n dom = cElementTree.XML(\"\"\"\n<p>\n <script></script>\n <script></script>\n</p>\"\"\")\n dom.attrib['id'] = uid\n dom[0].attrib['src'] = os.path.join(RESOURCE_FOLDER, 'youtube_video.js')\n dom[1].text = 'gcbTagYoutubeEnqueueVideo(\"%s\", \"%s\");' % (video_id, uid)\n return dom", "def _handle_iframe_tag(self, attrs):\n\n def _get_source_from_attrs(attrs):\n for key, value in attrs:\n if key == \"src\":\n return value\n\n url = _get_source_from_attrs(attrs)\n video_url = \"\"\n video_id = url.split(\"?\")[0].rsplit(\"/\", 1)[1]\n if \"vimeo\" in url:\n # We need to get the video id from the url attribute and create a new URL.\n video_url = f\"https://vimeo.com/{video_id}\"\n elif \"youtube\" in url:\n # Not sure if youtube has been removed or not, but adding youtube videos seems impossible\n video_url = f\"https://youtu.be/{video_id}\"\n else:\n # Unclear what providers are supported or notself.\n # Full list https://github.com/wagtail/wagtail/blob/main/wagtail/embeds/oembed_providers.py\n return\n\n return f\"<embed embedtype='media' url='{video_url}'/>\"", "def test_parse_youtube_key_format(self):\r\n youtube_str = '1.00:p2Q6BrNhdh8'\r\n youtube_str_hack = '1.0:p2Q6BrNhdh8'\r\n self.assertEqual(\r\n VideoDescriptor._parse_youtube(youtube_str),\r\n VideoDescriptor._parse_youtube(youtube_str_hack)\r\n )", "def test_parse_youtube_key_format(self):\r\n youtube_str = '1.00:p2Q6BrNhdh8'\r\n youtube_str_hack = '1.0:p2Q6BrNhdh8'\r\n self.assertEqual(\r\n VideoDescriptor._parse_youtube(youtube_str),\r\n VideoDescriptor._parse_youtube(youtube_str_hack)\r\n )", "def test_691(self):\r\n test_id = 691\r\n sel = self.selenium\r\n testpage = \"/pagedemo/nytimes_youtube_embed\"\r\n subtextfile = os.path.join(testvars.MSTestVariables[\"DataDirectory\"],\"OctopusGarden.txt\")\r\n sel.open(testpage)\r\n sel.wait_for_page_to_load(testvars.timeout)\r\n sel.window_maximize()\r\n mslib.wait_for_element_present(self,sel,testvars.WebsiteUI[\"SubtitleMe_menu\"])\r\n time.sleep(5)\r\n sel.get_eval('this.browserbot.getUserWindow().unisubs.widget.Widget.getAllWidgets()[0].openMenu()')\r\n widget.open_starter_dialog(self,sel)\r\n widget.starter_dialog_fork(self,sel,to_lang='hr')\r\n widget.transcribe_video(self, sel, subtextfile)\r\n widget.sync_video(self, sel, subtextfile)\r\n widget.site_login_from_widget_link(self,sel)\r\n #verify subs still present\r\n print \"verifying subtitles are still present\"\r\n sel.select_window(\"null\")\r\n mslib.wait_for_element_present(self,sel,\"css=.unisubs-titlesList\")\r\n widget.verify_sub_text(self,sel,subtextfile)\r\n if sel.is_element_present(\"css=.unisubs-modal-login\"): #Login\r\n sel.click(\"css=.unisubs-log\")\r\n widget.site_login_auth(self,sel)\r\n sel.select_window(\"null\")\r\n widget.submit_sub_edits(self,sel,offsite=True)", "def bb_gvideo(hit):\n video = hit.group(1)\n return '<object width=\"400\" height=\"326\"><param name=\"movie\" value=\"http://video.google.com/googleplayer.swf?docId=%s\"></param><param name=\"wmode\" value=\"transparent\"></param><embed src=\"http://video.google.com/googleplayer.swf?docId=%s\" wmode=\"transparent\" style=\"width:400px; height:326px;\" id=\"VideoPlayback\" type=\"application/x-shockwave-flash\" flashvars=\"\"></embed></object>' % ( video, video )", "def test_get_video_id_from_url(self):\n 
self.assertEqual(get_video_id_from_url('https://www.youtube.com/embed/DqGwxR_0d1M'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://youtu.be/DqGwxR_0d1M'), 'DqGwxR_0d1M')\n self.assertEqual(\n get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M&feature=youtu.be'),\n 'DqGwxR_0d1M')\n self.assertEqual(get_video_id_from_url('https://www.youtube.com/watch?v=DqGwxR_0d1M'),\n 'DqGwxR_0d1M')", "def embed(parser, token):\n parsed_link = parser.parse(('endembed',))\n # first token it's closing tag. delete_first_token\n # just delete it - del self.tokens[0];)\n parser.delete_first_token()\n return YoutubeNode(parsed_link)", "def validate_youtube(fragment):\n request=urllib.urlopen('https://www.youtube.com/watch?v=' + fragment)\n return request.getcode() == 200", "async def youtube(self, ctx, *, query):\r\n\r\n utub = 'https://youtube.com/results?search_query='\r\n url = utub + query.replace(\" \", \"+\")\r\n r = requests.get(url).text\r\n num1 = r.find('{\"videoRenderer')\r\n num2 = r.find('{\"videoRenderer', num1+1)\r\n # print (num1)\r\n # print (num2)\r\n videoRenderer = (json.loads(r[num1:num2-1])[\"videoRenderer\"])\r\n vid = (videoRenderer[\"videoId\"])\r\n page = (\"https://youtube.com/watch?v=\" + vid)\r\n await ctx.send(page)", "def test_old_video_data(self):\r\n module_system = DummySystem(load_error_modules=True)\r\n xml_data = \"\"\"\r\n <video display_name=\"Test Video\"\r\n youtube=\"1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8\"\r\n show_captions=\"false\"\r\n from=\"00:00:01\"\r\n to=\"00:01:00\">\r\n <source src=\"http://www.example.com/source.mp4\"/>\r\n <track src=\"http://www.example.com/track\"/>\r\n </video>\r\n \"\"\"\r\n video = VideoDescriptor.from_xml(xml_data, module_system, Mock())\r\n self.assert_attributes_equal(video, {\r\n 'youtube_id_0_75': 'izygArpw-Qo',\r\n 'youtube_id_1_0': 'p2Q6BrNhdh8',\r\n 'youtube_id_1_25': '1EeWXzPdhSA',\r\n 'youtube_id_1_5': 'rABDYkeK0x8',\r\n 'show_captions': False,\r\n 'start_time': datetime.timedelta(seconds=1),\r\n 'end_time': datetime.timedelta(seconds=60),\r\n 'track': 'http://www.example.com/track',\r\n # 'download_track': True,\r\n 'html5_sources': ['http://www.example.com/source.mp4'],\r\n 'data': ''\r\n })", "def test_690(self):\r\n test_id = 690\r\n sel = self.selenium\r\n testpage = \"/pagedemo/nytimes_youtube_embed\"\r\n subtextfile = os.path.join(testvars.MSTestVariables[\"DataDirectory\"],\"OctopusGarden.txt\")\r\n sel.open(testpage)\r\n sel.wait_for_page_to_load(testvars.timeout)\r\n sel.window_maximize()\r\n mslib.wait_for_element_present(self,sel,testvars.WebsiteUI[\"SubtitleMe_menu\"])\r\n sel.get_eval('this.browserbot.getUserWindow().unisubs.widget.Widget.getAllWidgets()[0].openMenu()')\r\n time.sleep(5)\r\n widget.open_starter_dialog(self,sel)\r\n widget.starter_dialog_translate_from_orig(self,sel,to_lang='pl')\r\n widget.edit_translation(self,sel,subtextfile)" ]
[ "0.68381613", "0.58002746", "0.5656382", "0.55563515", "0.55528694", "0.5551181", "0.55401534", "0.5517988", "0.5499791", "0.5476163", "0.5425436", "0.5424562", "0.5404211", "0.53970695", "0.53929263", "0.5322906", "0.52933747", "0.52933747", "0.5274151", "0.52623284", "0.52561325", "0.52561325", "0.52531284", "0.52218115", "0.5221327", "0.5210233", "0.5196754", "0.5194215", "0.51554745", "0.5127224" ]
0.8507716
0