query            stringlengths    9 to 9.05k
document         stringlengths    10 to 222k
metadata         dict
negatives        listlengths      30 to 30
negative_scores  listlengths      30 to 30
document_score   stringlengths    4 to 10
document_rank    stringclasses    2 values
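The columns read like a preview of a triplet-style code-retrieval dataset: a natural-language query, one positive code document, 30 hard-negative snippets with their similarity scores, and the positive document's own score and rank. Below is a minimal sketch of iterating such rows with the datasets library; the dataset identifier is a placeholder assumption, not the real path.

from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")  # placeholder id (assumption)
row = ds[0]
print(row["query"])                # e.g. "Update the navigation property services in print."
print(row["document"][:120])       # positive code document (a generated SDK method)
print(len(row["negatives"]))       # 30 hard-negative code snippets
print(row["negative_scores"][:3])  # similarity scores aligned with the negatives
print(row["document_score"], row["document_rank"])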
Update the navigation property services in print.
def update_services(
    self,
    print_service_id,  # type: str
    body,  # type: "models.MicrosoftGraphPrintService"
    **kwargs  # type: Any
):
    # type: (...) -> None
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.update_services.metadata['url']  # type: ignore
    path_format_arguments = {
        'printService-id': self._serialize.url("print_service_id", print_service_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphPrintService')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateScptRefs(self):\n for scpt in self.refs_scpt.keys():\n self.refs_scpt[scpt] = scpt.getRef()\n self.scptRefs = set(self.refs_scpt.values())", "def update(self):\n for object in reversed(self.addList):\n self.objects.append(object)\n self.addList.remove(object)\n\n for object in reversed(self.removeList):\n self.objects.remove(object)\n self.removeList.remove(object)\n\n self.objects = sorted(self.objects,key=priority)\n\n for object in self.objects:\n object.update()", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data\n self._attributes = self.data_service.attributes", "def services(self, services):\n\n self._services = services", "def services(self, services):\n\n self._services = services", "def order_update_print():\n result = order_obj.order_update_print(request.forms) \n return result", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"VNF or e2e Service ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')\n print(indent, \"|-IP address:\", self.IP_address, sep='')\n print(indent, \"|-URL:\", self.URL, sep='')\n\n if self.related_phys_rsrc_ID_list != None:\n if len(self.related_phys_rsrc_ID_list) >0:\n print(indent, \"|-related/associated physical resource(s):\", sep='')\n for phys_resource_ID in self.related_phys_rsrc_ID_list:\n phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)\n if phys_resource_item != None:\n phys_resource_item.printout_all(indent_level+1)\n\n if self.related_cloud_virt_rsrc_ID_list != None:\n if len(self.related_cloud_virt_rsrc_ID_list) >0:\n print(indent, \"|-related/associated cloud virtual resource(s):\", sep='')\n for cloud_resource_ID in self.related_cloud_virt_rsrc_ID_list:\n cloud_resource_item = get_indexed_item_from_list(cloud_resource_ID, AutoResilGlobal.cloud_virtual_resource_list)\n if cloud_resource_item != None:\n cloud_resource_item.printout_all(indent_level+1)", "def update_associations(self):\n for dt_format, old_value, new_value in self.own_list:\n DescriptorFormatTypeManager.own(dt_format, self.entity, old_value, new_value)", "def presavemodel_serializationhelpers_updatefields(self):\n # get a collection IF it exists\n #print \"ATTN: in presavemodel_serializationhelpers_updatefields stage 1 for object {0}\".format(str(self))\n sdictcollection = self.getcreate_serializedbdictcollection(False)\n if (sdictcollection == None):\n # nothing to do\n #print \"ATTN: no sitecollection found for object.\"\n return\n #print \"ATTN: in presavemodel_serializationhelpers_updatefields stage 2\"\n # ok we have some that potentially need save/update\n alldicts = sdictcollection.get_alldicts()\n for sdictkey, sdict in alldicts.iteritems():\n # check if this has changed and so needs updating\n #print \"ATTN: in presavemodel_serializationhelpers_updatefields stage 3 with {0}.\".format(sdictkey)\n if (sdict.get_haschanged()):\n # it has changed, get serialized string representation of the field to save\n serializedstring = sdict.get_serializedstr()\n # ok now we want to SAVE it to our attribute/field of this model\n # the internal attribute name for this field is the dictionary key itself\n attributename = sdictkey\n setattr(self,attributename,serializedstring)\n #print \"ATTN: in presavemodel_serializationhelpers_updatefields stage 4 with {0} and {1} and {2}.\".format(sdictkey,attributename,serializedstring)\n # clear haschanged 
flag\n sdict.set_haschanged(False)", "def update_view(self):\n for row in self.view.obj_list:\n for obj in row:\n obj._update(self.model)", "def refresh(self):\n self.proxies = self._init_proxies(self.proxy_providers)", "def updateProperties(self):\n self.manage_changeProperties(title = self.getTitle(),\n description = self.getDescription(),\n basepath = self.getPath())", "def print_everything(self):\n def print_service(service):\n print\n print '====[ %s ]==== ' % service.__repr__(path_only=True)\n print\n\n print 'Actions:'\n for name, action in service.get_actions():\n print ' - ', name, action\n print\n\n for name, subservice in service.get_subservices():\n print_service(subservice)\n\n print_service(self.root)", "def updateAllLineFields(self):\n for format in self.values():\n format.updateLineFields()\n globalref.docRef.fileInfoFormat.updateLineFields()", "def update(self):\r\n self._revit_object.SetElementIds(self.as_element_id_list)", "def refresh(self):\n self._policies = self._get_policies()", "def updateStudentProposalReferences(request):\n\n return updateReferencesForModel('student_proposal')", "def servicesChanged(self) -> None:\n ...", "def update(self, **kwargs):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"update {}\".format(item))\n item.update(**kwargs)", "def save(self):\n Preferences.setPrinter(\n \"PrinterName\",\n self.printerNameEdit.text())\n if self.printerColorButton.isChecked():\n Preferences.setPrinter(\"ColorMode\", 1)\n else:\n Preferences.setPrinter(\"ColorMode\", 0)\n if self.printFirstPageFirstButton.isChecked():\n Preferences.setPrinter(\"FirstPageFirst\", 1)\n else:\n Preferences.setPrinter(\"FirstPageFirst\", 0)\n Preferences.setPrinter(\n \"Magnification\",\n self.printMagnificationSpinBox.value())\n Preferences.setPrinter(\"HeaderFont\", self.printheaderFont)\n Preferences.setPrinter(\n \"LeftMargin\",\n self.leftMarginSpinBox.value())\n Preferences.setPrinter(\n \"RightMargin\",\n self.rightMarginSpinBox.value())\n Preferences.setPrinter(\n \"TopMargin\",\n self.topMarginSpinBox.value())\n Preferences.setPrinter(\n \"BottomMargin\",\n self.bottomMarginSpinBox.value())\n Preferences.setPrinter(\n \"Resolution\",\n self.resolutionSpinBox.value())", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data[self._json_key]\n self._attributes = self.data_service.attributes[self._json_key]", "def _refresh(self):\r\n self._tracer.info(\"Refresh called 1\")\r\n for obj in self._filter_objects:\r\n # Only call function if it exists, and is callable.\r\n if hasattr(obj, 'property_refresh') and \\\r\n callable(getattr(obj, 'property_refresh')):\r\n obj.property_refresh()", "def update(self, reset):\n self.vehicle.update(reset)\n self.traffic_light.update(reset)\n self.network.update(reset)\n self.simulation.update(reset)\n self.detector.update(reset)", "def postCommitHook(self, datamodel=None):\n self.updateProperties()", "def save(self, clean=True, user=None, log_action=False, **kwargs):\n if self.featured:\n for child_class in ProductPage.__subclasses__():\n child_class.objects.filter(featured=True).update(featured=False)\n super().save(clean=clean, user=user, log_action=log_action, **kwargs)", "def update_operations(\n self,\n print_operation_id, # type: str\n body, # type: \"models.MicrosoftGraphPrintOperation\"\n **kwargs # type: Any\n ):\n # type: (...) 
-> None\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.update_operations.metadata['url'] # type: ignore\n path_format_arguments = {\n 'printOperation-id': self._serialize.url(\"print_operation_id\", print_operation_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(body, 'MicrosoftGraphPrintOperation')\n body_content_kwargs['content'] = body_content\n request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n if cls:\n return cls(pipeline_response, None, {})", "def refresh(self):\n selected = []\n if not self.__new_service:\n selected = [str(t.text()) for t in\n self.__service_list.selectedItems()]\n\n self.__service_list.clear()\n if not self.__show:\n self.__services = opencue.api.getDefaultServices()\n else:\n self.__services = self.__show.getServiceOverrides()\n\n for service in self.__services:\n item = QtWidgets.QListWidgetItem(service.name())\n self.__service_list.addItem(item)\n\n if service.name() in selected:\n item.setSelected(True)\n\n self.__service_list.sortItems()", "def refresh(self, context=None):\n current = self.get_by_uuid(self._context, uuid=self.uuid)\n self.obj_refresh(current)", "def set_references(self, references: IReferences):\n self.__references2 = references\n super(StatusRestService, self).set_references(references)\n self.__context_info = self._dependency_resolver.get_one_optional(\"context-info\")", "def save(self):\n\n for vm in self.vms:\n vm.save()\n\n for obj in self.objects:\n obj.save()\n\n for vol in self.volumes:\n vol.save()" ]
[ "0.52056175", "0.477599", "0.47441873", "0.4714739", "0.4714739", "0.47057602", "0.46874425", "0.46425647", "0.4608528", "0.46006417", "0.45826253", "0.4568615", "0.45354888", "0.45163587", "0.45129082", "0.44720852", "0.44717228", "0.44637182", "0.4438183", "0.44319624", "0.4417529", "0.44062167", "0.44029173", "0.4402586", "0.43892902", "0.43817455", "0.43680978", "0.43654323", "0.436527", "0.43584722" ]
0.5060978
1
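The metadata above declares a triplet objective over (query, document, negatives). The dump does not say how a row is consumed during training; the following is a minimal sketch, assuming the three fields have already been embedded and L2-normalized, of an InfoNCE-style loss that scores the positive document at index 0 against the 30 negatives.

import torch
import torch.nn.functional as F

def triplet_infonce_loss(query_emb, doc_emb, negative_embs, temperature=0.05):
    # query_emb: (d,), doc_emb: (d,), negative_embs: (30, d); all L2-normalized.
    candidates = torch.cat([doc_emb.unsqueeze(0), negative_embs], dim=0)  # (31, d)
    logits = candidates @ query_emb / temperature                         # (31,)
    target = torch.tensor(0)  # the positive document sits at index 0
    return F.cross_entropy(logits.unsqueeze(0), target.unsqueeze(0))

# Dummy shapes only; real embeddings would come from the retrieval model being trained.
d = 768
query_emb = F.normalize(torch.randn(d), dim=0)
doc_emb = F.normalize(torch.randn(d), dim=0)
negative_embs = F.normalize(torch.randn(30, d), dim=1)
loss = triplet_infonce_loss(query_emb, doc_emb, negative_embs)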
Create new navigation property to shares for print.
def create_shares(
    self,
    body,  # type: "models.MicrosoftGraphPrinterShare"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrinterShare"
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrinterShare"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.create_shares.metadata['url']  # type: ignore

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphPrinterShare')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrinterShare', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_printer_shares(\n self,\n body, # type: \"models.MicrosoftGraphPrinterShare\"\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphPrinterShare\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPrinterShare\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_printer_shares.metadata['url'] # type: ignore\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(body, 'MicrosoftGraphPrinterShare')\n body_content_kwargs['content'] = body_content\n request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPrinterShare', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def get_printer_shares(\n self,\n printer_share_id, # type: str\n select=None, # type: Optional[List[Union[str, \"models.Enum62\"]]]\n expand=None, # type: Optional[List[Union[str, \"models.Enum63\"]]]\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphPrinterShare\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPrinterShare\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_printer_shares.metadata['url'] # type: ignore\n path_format_arguments = {\n 'printerShare-id': self._serialize.url(\"printer_share_id\", printer_share_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPrinterShare', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def get_shares(\n self,\n printer_share_id, # type: str\n select=None, # type: Optional[List[Union[str, \"models.Enum88\"]]]\n expand=None, # type: Optional[List[Union[str, \"models.Enum89\"]]]\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphPrinterShare\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPrinterShare\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_shares.metadata['url'] # type: ignore\n path_format_arguments = {\n 'printerShare-id': self._serialize.url(\"printer_share_id\", printer_share_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPrinterShare', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def create_share(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_CreateShare', self.handle))", "def copy(self):\r\n\t\tobj = DecaLink()\r\n\t\tfor k in self.__dict__.keys():\r\n\t\t\tobj.__setattr__(k, self.__getattribute__(k))\r\n\t\treturn obj", "def _add_relations(self):\n relations = {\n 'keystone:shared-db': 'percona-cluster:shared-db',\n 'swift-proxy:identity-service': 'keystone:identity-service',\n 'swift-storage:swift-storage': 'swift-proxy:swift-storage',\n 'glance:identity-service': 'keystone:identity-service',\n 'glance:shared-db': 'percona-cluster:shared-db',\n 'glance:object-store': 'swift-proxy:object-store'\n }\n super(SwiftProxyBasicDeployment, self)._add_relations(relations)", "def relationships(self):", "def __repr__(self):\n rself = _really(self)\n return 'SharedProxy(%r, %r, %r)' % (\n rself._sharedItem,\n rself._sharedInterfaces,\n rself._shareID)", "def AddSharedProperty(cls, name):\n def Get(self):\n try:\n return self.shared_definition[name]\n except KeyError: # Must raise AttributeError if property doesn't exist.\n raise AttributeError\n\n def Set(self, value):\n self.shared_definition[name] = value\n\n setattr(cls, name, property(Get, Set))", "def generate(self):\n t = (self.context.identifier, RDF.type, META.Provenance)\n if t not in self.context.graph:\n self.context.graph.add(t)\n for name, value in self.data.items():\n pat = (self.context.identifier, META[name], None)\n if pat in self.context.graph:\n self.context.graph.remove(pat)\n self.context.graph.add((pat[0], META[name], Literal(value)))", "def _add_relationships(self, element: Element) -> None:\n elements: Set[str] = {v.id for v in self.element_views}\n\n for relationship in element.get_efferent_relationships():\n if relationship.destination.id in elements:\n self._relationship_views.add(\n RelationshipView(relationship=relationship)\n 
)\n\n for relationship in element.get_afferent_relationships():\n if relationship.source.id in elements:\n self._relationship_views.add(\n RelationshipView(relationship=relationship)\n )", "def share_level(self, new_level: str) -> None:\n self._db_data.share_level = new_level\n for data in self._child_data.values():\n original_auto_save = data.auto_save\n data.auto_save = False\n data.share_level = new_level\n data.auto_save = original_auto_save\n if self.auto_save:\n self.save_metadata()", "def create_relation(self, left_node, rel, right_node):\n rel = Relationship(left_node, rel, right_node)\n self.graph.merge(rel)\n return", "def test_set_share(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=200\n )", "def __repr__(self):\n\n return \"<Trip=%s shared with user=%s>\" % (self.trip.name, self.user.name)", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def snapshot(self):\n snapshot = super(VirtualMachineDAO, self).snapshot()\n for entry in snapshot:\n vm = entry.get(VirtualMachineDAO.INNER_OBJ)\n vm['network'] = VMNetworkDAO(self.session, vm.get(VirtualMachineDAO.FOREIGN_KEY)).snapshot()\n return snapshot", "def make_reference2(self):\n self.ref = Snapshot()", "def add_relationship(self, relationship):\n self.relationships[relationship.parent].append(relationship)", "def copy(self):\n new_genome = Genome(self.pop)\n #new_genome.node_genes = [gene.copy() for gene in self.node_genes]\n new_genome.node_genes = [n for n in self.node_genes]\n new_genome.link_genes = [gene.copy() for gene in self.link_genes]\n new_genome.fitness = self.fitness\n new_genome.adj_fitness = self.adj_fitness\n new_genome.species_hint = self.species_hint\n return new_genome", "def _get_object_properties(self):\n super()._get_object_properties()\n add_prefix(root=self.root, prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)", "def link_protein(self, protein):\n if self.protein is None:\n self.protein = protein\n protein.link_gene(self)", "def create_relationship(self, source_node: Node, target_node: Node):\n if target_node.node_id not in source_node.out_nodes_ids:\n source_node.out_nodes_ids.add(target_node.node_id)\n self._collection.put_record(source_node.node_id, self._node_serializer.to_data(source_node))\n if source_node.node_id not in target_node.in_nodes_ids:\n target_node.in_nodes_ids.add(source_node.node_id)\n self._collection.put_record(target_node.node_id, self._node_serializer.to_data(target_node))", "def add_object(self, name, env, contentnode):\n props = PropertyDefinition(name, env.docname)\n props.gather(contentnode)\n self.data['objects'][props.key] = props\n self.data['all_objects'][props.key] = props\n return props", "def addPrintOrder(self, type):\n self.printOrder.append(type)", "def create_share_from_snapshot(self, context, share, snapshot,\n share_server=None):\n raise NotImplementedError()", "def set_share(self, total_people):\n self.paid = self._get_paid()\n self.share = round(self.paid/Decimal(total_people), 2)", "def list_printer_shares(\n self,\n orderby=None, # type: Optional[List[Union[str, \"models.Enum59\"]]]\n select=None, # type: Optional[List[Union[str, \"models.Enum60\"]]]\n expand=None, # type: Optional[List[Union[str, \"models.Enum61\"]]]\n **kwargs # type: Any\n ):\n # type: (...) 
-> Iterable[\"models.CollectionOfPrinterShare0\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.CollectionOfPrinterShare0\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list_printer_shares.metadata['url'] # type: ignore\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if self._config.top is not None:\n query_parameters['$top'] = self._serialize.query(\"self._config.top\", self._config.top, 'int', minimum=0)\n if self._config.skip is not None:\n query_parameters['$skip'] = self._serialize.query(\"self._config.skip\", self._config.skip, 'int', minimum=0)\n if self._config.search is not None:\n query_parameters['$search'] = self._serialize.query(\"self._config.search\", self._config.search, 'str')\n if self._config.filter is not None:\n query_parameters['$filter'] = self._serialize.query(\"self._config.filter\", self._config.filter, 'str')\n if self._config.count is not None:\n query_parameters['$count'] = self._serialize.query(\"self._config.count\", self._config.count, 'bool')\n if orderby is not None:\n query_parameters['$orderby'] = self._serialize.query(\"orderby\", orderby, '[str]', div=',')\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('CollectionOfPrinterShare0', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.odata_next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize(models.OdataError, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )", "def reportLink(self, citName, end1, end2):\n assert citName and end1 and end2\n osh = ObjectStateHolder(citName)\n osh.setAttribute(\"link_end1\", end1)\n osh.setAttribute(\"link_end2\", end2)\n return osh", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel" ]
[ "0.52861536", "0.4686309", "0.46358755", "0.4605899", "0.45465153", "0.44421157", "0.44043243", "0.43372124", "0.43215021", "0.43174458", "0.4293261", "0.42837772", "0.4281874", "0.4281099", "0.4248604", "0.4245299", "0.42270178", "0.42220512", "0.41977164", "0.41643992", "0.41492742", "0.41390085", "0.4138959", "0.41357166", "0.41255224", "0.4118252", "0.41129652", "0.41082877", "0.4099641", "0.4097964" ]
0.51828396
1
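The negative_scores array is aligned index-by-index with the negatives list, which makes it easy to keep only the hardest negatives when building training batches. A small sketch, assuming the scores are similarity values where higher means harder:

def hardest_negatives(negatives, negative_scores, top_k=5):
    # Pair each negative snippet with its score and keep the top_k highest-scoring ones.
    # Scores may be stored as strings in the dump, so cast before sorting.
    paired = sorted(zip(negative_scores, negatives), key=lambda p: float(p[0]), reverse=True)
    return [snippet for _, snippet in paired[:top_k]]

# For the row above this keeps the create_printer_shares and get_printer_shares variants,
# which score closest to the positive create_shares document.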
Create new navigation property to taskDefinitions for print.
def create_task_definitions(
    self,
    body,  # type: "models.MicrosoftGraphPrintTaskDefinition"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrintTaskDefinition"
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrintTaskDefinition"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.create_task_definitions.metadata['url']  # type: ignore

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(body, 'MicrosoftGraphPrintTaskDefinition')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrintTaskDefinition', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_task_definitions(\n self,\n print_task_definition_id, # type: str\n select=None, # type: Optional[List[Union[str, \"models.Enum101\"]]]\n expand=None, # type: Optional[List[Union[str, \"models.Enum102\"]]]\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphPrintTaskDefinition\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPrintTaskDefinition\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_task_definitions.metadata['url'] # type: ignore\n path_format_arguments = {\n 'printTaskDefinition-id': self._serialize.url(\"print_task_definition_id\", print_task_definition_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPrintTaskDefinition', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def task_definition_json(self) -> List[Dict]:\n if not self.tasks:\n return [self.tasks]\n else:\n return [task.get_define() for task in self.tasks.values()]", "def task_relation_json(self) -> List[Dict]:\n if not self.tasks:\n return [self.tasks]\n else:\n self._handle_root_relation()\n return [tr.get_define() for tr in self._task_relations]", "def generate_tasks(self, task):", "def definition_rst(self, definition, spec_path=None):\n spec_path = spec_path or self.models_path\n definitions = self.spec[spec_path]\n definition_property = definitions[definition]['properties'].copy()\n if not definition_property:\n self.write('{}', self.indent_depth)\n return\n self.indent_depth += 1\n definition_property = self.find_nested_models(definition_property, definitions)\n json_str = json.dumps(definition_property, indent=4)\n for line in json_str.split('\\n'):\n self.write(line, self.indent_depth)\n self.indent_depth -= 1", "def setUp(self):\n self.tasks = list(map(lambda t: t.task_id, FCDAGPROD.tasks))", "def set_task(self, task):\n if self.properties['Task'] == '':\n self.properties['Task'] = task.quest\n self.properties['Last Edit'] = int(self._map.now().strftime(\"%j\"))\n if self.properties['Category'] == 'Shadow':\n self.properties['Old_Category'] = task.reward_type\n self.properties['Old_Icon'] = task.icon\n else:\n self.properties['Category'] = task.reward_type\n self.properties['Icon'] = task.icon\n self.properties['Reward'] = task.reward\n else:\n raise TaskAlreadyAssigned(self, task)", "def list_task_definitions(\n self,\n 
orderby=None, # type: Optional[List[Union[str, \"models.Enum98\"]]]\n select=None, # type: Optional[List[Union[str, \"models.Enum99\"]]]\n expand=None, # type: Optional[List[Union[str, \"models.Enum100\"]]]\n **kwargs # type: Any\n ):\n # type: (...) -> Iterable[\"models.CollectionOfPrintTaskDefinition\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.CollectionOfPrintTaskDefinition\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list_task_definitions.metadata['url'] # type: ignore\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if self._config.top is not None:\n query_parameters['$top'] = self._serialize.query(\"self._config.top\", self._config.top, 'int', minimum=0)\n if self._config.skip is not None:\n query_parameters['$skip'] = self._serialize.query(\"self._config.skip\", self._config.skip, 'int', minimum=0)\n if self._config.search is not None:\n query_parameters['$search'] = self._serialize.query(\"self._config.search\", self._config.search, 'str')\n if self._config.filter is not None:\n query_parameters['$filter'] = self._serialize.query(\"self._config.filter\", self._config.filter, 'str')\n if self._config.count is not None:\n query_parameters['$count'] = self._serialize.query(\"self._config.count\", self._config.count, 'bool')\n if orderby is not None:\n query_parameters['$orderby'] = self._serialize.query(\"orderby\", orderby, '[str]', div=',')\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('CollectionOfPrintTaskDefinition', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.odata_next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize(models.OdataError, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )", "def task_definition(self):\n return self._task_definition", "def node_setup(self):\n self.nav_tasks = {} # id -> nav_task\n self.missions = {} # id -> mission\n self.mission_state = {} # mission_id -> current task index.\n self.trigger_nav_task_active = False\n self.trigger_local_path_published = False", "def serialize(self,writer: SerializationWriter) -> None:\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n 
super().serialize(writer)\n writer.write_object_value(\"createdBy\", self.created_by)\n writer.write_str_value(\"displayName\", self.display_name)\n writer.write_collection_of_object_values(\"tasks\", self.tasks)", "def dump(Project):\r\n try:\r\n logging.info(\"This project has %s Tasks\" %(str(Project.Tasks.Count)))\r\n for i in range(1,Project.Tasks.Count+1):\r\n task = Project.Tasks.Item(i)\r\n if (1 == task.OutlineLevel):\r\n space=\"\"\r\n elif (2 == task.OutlineLevel):\r\n space=\" |->\"\r\n elif (3 == task.OutlineLevel):\r\n space=\" |->\"\r\n elif (4 == task.OutlineLevel):\r\n space=\" |->\"\r\n try:\r\n print space + task.Name[:100].decode(\"utf-8\").encode(\"gbk\"),\r\n print task.OutlineLevel,\r\n print task.Text1.decode(\"utf-8\").encode(\"gbk\"), # ่‡ชๅฎšไน‰ๅˆ—1 \r\n print task.Text2.decode(\"utf-8\").encode(\"gbk\"), # ่‡ชๅฎšไน‰ๅˆ—2\r\n print task.ResourceNames.decode(\"utf-8\").encode(\"gbk\"),\r\n print task.Start,\r\n print task.Finish,\r\n print task.PercentWorkComplete,\r\n if task.ResourceNames!=None and str(task.ResourceNames) != '':\r\n print task.ResourceNames\r\n print '%'\r\n except:\r\n print 'Empty'\r\n return True\r\n except Exception, e:\r\n print \"Error:\", e\r\n return False", "def action_create_task(self):\n self.ensure_one()\n res = super(ProjectIssue, self).action_create_task()\n task_id = res.get('res_id', False)\n if task_id:\n task_obj = self.env['project.task'].browse(task_id)\n task_obj.description = self.description\n return res", "def update_task_definitions(\n self,\n print_task_definition_id, # type: str\n body, # type: \"models.MicrosoftGraphPrintTaskDefinition\"\n **kwargs # type: Any\n ):\n # type: (...) -> None\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.update_task_definitions.metadata['url'] # type: ignore\n path_format_arguments = {\n 'printTaskDefinition-id': self._serialize.url(\"print_task_definition_id\", print_task_definition_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(body, 'MicrosoftGraphPrintTaskDefinition')\n body_content_kwargs['content'] = body_content\n request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n if cls:\n return cls(pipeline_response, None, {})", "def setUp(self):\n self.tasks = list(map(lambda t: t.task_id, FCDAGDEV.tasks))", "def print_tasks(self):\n unformatted_rows = self.db_link.get_tasks()\n formatted_rows = 
self.display.format_row(unformatted_rows)\n self.display.print_task_list_formatted(formatted_rows)", "def write_task_declaration(self, description=None):\n if not description:\n description = ''\n\n self.write_config('NEWTASK', description[0:50])", "def test_get_workflow_definition_diagram(self):\n pass", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .app_identity import AppIdentity\n from .entity import Entity\n from .print_task import PrintTask\n\n from .app_identity import AppIdentity\n from .entity import Entity\n from .print_task import PrintTask\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"createdBy\": lambda n : setattr(self, 'created_by', n.get_object_value(AppIdentity)),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"tasks\": lambda n : setattr(self, 'tasks', n.get_collection_of_object_values(PrintTask)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def add_object(self, name, env, contentnode):\n props = PropertyDefinition(name, env.docname)\n props.gather(contentnode)\n self.data['objects'][props.key] = props\n self.data['all_objects'][props.key] = props\n return props", "def task_definition(self, task_definition):\n\n self._task_definition = task_definition", "def assign(self, task=None):\n if task is None:\n print(\"\\n*** Add Task ***\")\n name = input(\"Name of the task?: \")\n try:\n priority = int(input(\"Priority of the task (1-->5): \"))\n except ValueError:\n priority = 1\n steps = []\n while 1:\n step = input(\"Add step #\" + str(len(steps) + 1) + \" (Enter empty to finish): \")\n if step:\n steps.append(step)\n else:\n break\n self.tasks.append(Task(name, priority, steps))\n self.save()\n self.sort()\n print(\"*\"*16)\n else:\n self.tasks.append(task)\n self.save()\n self.sort()", "def saveTask(task):\n output = CommentedMap()\n output[\"changeId\"] = task.changeId\n if task.target:\n output[\"target\"] = task.target.key\n saveStatus(task, output)\n output[\"implementation\"] = saveConfigSpec(task.configSpec)\n if task._inputs: # only serialize resolved inputs\n output[\"inputs\"] = task.inputs.serializeResolved()\n changes = saveResourceChanges(task._resourceChanges)\n if changes:\n output[\"changes\"] = changes\n if task.messages:\n output[\"messages\"] = task.messages\n dependencies = [saveDependency(val) for val in task.dependencies]\n if dependencies:\n output[\"dependencies\"] = dependencies\n if task.result:\n if task.result.outputs:\n output[\"outputs\"] = saveResult(task.result.outputs)\n if task.result.result:\n output[\"result\"] = saveResult(task.result.result)\n else:\n output[\"result\"] = \"skipped\"\n\n return output", "def dump(self):\n details = super().dump()\n attrs = list(self.needs)\n attrs += [\"target\", \"satisfying\", \"job\", \"occupied\"]\n inv = [x.name for x in self.inventory]\n broken = [x.name for x in self.memories.broken_items]\n tasks = [f\"{x.name}: {x.target.name}\" for x in self.get_tasks()]\n return details + attrFormatter(attrs, self, override={\"broken\": broken, \"tasks\": tasks, \"inventory\": inv})", "def __str__(self): \n \n print 'Task leader: ', self.task_leader\n print 'Subject_ID: ', self.subject_ID\n print 'Number of tasks: ', len(self.tasks)\n for task in self.tasks:\n print ' ', task.name \n print ' ', task.task_number \n \n \n return ''", "def __repr__(self) -> str:\n for index, task in enumerate(self.steps):\n self.names.append(f\"{index+1}- {task[0]}\")\n tasks = 
\"\\n\".join(self.names)\n rpr = f\"\"\"---- Start ----\n{tasks}\n---- End ----\n \"\"\"\n return rpr", "def prepare_task(self, task):\n task['page_data'] = {'date': time.time()}\n task['page_result'] = None\n task['run_start_time'] = monotonic.monotonic()\n if task['current_step'] == 1:\n task['prefix'] = task['task_prefix']\n task['video_subdirectory'] = task['task_video_prefix']\n else:\n task['prefix'] = '{0}_{1:d}'.format(task['task_prefix'], task['current_step'])\n task['video_subdirectory'] = '{0}_{1:d}'.format(task['task_video_prefix'],\n task['current_step'])\n if task['video_subdirectory'] not in task['video_directories']:\n task['video_directories'].append(task['video_subdirectory'])\n if self.event_name is not None:\n task['step_name'] = self.event_name\n else:\n task['step_name'] = 'Step_{0:d}'.format(task['current_step'])", "def save_taskgraph(self, filename):\n\n if not TaskGraph.__SETUP_YAML_ONCE:\n TaskGraph.setup_yaml()\n\n # we want -id to be first in the resulting yaml file.\n tlist_od = self.export_task_speclist()\n with open(filename, 'w') as fh:\n ruamel.yaml.dump(tlist_od, fh, default_flow_style=False)", "def dump(self):\n fn = MOCK_FILENAME_FORMAT.format(step_name=self.name)\n super(MockLink, self).dump(fn)", "def save_task(self, task):\n if type(task) != Task:\n raise TypeError(\"Object type is not Task\")\n\n with open(self.path_to_task_file, 'a') as output:\n json.dump(task.__dict__, output)\n output.write('\\n')" ]
[ "0.5145057", "0.49115032", "0.48497966", "0.47197276", "0.46801388", "0.46516716", "0.46328866", "0.46023238", "0.45812285", "0.45470607", "0.4543493", "0.45232767", "0.45073426", "0.45030543", "0.44968712", "0.442651", "0.44144627", "0.4407998", "0.4406115", "0.43957642", "0.43745354", "0.43216527", "0.43191373", "0.4308816", "0.43081", "0.43002746", "0.42875296", "0.423359", "0.4164839", "0.41601253" ]
0.5671165
0
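Across the rows shown here, document_rank matches the number of negatives whose score exceeds document_score: in this row 0.5671165 tops every negative (max 0.5145057), giving rank 0, while in the first two rows exactly one negative scores higher, giving rank 1. That reading is inferred from the preview rather than documented anywhere; a small sketch of it:

def infer_document_rank(document_score, negative_scores):
    # Count how many negatives outscore the positive document; 0 means the positive
    # ranks first among the 31 candidates. Scores may be stored as strings, so cast.
    return sum(float(s) > float(document_score) for s in negative_scores)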
Get taskDefinitions from print.
def get_task_definitions(
    self,
    print_task_definition_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum101"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum102"]]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrintTaskDefinition"
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrintTaskDefinition"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Construct URL
    url = self.get_task_definitions.metadata['url']  # type: ignore
    path_format_arguments = {
        'printTaskDefinition-id': self._serialize.url("print_task_definition_id", print_task_definition_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrintTaskDefinition', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_ecs_task_definitions():\n tasks = ECS_MANAGER.list_ecs_task_definitions()\n if tasks:\n print(str_sep)\n print(\"Listing task definitions available in {}\".format(SESSION.region_name.upper()))\n print(\"{:50}{:20}\".format('Task', 'Version'))\n print(str_sep)\n\n for task in tasks['taskDefinitionArns']:\n if len(task) > 0:\n task_name, version = task.rsplit(\"/\", 1)[1].split(\":\")\n print(\"{:50}{:20}\".format(task_name, version))", "def task_definition_json(self) -> List[Dict]:\n if not self.tasks:\n return [self.tasks]\n else:\n return [task.get_define() for task in self.tasks.values()]", "def list_task_definitions(\n self,\n orderby=None, # type: Optional[List[Union[str, \"models.Enum98\"]]]\n select=None, # type: Optional[List[Union[str, \"models.Enum99\"]]]\n expand=None, # type: Optional[List[Union[str, \"models.Enum100\"]]]\n **kwargs # type: Any\n ):\n # type: (...) -> Iterable[\"models.CollectionOfPrintTaskDefinition\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.CollectionOfPrintTaskDefinition\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list_task_definitions.metadata['url'] # type: ignore\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if self._config.top is not None:\n query_parameters['$top'] = self._serialize.query(\"self._config.top\", self._config.top, 'int', minimum=0)\n if self._config.skip is not None:\n query_parameters['$skip'] = self._serialize.query(\"self._config.skip\", self._config.skip, 'int', minimum=0)\n if self._config.search is not None:\n query_parameters['$search'] = self._serialize.query(\"self._config.search\", self._config.search, 'str')\n if self._config.filter is not None:\n query_parameters['$filter'] = self._serialize.query(\"self._config.filter\", self._config.filter, 'str')\n if self._config.count is not None:\n query_parameters['$count'] = self._serialize.query(\"self._config.count\", self._config.count, 'bool')\n if orderby is not None:\n query_parameters['$orderby'] = self._serialize.query(\"orderby\", orderby, '[str]', div=',')\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('CollectionOfPrintTaskDefinition', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.odata_next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize(models.OdataError, response)\n map_error(status_code=response.status_code, 
response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )", "def get_dependent_tasks(self, task):\n return self._gdb_interface.get_dependent_tasks(task)", "def print_tasks(self):\n unformatted_rows = self.db_link.get_tasks()\n formatted_rows = self.display.format_row(unformatted_rows)\n self.display.print_task_list_formatted(formatted_rows)", "def run(self):\n with hp.a_temp_file() as fle:\n fle.write(dedent(\"\"\"\n ---\n environments: { dev: {account_id: \"123\"} }\n stacks: { app: {} }\n \"\"\").encode('utf-8'))\n fle.seek(0)\n collector = Collector()\n collector.prepare(fle.name, {'bespin': {'extra': \"\"}, \"command\": None, \"bash\": None})\n\n section = nodes.section()\n section['ids'].append(\"available-tasks\")\n\n title = nodes.title()\n title += nodes.Text(\"Default tasks\")\n section += title\n\n for name, task in sorted(collector.configuration['task_finder'].tasks.items(), key=lambda x: len(x[0])):\n\n lines = [name] + [\" {0}\".format(line.strip()) for line in task.description.split('\\n')]\n viewlist = ViewList()\n for line in lines:\n viewlist.append(line, name)\n self.state.nested_parse(viewlist, self.content_offset, section)\n\n return [section]", "def create_task_definitions(\n self,\n body, # type: \"models.MicrosoftGraphPrintTaskDefinition\"\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphPrintTaskDefinition\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPrintTaskDefinition\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_task_definitions.metadata['url'] # type: ignore\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(body, 'MicrosoftGraphPrintTaskDefinition')\n body_content_kwargs['content'] = body_content\n request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPrintTaskDefinition', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def get_tasks():\n outbound_tasks = []\n outbound_tasks_with_due_dates = []\n creds = None\n current_path = os.path.dirname(os.path.abspath(__file__))\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n picked_token_path = current_path + '/token.pickle'\n print(picked_token_path)\n if 
os.path.exists(picked_token_path):\n with open(picked_token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n current_path + '/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(picked_token_path, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('tasks', 'v1', credentials=creds,\n cache=DiscoveryCache()) # https://github.com/googleapis/google-api-python-client/issues/325\n\n # Call the Tasks API\n tasks = service.tasks().list(tasklist='@default').execute()\n\n for task in tasks['items']:\n reduced = task_reducer(task)\n if reduced is not None:\n if 'due' in reduced:\n outbound_tasks_with_due_dates.append(reduced)\n else:\n outbound_tasks.append(reduced)\n\n outbound_tasks_with_due_dates.sort(key=sort_by_due_date)\n outbound_tasks[:0] = outbound_tasks_with_due_dates\n\n return outbound_tasks", "def get_tasks(self) -> Dict[str, Any]:\n\n ret = {}\n for k, id in self.required_tasks.items():\n ret[k] = self.storage_socket.get_procedures(id=id)[\"data\"][0]\n\n return ret", "def get_tasks(self):\n return self.task_collection", "async def list_tasks():", "def test_find_workflow_definitions(self):\n pass", "def get_tasks(self):\n return self.tasks", "def get_tasks(self):\n return self.stn.get_tasks()", "def get_tasks():\n tasks = []\n example_dir = os.path.normpath(os.path.join(\n os.path.dirname(__file__), '../../openshift/ansiblegen/examples/')\n )\n yaml_names = os.listdir(example_dir)\n for yaml_name in yaml_names:\n _, api_version, resource = yaml_name.split('_', 2)\n resource = resource[0:-4]\n yaml_path = os.path.join(example_dir, yaml_name)\n\n with open(yaml_path, 'r') as f:\n data = yaml.load(f)\n\n tasks.append(((api_version, resource), data))\n return tasks", "def get_task_list(self):\n raise NotImplementedError()", "def task_definition(self):\n return self._task_definition", "def get_tasks(self, task_id=None):\n # Recover all config from OpenVAS\n if task_id:\n return self.make_xml_request('<get_tasks id=\"%s\"/>' % name, xml_result=True)\n else:\n return self.make_xml_request(\"<get_tasks />\", xml_result=True)", "def get(self):\n\n return task_service.get_tasks()", "def get_tasks(loop):\n tasks = asyncio.all_tasks(loop)\n return \"Tasks: \" + \", \".join(\n [f\"{task.get_name()}: {task.get_coro().__name__}\" for task in tasks]\n )", "def update_task_definitions(\n self,\n print_task_definition_id, # type: str\n body, # type: \"models.MicrosoftGraphPrintTaskDefinition\"\n **kwargs # type: Any\n ):\n # type: (...) 
-> None\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.update_task_definitions.metadata['url'] # type: ignore\n path_format_arguments = {\n 'printTaskDefinition-id': self._serialize.url(\"print_task_definition_id\", print_task_definition_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(body, 'MicrosoftGraphPrintTaskDefinition')\n body_content_kwargs['content'] = body_content\n request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n if cls:\n return cls(pipeline_response, None, {})", "def load_tasks(self):\n\n def _load_tasks(filename):\n filename = os.path.join(self.config['data']['location'], filename)\n filename = os.path.expanduser(filename)\n with open(filename, 'r') as f:\n lines = f.readlines()\n\n return list(map(taskw.utils.decode_task, lines))\n\n return dict(\n (db, _load_tasks('%s.data' % db))\n for db in ['completed', 'pending']\n )", "def get_define_step_data(pool):\n\n LOG.info('Searching Define Steps')\n\n process = pool.parent_process\n\n while process.type.name not in MASTER_STEPS_UDFS['reagent_labels'][\n 'steps']['define']:\n art = process.all_inputs()[0]\n process = art.parent_process\n\n define_step_outputs = {}\n flowcell_target_reads = 0\n\n for art in process.all_outputs():\n if art.type != 'Analyte':\n continue\n\n index_target_reads = _get_target_reads(art)\n if index_target_reads:\n flowcell_target_reads += index_target_reads\n\n if len(art.samples) != 1: # ignore pools\n continue\n sample_id = art.samples[0].id\n define_step_outputs[sample_id] = index_target_reads\n\n LOG.info('Done')\n\n return define_step_outputs, flowcell_target_reads, process", "def get_tasks(self):\n return self.tasks.all()", "def tasks():", "def get_feed_entries_task():\n get_feed_entries()\n logger.info(\"Entries for Feed\")", "def normalTasks(self):\n return self._tasks", "def generate_tasks(self, task):", "def _get_task_queues():\n\n return _thread_data.__dict__.setdefault('task_queues', defaultdict(list))", "def _extract_definitions(\n xml_path: pathlib.Path,\n) -> Tuple[Optional[rasaeco.model.Definitions], List[str]]:\n try:\n text = xml_path.read_text(encoding=\"utf-8\")\n except Exception as exception:\n return None, [\n f\"Failed to read the intermediate representation \"\n f\"of the scenario {xml_path}: {exception}\"\n ]\n\n root = ET.fromstring(text)\n\n def collect_set_of_named_references(tag: str) -> Set[str]:\n 
\"\"\"Collect the set of references for the given specification tag.\"\"\"\n result = set() # type: Set[str]\n for element in root.iter(tag):\n name = element.attrib[\"name\"]\n result.add(name)\n return result\n\n return (\n rasaeco.model.Definitions(\n model_set=collect_set_of_named_references(tag=\"model\"),\n def_set=collect_set_of_named_references(tag=\"def\"),\n test_set=collect_set_of_named_references(tag=\"test\"),\n acceptance_set=collect_set_of_named_references(tag=\"acceptance\"),\n ),\n [],\n )" ]
[ "0.6234582", "0.5776305", "0.57495385", "0.53581464", "0.5335625", "0.524848", "0.52329326", "0.52250206", "0.50932235", "0.5088548", "0.5087793", "0.5024729", "0.50170135", "0.49958098", "0.498683", "0.49760514", "0.49682623", "0.49366823", "0.48528007", "0.48422137", "0.47809327", "0.47795525", "0.47534665", "0.47311825", "0.47250307", "0.47183558", "0.4709602", "0.4707774", "0.47000745", "0.46955892" ]
0.6272089
0
Update the navigation property taskDefinitions in print. Update the navigation property taskDefinitions in print.
def update_task_definitions( self, print_task_definition_id, # type: str body, # type: "models.MicrosoftGraphPrintTaskDefinition" **kwargs # type: Any ): # type: (...) -> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update_task_definitions.metadata['url'] # type: ignore path_format_arguments = { 'printTaskDefinition-id': self._serialize.url("print_task_definition_id", print_task_definition_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(body, 'MicrosoftGraphPrintTaskDefinition') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.OdataError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_tasks(self, tasks):\n\n self._print('Updating tasks {} with {} ...'.format(self._tasks, tasks))\n\n self._tasks.update(tasks)", "def _update_all_tasks(self) -> None:\n for task in self.tasks:\n task.update()", "def print_tasks(self):\n unformatted_rows = self.db_link.get_tasks()\n formatted_rows = self.display.format_row(unformatted_rows)\n self.display.print_task_list_formatted(formatted_rows)", "def get_task_definitions(\n self,\n print_task_definition_id, # type: str\n select=None, # type: Optional[List[Union[str, \"models.Enum101\"]]]\n expand=None, # type: Optional[List[Union[str, \"models.Enum102\"]]]\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphPrintTaskDefinition\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPrintTaskDefinition\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_task_definitions.metadata['url'] # type: ignore\n path_format_arguments = {\n 'printTaskDefinition-id': self._serialize.url(\"print_task_definition_id\", print_task_definition_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPrintTaskDefinition', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def tasks(self, tasks):\n\n self._tasks = tasks", "def setUp(self):\n self.tasks = list(map(lambda t: t.task_id, FCDAGPROD.tasks))", "def updateScptRefs(self):\n for scpt in self.refs_scpt.keys():\n self.refs_scpt[scpt] = scpt.getRef()\n self.scptRefs = set(self.refs_scpt.values())", "def create_task_definitions(\n self,\n body, # type: \"models.MicrosoftGraphPrintTaskDefinition\"\n **kwargs # type: Any\n ):\n # type: (...) 
-> \"models.MicrosoftGraphPrintTaskDefinition\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphPrintTaskDefinition\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n content_type = kwargs.pop(\"content_type\", \"application/json\")\n accept = \"application/json\"\n\n # Construct URL\n url = self.create_task_definitions.metadata['url'] # type: ignore\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Content-Type'] = self._serialize.header(\"content_type\", content_type, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n body_content_kwargs = {} # type: Dict[str, Any]\n body_content = self._serialize.body(body, 'MicrosoftGraphPrintTaskDefinition')\n body_content_kwargs['content'] = body_content\n request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphPrintTaskDefinition', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def setUp(self):\n self.tasks = list(map(lambda t: t.task_id, FCDAGDEV.tasks))", "def post_exec(self):\n \n for task in self.tasks.values():\n for elem in task.objects.values():\n if elem.isdelete:\n self.uow._remove_deleted(elem.obj)\n else:\n self.uow.register_clean(elem.obj)", "def set_task_forecast(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 3)\n\n # Refresh the table\n self.write_tasks_table()", "def run(self):\n modify_tasks = filter(self._task_filter, acm.FAelTask.Select(''))\n print([task.Name() for task in modify_tasks])\n for task in modify_tasks:\n #new_task = task.Clone()\n self._update(task)\n try:\n task.Commit()\n except:\n print('Skipping: Task already exists')", "def set_task_done(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 0)\n\n # Refresh the table\n self.write_tasks_table()", "def tasks_update(cls, app):\r\n\r\n try:\r\n tasks_info = {}\r\n tasks = app.db.session.query(SpiderTask).all()\r\n except Exception as err:\r\n print(err)\r\n else:\r\n for task in tasks:\r\n tasks_info[task.id] = task.to_json()\r\n return tasks_info\r\n finally:\r\n app.db.session.close()", "def definition_rst(self, definition, spec_path=None):\n spec_path = spec_path or self.models_path\n definitions = self.spec[spec_path]\n definition_property = definitions[definition]['properties'].copy()\n if not definition_property:\n self.write('{}', self.indent_depth)\n return\n self.indent_depth += 1\n definition_property = self.find_nested_models(definition_property, definitions)\n json_str = json.dumps(definition_property, indent=4)\n for line in json_str.split('\\n'):\n self.write(line, self.indent_depth)\n self.indent_depth -= 1", "def task_definition(self, task_definition):\n\n 
self._task_definition = task_definition", "def set_task_order(self, order):\n for task in self.tasks:\n task.order = order", "def refresh_tree(self):\n self.process_tree = ProcessNode.objects.get(id=self.process_tree_id)", "def update_all_tasks():\n # TODO: Schedule this function after starting a task (replace if with while loop with sleep inside)\n active_dict = dict()\n\n # Use list to avoid \"RuntimeError: dictionary changed size during iteration\"\n for pid in list(app.config['OPS_PIPE_PARENT'].keys()):\n if update_task(pid):\n task = Task.query.filter_by(id=pid).first()\n active_dict[pid] = task.status.name\n\n return jsonify(active_dict)", "def _clear_tasks(self):\n listOfTasks = self.model.find(xmlns + 'ListOfTasks') \n assert listOfTasks != None\n \n for task in listOfTasks:\n task.attrib['scheduled'] = 'false'", "def updateProperties(self):\n self.manage_changeProperties(title = self.getTitle(),\n description = self.getDescription(),\n basepath = self.getPath())", "def set_task(self, task):\n if self.properties['Task'] == '':\n self.properties['Task'] = task.quest\n self.properties['Last Edit'] = int(self._map.now().strftime(\"%j\"))\n if self.properties['Category'] == 'Shadow':\n self.properties['Old_Category'] = task.reward_type\n self.properties['Old_Icon'] = task.icon\n else:\n self.properties['Category'] = task.reward_type\n self.properties['Icon'] = task.icon\n self.properties['Reward'] = task.reward\n else:\n raise TaskAlreadyAssigned(self, task)", "def node_setup(self):\n self.nav_tasks = {} # id -> nav_task\n self.missions = {} # id -> mission\n self.mission_state = {} # mission_id -> current task index.\n self.trigger_nav_task_active = False\n self.trigger_local_path_published = False", "def print_models(unprinted_design, completed_design):\n \n while unprinted_design:\n current_design = unprinted_design.pop()\n print(f\"Printing model: {current_design}\")\n completed_design.append(current_design)", "def update_save_trained_on_json(self, task, finished=True):\n # -- Add the provided task at the end of the list, sort the list and dump it as pkl file -- #\n if finished: # Task finished with training\n self.already_trained_on[str(self.fold)]['finished_training_on'].append(task)\n # -- Remove the task from start_training_on -- #\n self.already_trained_on[str(self.fold)]['start_training_on'] = None \n # -- Update the prev_trainer -- #\n self.already_trained_on[str(self.fold)]['prev_trainer'].append(self.trainer_class_name)\n else: # Task started to train\n # -- Add the current task -- #\n self.already_trained_on[str(self.fold)]['start_training_on'] = task\n # -- Update the prev_trainer -- #\n if self.trainer is not None: # This is always the case when a pre trained network is used as initialization\n self.already_trained_on[str(self.fold)]['prev_trainer'][-1:] = [self.trainer.__class__.__name__]\n else: # When using directly the extension with no pre trained network or after first task train when self.trainer is set to None\n self.already_trained_on[str(self.fold)]['prev_trainer'][-1:] = [self.trainer_class_name]\n # -- Update the used_identifier -- #\n self.already_trained_on[str(self.fold)]['used_identifier'] = self.identifier\n\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))", "def dump(Project):\r\n try:\r\n logging.info(\"This project has %s Tasks\" %(str(Project.Tasks.Count)))\r\n for i in range(1,Project.Tasks.Count+1):\r\n task = Project.Tasks.Item(i)\r\n if (1 
== task.OutlineLevel):\r\n space=\"\"\r\n elif (2 == task.OutlineLevel):\r\n space=\" |->\"\r\n elif (3 == task.OutlineLevel):\r\n space=\" |->\"\r\n elif (4 == task.OutlineLevel):\r\n space=\" |->\"\r\n try:\r\n print space + task.Name[:100].decode(\"utf-8\").encode(\"gbk\"),\r\n print task.OutlineLevel,\r\n print task.Text1.decode(\"utf-8\").encode(\"gbk\"), # ่‡ชๅฎšไน‰ๅˆ—1 \r\n print task.Text2.decode(\"utf-8\").encode(\"gbk\"), # ่‡ชๅฎšไน‰ๅˆ—2\r\n print task.ResourceNames.decode(\"utf-8\").encode(\"gbk\"),\r\n print task.Start,\r\n print task.Finish,\r\n print task.PercentWorkComplete,\r\n if task.ResourceNames!=None and str(task.ResourceNames) != '':\r\n print task.ResourceNames\r\n print '%'\r\n except:\r\n print 'Empty'\r\n return True\r\n except Exception, e:\r\n print \"Error:\", e\r\n return False", "def _task_update(context, task_ref, values, session=None):\n if 'deleted' not in values:\n values[\"deleted\"] = False\n task_ref.update(values)\n task_ref.save(session=session)\n return task_ref", "def tasks(self, tasks: List[TaskStatusDefinition]):\n\n self._tasks = tasks", "def list_task_definitions(\n self,\n orderby=None, # type: Optional[List[Union[str, \"models.Enum98\"]]]\n select=None, # type: Optional[List[Union[str, \"models.Enum99\"]]]\n expand=None, # type: Optional[List[Union[str, \"models.Enum100\"]]]\n **kwargs # type: Any\n ):\n # type: (...) -> Iterable[\"models.CollectionOfPrintTaskDefinition\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.CollectionOfPrintTaskDefinition\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list_task_definitions.metadata['url'] # type: ignore\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if self._config.top is not None:\n query_parameters['$top'] = self._serialize.query(\"self._config.top\", self._config.top, 'int', minimum=0)\n if self._config.skip is not None:\n query_parameters['$skip'] = self._serialize.query(\"self._config.skip\", self._config.skip, 'int', minimum=0)\n if self._config.search is not None:\n query_parameters['$search'] = self._serialize.query(\"self._config.search\", self._config.search, 'str')\n if self._config.filter is not None:\n query_parameters['$filter'] = self._serialize.query(\"self._config.filter\", self._config.filter, 'str')\n if self._config.count is not None:\n query_parameters['$count'] = self._serialize.query(\"self._config.count\", self._config.count, 'bool')\n if orderby is not None:\n query_parameters['$orderby'] = self._serialize.query(\"orderby\", orderby, '[str]', div=',')\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('CollectionOfPrintTaskDefinition', pipeline_response)\n list_of_elem = 
deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.odata_next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize(models.OdataError, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )", "def snapshot(self):\n for ref, (m, _) in self._models.items():\n m.snapshot = not m.snapshot\n (self, root, doc, comm) = state._views[ref]\n if comm and 'embedded' not in root.tags:\n push(doc, comm)" ]
[ "0.55822045", "0.5341378", "0.506771", "0.4881783", "0.48147187", "0.47885615", "0.47800353", "0.47464103", "0.4661839", "0.4654326", "0.4625851", "0.4544588", "0.4529572", "0.4522049", "0.44964138", "0.4496017", "0.44956166", "0.4487072", "0.44400227", "0.44370252", "0.44046268", "0.43985268", "0.437052", "0.43636376", "0.43582332", "0.43488255", "0.43453535", "0.43424052", "0.43277764", "0.4320633" ]
0.55890465
0
Dump data to YAML, which supports OrderedDict.
def _yaml_dump(data): return yaml.dump(data, Dumper=_OrderedDumper, allow_unicode=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def yaml_dump(\n data, Dumper=None, allow_unicode: bool = True, **kwargs\n): # pylint: disable=invalid-name\n if Dumper is None:\n Dumper = OrderedDumper\n return yaml.dump(\n data, Dumper=Dumper, allow_unicode=allow_unicode, **kwargs\n )", "def dump_yaml(self, data, output):\n yaml.indent(mapping=MAPPING, sequence=SEQUENCE, offset=OFFSET)\n yaml.dump(data, output)", "def ordered_dump(data, stream=None, Dumper=yaml_Dumper, **kwds):\n class OrderedDumper(Dumper):\n # fix tag indentations\n def increase_indent(self, flow=False, indentless=False):\n return super(OrderedDumper, self).increase_indent(flow, False)\n\n def _dict_representer(dumper, data):\n return dumper.represent_mapping(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n data.items())\n\n def _str_representer(dumper, data):\n if '\\n' in data:\n return dumper.represent_scalar(u'tag:yaml.org,2002:str', data,\n style='|')\n else:\n return dumper.represent_str(data)\n\n OrderedDumper.add_representer(OrderedDict, _dict_representer)\n OrderedDumper.add_representer(str, _str_representer)\n\n # Fix formatting by adding a space in between tasks\n unformatted_yaml = yaml.dump(data, None, OrderedDumper, **kwds)\n formatted_yaml = re.sub(r\"[\\n]+([\\s]*)- name\", r\"\\n\\n\\1- name\", unformatted_yaml)\n\n # Fix CDumper issue where it adds yaml document ending '...'\n # in some templated ansible remediations\n formatted_yaml = re.sub(r\"\\n\\s*\\.\\.\\.\\s*\", r\"\\n\", formatted_yaml)\n\n if stream is not None:\n return stream.write(formatted_yaml)\n else:\n return formatted_yaml", "def DumpYaml(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import yaml\n \n text = yaml.safe_dump(data)\n \n return text", "def pdump(data):\n return yaml.dump(yaml.load(json.dumps(data)))", "def to_yaml(cls, dumper, data):\n m = {k: getattr(data, k) for k in cls._yaml_keys}\n return dumper.represent_mapping(cls.yaml_tag, m)", "def to_yaml(self, data, options=None):\r\n options = options or {}\r\n\r\n if yaml is None:\r\n raise UnsupportedSerializationFormat(\"Usage of the YAML aspects requires yaml.\")\r\n\r\n return yaml.dump(self.to_simple(data, options))", "def dump(data):\n stream = StringIO()\n yaml.dump(data, stream, Dumper=yaml.RoundTripDumper)\n return stream.getvalue().rstrip()", "def to_yaml(cls, dumper, data):\n yamlData = data._yamlData\n return dumper.represent_mapping(data.yaml_tag, yamlData)", "def dict_to_yaml(dict_data):\n\n return yaml.dump(dict_data, default_flow_style=False)", "def generate(self, data) -> str:\n yaml_dump_params: Dict[Any, Any] = {'default_flow_style': None, 'sort_keys': False}\n if isinstance(data, list):\n return yaml.dump_all(data, Dumper=YamlDumper(self.kg), **yaml_dump_params)\n return yaml.dump(data, Dumper=YamlDumper(self.kg), **yaml_dump_params)", "def yaml_dump(dict_to_dump):\n FlattenAliasDumper.add_representer(OrderedDict, _dict_representer)\n return yaml.dump(\n dict_to_dump,\n default_flow_style=False,\n Dumper=FlattenAliasDumper,\n )", "def dump_yaml(file_path, data):\n\n with open(os.path.abspath(os.path.expanduser(file_path)), \"w\") as f:\n yaml.safe_dump(data, f, default_flow_style=False)\n\n return file_path", "def _save_data_yaml(self, data, pathname): \n pathname = self._yaml_extension(pathname)\n with open(pathname, \"w\") as outfile:\n yaml.dump(data, outfile, default_flow_style=False)", "def _save_data_yaml(self, data, pathname): \n pathname = self._yaml_extension(pathname)\n with open(pathname, \"w\") as outfile:\n 
yaml.dump(data, outfile, default_flow_style=False)", "def dump(filename: Path) -> None:\n import yaml\n\n dumped_str = yaml.dump_all(\n [data_dict],\n Dumper=RegressionYamlDumper,\n default_flow_style=False,\n allow_unicode=True,\n indent=2,\n encoding=\"utf-8\",\n )\n with filename.open(\"wb\") as f:\n f.write(dumped_str)", "def safe_dump(data, stream=None, **kw):\n return yaml.dump_all([data], stream, Dumper=OSafeDumper, **kw)", "def conversion_yaml():\r\n data ={\r\n 'name': 'george',\r\n 'age': 16,\r\n 'friends':\r\n [{'name': 'marry', 'age': 16}, {'name': 'jack', 'age': 17}]\r\n }\r\n yaml_data = yaml.dump(data)\r\n dirname = os.path.dirname(os.path.dirname(__file__))\r\n # data_dir = os.path.join(dirname, 'data')\r\n data_dir = '/'.join([dirname, 'data'])\r\n file_path = data_dir + '/' + 'test.yaml'\r\n with open(file_path, 'w') as fw:\r\n fw.write(yaml_data)\r\n print(yaml_data)", "def to_content(cls, data: Mapping) -> str:\n cls._check_yaml()\n s = yaml.safe_dump(data, default_flow_style=False)\n s = '---\\n' + s\n return s", "def to_yaml(self, **kwargs):\n if not self._is_graph_network:\n raise NotImplementedError\n\n if yaml is None:\n raise ImportError('Requires yaml module installed.')\n return yaml.dump(self._updated_config(), **kwargs)", "def DumpJson(data):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import json\n \n text = yaml.dumps(data)\n \n return text", "def write_yaml(fname: str, data: dict) -> None:\n try:\n with open(fname, 'w') as f:\n yaml.safe_dump(data, f, default_flow_style=False)\n except IOError as e:\n print(f\"Cannot write YAML file {fname}\")\n print(f\"IOError: {e}\")", "def _dict_to_single_line_yaml(self, *, data):\n result = yaml.dump(data, default_flow_style=True)\n result = result.replace(\"\\n\", \" \").strip()\n result = re.sub(r\" *\", \" \", result)\n return result", "def save_yaml(data, write_path: PathLike) -> None:\n with open(write_path, \"w\") as write_file:\n yaml.dump(data, write_file, default_flow_style=False)", "def to_yaml(cls, dumper, data):\n\t\tdict_rep = {'location':data._location, 'startFrame':data._startFrame,\n\t\t\t\t\t'endFrame':data._endFrame, 'camera':data._camera}\n\n\t\tprint(dict_rep)\n\n\t\tnode = dumper.represent_mapping(cls.yaml_tag, dict_rep)\n\t\treturn node", "def write(self, file=sys.stdout):\n d = self.to_dict()\n if d:\n yaml.dump([d], file, default_flow_style=False)", "def _yaml_ordering_support():\n _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG\n\n def dict_representer(dumper, data):\n return dumper.represent_dict(data.iteritems())\n\n def dict_constructor(loader, node):\n return OrderedDict(loader.construct_pairs(node))\n\n yaml.add_representer(OrderedDict, dict_representer)\n yaml.add_constructor(_mapping_tag, dict_constructor)", "def dumps(data):\n def _dump(d, indent=0):\n for key, value in six.iteritems(d):\n if isinstance(value, dict):\n yield '%s%s {\\n' % (' ' * indent, _escape(key))\n for subs in _dump(value, indent + 2):\n yield subs\n yield '%s}\\n' % (' ' * indent)\n elif isinstance(value, list):\n yield '%s%s = {\\n' % (' ' * indent, _escape(key))\n for subvalue in value:\n if type(subvalue) == dict:\n yield '%s{\\n' % (' ' * (indent + 2))\n for subs in _dump(subvalue, indent + 4):\n yield subs\n yield '%s}\\n' % (' ' * (indent + 2))\n else:\n yield '%s%s\\n' % (' ' * (indent + 2),\n _escape(subvalue))\n\n yield '%s}\\n' % (' ' * indent)\n elif type(value) == bool:\n yield '%s%s = %s\\n' % (' ' * indent, 
_escape(key),\n _escape(str(value).lower()))\n else:\n yield '%s%s = %s\\n' % (' ' * indent, _escape(key),\n _escape(str(value)))\n return ''.join(list(_dump(data)))", "def dump(arg):\n return yaml.safe_dump_all(\n arg,\n allow_unicode=True,\n default_flow_style=False,\n )", "def write(self):\n self.f.write(yaml.safe_dump(self.data, default_flow_style=False, indent=4))" ]
[ "0.7798169", "0.76470315", "0.7642571", "0.74547535", "0.7164397", "0.7115349", "0.7076506", "0.69675344", "0.69375503", "0.68800414", "0.6765619", "0.6618539", "0.6607386", "0.6579059", "0.6579059", "0.6558913", "0.6419908", "0.6392849", "0.63374376", "0.63331854", "0.632608", "0.63215584", "0.6210614", "0.6192798", "0.6168629", "0.61616755", "0.6156255", "0.6138193", "0.61254424", "0.61176246" ]
0.8192204
0
Return True if an attribute has a valid type, otherwise False. This will apply recursively to an attribute's attributes.
def is_valid_type(self, attr: Optional[str] = None) -> bool: try: self.validate_type(attr) except TypeError: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid(self, attribute: Attribute) -> bool:\n return self.get_data_type() == attribute.type", "def has_attribute_type(self, attribute: str, typ: Optional[Type]) -> bool:\n if not self.has_attribute(attribute):\n return False\n\n attr_node = self.get_attribute(attribute).yaml_node\n\n if typ in scalar_type_to_tag:\n tag = scalar_type_to_tag[typ]\n return cast(str, attr_node.tag) == tag\n elif typ == list:\n return isinstance(attr_node, yaml.SequenceNode)\n elif typ == dict:\n return isinstance(attr_node, yaml.MappingNode)\n\n raise ValueError('Invalid argument for typ attribute')", "def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False", "def is_valid_attribute(self, attr):\n try:\n self.validate_attribute(attr)\n return True\n except etal.LabelsSchemaError:\n return False", "def is_valid_attribute(self, attr):\n return self.is_valid(attr)", "def validate_type(self: BaseType, attr: Optional[str] = None) -> BaseType:\n if attr is None:\n for attribute in self._attributes:\n self._validate_attr_type(attribute)\n else:\n self._validate_attr_type(attr)\n return self", "def validate_type(self, attr):\n if not isinstance(attr, self._attr_cls):\n raise AttributeSchemaError(\n \"Expected attribute '%s' to have type '%s'; found '%s'\"\n % (attr.name, self.type, etau.get_class_name(attr))\n )", "def is_valid(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate(attr)\n except (TypeError, ValueError):\n return False\n return True", "def __datatype_check(self, record_attribute, attribute_schema):\n if 'INT' in attribute_schema[TYPE_KEY].upper():\n if record_attribute.isdigit():\n return True\n elif attribute_schema[TYPE_KEY].upper() in DECIMAL_TYPES:\n if record_attribute.isdecimal():\n return True\n elif 'CHAR' in attribute_schema[TYPE_KEY].upper() \\\n or 'TEXT' in attribute_schema[TYPE_KEY].upper():\n if type(record_attribute) is str:\n return True\n else:\n IS_VALID_FILE = False\n return False", "def _validability(self, ability):\n return isinstance(ability, AttributeAbility)", "def is_attribute(self):\r\n return conf.lib.clang_isAttribute(self)", "def check(self, description: Description) -> bool:\n # if the name of the attribute is not present, return false.\n name = self.attribute_name\n if name not in description.values:\n return False\n\n # if the type of the value is different from the type of the attribute, return false.\n value = description.values[name]\n if type(self.constraint_type.value) in {list, tuple, set} and not isinstance(\n value, type(next(iter(self.constraint_type.value)))\n ):\n return False\n if type(self.constraint_type.value) not in {\n list,\n tuple,\n set,\n } and not isinstance(value, type(self.constraint_type.value)):\n return False\n\n # dispatch the check to the right implementation for the concrete constraint type.\n return self.constraint_type.check(value)", "def isAttribute(self, p_int): # real signature unknown; restored from __doc__\n return False", "def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)", "def is_valid_attributes(self, attrs):\n try:\n self.validate_attributes(attrs)\n return True\n except etal.LabelsSchemaError:\n return False", "def validate(self: BaseType, attr: Optional[str] = None) -> BaseType:\n if attr is None:\n for attribute in self._attributes:\n self._validate(attribute)\n else:\n self._validate(attr)\n return self", "def _validability(self, ability):\n return (isinstance(ability, AttributeAbility) or\n 
isinstance(ability, WeaponAbility))", "def attr_is_not_inherited(type_, attr):\n\n bases = obj.__mro__[1:]\n\n return getattr(obj, attr) not in (\n getattr(base, attr, None) for base in bases)", "def validate_attribute(self, attr):\n self.validate(attr)", "def validate_attribute(self, attr):\n self.validate_attribute_name(attr.name)\n self.schema[attr.name].validate_attribute(attr)", "def validate(self, attr=None, notattr=None):\n\n if attr is not None:\n specified_attrs = {attr: self.attributes[attr]}\n else:\n specified_attrs = self.attributes\n\n for attr, attr_structure in specified_attrs.items():\n if notattr is not None and attr is notattr:\n continue\n\n attrval = getattr(self, attr)\n if attrval is None or attrval == {}:\n continue\n\n attr_schema = attr_structure['schema']\n validatedattrval = attr_schema.validate(attrval)\n setattr(self, attr, validatedattrval)", "def check_global_attr_type(ds, attr, attr_type):\n if attr not in ds.ncattrs():\n return 0\n\n global_attr = getattr(ds, attr)\n\n if attr_type == 'int':\n attr_type_class = int\n elif attr_type == 'float':\n attr_type_class = float\n elif attr_type == 'str':\n attr_type_class = str\n else:\n return 1\n\n if len(str(global_attr)) == 0:\n return 2\n\n if np.dtype(type(global_attr)) != np.dtype(attr_type_class):\n return 3\n\n return 4", "def applyStringTypes(self):\n ok = False\n try:\n for ii, atName in enumerate(self.getAttributeList()):\n _, isMandatory = self.__getAttributeInfo(atName)\n dataType = \"string\"\n for row in self.data:\n if row[ii] is None or row[ii] in [\".\", \"?\"]:\n row[ii] = \".\" if isMandatory else \"?\"\n else:\n row[ii] = self.__castD[dataType](row[ii])\n #\n self.__attributeTypeD[atName] = dataType\n ok = True\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n if self._raiseExceptions:\n raise e\n return ok", "def has_attribute(*attrs):\n\n @meta\n def check(cls):\n return all(hasattr(cls, a) for a in attrs)\n\n return check", "def _is_encodable_attribute(name):\n if name == '_meta':\n return True\n elif name.startswith(\"_\") or name.startswith(\"__\") or name == \"ext\":\n return False\n else:\n return True", "def hasAttribute(self, attrib):\n return self._dqa(attrib) in self.attributes", "def valid_rule_type(self, rule_to_validate_type, tag_index):\r\n self.required_fields[CATEGORY_TYPE].attributefound()\r\n self.required_fields_index[self.required_fields[CATEGORY_TYPE].position].increment_count()\r\n\r\n metadata = rule_to_validate_type[METADATA]\r\n rule_category_key_to_check = list(metadata[tag_index].keys())[0]\r\n rule_category_value_to_check = list(metadata[tag_index].values())[0]\r\n if re.fullmatch(CATEGORY_TYPE_REGEX, rule_category_value_to_check):\r\n self.required_fields[CATEGORY_TYPE].attributevalid()\r\n elif re.fullmatch(CATEGORY_TYPE_REGEX, str(rule_category_value_to_check).upper()):\r\n rule_category_value_to_check = str(rule_category_value_to_check).upper()\r\n metadata[tag_index][rule_category_key_to_check] = rule_category_value_to_check\r\n self.required_fields[CATEGORY_TYPE].attributevalid()\r\n else:\r\n self.required_fields[CATEGORY_TYPE].attributeinvalid()\r\n\r\n return self.required_fields[CATEGORY_TYPE].valid", "def _is_valid(self, value):\n\n # Entities have an istypeof method that can perform more sophisticated\n # type checking.\n if hasattr(self._type, \"istypeof\"):\n return self._type.istypeof(value)\n else:\n return isinstance(value, self._type)", "def hasRequiredAttributes(self):\n return 
_libsbml.SpeciesTypeInstance_hasRequiredAttributes(self)", "def hasRequiredAttributes(self):\n return _libsbml.SpeciesType_hasRequiredAttributes(self)" ]
[ "0.78103995", "0.72813314", "0.71222246", "0.71222246", "0.70933825", "0.70055455", "0.693027", "0.6914144", "0.6783055", "0.67419726", "0.6683708", "0.6474715", "0.6436015", "0.64046496", "0.62558943", "0.62339306", "0.6220854", "0.61872816", "0.61867917", "0.6105975", "0.6088275", "0.60564834", "0.60498863", "0.6024017", "0.5907013", "0.58811027", "0.58507705", "0.5847058", "0.58455944", "0.5842878" ]
0.7928803
0
Adjust the timing of timestamped objects. This will apply recursively to an attribute's attributes.
def adjust_time( self: BaseType, func: Callable[[int], int], attr: Optional[str] = None ) -> BaseType: if attr is None: for attribute in self._attributes: self._adjust_time(func, attribute) else: self._adjust_time(func, attr) return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def measure_time(target, attribute, func, *args, **kwargs):\n attributes = attribute.split(\".\")\n attribute = attributes.pop()\n target = reduce(getattr, attributes, target)\n start = timer()\n try:\n return func(*args, **kwargs)\n finally:\n value = getattr(target, attribute)\n setattr(target, attribute, value + timer() - start)", "def svn_info_t_prop_time_set(svn_info_t_self, apr_time_t_prop_time): # real signature unknown; restored from __doc__\n pass", "def update_time(self):\n pass # Do nothing", "def _set_comment_timestamps(document, new_timestamps):\n for (el, ts) in zip(_get_comments(document), new_timestamps):\n el.set(date_attrib, ts.strftime(date_format))", "def timestamp_one(self, path):\n stat = path.stat()\n sde = self.manager.source_date_epoch\n if stat.st_mtime > sde:\n cls = self.__class__.__name__\n self.log.debug(\n f\"[lite][base] <{cls}> set time to source_date_epoch {sde} on {path}\"\n )\n os.utime(path, (sde, sde))\n return\n return", "def timestamp_one(self, path):\n stat = path.stat()\n sde = self.manager.source_date_epoch\n if stat.st_mtime > sde:\n cls = self.__class__.__name__\n self.log.debug(\n f\"[lite][base] <{cls}> set time to source_date_epoch {sde} on {path}\"\n )\n os.utime(path, (sde, sde))\n return\n return", "def __setRewriteTimestamp(self, expr):\n self.rewriteTimestamp = expr", "def set_attr(self):\n\n # Create a new array\n self.fileh.create_array('/', 'array', self.a1)\n for i in range(self.nobjects):\n # Set an attribute\n setattr(self.fileh.root.array.attrs, \"attr\" + str(i), str(self.a1))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print(\"Time for Undo, Redo (set_attr):\", undo, \"s, \", redo, \"s\")", "def __set_time_elements(*args):\n args[0].TimeState.delay_elements = args[1]\n args[0].TimeState.set_delay_elements()", "def _update_taxa_data(self, taxa_at_time):\n time = self._record.latest_time\n\n t_recorded = self._taxa[\"object\"]\n\n # Update previously introduced taxa.\n\n t_introduced = [taxon for taxon in taxa_at_time if taxon in t_recorded]\n\n for taxon in t_introduced:\n # Update taxon data.\n\n idx = self._taxa[\"object\"].index(taxon)\n self._taxa[\"latest_time\"][idx] = time\n self._taxa[\"extant\"][idx] = taxon.extant\n\n # Set the data of new taxa.\n\n t_new = [taxon for taxon in taxa_at_time if taxon not in t_recorded]\n\n for taxon in t_new:\n # Set identifiers.\n\n if self._taxa[\"uid\"]:\n taxon._uid = max(self._taxa[\"uid\"]) + 1\n else:\n taxon._uid = 0\n\n # Append taxon data.\n\n self._taxa[\"uid\"].append(taxon.uid)\n self._taxa[\"appeared\"].append(time)\n self._taxa[\"latest_time\"].append(time)\n self._taxa[\"extant\"].append(taxon.extant)\n self._taxa[\"object\"].append(taxon)\n\n # Update taxa stats.\n\n self._record.set_value(\"taxa\", sum(self._taxa[\"extant\"]))\n\n self._grid.at_node[\"taxa__richness\"] = self._get_taxa_richness_map()", "def set_timestamp(self, data):\n if \"hittime\" in data: # an absolute timestamp\n data[\"qt\"] = self.hittime(timestamp=data.pop(\"hittime\", None))\n if \"hitage\" in data: # a relative age (in seconds)\n data[\"qt\"] = self.hittime(age=data.pop(\"hitage\", None))", "def 
absulute2relative_time(x): \n if x.viewed:\n x.viewed_reltime=x.viewed_time-x.start\n \n if x.completed:\n x.completed_reltime=x.completed_time-x.start\n \n return x", "def _freeze_time(self, timestamp):\n now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)\n now_patch.start()\n self.addCleanup(now_patch.stop) # lint-amnesty, pylint: disable=no-member", "def setRelTime(self):\n obsRelTime = [self._getRelTime(o) for o in self.data]\n self.append('Relative Time', obsRelTime)\n return 0", "def mutate_json_record(self, json_record):\n for attr_name in json_record:\n attr = json_record[attr_name]\n if isinstance(attr, datetime):\n json_record[attr_name] = attr.isoformat()\n return json_record", "def _set_attributes(self):", "def set_ga_timestamp(self, time: int):\n for cl in self:\n cl.tga = time", "def pre_save(self, model_instance, add):\n if add:\n setattr(model_instance, self.attname, timezone.now())\n return super().pre_save(model_instance, add)", "def update(self, new_attrs):\n self.last_update = round(time())\n self.attrs.update(new_attrs)", "def test_dict_to_updated_at_attr_type(self):\n r = Review()\n r_dictionary = r.to_dict()\n r2 = Review(**r_dictionary)\n self.assertEqual(type(datetime.now()), type(r2.updated_at))", "def update_timestamp(self):\n self._timestamp = datetime.datetime.now()", "def _attributesFromRow(self, attributeList):\n for setAttribute, setValue in attributeList:\n setColumn = self.__attrmap__[setAttribute]\n if setColumn.model.type.name == \"timestamp\" and setValue is not None:\n setValue = parseSQLTimestamp(setValue)\n setattr(self, setAttribute, setValue)", "def OnAttributesUpdated():\n pass", "def absolute_to_relative_timestamps(profile):\n timestamps = profile['timestamp']\n baseline = timestamps[0]\n profile['timestamp'][:] = [x - baseline for x in timestamps]\n return profile", "def round_trip_time(self):\n ...", "def _SetAnatTgts(self):\n anat_candidates = {}\n fmap_candidates = {}\n for entry in self.entry_map['anat']:\n if self.info[entry]['type'] == 'T1High':\n anat_candidates[entry] = self.info[entry]['acqtime']\n\n# Find the valid anatomical acquired nearest to fieldmap.\n tdiff_min = 1e6\n if len(self.entry_map['fmap']) > 0:\n for entry in self.entry_map['fmap']:\n anat_tgt = self. _FindNearestAnat(self.info[entry]['acqtime'])\n self.info[entry]['anat_ref'] = anat_tgt\n else:\n# No fieldmaps were collected. 
Find the structural nearest the\n# beginning of the EPIs.\n if len(self.entry_map['anat']) == 1:\n anat_tgt = self.entry_map['anat'][0]\n else:\n epi_start = []\n tmin = 1e6\n for anat in self.entry_map['anat']:\n if self.info[anat]['type'] != 'T1High':\n continue\n tsum1 = 0; tsum2 = 0;\n for epi in self.entry_map['epi']:\n# Difference from start of structural and first epi\n tsum1 += abs(self.info[anat]['acqtime'] - \\\n self.info[epi]['acqtime'])\n# Difference from start of structural and last epi\n tsum2 += abs(self.info[anat]['acqtime'] - \\\n (self.info[epi]['acqtime'] +\\\n self.info[epi]['TR']*self.info[epi]['tdim']))\n if tsum1 < tmin or tsum2 < tmin:\n tmin = min(tsum1, tsum2)\n anat_tgt = anat\n\n# Resolve anatomical names and links.\n self._SetAnatNames(anat_tgt)\n\n# Set appropriate attributes in the entry for each EPI.\n for epi in self.entry_map['epi']:\n if len(self.entry_map['fmap']) > 0 and not self.no_fmapcorr:\n fmap_entry = self.info[epi]['fmap_entry']\n anat_ref = self.info[fmap_entry]['anat_ref']\n self.info[epi]['anat_tgt'] = fmap_entry\n self.info[epi]['anat_matfile'] = self.info[fmap_entry]['matfile']\n if self.align_fmaps or (not self.no_align_fmaps and \\\n self._SetCatMotionFmapMats(fmap_entry, anat_ref)):\n# Concatenate motion-correction matrices with tranform from\n# fieldmap to structural. Use the registered fieldmap.\n self.info[epi]['catmats'] = True\n fmap_info = self.info[self.info[epi]['fmap_entry']]\n self.info[epi]['fmapname'] = \\\n fmap_info['imgfile_r'] + fmap_info['suffix']\n else:\n# Assume fieldmap is in register with the structural.\n self.info[epi]['catmats'] = False\n else:\n self.info[epi]['anat_tgt'] = anat_tgt\n self.info[epi]['anat_matfile'] = None\n self.info[epi]['catmats'] = False\n self.info[epi]['anat_link'] = self.info[anat_tgt]['imgfile'] + \\\n self.info[anat_tgt]['suffix']", "def merge_attrs(self):\n for aid in self.attrs:\n new_val = self.attrs[aid]\n if aid in self.attributes:\n if ('value' in self.attributes[aid] and\n self.attributes[aid]['value'] != new_val):\n pass\n # print \"Updating attribute %s[%s] %s -> %s\" % (\n # self.name, aid, self.attributes[aid]['value'], new_val)\n else:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, new_val)\n self.remember_custom_attribute(self.name, aid, new_val)\n self.attributes[aid] = {}\n self.attributes[aid]['nv'] = new_val", "def _self_time(self):\r\n return self.duration() - sum([child.duration() for child in self.children])", "def add_microseconds(apps, schema_editor):\n model = apps.get_model('scheduled_classes', 'Class')\n\n lessons = model.objects.all()\n\n for lesson in lessons:\n\n if lesson.class_start_date:\n #\n # adds a microsecond at current date\n lesson.class_start_date = lesson.class_start_date + timedelta(\n microseconds=1\n )\n lesson.save()\n else:\n #\n # if class_start_date is equal None\n # it is added current date\n now = timezone.now()\n lesson.class_start_date = now\n lesson.save()", "def update_afferents_ap(self,time):\n\t\t# Iterate over all dictionaries\n\t\tfor muscle in self.cells:\n\t\t\tfor cellName in self.cells[muscle]:\n\t\t\t\tif cellName in self._afferentsNames:\n\t\t\t\t\tfor cell in self.cells[muscle][cellName]:\n\t\t\t\t\t\tcell.update(time)" ]
[ "0.57737684", "0.5632862", "0.5494352", "0.54923177", "0.5389393", "0.5389393", "0.53798777", "0.53486305", "0.53441674", "0.5340016", "0.531751", "0.53039336", "0.53033215", "0.52547175", "0.52139795", "0.5203053", "0.5201817", "0.5197925", "0.51893145", "0.51817805", "0.5156818", "0.51483464", "0.5124112", "0.5098608", "0.50770205", "0.5072238", "0.5052591", "0.5050366", "0.50463516", "0.5043769" ]
0.71409726
0
Remove invalid items from list attributes, others left unchanged.
def remove_invalid( self: ComplexBaseType, attr: Optional[str] = None, recursive: bool = True, ) -> ComplexBaseType: if attr is None: for attribute in self._list_attributes: self._remove_invalid(attribute, recursive) elif attr in self._list_attributes: self._remove_invalid(attr, recursive) else: raise TypeError("`{}` must be a list attribute.") return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_remove_values(cls, cleaned_input, instance):\n remove_values = cleaned_input.get(\"remove_values\", [])\n for value in remove_values:\n if value.attribute != instance:\n msg = \"Value %s does not belong to this attribute.\" % value\n raise ValidationError(\n {\n \"remove_values\": ValidationError(\n msg, code=AttributeErrorCode.INVALID\n )\n }\n )\n return remove_values", "def _clean_list(self, items):\n itemlist = list(filter(None, items))\n if len(itemlist) < 3:\n itemlist.append(\"\")\n return itemlist\n\n return itemlist", "def stripBlacklistAttrs(attrs, blacklist):\n gb = FnAttribute.GroupBuilder()\n gb.update(attrs)\n\n for attrName in blacklist:\n gb.delete(attrName)\n\n return gb.build()", "def clean(self, value):\n return [f.clean(v) for v,f in zip(value, self.fields)]", "def unset(self, *list):\n attrs = dict().fromkeys(list, \"\")\n self.graph._setattrs(handle=self.handle, **attrs)", "def clean_attrs(cls, diffsync: DiffSync, attrs):\n return cls.clean_ids_or_attrs(diffsync, attrs)", "def _drop_protected_attrs(model_class, values):\n for attr in model_class.__protected_attributes__:\n if attr in values:\n del values[attr]", "def attributes(self):\n attr = super(BaseDataRecord, self).attributes()\n return [a for a in attr if a not in self.excluded()]", "def unset(self, item, *list):\n attrs = dict().fromkeys(list, \"\")\n attrs[self.type] = item\n self.graph._setattrs(self.parent.handle, **attrs)", "def clean(self):\n filtered_items = {}\n for name, ls in self.items.items():\n filtered_ls = []\n for i in ls:\n if i.alive():\n filtered_ls.append(i)\n else:\n self.del_item(i)\n filtered_items[name] = filtered_ls\n self.items = filtered_items", "def clean_list(lst, element_type='way'):\n return [clean_element(e, element_type=element_type) for e in lst]", "def delete_attributes(self, attribute_list):\n with LayerEditingManager(self.layer, 'Remove attributes', DEBUG):\n # remove attributes\n layer_pr = self.layer.dataProvider()\n print \"REMOVING %s\" % attribute_list\n #TODO fix this\n print \"TODO fix ProcessLayer.delete_attributes()\"\n print \"this attributes should be deleted: %s\" % attribute_list\n #return layer_pr.deleteAttributes(attribute_list)", "def allow_token(self, token):\n if 'data' in token:\n # Loop through all the attributes and drop the ones that are not\n # allowed, are unsafe or break other rules. 
Additionally, fix\n # attribute values that need fixing.\n #\n # At the end of this loop, we have the final set of attributes\n # we're keeping.\n attrs = {}\n for namespaced_name, val in token['data'].items():\n namespace, name = namespaced_name\n\n # Drop attributes that are not explicitly allowed\n #\n # NOTE(willkg): We pass in the attribute name--not a namespaced\n # name.\n if not self.attr_filter(token['name'], name, val):\n continue\n\n # Look at attributes that have uri values\n if namespaced_name in self.attr_val_is_uri:\n val_unescaped = re.sub(\n \"[`\\000-\\040\\177-\\240\\s]+\",\n '',\n unescape(val)).lower()\n\n # Remove replacement characters from unescaped characters.\n val_unescaped = val_unescaped.replace(\"\\ufffd\", \"\")\n\n # Drop attributes with uri values that have protocols that\n # aren't allowed\n if (re.match(r'^[a-z0-9][-+.a-z0-9]*:', val_unescaped) and\n (val_unescaped.split(':')[0] not in self.allowed_protocols)):\n continue\n\n # Drop values in svg attrs with non-local IRIs\n if namespaced_name in self.svg_attr_val_allows_ref:\n new_val = re.sub(r'url\\s*\\(\\s*[^#\\s][^)]+?\\)',\n ' ',\n unescape(val))\n new_val = new_val.strip()\n if not new_val:\n continue\n\n else:\n # Replace the val with the unescaped version because\n # it's a iri\n val = new_val\n\n # Drop href and xlink:href attr for svg elements with non-local IRIs\n if (None, token['name']) in self.svg_allow_local_href:\n if namespaced_name in [(None, 'href'), (namespaces['xlink'], 'href')]:\n if re.search(r'^\\s*[^#\\s]', val):\n continue\n\n # If it's a style attribute, sanitize it\n if namespaced_name == (None, u'style'):\n val = self.sanitize_css(val)\n\n # At this point, we want to keep the attribute, so add it in\n attrs[namespaced_name] = val\n\n token['data'] = alphabetize_attributes(attrs)\n\n return token", "def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)", "def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)", "def unconfigure_aaa_attr_list(device, attr_list_name):\n try:\n device.configure([\n f\"no aaa attribute list {attr_list_name}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not unconfigure Attribute list with type'\n )", "def deleteATTR(sel=None):\n if sel == None:\n sel = pm.ls(sl=1)\n for obj in sel:\n #remove customAttr with keyable\n attrs = pm.listAttr(obj,k=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)\n #remove customAttr with Nonkeyable\n attrs = pm.listAttr(obj,cb=1)\n listAttrs = ['visibility','translateX','translateY','translateZ','rotateX','rotateY','rotateZ','scaleX','scaleY','scaleZ']\n for A in attrs:\n if A not in listAttrs:\n pm.setAttr(obj+'.'+A,l=0)\n pm.delete(obj+'.'+A,icn=1)\n pm.deleteAttr(obj, at = A)", "def _fix_items(items):\n for _, item in items.iteritems():\n if 'url' in item['fields']:\n del item['fields']['url']\n return items", "def removeAttr(atributes=('exp'), *items):\n for item in items:\n # check if item is pynode\n if not isinstance(item, pm.nodetypes.Transform):\n 
logger.debug('Create Pynode: %s, %s' % (item, type(item)))\n item = pm.PyNode(item)\n\n # deleteAttrs\n for attr in atributes:\n try:\n item.attr(attr).delete()\n logger.info('Remove attribute: %s.%s' % (item, attr))\n\n except:\n logger.info('Can not delete attr: %s' % attr)", "def _post_parse(self):\n to_be_removed = [\n index\n for index, case_name in enumerate(self.case_names)\n if not case_name\n ]\n\n for attr in self._all_attrs:\n item = getattr(self, attr)\n if item is not None:\n new_item = self.remove_elements(item, to_be_removed)\n self.__setattr__(attr, new_item)", "def clean_up_map(self):\n self.items = [i for i in self.items if i.quantity != 0]", "def clean_up_dict(clean_dict, ignore_list):\n for i in ignore_list:\n clean_dict.pop(i, None)\n return clean_dict", "def clear_attrs(self):\n self._attributes.clear()", "def _post_parse(self):\n to_be_removed = [index for index, case_name in\n enumerate(self.case_names)\n if not case_name]\n\n for attr in self._all_attrs:\n item = getattr(self, attr)\n if item is not None:\n new_item = self.remove_elements(item, to_be_removed)\n self.__setattr__(attr, new_item)", "def clean(item: list) -> list:\n item = [x.replace(\"'\", \"\")\n .replace('\"', '')\n .replace('[', '')\n .replace(']', '')\n .split(',') for x in item]\n\n return item", "def clean_attributes(self):\n attrs = {}\n\n # Only fetch the fields we need.\n for a in self.attributes.only('name', 'value', 'attribute').iterator():\n if a.attribute.multi:\n if a.name not in attrs:\n attrs[a.name] = []\n attrs[a.name].append(a.value)\n else:\n attrs[a.name] = a.value\n self._attributes_cache = attrs # Cache the attributes\n\n return attrs", "def cleanList(self, mylist):\r\n newlist = []\r\n for val in mylist:\r\n if val.strip() != '':\r\n newlist.append(val)\r\n myList = sorted(set(newlist))\r\n\r\n return newlist", "def invalid_train_item(train_items: List[JSONDict]) -> JSONDict:\n altered = train_items[0]\n altered[\"language\"] = \"engl\"\n altered[\"date\"] = \"02-2031-01\"\n altered[\"url\"] = \"incorrect.com\"\n altered[\"categoryid\"] = None\n return altered", "def removeOldItems(self):\n pass", "def clear_attributes(self):\n self.attrs = etad.AttributeContainer()" ]
[ "0.6361349", "0.62838423", "0.61892545", "0.61239946", "0.6072532", "0.5990588", "0.596005", "0.5902809", "0.5895732", "0.5884456", "0.58520967", "0.58251566", "0.576699", "0.57666254", "0.57666254", "0.5766544", "0.57514393", "0.57415366", "0.5735838", "0.5734968", "0.5717478", "0.56798905", "0.56774306", "0.56765723", "0.56525236", "0.5638549", "0.56269217", "0.56120545", "0.5606356", "0.55887586" ]
0.6328438
1
Gets list of available taxon slugs for given attributes
def _get_available_attrs_taxon_slugs(cls, attributes: List[FdqModelAttribute]) -> List[str]: available_taxon_slugs: List[str] = [] for attr in attributes: available_taxon_slugs.extend(attr.field_map) return available_taxon_slugs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_taxa(self, **kwargs):\n if \"oids\" not in kwargs and \"labels\" not in kwargs:\n raise TypeError(\"Need to specify taxa oid's or labels\")\n oids = kwargs.get(\"oids\", [])\n labels = kwargs.get(\"labels\", [])\n taxa = []\n for oid in oids:\n t = self.get_taxon(oid=oid)\n if t:\n taxa.append(t)\n for label in labels:\n t = self.get_taxon(label=label)\n if t:\n taxa.append(t)\n return taxa", "def list_solr_taxa(self, params, context=None):\n return self._client.call_method(\n 'ReferenceDataManager.list_solr_taxa',\n [params], self._service_ver, context)", "def nsrGenera(taxonList, synonymList):\r\n species = list(filter(None, sorted(taxonList + synonymList)))\r\n generaList = [i.split()[0] for i in species]\r\n generaList = list(dict.fromkeys(generaList))\r\n return generaList", "def get_taxids(organism_names):\r\n\r\n taxids = []\r\n\r\n for organism in organism_names:\r\n handle = Entrez.esearch(db=\"Taxonomy\", term=organism)\r\n record = Entrez.read(handle)\r\n print(record[\"IdList\"])\r\n try:\r\n taxids.append(record[\"IdList\"][0])\r\n except IndexError:\r\n pass\r\n\r\n return taxids", "def validate_unique_taxon_slugs(cls, values):\n if 'attributes' in values:\n # count occurrence of each taxon slug in attributes\n attributes: List[FdqModelAttribute] = values['attributes']\n taxon_slugs = cls._get_available_attrs_taxon_slugs(attributes)\n\n taxon_slugs_counter = Counter(taxon_slugs)\n\n multiple_taxon_slugs = [\n taxon_slug for taxon_slug, occurrence in taxon_slugs_counter.items() if occurrence > 1\n ]\n if len(multiple_taxon_slugs):\n raise ValueError('Following fields are mapped more than once - ' + ','.join(multiple_taxon_slugs))\n\n return values", "def get_objects_from_attribute(self, attribute: str) -> List[TgnObject]:\n pass", "def taxa(self, tax_str):\n\n taxa = [x.strip() for x in tax_str.split(';')]\n\n return taxa", "def get_categories(self) -> list:\n headers_dict = {\n 'user-key': self.user_key.key\n }\n\n endpoint = f'{const.API_HOST}{const.API_SNAPSHOTS_TAXONOMY_BASEPATH}'\n\n response = req.api_send_request(method='GET', endpoint_url=endpoint, headers=headers_dict)\n\n if response.status_code == 200:\n return [entry['attributes']['name'] for entry in response.json()['data']]\n\n raise RuntimeError('API Request returned an unexpected HTTP status')", "def get_taxonomy_results(verbose=False):\n if verbose:\n pprint(taxonomy_results)\n return taxonomy_results", "def list_loaded_taxa(self, params, context=None):\n return self._client.call_method(\n 'ReferenceDataManager.list_loaded_taxa',\n [params], self._service_ver, context)", "def nsrTaxonomy():\r\n # Input file\r\n taxonomyFile = pd.read_csv(args.indir+\"/\"+args.infile1, header=2,\r\n sep=\"\\t\", encoding=\"utf8\")\r\n\r\n # Parse taxonomic names into their elementary components\r\n taxonomy = taxonomyFile.loc[taxonomyFile['rank'] == 'soort']\r\n taxonList = []\r\n for taxon in taxonomy['scientific_name']:\r\n parser = taxonParser(taxon)\r\n if not parser or parser is False:\r\n pass\r\n else:\r\n taxonList.append(parser)\r\n\r\n # Write taxonomy to file\r\n index = 0\r\n with io.open(par_path+\"/results/nsr_species.csv\", \"w\", encoding=\"utf-8\") as outfile:\r\n outfile.write('\"species_id\",\"species_name\",\"identification_reference\"\\n')\r\n for i in taxonList:\r\n binomial = ' '.join(str(i).split()[:2])\r\n authorship = ' '.join(str(i).split()[2:])\r\n outfile.write('%s,%s,\"%s\"\\n' % (index, binomial, authorship))\r\n index += 1\r\n\r\n return taxonList", "def taxon_children(self, 
taxonomy):\n\n taxon_children = defaultdict(set)\n for taxon_id, taxa in taxonomy.items():\n for i, taxon in enumerate(taxa):\n if len(taxon) == 3:\n continue # just rank prefix\n\n if len(taxa) > i + 1 and len(taxa[i + 1]) != 3:\n taxon_children[taxon].add(taxa[i + 1])\n\n if len(taxa) > self.rank_index['s__']:\n taxon = taxa[self.rank_index['s__']]\n if taxon != 's__':\n taxon_children[taxon].add(taxon_id)\n\n return taxon_children", "def get_taxonomy(): # noqa: E501\n return 'do some magic!'", "def extant_taxa(self, taxonomy):\n\n extant_taxa = {}\n for rank_label in Taxonomy.rank_labels:\n extant_taxa.update(self.extant_taxa_for_rank(rank_label, taxonomy))\n\n return extant_taxa", "def get_taxa(KDEs, kde_type, all_taxa=None):\n # parsing KDE\n if kde_type == 1: \n if all_taxa is not None:\n taxa = [k[0] for k in KDEs] \n else:\n taxa = [k[0] for k in KDEs if k[1] is not None]\n elif kde_type == 2:\n if all_taxa is not None:\n taxa = KDEs.keys() \n else:\n taxa = [k for k,v in KDEs.items() if v is not None]\n elif kde_type == 3:\n taxa = []\n for libID,v in KDEs.items():\n if all_taxa is not None:\n taxa += v.keys() \n else:\n taxa += [k for k,vv in v.items() if v is not None]\n taxa = list(set(taxa)) \n elif kde_type == 4:\n taxa = []\n for libID,filename in KDEs.items(): \n KDE_bylib = Utils.load_kde(filename)\n if all_taxa is not None:\n taxa += KDE_bylib.keys() \n else:\n taxa += [k for k,v in KDE_bylib.items() if v is not None]\n else:\n raise TypeError, 'KDE object type not recognized'\n\n return taxa", "def __iter__(self):\n return iter(self.taxonomies)", "def antweb_links(request, format='csv'):\n\n\n\ttaxonomy = []\n\tif request.GET.get('taxon_code'):\n\t\ttaxonomy = Taxonomy.objects.raw(\"\"\"\n\t\tSELECT taxon_code, subfamily_name, genus_name, species_name\n\t\tFROM map_taxonomy_list\n\t\tWHERE taxon_code = %s\n\t\t\"\"\", [request.GET.get('taxon_code')])\n\t\t\n\t\t# serialize to JSON\n\t\tjson_objects = [{'key': t.taxon_code, 'speciesName': t.species_name, 'genusName': t.genus_name, 'subfamilyName': t.subfamily_name} for t in taxonomy]\n\t\t\n\t\treturn JSONResponse({'taxonomy': json_objects})\n\t\t\n\telif request.GET.get('genus_name'):\n\t\ttaxonomy = Taxonomy.objects.raw(\"\"\"\n\t\tSELECT genus_name, subfamily_name,taxon_code\n\t\tFROM map_taxonomy_list\n\t\tWHERE genus_name = %s\n\t\tGROUP BY genus_name, subfamily_name,taxon_code\n\t\t\"\"\", [request.GET.get('genus_name')])\n\t\t\n\t\t# serialize to JSON\n\t\tjson_objects = [{'key': t.genus_name, 'subfamilyName': t.subfamily_name} for t in taxonomy]\n\t\t\n\t\treturn JSONResponse({'taxonomy': json_objects})\n\t\n\telse:\n\t\treturn JSONResponse({'taxonomy': []})", "def children(self, taxon, taxonomy):\n\n c = set()\n for taxon_id, taxa in taxonomy.items():\n if taxon in taxa:\n\n if taxon.startswith('s__'):\n c.add(taxon_id)\n else:\n taxon_index = taxa.index(taxon)\n for child in taxa[taxon_index + 1:]:\n if len(child) > 3: # not just an empty prefix\n c.add(child)\n\n return c", "def summary(self,attr='raw'):\n g = {}\n g['gid'] = map(lambda x : x.gid, self.taxonomies)\n g['sp'] = map(lambda x : x.presences.species , self.taxonomies)\n \n g['gns'] = map(lambda x : x.presences.genera , self.taxonomies) \n g['fam'] = map(lambda x : x.presences.families , self.taxonomies)\n g['ord'] = map(lambda x : x.presences.orders , self.taxonomies)\n g['cls'] = map(lambda x : x.presences.classes , self.taxonomies)\n g['phy'] = map(lambda x : x.presences.phyla , self.taxonomies)\n g['kng'] = map(lambda x : x.presences.kingdoms , 
self.taxonomies)\n #g['all'] = map(lambda x : (x.gid,int(x.presences.species),int(x.genera),int(x.families),int(x.orders),int(x.classes),int(x.phyla),int(x.kingdoms)),self.taxonomies)\n keys = settings.TAXONOMIC_TREE_KEYS\n if attr == 'int':\n for key in keys:\n g[key] = map(lambda p : int(p) ,g[key])\n elif attr == 'str':\n for key in keys:\n g[key] = map(lambda p : str(p) ,g[key]) \n elif attr == 'list':\n for key in keys:\n g[key] = map(lambda p : p.list ,g[key]) \n elif attr == 'mapping':\n for key in keys:\n g[key] = map(lambda p : p.map ,g[key]) \n elif attr == 'raw':\n return g\n else:\n logger.error(\"Wrong attribute selection\")\n return None\n \n return g", "def get_taxon_objects(\n self, uid=np.nan, time=np.nan, extant_at_latest_time=np.nan, ancestor=np.nan\n ):\n # Handle ancestor.\n\n if isnull(ancestor):\n taxa = self._taxa\n elif ancestor in self._taxa[\"uid\"]:\n idx_number = self._taxa[\"uid\"].index(ancestor)\n taxon = self._taxa[\"object\"][idx_number]\n\n descendants = []\n stack = [taxon]\n\n while stack:\n if taxon.children:\n descendants.extend(taxon.children)\n stack.extend(taxon.children)\n\n stack.remove(taxon)\n\n if stack:\n taxon = stack[0]\n\n descendants = list(set(descendants))\n taxa = self._subset_taxa_data_structure(descendants)\n else:\n return []\n\n # Handle identifier.\n\n if isnull(uid):\n idx_id = np.ones(len(taxa[\"uid\"]), dtype=bool)\n else:\n idx_id = np.array(taxa[\"uid\"]) == uid\n\n # Handle time.\n\n if isnull(time):\n idx_time = np.ones(len(taxa[\"uid\"]), dtype=bool)\n else:\n idx_time = self._mask_taxa_by_time(taxa, time)\n\n # Handle extant state.\n\n if isnull(extant_at_latest_time):\n idx_ext = np.ones(len(taxa[\"uid\"]), dtype=bool)\n else:\n idx_ext = np.array(taxa[\"extant\"]) == extant_at_latest_time\n\n # Get the Taxon list.\n\n idx = np.all([idx_time, idx_id, idx_ext], 0)\n taxa = np.array(taxa[\"object\"])[idx].tolist()\n taxa.sort(key=lambda taxon: taxon.uid)\n\n return taxa", "def _extract_terms(self, obj):\r\n terms = set()\r\n if 'paths' in obj:\r\n for path in obj['paths']:\r\n segs = re.split('[/{}]', path)\r\n for seg in segs:\r\n terms.add(seg.lower())\r\n self.terms = terms", "def sort_taxa(self, taxa, reverse=False):\n\n ordered_taxa = []\n for rank_prefix in Taxonomy.rank_prefixes:\n rank_taxa = []\n for taxon in taxa:\n if taxon.startswith(rank_prefix):\n rank_taxa.append(taxon)\n\n ordered_taxa.extend(sorted(rank_taxa))\n\n if reverse:\n ordered_taxa = ordered_taxa[::-1]\n\n return ordered_taxa", "def _category_slugs(self, category):\n key = self._category_key(category)\n slugs = self.r.smembers(key)\n return slugs", "def prepare_tags(self, obj):\n return [tag.name for tag in obj.tags.all()]", "def get_existing_taxonomy() -> List[List[Any]]:\n output = []\n with DBWith() as dbService:\n stmt = \"SELECT id, name, vocabulary, parent_id FROM taxonomy\"\n with closing(dbService.cursor(dictionary=True)) as c:\n c.execute(stmt)\n for item in c:\n sleep(0.000001) # To avoid Mysql.Connector error\n output.append([item[\"id\"], item[\"name\"], item[\"vocabulary\"], item[\"parent_id\"]])\n return output", "def named_lineages_at_rank(self, taxonomy):\n\n named_lineages = defaultdict(set)\n for taxa in taxonomy.values():\n for i, taxon in enumerate(taxa):\n if taxon != Taxonomy.rank_prefixes[i]:\n named_lineages[i].add(taxon)\n\n return named_lineages", "def taxa_at_ranks(self, tax_str):\n\n taxa = self.taxa(tax_str)\n\n d = {}\n for rank, taxon in enumerate(taxa):\n d[Taxonomy.rank_labels[rank]] = taxon", "def 
gauge_slugs(self):\n return self.r.smembers(self._gauge_slugs_key)", "def get_related_taxonomy(taxonomy_list=None):\n if not taxonomy_list:\n taxonomy_list = []\n\n taxonomies = []\n for taxonomy in taxonomy_list:\n list = __get_related_list__(taxonomy)\n\n for child_taxonomy in list:\n if child_taxonomy not in taxonomies:\n taxonomies.append(child_taxonomy)\n\n return taxonomies", "def lineages(self, taxonomy):\n\n lineages = defaultdict(set)\n for taxa in taxonomy.values():\n for i, taxon in enumerate(taxa):\n lineages[taxon] = taxa[0:i]\n\n return lineages" ]
[ "0.62560797", "0.5843211", "0.5672292", "0.5521571", "0.5469094", "0.545953", "0.5435174", "0.5418336", "0.53971124", "0.52830404", "0.5206721", "0.52039826", "0.51855385", "0.51622546", "0.51615846", "0.5142129", "0.5132997", "0.5125577", "0.5045392", "0.5040304", "0.50111365", "0.49989963", "0.49925673", "0.49350303", "0.49343622", "0.4934058", "0.49322656", "0.49229062", "0.49164912", "0.48910934" ]
0.83314764
0
Validate that each taxon slug is used at most once in the list of attributes
def validate_unique_taxon_slugs(cls, values): if 'attributes' in values: # count occurrence of each taxon slug in attributes attributes: List[FdqModelAttribute] = values['attributes'] taxon_slugs = cls._get_available_attrs_taxon_slugs(attributes) taxon_slugs_counter = Counter(taxon_slugs) multiple_taxon_slugs = [ taxon_slug for taxon_slug, occurrence in taxon_slugs_counter.items() if occurrence > 1 ] if len(multiple_taxon_slugs): raise ValueError('Following fields are mapped more than once - ' + ','.join(multiple_taxon_slugs)) return values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_identifiers_correct_taxons(cls, values):\n if 'attributes' in values and 'identifiers' in values:\n attributes: List[FdqModelAttribute] = values['attributes']\n identifiers: List[str] = values['identifiers']\n\n # get set of available taxon slugs\n available_taxon_slugs = set(cls._get_available_attrs_taxon_slugs(attributes))\n\n # verify that all identifier taxons are available in this model\n invalid_ids = set(identifiers) - available_taxon_slugs\n\n if len(invalid_ids) > 0:\n raise ValueError(f'Identifier(s) {\", \".join(invalid_ids)} are not present as fields on the model')\n\n return values", "def _get_available_attrs_taxon_slugs(cls, attributes: List[FdqModelAttribute]) -> List[str]:\n available_taxon_slugs: List[str] = []\n for attr in attributes:\n available_taxon_slugs.extend(attr.field_map)\n\n return available_taxon_slugs", "def validate_value_is_unique(attribute: models.Attribute, value: models.AttributeValue):\n duplicated_values = attribute.values.exclude(pk=value.pk).filter(slug=value.slug)\n if duplicated_values.exists():\n raise ValidationError(\n {\n \"name\": ValidationError(\n f\"Value with slug {value.slug} already exists.\",\n code=AttributeErrorCode.ALREADY_EXISTS.value,\n )\n }\n )", "def check_cls_choices_slugs(cls, slugs):\n for s in slugs:\n if settings.DJCAT_ITEM_SLUG_DELIMITER in s:\n raise ItemAttributeChoicesSlugNotValid(cls)\n\n if not len(set(slugs)) == len(slugs):\n raise ItemAttributeChoicesSlugsDuplicate(cls)", "def check_fields(taxa: Dict[str, AphiaInfo]) -> None:\n for key, taxon in taxa.items():\n if taxon.get(\"scientificName\") is None:\n taxon.set_missing(\"scientificName\")\n if taxon.get(\"scientificNameID\") is None:\n taxon.set_missing(\"scientificNameID\")", "def has_duplicates(tree):\n taxa = [tip.name for tip in tree.tips()]\n if '' in taxa or None in taxa:\n raise ValueError('Empty taxon name(s) found.')\n return len(set(taxa)) < len(taxa)", "def validate_model_attributes_tel(cls, values):\n if 'attributes' in values:\n attributes: List[FdqModelAttribute] = values['attributes']\n\n # get set of available taxon slugs\n available_taxon_slugs = set(cls._get_available_attrs_taxon_slugs(attributes))\n # our TEL visitor only verifies here that all available TEL transformations are valid\n # we dont care about the SQL output here\n\n invalid_taxons_attr = {}\n for attribute in attributes:\n try:\n # we dont really care from which taxon we start\n # if there is cyclic reference somewhere, we will run into it eventually\n taxon_slug = next(filter(None, attribute.field_map))\n except StopIteration:\n # we dont care, if we dont find the taxon - this check is performed somewhere else\n continue\n\n try:\n # initialize options for the visitor\n visitor_parameters = AttributeValidationTelVisitorParams(taxon_slug, attributes)\n\n tree = ModelTelDialect.parse(attribute.data_reference)\n visitor = AttributeValidationTelVisitor(visitor_parameters)\n visitor.visit(tree)\n\n # check whether this TEL transformation uses any taxons which arent available in this model\n additional_taxons = visitor.result.used_taxon_slugs - available_taxon_slugs\n if len(additional_taxons) > 0:\n invalid_taxons_attr[attribute.data_reference] = additional_taxons\n\n except ModelTelCyclicReferenceException:\n # there's no point in checking other attributes when we run into cyclic reference\n raise ValueError(\n f'Data reference \"{attribute.data_reference}\" contains TEL transformation with cyclic reference'\n )\n\n # if we found any attribute with missing taxons, 
output them all in one error message\n if invalid_taxons_attr:\n attribute_msgs = [\n f'Data reference \"{attr_key}\": {\", \".join(taxon_slugs)} not available in this model'\n for attr_key, taxon_slugs in invalid_taxons_attr.items()\n ]\n\n raise ValidationError([ErrorWrapper(ValueError(msg), 'attributes') for msg in attribute_msgs], cls)\n\n return values", "def validate_unique(self, exclude=None):\n qs_barcode = Product.objects.filter(barcode=self.barcode)\n qs_item_number = Product.objects.filter(item_number=self.item_number)\n qs_name = Product.objects.filter(name=self.name)\n if qs_barcode.filter(webshop_id=self.webshop_id).exclude(id=self.id).exists():\n raise ValidationError(detail='Barcode must be unique in one webshop')\n if qs_item_number.filter(webshop_id=self.webshop_id).exclude(id=self.id).exists():\n raise ValidationError(detail='Item number must be unique in one webshop')\n if qs_name.filter(webshop_id=self.webshop_id).exclude(id=self.id).exists():\n raise ValidationError(detail='Item Name must be unique in one webshop')", "def validate_joins_correct_taxons(cls, values):\n if 'attributes' in values and 'joins' in values:\n attributes: List[FdqModelAttribute] = values['attributes']\n joins: List[FdqModelJoin] = values['joins']\n\n # get set of available taxon slugs\n available_taxon_slugs = set(cls._get_available_attrs_taxon_slugs(attributes))\n\n # for each join, verify that all its taxons are available in this model\n invalid_joins: Dict[int, Set[str]] = {}\n for idx, join in enumerate(joins):\n missing_taxons = set(join.taxons) - available_taxon_slugs\n if len(missing_taxons):\n invalid_joins[idx] = missing_taxons\n\n if invalid_joins:\n # report invalid joins\n raise ValidationError(\n [\n ErrorWrapper(\n ValueError(f'Join {idx + 1} contains missing fields {\",\".join(taxon_slugs)}'), 'joins'\n )\n for idx, taxon_slugs in invalid_joins.items()\n ],\n cls,\n )\n\n return values", "def validate(self):\n for search_tag_name in self.get_search_tag_names():\n search_tag_obj = Tag(search_tag_name)\n for search_tag_value in self.get_search_tag_values(search_tag_name):\n for new_tag_name in self.get_new_tag_names(search_tag_name, search_tag_value):\n new_tag_obj = Tag(new_tag_name)\n new_tag_value = self.get_new_tag_value(search_tag_name, search_tag_value, new_tag_name)\n if new_tag_obj.repeatable:\n if not isinstance(new_tag_value, list):\n raise KeyError('%s needs a list'%(new_tag_name))\n else:\n if isinstance(new_tag_value, list):\n raise KeyError('%s needs a scalar value'%(new_tag_name))", "def validate(self, taxonomy,\n check_prefixes,\n check_ranks,\n check_hierarchy,\n check_species,\n check_group_names,\n check_duplicate_names,\n check_capitalization,\n report_errors=True):\n\n # check for incomplete taxonomy strings or unexpected rank prefixes\n invalid_ranks = {}\n invalid_prefixes = {}\n invalid_species_name = {}\n invalid_group_name = {}\n invalid_capitalization = set()\n for taxon_id, taxa in taxonomy.items():\n if check_ranks:\n if len(taxa) != len(Taxonomy.rank_prefixes):\n invalid_ranks[taxon_id] = ';'.join(taxa)\n continue\n\n if check_prefixes:\n for r, taxon in enumerate(taxa):\n if taxon[0:3] != Taxonomy.rank_prefixes[r]:\n invalid_prefixes[taxon_id] = [taxon, ';'.join(taxa)]\n break\n\n if check_group_names:\n for taxon in taxa:\n canonical_taxon = ' '.join([t.strip() for t in re.split('_[A-Z]+(?= |$)', taxon[3:])]).strip()\n if canonical_taxon and re.match('^[a-zA-Z0-9- ]+$', canonical_taxon) is None:\n if not taxon.startswith('s__') or check_species:\n 
invalid_group_name[taxon_id] = [taxon, 'Taxon contains invalid characters']\n\n if check_species:\n genus_index = Taxonomy.rank_index['g__']\n species_index = Taxonomy.rank_index['s__']\n if len(taxa) > species_index:\n species_name = taxa[species_index]\n valid, error_msg = self.validate_species_name(species_name, require_full=True, require_prefix=True)\n if not valid:\n invalid_species_name[taxon_id] = [species_name, error_msg]\n\n if species_name != 's__':\n genus_name = taxa[genus_index]\n generic_name = species_name.split()[0]\n if genus_name[3:] != generic_name[3:]:\n invalid_species_name[taxon_id] = [species_name,\n 'Genus and generic names do not match: %s' % genus_name]\n\n if check_capitalization:\n for taxon in taxa:\n if taxon[3].islower():\n invalid_capitalization.add(taxon)\n\n # check for duplicate names\n invalid_duplicate_name = []\n if check_duplicate_names:\n invalid_duplicate_name = self.duplicate_names(taxonomy, check_species)\n\n # check for inconsistencies in the taxonomic hierarchy\n invalid_hierarchies = defaultdict(set)\n missing_parent = set()\n if check_hierarchy:\n expected_parent = self.taxonomic_consistency(taxonomy, False)\n\n for taxon_id, taxa in taxonomy.items():\n for r in range(1, len(taxa)):\n if len(taxa[r]) == 3:\n continue\n\n if r == self.rank_index['s__'] and not check_species:\n continue\n\n if taxa[r] not in expected_parent:\n missing_parent.add(taxa[r])\n elif taxa[r - 1] != expected_parent[taxa[r]]:\n invalid_hierarchies[taxa[r]].add(taxa[r - 1])\n invalid_hierarchies[taxa[r]].add(expected_parent[taxa[r]])\n\n if report_errors:\n if len(invalid_ranks):\n print('')\n print('Taxonomy contains too few ranks:')\n for taxon_id, taxa_str in invalid_ranks.items():\n print('%s\\t%s' % (taxon_id, taxa_str))\n\n if len(invalid_prefixes):\n print('')\n print('Taxonomy contains an invalid rank prefix:')\n for taxon_id, info in invalid_prefixes.items():\n print('%s\\t%s\\t%s' % (taxon_id, info[0], info[1]))\n\n if len(invalid_group_name):\n print('')\n print('Taxa containing invalid characters:')\n for taxon_id, err_msg in invalid_group_name.items():\n print('%s\\t%s\\t%s' % (taxon_id, err_msg[0], err_msg[1]))\n\n if len(invalid_species_name):\n print('')\n print('Taxonomy contains invalid species names:')\n for taxon_id, info in invalid_species_name.items():\n print('%s\\t%s\\t%s' % (taxon_id, info[0], info[1]))\n\n if len(invalid_duplicate_name):\n print('')\n print('Taxonomy contains identical taxon names in multiple lineages:')\n for duplicate_name in invalid_duplicate_name.keys():\n print('%s' % duplicate_name)\n\n if len(missing_parent):\n print('')\n print('Taxonomy contains taxa with an undefined parent:')\n for taxon in missing_parent:\n print('%s' % taxon)\n\n if len(invalid_hierarchies):\n print('')\n print('Taxonomy contains taxa with multiple parents:')\n for child_taxon, parent_taxa in invalid_hierarchies.items():\n print('%s\\t%s' % (child_taxon, ', '.join(parent_taxa)))\n\n if len(invalid_capitalization):\n print('')\n print('Taxa do not start with a capital letter:')\n for taxon in invalid_capitalization:\n print('{}'.format(taxon))\n\n return invalid_ranks, invalid_prefixes, invalid_species_name, invalid_hierarchies, invalid_group_name, invalid_capitalization", "def clean(self):\n if any(self.errors):\n return\n\n if len(set([a.id for a in self.instance.affiliations.all()]\n + [f.instance.id for f in self.forms])) > self.max_forms:\n raise forms.ValidationError('Maximum number of allowed items exceeded.')\n\n names = []\n for form in 
self.forms:\n # This is to allow empty unsaved form\n if 'name' in form.cleaned_data:\n name = form.cleaned_data['name']\n if name in names:\n raise forms.ValidationError('Affiliation names must be unique.')\n names.append(name)", "def validate(self, attrs):\n club = attrs['club']\n tags = attrs.get('tags', [])\n for tag in tags:\n if tag.club != club:\n raise serializers.ValidationError(\n f\"The tag {tag.tag_name} does not belong to this club\")\n return attrs", "def check_attr_key(cls, registry):\n for m in registry.items():\n for i in m[1]['items'].items():\n for a in i[1]['attrs'].items():\n if a[1]['key'] == cls.attr_key:\n raise ItemAttributeKeyDuplicate(a[1]['class'], cls, cls.attr_key)", "def validate(self, attrs):\n tag_name = attrs['tag_name']\n club = attrs['club']\n request = self.context['request']\n profile = UserProfile.objects.get(user=request.user)\n if (club not in profile.get_club_privileges() and\n club not in profile.get_workshop_privileges().values_list('club', flat=True)):\n raise serializers.ValidationError(\"You are not allowed to create tag for this club\")\n if Tag.objects.filter(tag_name=tag_name, club=club):\n raise serializers.ValidationError(\"The tag already exists for this club\")\n return attrs", "def check_items_slugs(cls, slugs, registry):\n for m in registry.items():\n for i in m[1]['items'].items():\n for slug in slugs:\n try:\n item = i[1]['_class'].objects.get(slug=slug)\n raise ItemAttributeChoicesSlugsDuplicateItemInstanceSlug(cls, item)\n except ObjectDoesNotExist:\n pass", "def duplicate_names(self, taxonomy, check_species=True):\n\n # get lineages for each taxon name\n taxon_lineages = defaultdict(set)\n for taxa in taxonomy.values():\n for i, taxon in enumerate(taxa):\n if len(taxon) > 3:\n taxon_lineages[taxon].add(';'.join(taxa[0:i + 1]))\n\n # identify taxon belonging to multiple lineages\n duplicates = {}\n for taxon, lineages in taxon_lineages.items():\n if len(lineages) >= 2:\n if not taxon.startswith('s__') or check_species:\n duplicates[taxon] = lineages\n\n return duplicates", "def test_unique_train_items(train_items: List[JSONDict]) -> None:\n validated = TrainCollection(items=train_items)\n assert validated.dict() == train_items", "def validate_unique_mof_names():\n names = list(FRAMEWORKS_DF['name'].str.lower()) + list(FRAMEWORKS_DF['alternative names'].dropna().str.lower())\n names = [ n for l in names for n in l.split(',') if l ]\n names = [ n.lower().replace('-', ' ') for n in names ]\n\n duplicates = [item for item, count in collections.Counter(list(names)).items() if count > 1]\n\n if duplicates:\n print('Warning: Duplicate CURATED-MOF names detected: {}'.format(duplicates))\n sys.exit(1)\n\n print('No duplicate CURATED-MOF names found.')", "def check_catalog_item_choices_slugs(cls, slugs, registry):\n for m in registry.items():\n for i in m[1]['items'].items():\n for a in [a for a in i[1]['attrs'].items() if a[1]['type'] == 'choice']:\n choices = a[1].get('choices')\n if len(set(slugs) & set(choices)):\n raise ItemAttributeChoicesSlugsDuplicateInCatalogItem(cls, a[1].get('_class'))", "def _check_unique_insesitive(self, cr, uid, ids, context=None):\n for category in self.browse(cr, uid, ids, context=context):\n if len(self.search(cr, uid, [('name','=ilike',category.name)], context=context)) > 1:\n raise osv.except_osv(_('Constraint Error'), _(\"The Name Must Be Unique!\"))\n return True", "def test_slug_can_contain_create(self):\n self.test_agreement.slug = '123create'\n self.test_agreement.full_clean()\n\n 
self.test_agreement.slug = 'create123'\n self.test_agreement.full_clean()\n\n self.test_agreement.slug = '123create123'\n self.test_agreement.full_clean()", "def test_unique_together_normalization(self):\n data = {\n \"2-tuple\": ((\"foo\", \"bar\"), ((\"foo\", \"bar\"),)),\n \"list\": ([\"foo\", \"bar\"], ((\"foo\", \"bar\"),)),\n \"already normalized\": (\n ((\"foo\", \"bar\"), (\"bar\", \"baz\")),\n ((\"foo\", \"bar\"), (\"bar\", \"baz\")),\n ),\n \"set\": (\n {(\"foo\", \"bar\"), (\"bar\", \"baz\")}, # Ref #21469\n ((\"foo\", \"bar\"), (\"bar\", \"baz\")),\n ),\n }\n\n for unique_together, normalized in data.values():\n\n class M(models.Model):\n foo = models.IntegerField()\n bar = models.IntegerField()\n baz = models.IntegerField()\n\n Meta = type(\n \"Meta\", (), {\"unique_together\": unique_together, \"apps\": Apps()}\n )\n\n checks, _ = M()._get_unique_checks()\n for t in normalized:\n check = (M, t)\n self.assertIn(check, checks)", "def check_unique(self):\n pass", "def has_minimum_relevant_attributes_count(sample, min_count=2):\n relevant_att_names = arm_constants.NCBI_FILTER_RELEVANT_ATTS\n biosample_node = ET.fromstring(sample)\n attributes = biosample_node.find('Attributes')\n if attributes is not None:\n if len(attributes) >= min_count:\n matches = 0\n for att in attributes:\n attribute_name = att.get('attribute_name')\n display_name = att.get('display_name')\n harmonized_name = att.get('harmonized_name')\n value = None\n if attribute_name in relevant_att_names:\n value = datasources_util.extract_ncbi_attribute_value(att, attribute_name)\n elif display_name in relevant_att_names:\n value = datasources_util.extract_ncbi_attribute_value(att, display_name)\n elif harmonized_name in relevant_att_names:\n value = datasources_util.extract_ncbi_attribute_value(att, harmonized_name)\n\n # Check if the value is valid\n if value is not None and datasources_util.is_valid_value(value):\n matches = matches + 1\n\n if matches >= min_count:\n return True\n else:\n return False\n else:\n return False\n else:\n return False", "def has_taxa(self, **kwargs):\n if \"taxa\" not in kwargs and \"oids\" not in kwargs and \"labels\" not in kwargs:\n raise TypeError(\"Need to specify `taxa`, `oids` or `labels` list.\")\n taxa = set(kwargs.get(\"taxa\", []))\n oids = set(kwargs.get(\"oids\", []))\n labels = set(kwargs.get(\"labels\", []))\n taxon_oids = set([t.oid for t in self])\n taxon_labels = set([t.label for t in self])\n return taxa.issubset(self) \\\n and oids.issubset(taxon_oids) \\\n and labels.issubset(taxon_labels)", "def check_taxa(taxa: Dict[str, AphiaInfo], cache: AphiaCacheInterface = None) -> None:\n\n check_fields(taxa)\n detect_lsid(taxa)\n detect_external(taxa)\n match_obis(taxa, cache)\n match_worms(taxa)\n check_annotated_list(taxa)\n fetch(taxa, cache)", "def validate_unique(self, *args, **kwargs):\n\n def validate_uniqueness_of_alias_and_mount_point(rethrow_error=None):\n uq_mount = self.__class__.objects.filter(\n Q(alias=self.mount_point)\n ).exclude(alias='')\n uq_alias = self.__class__.objects.filter(\n Q(mount_point=self.alias)\n )\n\n errors = {}\n if not self._state.adding and self.pk is not None:\n uq_mount = uq_mount.exclude(pk=self.pk)\n uq_alias = uq_alias.exclude(pk=self.pk)\n\n if uq_mount.exists():\n errors['mount_point'] = (\n 'Field is the same as \"Alias\" for other file system (alias=\"%s\", mount_point=\"%s\")!' 
% (\n uq_mount.get().alias, uq_mount.get().mount_point), )\n if uq_alias.exists():\n errors['alias'] = (\n 'Field is the same as \"Mount point\" for other file system (alias=\"%s\", mount_point=\"%s\")!' % (\n uq_alias.get().alias, uq_alias.get().mount_point), )\n if len(errors) > 0:\n errors[NON_FIELD_ERRORS] = ('To avoid confusion choose different name!',)\n if rethrow_error is None:\n rethrow_error = ValidationError(errors)\n else:\n rethrow_error.message_dict.update(errors)\n if rethrow_error is not None:\n return rethrow_error\n\n try:\n super(FileSystem, self).validate_unique(*args, **kwargs)\n e = validate_uniqueness_of_alias_and_mount_point()\n if e is not None:\n raise e\n except ValidationError as e:\n raise validate_uniqueness_of_alias_and_mount_point(e)", "def test_unique_item_properties_failed(self):\n check_value = [{\"a\": 1, \"b\": 3}, {\"a\": 1, \"b\": 2}]\n\n with pytest.raises(AssertionError):\n unique_item_properties(check_value, \"a\")", "def check_full(self, tax_str):\n\n taxa = [x.strip() for x in tax_str.split(';')]\n if len(taxa) < len(Taxonomy.rank_prefixes):\n self.logger.error('Taxonomy string contains too few ranks:')\n self.logger.error('%s' % str(taxa))\n return False\n elif len(taxa) > len(Taxonomy.rank_prefixes):\n self.logger.error('Taxonomy string contains too many ranks:')\n self.logger.error('%s' % str(taxa))\n return False\n\n for r, taxon in enumerate(taxa):\n if taxon[0:3] != Taxonomy.rank_prefixes[r]:\n self.logger.error('Taxon is not prefixed with the expected rank, %s.:' % Taxonomy.rank_prefixes[r])\n self.logger.error('%s' % str(taxa))\n return False\n\n return True" ]
[ "0.65743077", "0.5968016", "0.59334886", "0.593111", "0.5927378", "0.58089435", "0.5805196", "0.57898086", "0.5663472", "0.5587259", "0.55410296", "0.54817975", "0.547004", "0.5464735", "0.5454236", "0.5441391", "0.5432406", "0.5395761", "0.53333473", "0.5292971", "0.528695", "0.5279101", "0.52590984", "0.5220689", "0.5198742", "0.5175897", "0.51643735", "0.51572645", "0.51522565", "0.5147519" ]
0.80330855
0
Validates that model attributes contain correct TEL expressions without cyclic dependencies
def validate_model_attributes_tel(cls, values): if 'attributes' in values: attributes: List[FdqModelAttribute] = values['attributes'] # get set of available taxon slugs available_taxon_slugs = set(cls._get_available_attrs_taxon_slugs(attributes)) # our TEL visitor only verifies here that all available TEL transformations are valid # we dont care about the SQL output here invalid_taxons_attr = {} for attribute in attributes: try: # we dont really care from which taxon we start # if there is cyclic reference somewhere, we will run into it eventually taxon_slug = next(filter(None, attribute.field_map)) except StopIteration: # we dont care, if we dont find the taxon - this check is performed somewhere else continue try: # initialize options for the visitor visitor_parameters = AttributeValidationTelVisitorParams(taxon_slug, attributes) tree = ModelTelDialect.parse(attribute.data_reference) visitor = AttributeValidationTelVisitor(visitor_parameters) visitor.visit(tree) # check whether this TEL transformation uses any taxons which arent available in this model additional_taxons = visitor.result.used_taxon_slugs - available_taxon_slugs if len(additional_taxons) > 0: invalid_taxons_attr[attribute.data_reference] = additional_taxons except ModelTelCyclicReferenceException: # there's no point in checking other attributes when we run into cyclic reference raise ValueError( f'Data reference "{attribute.data_reference}" contains TEL transformation with cyclic reference' ) # if we found any attribute with missing taxons, output them all in one error message if invalid_taxons_attr: attribute_msgs = [ f'Data reference "{attr_key}": {", ".join(taxon_slugs)} not available in this model' for attr_key, taxon_slugs in invalid_taxons_attr.items() ] raise ValidationError([ErrorWrapper(ValueError(msg), 'attributes') for msg in attribute_msgs], cls) return values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_attributes(self, attrs):\n self.attrs.validate(attrs)", "def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)", "def validate_attribute(self, attr):\n self.validate(attr)", "def _obj_attr_validator( # pylint: disable=too-many-arguments\n object_name: str,\n attr: str,\n exact_attr: Any,\n regex_attr: Any,\n in_attr: Any,\n exact_attr_ignore: Any = _IGNORE_OBJ_PARAM,\n regex_attr_ignore: Any = _IGNORE_OBJ_PARAM,\n in_attr_ignore: Any = _IGNORE_OBJ_PARAM,\n disallowed_attrs_regex: Optional[AbstractSet[str]] = None,\n) -> Optional[ValidatorFn]:\n\n def get_obj_attr(v: Any, attr: str = attr) -> Any:\n return getattr(v, attr)\n\n if exact_attr is not exact_attr_ignore:\n\n @pred_to_validator(\n f\"{object_name} attribute '{attr}' value '{{value}}' is not '{exact_attr}'\",\n complement=True,\n convert_value=get_obj_attr,\n )\n def obj_attr_equals(v: EmailAddress) -> bool:\n return get_obj_attr(v) == exact_attr\n\n return obj_attr_equals\n\n elif regex_attr is not regex_attr_ignore:\n\n if disallowed_attrs_regex is not None and attr in disallowed_attrs_regex:\n raise ValueError(\n f\"Cannot define regex spec for {object_name} attribute '{attr}'\"\n )\n\n if not isinstance(regex_attr, str):\n raise TypeError(\n f\"{object_name} attribute '{attr}_regex' must be a string value\"\n )\n\n pattern = re.compile(regex_attr)\n\n @pred_to_validator(\n f\"{object_name} attribute '{attr}' value '{{value}}' does not \"\n f\"match regex '{regex_attr}'\",\n complement=True,\n convert_value=get_obj_attr,\n )\n def obj_attr_matches_regex(v: EmailAddress) -> bool:\n return bool(re.fullmatch(pattern, get_obj_attr(v)))\n\n return obj_attr_matches_regex\n\n elif in_attr is not in_attr_ignore:\n\n if not isinstance(in_attr, (frozenset, set)):\n raise TypeError(\n f\"{object_name} attribute '{attr}_in' must be set or frozenset\"\n )\n\n @pred_to_validator(\n f\"{object_name} attribute '{attr}' value '{{value}}' not in {in_attr}\",\n complement=True,\n convert_value=get_obj_attr,\n )\n def obj_attr_is_allowed_value(v: EmailAddress) -> bool:\n return get_obj_attr(v) in in_attr\n\n return obj_attr_is_allowed_value\n else:\n return None", "def testIrrelevantConstraintLogic(self):\n model = self.service.model\n\n xml = '''<template name=\"bad_cl\"><query name=\"bad_cl\"\n model=\"testmodel\" view=\"Employee.name Employee.age\"\n constraintLogic=\"A and B and C\"/></template>'''\n t = Template.from_xml(xml, model)\n self.assertEqual(str(t.get_logic()), \"\")\n\n xml = '''<template name=\"bad_cl\"><query name=\"bad_cl\"\n model=\"testmodel\" view=\"Employee.name Employee.age\"\n constraintLogic=\"A and B or (D and E) and C\"/></template>'''\n t = Template.from_xml(xml, model)\n self.assertEqual(str(t.get_logic()), \"\")\n\n xml = '''<template name=\"bad_cl\"><query name=\"bad_cl\"\n model=\"testmodel\" view=\"Employee.name Employee.age\"\n constraintLogic=\"A or B or (D and E) and C\">\n <constraint path=\"Employee.name\" op=\"IS NULL\"/><constraint\n path=\"Employee.age\" op=\"IS NOT NULL\"/>\n </query>\n </template>'''\n t = Template.from_xml(xml, model)\n self.assertEqual(str(t.get_logic()), \"A or B\")\n\n xml = '''<template name=\"bad_cl\"><query name=\"bad_cl\"\n model=\"testmodel\" view=\"Employee.name Employee.age\"\n constraintLogic=\"A or B or (D and E) and C\">\n <constraint path=\"Employee.name\" op=\"IS NULL\"/><constraint\n path=\"Employee.age\" op=\"IS NOT NULL\"/><constraint\n path=\"Employee.fullTime\" op=\"=\" value=\"true\"/>\n </query>\n </template>'''\n t = 
Template.from_xml(xml, model)\n self.assertEqual(str(t.get_logic()), \"(A or B) and C\")\n\n xml = '''<template name=\"bad_cl\"><query name=\"bad_cl\"\n model=\"testmodel\" view=\"Employee.name Employee.age\"\n constraintLogic=\"A or B or (D and E) or C\">\n <constraint path=\"Employee.name\" op=\"IS NULL\"/><constraint\n path=\"Employee.age\" op=\"IS NOT NULL\"/><constraint\n path=\"Employee.fullTime\" op=\"=\" value=\"true\"/>\n </query>\n </template>'''\n t = Template.from_xml(xml, model)\n self.assertEqual(str(t.get_logic()), \"A or B or C\")\n\n xml = '''<template name=\"bad_cl\"><query name=\"bad_cl\"\n model=\"testmodel\" view=\"Employee.name Employee.age\"\n constraintLogic=\"A or B and (D and E) or C\">\n <constraint path=\"Employee.name\" op=\"IS NULL\"/>\n <constraint path=\"Employee.age\" op=\"IS NOT NULL\"/>\n <constraint path=\"Employee.fullTime\" op=\"=\" value=\"true\"/>\n </query>\n </template>'''\n t = Template.from_xml(xml, model)\n self.assertEqual(str(t.get_logic()), \"(A or B) and C\")\n\n xml = '''<template name=\"bad_cl\"><query name=\"bad_cl\"\n model=\"testmodel\" view=\"Employee.name Employee.age\"\n constraintLogic=\"A or B or (D and E) and C\">\n <constraint path=\"Employee.name\" op=\"IS NULL\"/>\n <constraint path=\"Employee.age\" op=\"IS NOT NULL\"/>\n <constraint path=\"Employee.fullTime\" op=\"=\" value=\"true\"/>\n <constraint path=\"Employee.name\" op=\"IS NULL\"/>\n </query>\n </template>'''\n t = Template.from_xml(xml, model)\n self.assertEqual(str(t.get_logic()), \"(A or B or D) and C\")", "def valid_att_in_field(arch, **kwargs):\n return not arch.xpath('//field[not(@name)]')", "def _validate(self, instance, value):", "def test_model_formfield_doesnt_raise(self):\n try:\n fields_for_model(Color())\n except AttributeError:\n self.fail(\"Raised Attribute Error\")", "def validate(self, attrs):\n\n errors = {}\n order_obj = Order.objects.get(order_id=attrs['order_id'])\n if order_obj.courier_id.courier_id != attrs['courier_id'].courier_id:\n errors['order_id'] = f'Order with id {order_obj.order_id} is assigned to another courier.'\n unknown = set(self.initial_data) - set(self.fields)\n if unknown:\n errors['Unknown field(s)'] = ''.join(unknown)\n if order_obj.assign_time > attrs['complete_time']:\n errors['complete_time'] = 'complete_time cannot be greater then assign_time.'\n if errors:\n raise ValidationError(errors)\n return attrs", "def validate(attrs):\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False", "def validate(self, instance, value):", "def validate(self, instance, value):", "def validate(self, attrs):\n if attrs['product_mrp'] <= 0:\n raise serializers.ValidationError(\"Price Cannot Be Zero or Negative.\")\n return attrs", "def is_valid_attribute(self, attr):\n return self.is_valid(attr)", "def is_valid(self, data_model: DataModel) -> bool:", "def validate(attrs):\n print \"I GOT HERE.\"\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False", "def validated() -> Any:\n return attr.s(slots=True, kw_only=True, eq=False)", "def validate_model(self, model):\n if type(model) == str:\n for letter in model:\n if not letter.isalpha() and not letter.isdigit():\n return False\n return True\n return False", "def validate(self) -> None:\n\n if self.field not in self.model.table_fields:\n raise ValueError(f\"Value field {self.field} not present in 
{self.model.table}\")\n\n if self.pivot:\n if self.pivot not in self.model.table_fields:\n raise ValueError(\n f\"Pivot field {self.pivot} not present in {self.model.table}\"\n )\n\n if self.connector:\n if self.connector not in self.model.table_fields:\n raise ValueError(\n f\"Connector field {self.connector} not present in {self.model.table}\"\n )\n\n for field in self.selectors:\n if field not in self.model.table_fields:\n raise ValueError(f\"Selector field {field} not present in {self.model.table}\")", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def validate(self, attr=None, notattr=None):\n\n if attr is not None:\n specified_attrs = {attr: self.attributes[attr]}\n else:\n specified_attrs = self.attributes\n\n for attr, attr_structure in specified_attrs.items():\n if notattr is not None and attr is notattr:\n continue\n\n attrval = getattr(self, attr)\n if attrval is None or attrval == {}:\n continue\n\n attr_schema = attr_structure['schema']\n validatedattrval = attr_schema.validate(attrval)\n setattr(self, attr, validatedattrval)", "def test_field_rules():", "def is_valid_attributes(self, attrs):\n try:\n self.validate_attributes(attrs)\n return True\n except etal.LabelsSchemaError:\n return False", "def test_non_required_validation(self):\r\n Text().validate('')\r\n Text().validate(None)", "def additional_validation(self,**kwargs):\n return []", "def __validate():\n # TODO: implement", "def validate(self, attrs):\n club = attrs['club']\n tags = attrs.get('tags', [])\n for tag in tags:\n if tag.club != club:\n raise serializers.ValidationError(\n f\"The tag {tag.tag_name} does not belong to this club\")\n return attrs", "def isValidRelaxed(cls,root):\n valid = True\n # no anonymous entities allowed\n for a in root.getiterator(\"reltoken\"):\n if len(a)==0 and a.attrib['relaxed_tag']==\"entity\":\n printError(cls,inspect.stack()[1][3],\"Reltoken of physical type with no nested subtokens\")\n valid = False\n return(valid)", "def validate(self, value, model_instance):\n return super(self.__class__, self).validate(value.value, model_instance)", "def attribute_validation(cls, values: dict) -> dict:\n if not (total := values.get('total')):\n raise ValueError(\"Total attribute is required.\")\n \n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n \n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.market_lot_size_filter\n # if ONE :=1 and not filter.min_qty <= total <= filter.max_qty:\n # raise ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n total,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values" ]
[ "0.6086189", "0.5710481", "0.5692824", "0.559506", "0.5571307", "0.548958", "0.54329455", "0.54117227", "0.53911394", "0.53899544", "0.53848624", "0.53848624", "0.5369892", "0.5343006", "0.53288186", "0.53233105", "0.52718973", "0.5271803", "0.5263592", "0.5230621", "0.52248687", "0.52198863", "0.5215423", "0.5211091", "0.5206441", "0.5186843", "0.5184067", "0.5148455", "0.5147564", "0.51465946" ]
0.6568899
0
Check list of fields in joins against all available taxons on model.
def validate_joins_correct_taxons(cls, values): if 'attributes' in values and 'joins' in values: attributes: List[FdqModelAttribute] = values['attributes'] joins: List[FdqModelJoin] = values['joins'] # get set of available taxon slugs available_taxon_slugs = set(cls._get_available_attrs_taxon_slugs(attributes)) # for each join, verify that all its taxons are available in this model invalid_joins: Dict[int, Set[str]] = {} for idx, join in enumerate(joins): missing_taxons = set(join.taxons) - available_taxon_slugs if len(missing_taxons): invalid_joins[idx] = missing_taxons if invalid_joins: # report invalid joins raise ValidationError( [ ErrorWrapper( ValueError(f'Join {idx + 1} contains missing fields {",".join(taxon_slugs)}'), 'joins' ) for idx, taxon_slugs in invalid_joins.items() ], cls, ) return values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def available(self):\n fields = self._meta.get_fields()\n\n for field in fields:\n if isinstance(field, models.ManyToManyRel):\n attr = field.get_accessor_name()\n\n if getattr(self, attr).count() > 0:\n return False\n\n elif isinstance(field, models.OneToOneRel):\n attr = field.get_accessor_name()\n if getattr(self, attr, None):\n return False\n\n return True", "def check_relations(self, relations):\n if self.debug:\n print(\"Checking relations\")\n result = False\n work_relations = []\n\n # Eliminate unnecessary(duplicated) clauses\n if relations[\"is_derived_from\"]:\n relations[\"has_derived_form\"] = True\n relations[\"is_derived_from\"] = False\n if relations[\"etymology\"]:\n relations[\"etymological_origin_of\"] = True\n relations[\"etymology\"] = False\n\n for relation in relations:\n if relations[relation]:\n result = True\n work_relations.append(relation)\n return result, work_relations", "def _relation_check(self):\n seen = set()\n for entity in self.get_entities():\n for field in entity.fields.itervalues():\n if field.is_relation():\n seen.add(field.remote_name)\n missing = seen - set(self.entities.keys())\n if missing:\n raise exceptions.SchemaError(\n 'undefined entities referenced in relations: %s' % (\n ', '.join(missing)))", "def _is_valid_field(self, field, allow_m2m=True):\r\n try:\r\n self.query.setup_joins(field.split(LOOKUP_SEP), self.query.get_meta(), self.query.get_initial_alias(), False, allow_m2m, True)\r\n return True\r\n except FieldError:\r\n return False", "def check_fields(taxa: Dict[str, AphiaInfo]) -> None:\n for key, taxon in taxa.items():\n if taxon.get(\"scientificName\") is None:\n taxon.set_missing(\"scientificName\")\n if taxon.get(\"scientificNameID\") is None:\n taxon.set_missing(\"scientificNameID\")", "def test_client_tax_information_list(self):\n pass", "def test_find_relation_types(self):\n pass", "def has_taxa(self, **kwargs):\n if \"taxa\" not in kwargs and \"oids\" not in kwargs and \"labels\" not in kwargs:\n raise TypeError(\"Need to specify `taxa`, `oids` or `labels` list.\")\n taxa = set(kwargs.get(\"taxa\", []))\n oids = set(kwargs.get(\"oids\", []))\n labels = set(kwargs.get(\"labels\", []))\n taxon_oids = set([t.oid for t in self])\n taxon_labels = set([t.label for t in self])\n return taxa.issubset(self) \\\n and oids.issubset(taxon_oids) \\\n and labels.issubset(taxon_labels)", "def _check_traj_field_consistency(self, field_names):\n\n n_trajs = self.num_trajs\n field_names = Counter()\n for traj in self.iter_trajs():\n for name in field_names:\n if name in traj:\n field_names[name] += 1\n\n # if any of the field names has not occured for every\n # trajectory we raise an error\n for field_name, count in field_names:\n if count != n_trajs:\n return False\n\n return True", "def _ValidateFields(self, entity):\n # if field_universe is not defined just return true\n if not self._field_universe:\n return True\n\n valid = True\n for field_tuple in entity.local_field_names.values():\n if not self._ValidateField(field_tuple.field, entity):\n valid = False\n return valid", "def _join_allowed(self, source, target, field=None):\n join = (source, target)\n\n # No circles\n if target == source:\n return False\n\n # Prevent join to excluded models\n if target in self.excluded_models:\n return False\n\n # Never go back through the root\n if target == self.root_model:\n return False\n\n # Apply excluded joins if any\n if join in self._excluded_joins:\n _field = self._excluded_joins[join]\n if not _field:\n return False\n elif _field and _field == 
field:\n return False\n\n # Check if the join is allowed by a required rule\n for (_source, _target), _field in self._required_joins.items():\n if _target == target:\n if _source != source:\n return False\n\n # If a field is supplied, check to see if the field is allowed\n # for this join.\n if field and _field and _field != field:\n return False\n\n return True", "def areAllFieldsIncluded(ldata, columns):\n\treturn list(range(len(ldata))) == columns", "def _sanitize_joins(self) -> None:\n\n self.primaryjoin = _deep_deannotate(\n self.primaryjoin, values=(\"parententity\", \"proxy_key\")\n )\n if self.secondaryjoin is not None:\n self.secondaryjoin = _deep_deannotate(\n self.secondaryjoin, values=(\"parententity\", \"proxy_key\")\n )", "def check_base_fields(df,base_fields):\n emp_list = []\n for item in base_fields:\n if item not in list(df.columns):\n emp_list.append(item)\n\n return emp_list", "def valid(self) -> bool:\n are_populated = [bool(getattr(self, fld_nm)) for fld_nm in self.necessary_fields]\n return all(are_populated)", "def test_all(self):\n tested_fields = [\"numeric_stats\", \"distinct\", \"frequent-entries\", \"length\", \"special_type\", \"missing\"]\n for column_name in self.ground_truth:\n gt = self.ground_truth.get(column_name)\n pr = self.profiler_result.get(column_name)\n # to be tested field:\n for field_name in tested_fields:\n self.helper(column_name, field_name, gt, pr)", "def check_tables_populated(self) -> bool:\n sources = self.metadata.scan().get(\"Items\", [])\n if len(sources) < len(SourceName):\n logger.info(\"Gene sources table is missing expected sources.\")\n return False\n\n records = self.genes.query(\n IndexName=\"item_type_index\",\n KeyConditionExpression=Key(\"item_type\").eq(\"identity\"),\n Limit=1,\n )\n if len(records.get(\"Items\", [])) < 1:\n logger.info(\"Gene records index is empty.\")\n return False\n\n normalized_records = self.genes.query(\n IndexName=\"item_type_index\",\n KeyConditionExpression=Key(\"item_type\").eq(RecordType.MERGER.value),\n Limit=1,\n )\n if len(normalized_records.get(\"Items\", [])) < 1:\n logger.info(\"Normalized gene records index is empty.\")\n return False\n\n return True", "def _can_handle_query(cls, *query):\n chkattr = [\"Time\", \"Instrument\", \"SatelliteNumber\"]\n chklist = [x.__class__.__name__ in chkattr for x in query]\n for x in query:\n if x.__class__.__name__ == \"Instrument\" and x.value.lower() in (\n \"xrs\",\n \"goes\",\n ):\n return all(chklist)\n return False", "def test_unique_together(self):\n\n for mb_model in self.mb_model_list:\n indexes = connection.introspection.get_indexes(\n self.cursor, mb_model._meta.db_table)\n if not indexes and not is_db_view(mb_model._meta.db_table):\n self.assertTrue(mb_model._meta.unique_together)", "def test_tax_withheld(self):\n self.assertEqual(\n self.forecast.tax_withheld,\n self.person1.tax_withheld + self.person2.tax_withheld)", "def test_match_merge_link_for_taxlots_disassociated_records_if_no_longer_valid(self):\n # Cycle 1 / ImportFile 1\n base_state_details = {\n 'jurisdiction_tax_lot_id': 'Match Set',\n 'import_file_id': self.import_file_1.id,\n 'data_state': DATA_STATE_MAPPING,\n 'no_default_data': False,\n }\n tls_11 = self.taxlot_state_factory.get_taxlot_state(**base_state_details)\n\n self.import_file_1.mapping_done = True\n self.import_file_1.save()\n geocode_and_match_buildings_task(self.import_file_1.id)\n\n # Cycle 2 / ImportFile 2\n base_state_details['import_file_id'] = self.import_file_2.id\n tls_21 = 
self.taxlot_state_factory.get_taxlot_state(**base_state_details)\n\n self.import_file_2.mapping_done = True\n self.import_file_2.save()\n geocode_and_match_buildings_task(self.import_file_2.id)\n\n # Cycle 3 / ImportFile 3\n base_state_details['import_file_id'] = self.import_file_3.id\n tls_31 = self.taxlot_state_factory.get_taxlot_state(**base_state_details)\n\n self.import_file_3.mapping_done = True\n self.import_file_3.save()\n geocode_and_match_buildings_task(self.import_file_3.id)\n\n # Once updates are made to import process, these will correctly fail and be removed\n self.assertEqual(3, TaxLotView.objects.count())\n self.assertEqual(3, TaxLotState.objects.count())\n self.assertEqual(3, TaxLot.objects.count())\n\n # Link all 3\n view_21 = TaxLotView.objects.get(state_id=tls_21.id)\n match_merge_link(view_21.id, 'TaxLotState')\n\n # Capture linked ID\n view_11 = TaxLotView.objects.get(state_id=tls_11.id)\n initial_linked_id = view_11.taxlot_id\n\n # Unlink the first\n TaxLotState.objects.filter(id__in=[tls_11.id]).update(jurisdiction_tax_lot_id='No longer matches')\n match_merge_link(view_11.id, 'TaxLotState')\n\n refreshed_view_11 = TaxLotView.objects.get(state_id=tls_11.id)\n\n view_21 = TaxLotView.objects.get(state_id=tls_21.id)\n view_31 = TaxLotView.objects.get(state_id=tls_31.id)\n\n self.assertNotEqual(initial_linked_id, refreshed_view_11.taxlot_id)\n self.assertEqual(initial_linked_id, view_21.taxlot_id)\n self.assertEqual(initial_linked_id, view_31.taxlot_id)", "def get_loaded_field_names_cb(self, target, model, fields):\n names = [f.name for f in fields if not getattr(f, \"not_in_db\", False)]\n for field in fields:\n if getattr(field, \"not_in_db\", False):\n names += [f.name for f in field.fields]\n\n target[model] = set(names)", "def validate(self, kwargs):\n super().validate(kwargs)\n self.cc_log(\"INFO\", \"Data Processing Join: started validation\")\n if not kwargs.get(\"left-joinon\"):\n raise ValidationError(self, [\"left-joinon\"], \"Parameter cannot be empty!\")\n if not kwargs.get(\"right-joinon\"):\n raise ValidationError(self, [\"right-joinon\"], \"Parameter cannot be empty!\")\n if not kwargs.get(\"joinwith\"):\n raise ValidationError(self, [\"joinwith\"], \"Parameter cannot be empty!\")\n self.cc_log(\"INFO\", \"Data Processing Join: finished validation\")", "def validate_identifiers_correct_taxons(cls, values):\n if 'attributes' in values and 'identifiers' in values:\n attributes: List[FdqModelAttribute] = values['attributes']\n identifiers: List[str] = values['identifiers']\n\n # get set of available taxon slugs\n available_taxon_slugs = set(cls._get_available_attrs_taxon_slugs(attributes))\n\n # verify that all identifier taxons are available in this model\n invalid_ids = set(identifiers) - available_taxon_slugs\n\n if len(invalid_ids) > 0:\n raise ValueError(f'Identifier(s) {\", \".join(invalid_ids)} are not present as fields on the model')\n\n return values", "def checkMetamodelLevel(cls):\n for mmd in cls.metamodelDependencies():\n mmd.check()", "def assert_table_structure(self, items, field_names):\n for item in items:\n for field in field_names:\n self.assertIn(field, item)", "def validate_join_conditions(join_conditions):\n\n for condition in join_conditions:\n if condition.operator != '==':\n raise ValueError('Operator {} not supported; only equality joins are supported.'.format(\n operator))\n \n n_cond = len(join_conditions)\n if n_cond > 1:\n raise ValueError('Only 1 join condition is supported, but {} were found.'.format(n_cond))\n \n return 
join_conditions", "def test_there_are_fields(self):\n filds = ['name', 'cost', 'description', 'duration', 'reach', 'school']\n for fild in filds:\n self.assertTrue(fild in dir(Magias),\n 'Class Magias does not have the field {}'.format(fild))", "def _check_non_empty_fields(self, instance, exclusions=[]):\n empty_values = [None, \"\", [], {}]\n for slot in instance.__slots__:\n attribute = instance.__getattribute__(slot)\n if slot not in exclusions:\n self.assertTrue(\n attribute not in empty_values,\n \"Field '{}.{}' is empty!\".format(instance.__class__, slot)\n )\n if instance.__class__.isEmbeddedType(slot):\n if isinstance(attribute, list):\n for element in attribute:\n self._check_non_empty_fields(element, exclusions)\n elif isinstance(attribute, dict):\n for element in attribute.values():\n self._check_non_empty_fields(element, exclusions)\n else:\n self._check_non_empty_fields(attribute, exclusions)", "def test_getCpfRelations(self):\n pass" ]
[ "0.5716577", "0.5690752", "0.5353701", "0.53401566", "0.5238511", "0.52149445", "0.515485", "0.51326126", "0.5096266", "0.5083344", "0.5073783", "0.5030737", "0.50157", "0.49969763", "0.49925125", "0.496842", "0.4958986", "0.49469942", "0.4899754", "0.48933235", "0.48739576", "0.48702148", "0.48646358", "0.48642132", "0.48301718", "0.4806594", "0.47953942", "0.47914627", "0.47779077", "0.47739837" ]
0.7003824
0
Check list of identifiers against all available taxons on model.
def validate_identifiers_correct_taxons(cls, values):
    if 'attributes' in values and 'identifiers' in values:
        attributes: List[FdqModelAttribute] = values['attributes']
        identifiers: List[str] = values['identifiers']

        # get set of available taxon slugs
        available_taxon_slugs = set(cls._get_available_attrs_taxon_slugs(attributes))

        # verify that all identifier taxons are available in this model
        invalid_ids = set(identifiers) - available_taxon_slugs

        if len(invalid_ids) > 0:
            raise ValueError(f'Identifier(s) {", ".join(invalid_ids)} are not present as fields on the model')

    return values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_taxa(self, **kwargs):\n if \"taxa\" not in kwargs and \"oids\" not in kwargs and \"labels\" not in kwargs:\n raise TypeError(\"Need to specify `taxa`, `oids` or `labels` list.\")\n taxa = set(kwargs.get(\"taxa\", []))\n oids = set(kwargs.get(\"oids\", []))\n labels = set(kwargs.get(\"labels\", []))\n taxon_oids = set([t.oid for t in self])\n taxon_labels = set([t.label for t in self])\n return taxa.issubset(self) \\\n and oids.issubset(taxon_oids) \\\n and labels.issubset(taxon_labels)", "def test_check_all_ids(self):\r\n\r\n fasta_labels = ['sample1_1', 'sample1_2', 'sample3_3', 'sample2_4']\r\n\r\n sample_ids = ['sample1', 'sample2', 'sample3']\r\n\r\n sample_ids_not_found = check_all_ids(fasta_labels, sample_ids)\r\n\r\n # should return True as all are found\r\n\r\n self.assertEqual(sample_ids_not_found, True)\r\n\r\n fasta_labels = ['sample1_1', 'sample1_2', 'sample3_3', 'sample2_4']\r\n\r\n sample_ids = ['sample1', 'sample2', 'sample3', 'sampleX']\r\n\r\n sample_ids_not_found = check_all_ids(fasta_labels, sample_ids)\r\n\r\n # sampleX should not be found\r\n\r\n self.assertEqual(sample_ids_not_found, ['sampleX'])", "def test_client_tax_information_list(self):\n pass", "def check_txt_ids(self):\n for awi in self:\n if not awi.txt_ids:\n raise exceptions.except_orm(\n _(\"Missing Values !\"),\n _(\"Missing VAT TXT Lines!!!\"))\n return True", "def test_ent_ids(ruler: SpaczzRuler) -> None:\n assert all(\n [\n ent_id in ruler.ent_ids\n for ent_id in [\"Antibiotic\", \"Developer\", \"USA\", \"Metal\"]\n ]\n )\n assert len(ruler.ent_ids) == 4", "def testTaxaData(self):\n try:\n numEukaryota = 0\n numBacteria = 0\n numVirus = 0\n numArchaea = 0\n numOther = 0\n numUnclass = 0\n logger.info(\"Loading taxonomy data\")\n tU = TaxonomyUtils()\n logger.info(\"Done loading taxonomy data\")\n iCount = 0\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n for entryId in entryD:\n for entityId, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n taxId = eD[\"ncbi_taxonomy_id\"] if \"ncbi_taxonomy_id\" in eD else None\n if taxId is None:\n logger.debug(\"Missing taxId entryId %s entityId %s\", entryId, entityId)\n continue\n # lin = tU.getLineage(taxId)\n # nmL = tU.getLineageNames(taxId)\n ok1 = tU.isEukaryota(taxId)\n if ok1:\n numEukaryota += 1\n ok3 = tU.isVirus(taxId)\n if ok3:\n numVirus += 1\n ok2 = tU.isBacteria(taxId)\n if ok2:\n numBacteria += 1\n #\n ok4 = tU.isArchaea(taxId)\n if ok4:\n numArchaea += 1\n #\n ok5 = tU.isOther(taxId)\n if ok5:\n numOther += 1\n #\n ok6 = tU.isUnclassified(taxId)\n if ok6:\n numUnclass += 1\n\n if ok1 and (ok1 and ok2):\n logger.info(\"taxid %r conflicting lineage\", taxId)\n #\n if not ok1 and not ok2 and not ok3 and not ok4 and not ok5 and not ok6:\n logger.info(\"unassigned taxid %r\", taxId)\n\n logger.debug(\"taxId %r entryId %s entityId %s\", taxId, entryId, entityId)\n iCount += 1\n # if iCount > 5000:\n # break\n logger.info(\"Eukaryota %d Bacteria %d Virus %d Archaea %d Other/Syn %r Unclass %d\", numEukaryota, numBacteria, numVirus, numArchaea, numOther, numUnclass)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def identify_multiple_taxa(cazy_data, multiple_taxa_logger):\n multiple_taxa_gbk = []\n\n for genbank_accession in tqdm(\n cazy_data,\n total=len(list(cazy_data.keys())), desc='Searching for multiple taxa annotations',\n ):\n\n if len(cazy_data[genbank_accession]['taxonomy']) > 1:\n multiple_taxa_gbk.append(genbank_accession)\n\n for tax_tuple in 
cazy_data[genbank_accession]['taxonomy']:\n multiple_taxa_logger.warning(\n f\"{genbank_accession}\\t{tax_tuple.kingdom}\\t{tax_tuple.organism}\"\n )\n \n else:\n cazy_data[genbank_accession]['organism'] = list(cazy_data[genbank_accession]['taxonomy'])[0]\n\n return multiple_taxa_gbk", "def test_verify_all_gates_have_valid_targets():\n nSpinOrbitals = input_json[\"constants\"][\"nSpinOrbitals\"]\n\n interaction_list = input_json[\"terms\"]\n\n for interaction in interaction_list:\n targets = interaction[\"targets\"]\n\n for orbital in targets:\n assert 0 <= orbital < nSpinOrbitals, \"Orbital target is out of range\"", "def check_taxa(taxa: Dict[str, AphiaInfo], cache: AphiaCacheInterface = None) -> None:\n\n check_fields(taxa)\n detect_lsid(taxa)\n detect_external(taxa)\n match_obis(taxa, cache)\n match_worms(taxa)\n check_annotated_list(taxa)\n fetch(taxa, cache)", "def validate_joins_correct_taxons(cls, values):\n if 'attributes' in values and 'joins' in values:\n attributes: List[FdqModelAttribute] = values['attributes']\n joins: List[FdqModelJoin] = values['joins']\n\n # get set of available taxon slugs\n available_taxon_slugs = set(cls._get_available_attrs_taxon_slugs(attributes))\n\n # for each join, verify that all its taxons are available in this model\n invalid_joins: Dict[int, Set[str]] = {}\n for idx, join in enumerate(joins):\n missing_taxons = set(join.taxons) - available_taxon_slugs\n if len(missing_taxons):\n invalid_joins[idx] = missing_taxons\n\n if invalid_joins:\n # report invalid joins\n raise ValidationError(\n [\n ErrorWrapper(\n ValueError(f'Join {idx + 1} contains missing fields {\",\".join(taxon_slugs)}'), 'joins'\n )\n for idx, taxon_slugs in invalid_joins.items()\n ],\n cls,\n )\n\n return values", "def _check_employee(self):\n\n for record in self:\n\n if record.nik_number:\n # find duplicate nik\n employee_ids = self.search([('id', 'not in', self.ids), ('nik_number', '=', record.nik_number)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Employee Identity Number.\")\n raise ValidationError(error_msg)\n\n # check nik format. 
it required base_indonesia\n if not record._check_nik(record):\n error_msg = _(\"NIK did not match with Company Code.\")\n raise ValidationError(error_msg)\n\n if record.identification_id:\n employee_ids = self.search([('id', 'not in', self.ids), ('identification_id', '=', record.identification_id)])\n if employee_ids:\n error_msg = _(\"There is duplicate of Identification Number.\")\n raise ValidationError(error_msg)\n\n return True", "def checkIdentifiersPresent(self, checkDict):\n assert self._registeredIdentifiers.issubset(set(checkDict.keys())), f'missing identifiers: {self._registeredIdentifiers - set(checkDict.keys())}'", "def _check_countries(countries: list) -> bool:\n countries_count = Country.objects.all() \\\n .filter(id__in=countries) \\\n .distinct().count()\n\n if countries_count != len(set(countries)):\n return False\n\n return True", "def has_taxon(self, **kwargs):\n if \"taxon\" not in kwargs and \"oid\" not in kwargs and \"label\" not in kwargs:\n raise TypeError(\"Need to specify oid or label.\")\n req_taxon = kwargs.get(\"taxon\", None)\n oid = kwargs.get(\"oid\", None)\n label = kwargs.get(\"label\", None)\n for self_taxon in self:\n if (req_taxon is not None and req_taxon is self_taxon) \\\n or (oid is not None and self_taxon.oid == oid) \\\n or (label is not None and self_taxon.label == label):\n return True\n return False", "def check_multi_exon(tr_nc_index_dict, ncdf):\n\n\tfor gene in tr_nc_index_dict:\n\t\n\t\ttempdf = ncdf.iloc[tr_nc_index_dict[gene][0]:tr_nc_index_dict[gene][1]]\n\t\texon_count = 0\n\t\t\n\t\tfor i in tempdf.index:\n\t\t\tif tempdf.loc[i,'feature'] == 'exon':\n\t\t\t\texon_count += 1\n\t# print exon_count\n\t\tif exon_count >1 :\n\t\t\tprint \" more than one exon for %s\" % gene\n\t\t\tsys.exit()\t# prevent writing fasta if there is multi exon transcript", "def test_all_hyponyms(id, hyponym_ids):\n synset = germanet_data.get_synset_by_id(id)\n hyponyms = synset.all_hyponyms()\n np.testing.assert_equal(sorted([synset.id for synset in hyponyms]), sorted(hyponym_ids))", "def get_taxids(organism_names):\r\n\r\n taxids = []\r\n\r\n for organism in organism_names:\r\n handle = Entrez.esearch(db=\"Taxonomy\", term=organism)\r\n record = Entrez.read(handle)\r\n print(record[\"IdList\"])\r\n try:\r\n taxids.append(record[\"IdList\"][0])\r\n except IndexError:\r\n pass\r\n\r\n return taxids", "def _is_classifier_incident_types_found(self, classifier_data):\n is_valid = True\n classifier_incident_types = set(classifier_data.get('incident_types', set()))\n if classifier_incident_types:\n # setting initially to false, if the incident types is in the id_set, it will be valid\n is_valid = False\n for incident_type in self.incident_types_set:\n incident_type_name = list(incident_type.keys())[0]\n # remove a related incident types if exists in the id_set\n if incident_type_name in classifier_incident_types:\n classifier_incident_types.remove(incident_type_name)\n if not classifier_incident_types:\n break\n\n if not classifier_incident_types: # if nothing remains, these incident types were all found\n is_valid = True\n else: # there are missing incident types in the id_set, classifier is invalid\n error_message, error_code = Errors.classifier_non_existent_incident_types(\n str(classifier_incident_types))\n if not self.handle_error(error_message, error_code, file_path=\"id_set.json\"):\n is_valid = True\n\n return is_valid", "def validate_unique_taxon_slugs(cls, values):\n if 'attributes' in values:\n # count occurrence of each taxon slug in attributes\n 
attributes: List[FdqModelAttribute] = values['attributes']\n taxon_slugs = cls._get_available_attrs_taxon_slugs(attributes)\n\n taxon_slugs_counter = Counter(taxon_slugs)\n\n multiple_taxon_slugs = [\n taxon_slug for taxon_slug, occurrence in taxon_slugs_counter.items() if occurrence > 1\n ]\n if len(multiple_taxon_slugs):\n raise ValueError('Following fields are mapped more than once - ' + ','.join(multiple_taxon_slugs))\n\n return values", "def has_duplicates(tree):\n taxa = [tip.name for tip in tree.tips()]\n if '' in taxa or None in taxa:\n raise ValueError('Empty taxon name(s) found.')\n return len(set(taxa)) < len(taxa)", "def replace_multiple_tax(cazy_data, genbank_accessions, replaced_taxa_logger, args, invalid_ids):\n logger = logging.getLogger(__name__)\n\n if args.skip_ncbi_tax:\n logger.warning(\n f\"Skipping retrieving the latest taxonomy classification from the NCBI Taxonomy db\\n\"\n \"Adding the first tax listed for each protein in the CAZy db\"\n )\n cazy_data = select_first_organism(cazy_data, genbank_accessions, replaced_taxa_logger)\n success = True\n return cazy_data, success\n\n batches = get_chunks_list(genbank_accessions, args.ncbi_batch_size)\n\n for batch in tqdm(batches, desc=f\"Batch retrieving tax info from NCBI. Batch size:{args.ncbi_batch_size}\"):\n\n id_post_list = str(\",\".join(batch))\n\n success = False\n\n try:\n epost_results = Entrez.read(\n entrez_retry(\n args.retries,\n Entrez.epost,\n \"Protein\",\n id=id_post_list,\n )\n )\n success = True\n\n except (TypeError, AttributeError): # if no record is returned from call to Entrez\n # error not due to the presence of invalid IDs\n logger.error(\n f\"Entrez failed to post assembly IDs for this batch.\\n\"\n \"Not retrieving tax data from NCBI for these proteins\"\n \"Selecting the first organism retrieved from CAZy as the source organism\\nProtein accessions:\\n\"\n f\"{batch}\"\n )\n # cazy_data, gbk_accessions, replaced_taxa_logger\n cazy_data = select_first_organism(cazy_data, batch, replaced_taxa_logger)\n success = True\n continue\n\n except RuntimeError:\n logger.warning(\"Found GenBank accessions in CAZy data that are no longer in NCBI\")\n\n if invalid_ids:\n # replace_multiple_tax was called by replace_multiple_tax_with_invalid_ids\n # return results, don't use recursive programming\n continue\n\n else:\n # first time replace_multiple_tax was called\n cazy_data, success = replace_multiple_tax_with_invalid_ids(\n cazy_data,\n genbank_accessions,\n replaced_taxa_logger,\n args,\n )\n\n if success is False:\n logger.error(\n \"Could not retrieve taxonomy data from NCBI for this batch,\\n\"\n \"Using the first source organism retrieved from CAZy for each GenBank accession\\n\"\n \"Protein accessions:\\n\"\n f\"{batch}\"\n )\n\n cazy_data = select_first_organism(cazy_data, genbank_accessions, replaced_taxa_logger)\n success = True\n\n else:\n logger.info(\"Parsing data retrieved from NCBI\")\n cazy_data = get_ncbi_tax(epost_results, cazy_data, replaced_taxa_logger, args)\n\n return cazy_data, success", "def _compute_tax_id(self):\n for order in self:\n order.order_line._compute_tax_id()", "def _check_availability(self, names: Iterable) -> None:\n unavailable = [x for x in names if x not in self.__by_name.keys()]\n if unavailable:\n raise ValueError(f'datasets: {unavailable} not available in the {self.region} region.')", "def test_all_hypernyms(id, hypernym_ids):\n synset = germanet_data.get_synset_by_id(id)\n hypernyms = synset.all_hypernyms()\n np.testing.assert_equal(sorted([synset.id for synset 
in hypernyms]), sorted(hypernym_ids))", "def _check_items(cls, sequence):\n all([cls._check_item(x) for x in sequence])", "def validate(self, taxonomy,\n check_prefixes,\n check_ranks,\n check_hierarchy,\n check_species,\n check_group_names,\n check_duplicate_names,\n check_capitalization,\n report_errors=True):\n\n # check for incomplete taxonomy strings or unexpected rank prefixes\n invalid_ranks = {}\n invalid_prefixes = {}\n invalid_species_name = {}\n invalid_group_name = {}\n invalid_capitalization = set()\n for taxon_id, taxa in taxonomy.items():\n if check_ranks:\n if len(taxa) != len(Taxonomy.rank_prefixes):\n invalid_ranks[taxon_id] = ';'.join(taxa)\n continue\n\n if check_prefixes:\n for r, taxon in enumerate(taxa):\n if taxon[0:3] != Taxonomy.rank_prefixes[r]:\n invalid_prefixes[taxon_id] = [taxon, ';'.join(taxa)]\n break\n\n if check_group_names:\n for taxon in taxa:\n canonical_taxon = ' '.join([t.strip() for t in re.split('_[A-Z]+(?= |$)', taxon[3:])]).strip()\n if canonical_taxon and re.match('^[a-zA-Z0-9- ]+$', canonical_taxon) is None:\n if not taxon.startswith('s__') or check_species:\n invalid_group_name[taxon_id] = [taxon, 'Taxon contains invalid characters']\n\n if check_species:\n genus_index = Taxonomy.rank_index['g__']\n species_index = Taxonomy.rank_index['s__']\n if len(taxa) > species_index:\n species_name = taxa[species_index]\n valid, error_msg = self.validate_species_name(species_name, require_full=True, require_prefix=True)\n if not valid:\n invalid_species_name[taxon_id] = [species_name, error_msg]\n\n if species_name != 's__':\n genus_name = taxa[genus_index]\n generic_name = species_name.split()[0]\n if genus_name[3:] != generic_name[3:]:\n invalid_species_name[taxon_id] = [species_name,\n 'Genus and generic names do not match: %s' % genus_name]\n\n if check_capitalization:\n for taxon in taxa:\n if taxon[3].islower():\n invalid_capitalization.add(taxon)\n\n # check for duplicate names\n invalid_duplicate_name = []\n if check_duplicate_names:\n invalid_duplicate_name = self.duplicate_names(taxonomy, check_species)\n\n # check for inconsistencies in the taxonomic hierarchy\n invalid_hierarchies = defaultdict(set)\n missing_parent = set()\n if check_hierarchy:\n expected_parent = self.taxonomic_consistency(taxonomy, False)\n\n for taxon_id, taxa in taxonomy.items():\n for r in range(1, len(taxa)):\n if len(taxa[r]) == 3:\n continue\n\n if r == self.rank_index['s__'] and not check_species:\n continue\n\n if taxa[r] not in expected_parent:\n missing_parent.add(taxa[r])\n elif taxa[r - 1] != expected_parent[taxa[r]]:\n invalid_hierarchies[taxa[r]].add(taxa[r - 1])\n invalid_hierarchies[taxa[r]].add(expected_parent[taxa[r]])\n\n if report_errors:\n if len(invalid_ranks):\n print('')\n print('Taxonomy contains too few ranks:')\n for taxon_id, taxa_str in invalid_ranks.items():\n print('%s\\t%s' % (taxon_id, taxa_str))\n\n if len(invalid_prefixes):\n print('')\n print('Taxonomy contains an invalid rank prefix:')\n for taxon_id, info in invalid_prefixes.items():\n print('%s\\t%s\\t%s' % (taxon_id, info[0], info[1]))\n\n if len(invalid_group_name):\n print('')\n print('Taxa containing invalid characters:')\n for taxon_id, err_msg in invalid_group_name.items():\n print('%s\\t%s\\t%s' % (taxon_id, err_msg[0], err_msg[1]))\n\n if len(invalid_species_name):\n print('')\n print('Taxonomy contains invalid species names:')\n for taxon_id, info in invalid_species_name.items():\n print('%s\\t%s\\t%s' % (taxon_id, info[0], info[1]))\n\n if len(invalid_duplicate_name):\n 
print('')\n print('Taxonomy contains identical taxon names in multiple lineages:')\n for duplicate_name in invalid_duplicate_name.keys():\n print('%s' % duplicate_name)\n\n if len(missing_parent):\n print('')\n print('Taxonomy contains taxa with an undefined parent:')\n for taxon in missing_parent:\n print('%s' % taxon)\n\n if len(invalid_hierarchies):\n print('')\n print('Taxonomy contains taxa with multiple parents:')\n for child_taxon, parent_taxa in invalid_hierarchies.items():\n print('%s\\t%s' % (child_taxon, ', '.join(parent_taxa)))\n\n if len(invalid_capitalization):\n print('')\n print('Taxa do not start with a capital letter:')\n for taxon in invalid_capitalization:\n print('{}'.format(taxon))\n\n return invalid_ranks, invalid_prefixes, invalid_species_name, invalid_hierarchies, invalid_group_name, invalid_capitalization", "def check_specific_names(citelist: list, specific_names: list) -> None:\n unique_names = list()\n nameset = set()\n for c in citelist:\n if c.name != \".\":\n clean = clean_specific_name(c.name)\n if (not (clean in nameset)) and (clean != \"\"):\n nameset |= {clean}\n unique_names.append(clean)\n unique_names.sort()\n for n in unique_names:\n is_found = False\n for s in specific_names:\n if n in s.variations:\n is_found = True\n if not is_found:\n report_error(\"Missing specific name: \" + n)", "def check_completeness(ISM):\n for item in ISM:\n if item not in ['A', 'T', 'C', 'G', '-']:\n return False\n return True", "def _idxs_are_present(self, *args):\n return set(args).issubset(set(range(self.n_atoms)))", "def taxis_available(taxi_types):\n for number, taxi in enumerate(taxi_types):\n print(\"{} - {}\".format(number, taxi))" ]
[ "0.5959546", "0.5623623", "0.5571614", "0.54068583", "0.54007125", "0.53843564", "0.53759086", "0.5344634", "0.5308052", "0.528676", "0.52811086", "0.5273162", "0.5246677", "0.5235604", "0.5228496", "0.5165709", "0.51461434", "0.5132584", "0.51072395", "0.510238", "0.50868255", "0.50630724", "0.50609934", "0.5056451", "0.5035472", "0.50345623", "0.50293523", "0.50085247", "0.49860984", "0.4971566" ]
0.7016716
0
Method of Sainte-Laguë/Schepers to distribute seats; votes = list of votes
def distributeSeats(total_seats, votes, debug=False, steps=1):
    seats=[0]*len(votes)
    total_votes = np.array(votes).sum()
    d = round(total_votes/total_seats, 0)
    assigned_seats = 0
    while assigned_seats != total_seats:
        i = 0
        for e in votes:
            seats[i] = round(e/d, 0)
            if debug:
                print("e=" + str(e) + " seats[i]=" + str(seats[i]))
            i=i+1
        assigned_seats = np.array(seats).sum()
        if assigned_seats < total_seats:
            d=(d-steps)+d%steps
        elif assigned_seats > total_seats:
            d=(d+steps)-d%steps
        else:
            break
        if debug:
            print("d=" + str(d) + "; assigned_seats=" + str(assigned_seats))
    if debug:
        print("")
        print("FINAL: d=" + str(d) + "; assigned_seats=" + str(assigned_seats) + "; total votes: " + str(total_votes))
    return seats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def final_seat_assignment():\n parties = get_sorted_parties()\n allocated_seats = get_sorted_allocated_seats() \n #list(zip(parties, allocated_seats))\n #pandas.concat([parties, allocated_seats], axis=1)\n distributed_seats = []\n for i in range(0, len(parties)):\n list_votes2 = get_sorted_votes2([\"state\"], parties[i]) \n list_min_seats = get_sorted_min_seats([\"state\"], parties[i]) \n list_ueberhang = get_sorted_ueberhang([\"state\"], parties[i])\n seats2dist = allocated_seats[i] - sum(list_ueberhang)\n print(parties[i])\n distributed_seats.append((parties[i]\n , max(distributeSeats(seats2dist, list_votes2, False, 100) , list_min_seats)\n )) # adding tuples\n \n return distributed_seats", "def get_all_votes(self) -> List[dict]:", "async def applyVote(self, votes):\n voteCount = {vote: 0 for vote in self.getMembersName()}\n voteCount[None] = 0\n for vote in votes.values():\n voteCount[vote] += 1\n\n if voteCount[None] != 0:\n await self.textChannel.send(\n \"Attention, des joueurs n'ont pas votรฉ / ont mal รฉcrit, les votes peuvent รชtre faussรฉs.\")\n del voteCount[None]\n\n playerOrder = sorted(voteCount.items(), key=lambda x: x[1], reverse=True)\n print(\"playerOrder\", playerOrder)\n if playerOrder[0][1] == 0: # Nobody vote\n await self.textChannel.send(\"`Partie non valide`, personne n'a votรฉ.\")\n\n elif playerOrder[0][1] == 1: # People think nobody is a werewolf\n await self.textChannel.send(\"Le village pense qu'il n'y a pas de loups-garou ? Vรฉrification ...\")\n werewolves = self.getWolves()\n if len(werewolves) == 0:\n await self.textChannel.send(\"Le village a raison, il n'y a pas de loups-garous parmis eux.\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNร‰```\")\n else:\n await self.textChannel.send(\"Malheuresement, il y avait```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNร‰-```\")\n\n else: # Classic vote\n werewolves = self.getWolves()\n deaths = []\n for i in range(len(playerOrder)):\n player = self.getMemberFromName(name=playerOrder[i][0])\n isDead = await player.isDead(channel=self.textChannel)\n if isDead:\n deaths += await player.death(channel=self.textChannel, members=self.players)\n print(\"voteCount :\", voteCount)\n\n # Get player name with same number of vote against them\n playerEqualVote = []\n for p in playerOrder:\n if p[1] == playerOrder[i][1] and p[0] != playerOrder[i][0]:\n playerEqualVote.append(self.getMemberFromName(name=p[0]))\n print(\"Other players with equals number of vote :\", playerEqualVote)\n for otherPlayer in playerEqualVote:\n isDead = await otherPlayer.isDead(channel=self.textChannel)\n if isDead:\n deaths += await otherPlayer.death(channel=self.textChannel, members=self.players)\n break\n\n for i in range(len(deaths)):\n if deaths[i] is None:\n del deaths[i]\n\n if len(deaths) == 0: # No one die\n if len(werewolves) == 0: # No Werewolves\n await self.textChannel.send(\"Il n'ya pas eu de mort et il n'y a aucun Loup-Garou !\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNร‰```\")\n else: # Werewolves among players\n await self.textChannel.send(\n \"Il n'y a pas eu de mort mais```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNร‰-```\")\n\n elif len(deaths) == 1:\n if deaths[0].lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rรชveur\"]: # Werewolf die\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNร‰```\")\n elif deaths[0].lastRole in 
[\"Tanneur\"]: # Tanner died\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNร‰#```\")\n if len(werewolves) > 0: # Wolves in game\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT ร‰GALEMENT GAGNร‰```\")\n else: # Villager died\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNร‰-```\")\n\n else: # more than 2 deaths\n rolesDead = []\n for dead in deaths:\n if dead.lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rรชveur\"]:\n rolesDead.append(\"Loup-Garou\")\n elif dead.lastRole in [\"Tanneur\"]:\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNร‰#```\")\n else:\n rolesDead.append(\"Villageois\")\n print(\"rolesDead :\", rolesDead)\n rolesDead = list(dict.fromkeys(rolesDead))\n print(\"rolesDead unique :\", rolesDead)\n if \"Loup-Garou\" in rolesDead:\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNร‰```\")\n else:\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNร‰-```\")", "def voter_votes(request, election, voter_uuid):\n voter = Voter.get_by_election_and_uuid(election, voter_uuid)\n votes = CastVote.get_by_voter(voter)\n return [v.toJSONDict() for v in votes]", "def cast_vote(\n self, user_id: int, election_id: int, ranked_candidate_list: List[int]\n ) -> int:", "def rank_candidates(table):\n ranking = []\n\n # get list of all candidates who received a vote\n full_list = elim_dupe([name for vote in table for name in vote])\n # print full_list\n \n while len(ranking) < len(full_list):\n \n # All unranked candidates are considered eligible\n eligible = [name for name in full_list if name not in ranking]\n \n while True:\n \n # Remove ineligible and eliminated candidates from votes\n temp_ballots = [[name for name in vote if name in eligible] for vote in table]\n \n # If no candidates on the ballot are eligible and the ballot does not have\n # \"no confidence\" written on it, the ballot is discarded and not considered a vote.\n temp_ballots = [vote for vote in temp_ballots if len(vote) > 0]\n\n total_votes = len(temp_ballots)\n\n if total_votes == 0:\n return ranking\n\n top_choices = [vote[0] for vote in temp_ballots]\n \n # All ballots are considered to be a vote for the\n # highest-ranked eligible candidate on the ballot.\n vote_count = {name: top_choices.count(name) for name in eligible}\n print vote_count\n winner = [k for k in vote_count if (vote_count[k]*2) > total_votes]\n\n if len(winner) > 0:\n # If a single candidate has a majority of the\n # votes, they receive the next highest ranking\n if winner[0] == NO_CONFIDENCE:\n return ranking\n \n ranking += winner\n \n break;\n\n vote_count.pop(NO_CONFIDENCE, None)\n\n # If no single candidate has a majority of the votes,\n # then one will be deemed ineligible.\n\n min_votes = vote_count[min(vote_count, key=vote_count.get)]\n \n least_voted = {k:vote_count[k] for k in vote_count if vote_count[k] == min_votes}\n \n # If a single candidate has the least amount of votes, they become ineligible,\n while len(least_voted) > 1:\n temp_ballots = [vote[1:] for vote in temp_ballots if len(vote[1:]) > 0]\n if len(temp_ballots) == 0:\n return ranking\n next_choices = [vote[0] for vote in temp_ballots if vote[0] in least_voted]\n least_voted = {name: (next_choices.count(name) + least_voted[name]) for name in least_voted}\n min_votes = least_voted[min(least_voted, key=least_voted.get)]\n least_voted = {k: least_voted[k] for k in least_voted if least_voted[k] == min_votes}\n \n remove = least_voted.keys()[0]\n eligible = [name for name in 
eligible if name != remove]\n\n\n return ranking", "def _tally_votes(self, labels, distances):\n votes = collections.defaultdict(int)\n for i, index in enumerate(distances.order(ascending=True).index):\n if i < self.k:\n votes[labels[index]] += 1\n else:\n break\n return votes", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def get_voters():", "def get_voters():", "def make_votes(vote_tuples: List[Tuple[Disposable, Category, int]]):\n for vote_tuple in vote_tuples:\n DisposableVote.objects.create(disposable=vote_tuple[0],\n category=vote_tuple[1],\n count=vote_tuple[2])", "def arrange_reservations(guests=None):\n\n seats = new_seating_chart()\n\n if guests:\n for seat_number in range(1, len(guests)):\n seats[seat_number] = guests[seat_number]\n return seats", "def assign_vote(from_userid, to_userid, group):", "def rankPairs (self):\n def key (matrix, pair):\n # majority is positive, we want larger ones first\n major = matrix[pair[0]][pair[1]]\n # minority is negative because we want the smaller ones first\n minor = -1*matrix[pair[1]][pair[0]]\n return (major,minor)\n\n self.pairs = [(x,y) for x in self.poller.candidates for y in self.poller.candidates if x != y]\n matrix = self.poller.voteMatrix()\n # reverse=true to indicate descending sort\n self.pairs.sort(key=lambda pair: key(matrix,pair), reverse=True)\n self.weights = { pair : key(matrix,pair) for pair in self.pairs }\n self.pairs = [pair for pair in self.pairs if self.weights[pair][0] > -1*self.weights[pair][1]]", "def t(p, vote_count):\n return vote_count[p]", "def vote_for_k(utilities, k):\n utilities = np.asarray(utilities)\n n_cands = utilities.shape[1]\n if k == 'half':\n # \"It is interesting to observe that the vote-for-k and vote-for-(n-k)\n # voting systems are equally effective.\"\n # So for 7 candidates, we could use either k=4 or k=3 (= 7//2)\n # TODO: Though this seems only true with infinite voters?\n k = n_cands // 2\n elif -n_cands < k < 0:\n k = n_cands + k\n elif not 0 < k < n_cands:\n raise ValueError(f'k of {k} not possible with {n_cands} candidates')\n\n # Efficiently get indices of top k candidates for each voter\n # https://stackoverflow.com/a/23734295/125507\n # TODO: How are tied utilities handled, such as top 2 with 3 tied? 
Random?\n top_k = np.argpartition(utilities, -k, axis=1)[:, -k:]\n\n # Create blank ballots\n approvals = np.zeros(utilities.shape, np.uint8)\n\n # Fill in approvals\n # TODO: Not sure if this is the most efficient way\n approvals[np.arange(len(approvals))[:, np.newaxis], top_k] = 1\n return approvals", "def _compute_seats(self):\n # initialize fields to 0 + compute seats availability\n for ticket in self:\n ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'\n ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0\n # aggregate registrations by ticket and by state\n if self.ids:\n state_field = {\n 'draft': 'seats_unconfirmed',\n 'open': 'seats_reserved',\n 'done': 'seats_used',\n }\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n # compute seats_available\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)", "def compute_spa_revenue(bidders, anonymous_reserve):\r\n revenue = 0.0\r\n values = [np.array(bidder.values) for bidder in bidders]\r\n probs = [np.array(bidder.prob) for bidder in bidders]\r\n # enumerate the top two bids\r\n for i in range(len(bidders)):\r\n for j in range(i + 1, len(bidders)):\r\n for i_value_idx in range(len(bidders[i].values)):\r\n if values[i][i_value_idx] < anonymous_reserve:\r\n continue\r\n for j_value_idx in range(len(bidders[j].values)):\r\n if values[j][j_value_idx] < anonymous_reserve:\r\n continue\r\n if values[i][i_value_idx] <= values[j][j_value_idx]:\r\n # break ties lexicographically j > i, so j is the winner\r\n second_bid = values[i][i_value_idx]\r\n second_bidder = i\r\n else:\r\n second_bid = values[j][j_value_idx]\r\n second_bidder = j\r\n\r\n p = probs[i][i_value_idx] * probs[j][j_value_idx]\r\n for k in range(len(bidders)):\r\n if p == 0.0:\r\n break\r\n if k != i and k != j:\r\n # k should have a bid smaller than both i and j\r\n losing_prob = []\r\n for n in range(len(values[k])):\r\n if values[k][n] < second_bid or (values[k][n] == second_bid and k < second_bidder):\r\n losing_prob.append(probs[k][n])\r\n if len(losing_prob) == 0:\r\n p = 0.0\r\n break\r\n else:\r\n p *= sum(losing_prob)\r\n revenue += p * second_bid\r\n # only one bid is above reserve\r\n for i in range(len(bidders)):\r\n for i_value_idx in range(len(bidders[i].values)):\r\n if values[i][i_value_idx] < anonymous_reserve:\r\n continue\r\n p = probs[i][i_value_idx]\r\n for j in range(len(bidders)):\r\n if j == i:\r\n continue\r\n j_idx = values[j] < anonymous_reserve\r\n p *= sum(probs[j][j_idx])\r\n revenue += p * anonymous_reserve\r\n\r\n return revenue", "def ensembleVote(x, classes, ensemble):\n votes = np.array([0 for kk in range(len(classes))])\n for i in ensemble:\n votes = votes + classProbs(x, ensemble[i][\"tree\"], classes)\n maxVote = 0\n loc = None\n for ind, vote in enumerate(votes):\n if vote > maxVote:\n maxVote = vote\n loc = ind\n prediction = classes[loc]\n return prediction", "def knapsack(items, capacity):\r\n pass", "def vote_types_to_insert_tuples(self):\n\n votes = []\n for vote, voters in self.votes_dict.items():\n for person_id in voters:\n bioguide_id = 
self.convert_to_bioguide_id(person_id)\n votes.append((self.vote_id, bioguide_id, vote))\n\n return votes", "def get_vote_tally(self):\r\n voters = []\r\n tally = {}\r\n for b in reversed(self.blocks):\r\n if b.user_id not in voters and type(b) == VoteBlock:\r\n voters.append(b.user_id)\r\n if b.choice in tally.keys():\r\n tally[b.choice] += 1\r\n else:\r\n tally[b.choice] = 1\r\n result = []\r\n for key in tally:\r\n d = {}\r\n d['name'] = key\r\n d['count'] = tally[key]\r\n result.append(d)\r\n return result", "def vote_of_citizens():\n\tglobal vote_first_candidate\n\tglobal vote_second_candidate\n\tglobal blank_vote\n\t\n\tfor i in range(NUMBER_OF_CITIZENS):\n\t\tvote = random.randint(1,10)\n\n\t\tif(vote <= 3):\n\t\t\tvote_first_candidate+=1\n\t\telif(vote > 3 and vote <= 6):\n\t\t\tvote_second_candidate+=1\n\t\telse:\n\t\t\tblank_vote+=1", "def count_votes(self, neighbours=()):\n labels = []\n data = neighbours\n # create the list made up of labels.\n for x in range(len(data)):\n labels.append(data[x][-1])\n\n # count the appearance of labels.\n count = [[x, labels.count(x)] for x in set(labels)]\n # Sort the labels in descending order by using their frequency\n vote = sorted(count, key=itemgetter(-1), reverse=True)\n # return the prediction\n # print(\"[{}]\".format(vote[0][0]))\n return vote[0][0]", "def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,\n mlvl_nms_scores, score_thr):\n candidate_mask = mlvl_nms_scores > score_thr\n # print(\"candidate_mask\")\n # print(candidate_mask)\n candidate_mask_nozeros = candidate_mask.nonzero()\n # print(\"candidate_mask_nozeros\")\n # print(candidate_mask_nozeros)\n candidate_inds = candidate_mask_nozeros[:, 0]\n candidate_labels = candidate_mask_nozeros[:, 1]\n candidate_bboxes = mlvl_bboxes[candidate_inds]\n candidate_scores = mlvl_nms_scores[candidate_mask]\n det_bboxes_voted = []\n det_labels_voted = []\n # print(\"self.cls_out_channels\")\n # print(self.cls_out_channels)\n for cls in range(self.cls_out_channels):\n candidate_cls_mask = candidate_labels == cls\n if not candidate_cls_mask.any():\n continue\n candidate_cls_scores = candidate_scores[candidate_cls_mask]\n candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]\n det_cls_mask = det_labels == cls\n det_cls_bboxes = det_bboxes[det_cls_mask].view(\n -1, det_bboxes.size(-1))\n det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],\n candidate_cls_bboxes)\n for det_ind in range(len(det_cls_bboxes)):\n single_det_ious = det_candidate_ious[det_ind]\n pos_ious_mask = single_det_ious > 0.01\n pos_ious = single_det_ious[pos_ious_mask]\n pos_bboxes = candidate_cls_bboxes[pos_ious_mask]\n pos_scores = candidate_cls_scores[pos_ious_mask]\n pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *\n pos_scores)[:, None]\n voted_box = torch.sum(\n pis * pos_bboxes, dim=0) / torch.sum(\n pis, dim=0)\n voted_score = det_cls_bboxes[det_ind][-1:][None, :]\n det_bboxes_voted.append(\n torch.cat((voted_box[None, :], voted_score), dim=1))\n det_labels_voted.append(cls)\n\n det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)\n det_labels_voted = det_labels.new_tensor(det_labels_voted)\n return det_bboxes_voted, det_labels_voted", "def upsert_uncaptured_votes(cls):\n\n query = \"\"\"\n SELECT DISTINCT most_recent_house_vote_id as vote_id\n FROM bills\n WHERE most_recent_house_vote_id IS NOT NULL\n\n UNION\n\n SELECT DISTINCT most_recent_senate_vote_id as vote_id\n FROM bills\n WHERE most_recent_senate_vote_id IS NOT NULL\n\n EXCEPT\n\n SELECT DISTINCT vote_id\n -- This is legislative votes, 
named poorly\n FROM votes\n \"\"\"\n\n bills = DB().fetch_records(query)\n for result_tuple in bills:\n vote_id = result_tuple[0]\n print(vote_id)\n lv = LegislativeVotes(vote_id)\n lv.upsert_bill_votes()\n\n ilv = IndividualLegislatorVote(vote_id)\n ilv.upsert_all_votes()", "def chosen_items(sack, items, weight):\n total = total_profit(sack, items, weight)\n chosen = []\n \n while total != 0:\n for i in range(items + 1):\n if total in sack[i]:\n chosen.append(i) \n total = total - profit[i - 1] \n break \n \n return sorted(chosen)", "def vote(request, ballot_url):\n\tdisplay_ballot = get_object_or_404(BallotPaper, ballot_url=ballot_url)\n\tqueryset = Category.objects.filter(ballot_paper=display_ballot)\n\tcaty = get_list_or_404(queryset)\n\tuser = request.user\n\n\tfor cat in caty:\n\t\ttry:\n\t\t\tselected_choice = cat.choice_set.get(pk=request.POST[cat.category_name])\n\t\texcept (KeyError, Choice.DoesNotExist, MultiValueDictKeyError):\n\t\t\treturn render(request, 'polls/display_ballot.html', {\n\t\t\t\t'display_ballot': display_ballot,\n\t\t\t\t'error_message': 'Please select a choice across all categories.'\n\t\t\t})\n\t\telse:\n\t\t\ttry:\n\t\t\t\tToken.objects.get(user=user)\n\t\t\texcept (Token.DoesNotExist):\n\t\t\t\tif ballot.created_by == user:\n\t\t\t\t\treturn render(request, 'polls/display_ballot.html', {\n\t\t\t\t\t\t'display_ballot': display_ballot,\n\t\t\t\t\t\t'error_message': 'Sorry, you do not have authorization to vote.'\n\t\t\t\t\t\t})\n\t\t\t\telse:\n\t\t\t\t\tlogout(request)\n\t\t\t\t\tmessages.error(request, 'Sorry, you do not have authorization to vote.')\n\t\t\t\t\tHttpResponseRedirect(reverse('users:token_login'))\n\t\t\telse:\n\t\t\t\tselected_choice.votes += 1\n\t\t\t\tselected_choice.save()\n\t\t\t\tuser.token.is_used = True\n\t\t\t\tuser.token.save()\n\t\t\t\tlogout(request)\n\n\t\n\treturn HttpResponseRedirect(reverse('polls:vote_success'))", "def ballot_list(request, election):\n limit = after = None\n if 'limit' in request.GET:\n limit = int(request.GET['limit'])\n if 'after' in request.GET:\n after = datetime.datetime.strptime(request.GET['after'], '%Y-%m-%d %H:%M:%S')\n \n voters = Voter.get_by_election(election, cast=True, order_by='cast_at', limit=limit, after=after)\n\n # we explicitly cast this to a short cast vote\n return [v.last_cast_vote().ld_object.short.toDict(complete=True) for v in voters]", "def borda(voteList):\n d = {} \n for i in voteList:\n for j in i:\n if j not in d:\n d[j] = len(i)- i.index(j)\n else:\n d[j] = d[j] + len(i) - i.index(j)\n \n return d" ]
[ "0.6186161", "0.55241674", "0.5510613", "0.54928535", "0.53502405", "0.52477694", "0.5246096", "0.5230471", "0.5179617", "0.5179617", "0.511548", "0.5108267", "0.5023458", "0.49894318", "0.4986476", "0.49607322", "0.49202275", "0.49162194", "0.49080694", "0.4887515", "0.48804182", "0.4859281", "0.48386744", "0.4833265", "0.48293787", "0.48251152", "0.48234126", "0.48172718", "0.48158205", "0.4814012" ]
0.72480005
0
Return the number of votes value as a number. Depending on the vote type, 1st or 2nd votes are returned.
def numberofvotes(record, votetype):
    if (votetype == 1):
        value = record['erststimmen']
    elif (votetype == 2):
        value = record['zweitstimmen']
    else:
        raise ('Invalid votetype provided. Parameter value must be 1 or 2.')
    if value == '-':
        return 0
    return int(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_votes(self):\n return sum(self.votes_per_count)", "def count_votes(self):\n return self.annotate(sum=Sum('value'))", "def count_upvotes(self):\n return self.filter(value=1).count()", "def t(p, vote_count):\n return vote_count[p]", "def evaluateVoteCount(toCompare):\n\n #weight = 0\n\n if int(toCompare['vote_count']) >= 5000:\n weight = 100\n elif 3000 <= int(toCompare['vote_count']) < 5000:\n weight = 80\n elif 2000 <= int(toCompare['vote_count']) < 3000:\n weight = 60\n elif 1000 <= int(toCompare['vote_count']) < 2000:\n weight = 40\n elif 500 <= int(toCompare['vote_count']) < 1000:\n weight = 20\n else:\n weight = 0\n return weight", "def get_vote_count(self, post):\n return post.vote_set.count()", "def count_votes(self, neighbours=()):\n labels = []\n data = neighbours\n # create the list made up of labels.\n for x in range(len(data)):\n labels.append(data[x][-1])\n\n # count the appearance of labels.\n count = [[x, labels.count(x)] for x in set(labels)]\n # Sort the labels in descending order by using their frequency\n vote = sorted(count, key=itemgetter(-1), reverse=True)\n # return the prediction\n # print(\"[{}]\".format(vote[0][0]))\n return vote[0][0]", "def totalRating(self):\r\n result = 0\r\n for v in self.votes:\r\n result += v.voteType.weight\r\n\r\n return result", "def get_vote_count(php, vote_id):\n page = requests.get(php)\n soup = BeautifulSoup(page.text, \"html.parser\")\n total = list(soup.find_all(\"td\"))\n for i in range(len(total)):\n if vote_id in str(total[i].text):\n return int(total[i + 1].text[1:])\n return 0", "def vote_count(self):\n return QuestionVotes.objects.filter(question=self).count()", "def nay_voter_cnt(self):\n\n return len(self._nay_voters())", "def _vote(self, neighbor_labels):\n counts= torch.bincount(neighbor_labels.int())\n return torch.argmax(counts)", "def count_downvotes(self):\n return self.filter(value=-1).count()", "def get_winning_votes(self):\n try:\n votes = self.get_winner().votes\n except ValueError:\n votes = -1\n return votes", "def get_num_petals(self):\n return self._num_petals", "def majorityCount(votes):\n classCount = {}\n for vote in votes:\n if vote not in classCount.keys():\n classCount[vote] = 0\n classCount[vote] += 1\n return sorted(classCount.iteritems(),\n key=operator.itemgetter(1), reverse=True)[0][0]", "def count_party_votes(votes):\r\n vote_count = {'Pineapple Pizza Party': 0, 'Pronounced Jiff Union': 0, 'Socks and Crocs Reform League': 0}\r\n for person in votes:\r\n vote_count[votes[person]] += 1\r\n return vote_count", "def n_value(self) -> int:\n return self.my_n", "def count(self) -> float:\n return pulumi.get(self, \"count\")", "def count(self):\n return self.vcount", "def positive_votes(self):\n return self._get(\"positive_votes\")", "def count(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"count\")", "def count(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"count\")", "def get_upvotes(self, obj):\n content_type = ContentType.objects.get_for_model(obj)\n\n votes = self.filter(content_type=content_type, object_id=obj._get_pk_val(), vote__exact=UPVOTE).aggregate(upvotes=Sum('vote'))\n\n if votes['upvotes'] is None:\n votes['upvotes'] = 0\n\n return votes['upvotes']", "def count_inv(rating):\r\n div_index = int(len(rating)/2)\r\n if len(rating) > 1:\r\n (left, num_of_left) = count_inv(rating[:div_index])\r\n (right, num_of_right) = count_inv(rating[div_index:])\r\n (rating, num_of_splitted) = count_splitted(left, right)\r\n inversions_num = num_of_left + num_of_right + 
num_of_splitted\r\n return rating, inversions_num\r\n else:\r\n return rating, 0", "def count_party_votes(votes: dict) -> dict:\r\n vote_count = {'Pineapple Pizza Party': 0, 'Pronounced Jiff Union': 0, 'Socks and Crocs Reform League': 0}\r\n for person in votes:\r\n vote_count[votes[person]] += 1\r\n return vote_count", "def calculate_vote_fractions():\n return _calculate_vote_fractions(models.get_candidate_to_vote_count())", "def getN(self)->int:\n return self.n", "def get_vote_score(self):\n q = PostVote.objects.filter(post=self).aggregate(Sum('score'))\n return q['score__sum'] if q['score__sum'] else 0", "def get_score(self, obj):\n content_type = ContentType.objects.get_for_model(obj)\n result = self.filter(content_type=content_type,\n object_id=obj._get_pk_val()).aggregate(\n score=Sum('vote'),\n num_votes=Count('vote'))\n #It may happen that there has been no voting on this object so far.\n if result['score'] is None:\n result['score'] = 0\n\n result['upvotes'] = self.get_upvotes(obj)\n result['downvotes'] = self.get_downvotes(obj)\n\n return result" ]
[ "0.7046721", "0.69320077", "0.64479935", "0.6445246", "0.6421509", "0.6401795", "0.62980145", "0.6255682", "0.62249494", "0.61506885", "0.6065528", "0.60515994", "0.60392594", "0.6011186", "0.59549886", "0.5930976", "0.59255874", "0.58896184", "0.58039486", "0.5762207", "0.573494", "0.5733854", "0.5733854", "0.5729516", "0.57217413", "0.5717819", "0.57062083", "0.56760556", "0.566389", "0.5621189" ]
0.75620496
0
returns alphabetically sorted list of parties
def get_sorted_parties(): return list(dfSeatsPerPartyBy2ndVotes.sort_values(by=["party"])["party"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sorted():\n return sorted(country_list, key=get_pop_and_name)", "def get_people(self):\n return sorted(list(self.plist.keys()))", "def clients_sorted_by_name(self):\n rentals = self.get_list()\n rentals = sorted(rentals, key = lambda rental: rental.client.full_name)\n return [rental.client for rental in rentals]", "def get_sorted_suit_list(self):\n return [x[0] for x in sorted(self.suit_dict.items(), key=lambda x: x[1], reverse=True)]", "def get_sorted_ingredients(self):\n if not self.ingredients_order or self.ingredients_order == 'N;':\n return list(self.ingredients.all())\n try:\n sorted_ingredients = self.ingredients_order.split('{')[1].strip('}').strip(';')\n sorted_ingredients = [int(i.split(':')[1]) for i in sorted_ingredients.split(';')][1::2]\n ingredients_by_pk = {i.pk: i for i in self.ingredients.all()}\n ret = []\n for pk in sorted_ingredients:\n if pk in ingredients_by_pk:\n ret += [ingredients_by_pk[pk]]\n if len(ret) < ingredients_by_pk:\n for pk, ingredient in ingredients_by_pk.iteritems():\n if pk not in sorted_ingredients:\n ret += [ingredient]\n return ret\n except:\n return list(self.ingredients.all())", "def people(persons):\n sorted_list = sorted(persons, key=lambda k: k['age'])\n return sorted_list", "def get_all_party_names() -> List[str]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from party order by id\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list", "def __qualitaetsListeProteins(self):\n rv = []\n pam30_sortierbar = {}\n for key in pam30.keys():\n pam30_sortierbar[str(pam30[key]) + \";\" + ''.join(key)] = pam30[key]\n if key[0] != key[1]:\n pam30_sortierbar[\n str(pam30[key]) + \";\" + ''.join((key[1], key[0]))\n ] = pam30[key]\n sorted_keys = list(pam30_sortierbar.keys())\n sorted_keys.sort(key=lambda k: int(k.split(\";\")[0]), reverse=True)\n # debugging kept for historical reasons\n # for key in iter(sorted_keys):\n # print(key.split(\";\")[1] + \" has score \" + str(pam30_sortierbar[key]))\n for key in iter(sorted_keys):\n rv.append(key.split(\";\")[1])\n return(rv)", "def _sorted_members(injson: dict) -> list:\n members = [AocMember.member_from_json(injson[member]) for member in injson]\n members.sort(key=lambda x: x.local_score, reverse=True)\n\n return members", "def sorted_gnames():\n return sorted(group_names.keys())", "def alphabetical(lst):\n\treturn list(reversed(sorted(lst, key=lambda x: x[0])))", "def get_sorted_ueberhang(sort_keys, party_filter=None):\n if (party_filter==None):\n return list(dfUeberhang.sort_values(by=sort_keys)[\"ueberhang\"])\n else:\n return list(dfUeberhang[dfUeberhang[\"party\"]==party_filter].sort_values(by=sort_keys)[\"ueberhang\"])", "def get_ordered_adversary_names(self) -> List[str]:\n pass", "def sorted_countries():\n ahh = [(country, COUNTRY_DATA[country]['data'].deaths[-1]) for country in COUNTRY_DATA.keys()]\n sorted_countries = sorted(ahh, key=lambda x: x[1], reverse=True)\n return [data[0] for data in sorted_countries]", "def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))", "def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return", "def sort_list_by_president_order(pronoun_proportion_list):\n return sorted(pronoun_proportion_list, key=lambda (k,d,v): (d,k,v))", "def sort_challengers_by_points(self):\n if not self.isOrdered:\n order = []\n for k, i in self.scores.items():\n order.append((i, k))\n order.sort()\n\n nb = 
len(self.challengers)\n for i, d in enumerate(order):\n self.challengers[nb - i - 1] = d[1]\n self.isOrdered = True\n return self.challengers", "def sorted_carnivores(self):\n fitness_dict = {carn: carn.fitness for carn in self.carnivores}\n sorted_tuples = dict(sorted(fitness_dict.items(), key=lambda x: x[1], reverse=True))\n\n return list(sorted_tuples.keys())", "def sorted_cities(seed):\n\n db = generate_db(seed, cities, sales_people, sales_range, stay_range, success_rate, total_visits)\n sorted_list = get_sorted_cities(db)\n return sorted_list", "def get_listu_uredjaja(self):\n lista = sorted(list(self.uredjaji.keys()))\n return lista", "def natsorted_icase(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key_icase)", "def get_people(self, letter = None):\n if letter:\n people = Person.objects.filter(member_of__entity__in = self.get_descendants(include_self = True), surname__istartswith = letter).distinct().order_by('surname', 'given_name', 'middle_names')\n else: \n people = Person.objects.filter(member_of__entity__in = self.get_descendants(include_self = True)).distinct().order_by('surname', 'given_name', 'middle_names')\n return people", "def get_hero_list(self):\n out_list = []\n for key, _ in self._heroes.items():\n out_list.append(key)\n out_list.sort()\n return out_list", "def adjectives_sorted(lyrics):\n adjectives = get_adjectives(lyrics)\n sorted_adjectives = Counter(adjectives)\n return sorted_adjectives", "def getSorted(self):\n return sorted(self.contacts)", "def organize(select, strain, equals):\n scores = []\n data = list(strainer(select, strain, equals))\n while len(data) != 0:\n number = lowest_number(data)\n scores.append(number)\n data.remove(number)\n return scores", "def get_members(self):\n return sorted([x[\"patient\"] for x in self.pedigree])", "def sort_priors(self):\n return", "def _sort_torrents(ctx, torrent_list, sort_type):\n\n if sort_type == 'seeders':\n return sorted(torrent_list, key=lambda t: t['seeders'], reverse=True)" ]
[ "0.6733721", "0.6340795", "0.62988514", "0.62180257", "0.6207776", "0.6170243", "0.60070765", "0.59440976", "0.5921248", "0.59084487", "0.5884037", "0.5858088", "0.58417654", "0.5839189", "0.5795835", "0.5792995", "0.57860404", "0.57793385", "0.57755154", "0.5755828", "0.57183945", "0.5700666", "0.56550246", "0.5654903", "0.56346065", "0.5626019", "0.56255263", "0.56201726", "0.56121373", "0.5606329" ]
0.717219
0
returns a list of allocated seats sorted by party
def get_sorted_allocated_seats(): return list(dfSeatsPerPartyBy2ndVotes.sort_values(by=["party"])["allocated_seats"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allocate_seats(constit, party_seats):\n constituencies = dict(constit)\n constituency_seats = {}\n for constituency, _ in constituencies.items():\n constituency_seats[constituency] = ''\n sorted_seats = sort_parties_by_seats(party_seats)\n for party, seats in sorted_seats:\n allocated = 0\n sorted_constituencies = sort_constituencies_by_party_popularity(\n constituencies, party)\n for constituency in sorted_constituencies:\n if allocated == seats:\n break\n constituency_seats[constituency] = party\n constituencies.pop(constituency)\n allocated += 1\n return constituency_seats", "def final_seat_assignment():\n parties = get_sorted_parties()\n allocated_seats = get_sorted_allocated_seats() \n #list(zip(parties, allocated_seats))\n #pandas.concat([parties, allocated_seats], axis=1)\n distributed_seats = []\n for i in range(0, len(parties)):\n list_votes2 = get_sorted_votes2([\"state\"], parties[i]) \n list_min_seats = get_sorted_min_seats([\"state\"], parties[i]) \n list_ueberhang = get_sorted_ueberhang([\"state\"], parties[i])\n seats2dist = allocated_seats[i] - sum(list_ueberhang)\n print(parties[i])\n distributed_seats.append((parties[i]\n , max(distributeSeats(seats2dist, list_votes2, False, 100) , list_min_seats)\n )) # adding tuples\n \n return distributed_seats", "def get_sorted_parties():\n return list(dfSeatsPerPartyBy2ndVotes.sort_values(by=[\"party\"])[\"party\"])", "def get_seats():\n seats = []\n boarding_passes = _load_passes()\n\n for boarding_code in boarding_passes:\n col_code = boarding_code[7:]\n row_code = boarding_code[:7]\n seat = {\n 'col': _decode(col_code, SEAT_COLS),\n 'row': _decode(row_code, SEAT_ROWS)\n }\n seats.append(seat)\n\n return seats", "def get_sorted_direct_seats(sort_keys, party_filter=None):\n if (party_filter==None):\n return list(dfUeberhang.sort_values(by=sort_keys)[\"direct_seats\"])\n else:\n return list(dfUeberhang[dfUeberhang[\"party\"]==party_filter].sort_values(by=sort_keys)[\"direct_seats\"])", "def count_occupied_seats_by_category(\n party_id: PartyID,\n) -> List[Tuple[TicketCategory, int]]:\n subquery = db.session \\\n .query(\n DbSeat.id,\n DbSeat.category_id\n ) \\\n .join(DbTicket) \\\n .filter_by(revoked=False) \\\n .subquery()\n\n rows = db.session \\\n .query(\n DbTicketCategory.id,\n DbTicketCategory.party_id,\n DbTicketCategory.title,\n db.func.count(subquery.c.id)\n ) \\\n .outerjoin(subquery, db.and_(DbTicketCategory.id == subquery.c.category_id)) \\\n .filter(DbTicketCategory.party_id == party_id) \\\n .group_by(DbTicketCategory.id) \\\n .order_by(DbTicketCategory.id) \\\n .all()\n\n return [(TicketCategory(row[0], row[1], row[2]), row[3]) for row in rows]", "def arrange_reservations(guests=None):\n\n seats = new_seating_chart()\n\n if guests:\n for seat_number in range(1, len(guests)):\n seats[seat_number] = guests[seat_number]\n return seats", "def get_sorted_min_seats(sort_keys, party_filter=None):\n if (party_filter==None):\n return list(dfUeberhang.sort_values(by=sort_keys)[\"Mindestsitzzahl\"])\n else:\n return list(dfUeberhang[dfUeberhang[\"party\"]==party_filter].sort_values(by=sort_keys)[\"Mindestsitzzahl\"])", "def _sorted_seat_ids(seats: list):\n seat_ids = [_seat_id(**seat) for seat in seats]\n return sorted(seat_ids)", "def get_allocations(self):\n cursor = self.cur()\n cursor.execute('SELECT {col1}, {col2} FROM {tn}'.format(\n tn=\"allocation\", col1=\"room_name\", col2=\"person_id\"))\n allocations = cursor.fetchall()\n return allocations", "def count_occupied_seats_for_party(party_id: PartyID) -> int:\n 
return DbSeat.query \\\n .join(DbTicket) \\\n .join(DbTicketCategory) \\\n .filter(DbTicket.revoked == False) \\\n .filter(DbTicketCategory.party_id == party_id) \\\n .count()", "def seat_group_index(party_id):\n party = _get_party_or_404(party_id)\n\n groups = seat_group_service.get_all_seat_groups_for_party(party.id)\n\n return {\n 'party': party,\n 'groups': groups,\n }", "def seats_left_(seats_avai): \r\n seats_left = np.zeros(nrows).astype(int)\r\n for i in range(len(seats_avai)): \r\n for j in range(nrows):\r\n if seats_avai[i][0] == j+1:\r\n seats_left[j] += 1\r\n seats_left = convert_list(list(enumerate(seats_left,start=1)))\r\n return seats_left", "def index_for_party(party_id):\n party = _get_party_or_404(party_id)\n\n seat_count = seat_service.count_seats_for_party(party.id)\n area_count = seating_area_service.count_areas_for_party(party.id)\n category_count = ticketing_category_service.count_categories_for_party(\n party.id\n )\n group_count = seat_group_service.count_seat_groups_for_party(party.id)\n\n return {\n 'party': party,\n 'seat_count': seat_count,\n 'area_count': area_count,\n 'category_count': category_count,\n 'group_count': group_count,\n }", "def get_grouped_available_slots(vols):\n vol_cnts = {}\n for vol in vols:\n slot_amt = len(vol.available_slots)\n if slot_amt < 1:\n continue\n if slot_amt in vol_cnts:\n vol_cnts[slot_amt].append(vol)\n else:\n vol_cnts[slot_amt] = [vol]\n\n #sorted_vol_cnts = sorted(vol_cnts.keys())\n #for amt in sorted_vol_cnts:\n # print(\"{}: {}\".format(amt, [vol.email for vol in vol_cnts[amt]]))\n return vol_cnts", "def __get_free_seats(self, game_state):\n free_seats = []\n for i in range(len(game_state)):\n for j in range(len(game_state[i])):\n if not game_state[i][j]:\n free_seats.append((i, j))\n return tuple(free_seats)", "def _reserve_seats(cls, N, S):\n unreserved_seats = cls._generate_plane_seats(N)\n reserved_seats = unreserved_seats[:]\n if len(S) > 0:\n for res in cls._parse_reservations_generator(N, S):\n row_seat_offset = cls._get_row_seat_offset(res)\n assert row_seat_offset < len(reserved_seats)\n reserved_seats[row_seat_offset] = 1\n\n return reserved_seats", "def area_index(party_id, page):\n party = _get_party_or_404(party_id)\n\n per_page = request.args.get('per_page', type=int, default=15)\n areas_with_occupied_seat_counts = (\n seating_area_service.get_areas_for_party_paginated(\n party.id, page, per_page\n )\n )\n\n seat_total_per_area = seat_service.get_seat_total_per_area(party.id)\n\n return {\n 'party': party,\n 'areas_with_occupied_seat_counts': areas_with_occupied_seat_counts,\n 'seat_total_per_area': seat_total_per_area,\n }", "def _passenger_seats(self):\n row_numbers, seat_letters = self._aircraft.seating_plan()\n for row in row_numbers:\n for letter in seat_letters:\n passenger = self._seating[row][letter]\n if passenger is not None:\n yield (passenger, f\"{row}{letter}\")", "def _compute_seats(self):\n # initialize fields to 0 + compute seats availability\n for ticket in self:\n ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'\n ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0\n # aggregate registrations by ticket and by state\n if self.ids:\n state_field = {\n 'draft': 'seats_unconfirmed',\n 'open': 'seats_reserved',\n 'done': 'seats_used',\n }\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY 
event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n # compute seats_available\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)", "def allocate(families:list, goods:list)->list:\n k = len(families)\n\n if k==1:\n family = families[0]\n logger.info(\" {} gets the remaining bundle\".format(family.name))\n return [set(goods)]\n\n goods=list(goods) # order the goods on a line\n left_sequence = list()\n right_sequence = list(goods)\n for good in goods:\n logger.info(\"\\nCurrent partition: {} | {}:\".format(left_sequence,right_sequence))\n left_bundle = set(left_sequence)\n right_bundle = set(right_sequence)\n for family_index in range(len(families)):\n family = families[family_index]\n num_of_happy_members = family.num_of_happy_members(left_bundle, [right_bundle])\n logger.info(\" {}: {}/{} members think the left bundle is {}\".format(\n family.name, num_of_happy_members, family.num_of_members, family.fairness_criterion.abbreviation))\n if num_of_happy_members*k >= family.num_of_members:\n logger.info(\" {} gets the left bundle\".format(family.name))\n other_families = list(families)\n del other_families[family_index]\n bundles = allocate(other_families, right_sequence)\n bundles.insert (family_index, left_bundle)\n return bundles\n left_sequence.append(good)\n right_sequence.pop(0)\n raise AssertionError(\n \"No family is willing to accept the set of all goods - the fairness criteria are probably too strong\")", "def get_seat_utilization(party_id: PartyID) -> SeatUtilization:\n occupied_seat_count = count_occupied_seats_for_party(party_id)\n total_seat_count = count_seats_for_party(party_id)\n\n return SeatUtilization(occupied_seat_count, total_seat_count)", "def accommodate_waiting_guests(seats, guests):\n\n curr_empty_seats = current_empty_seat_capacity(seats)\n empty_seat_list = find_all_available_seats(seats)\n\n if len(guests) <= curr_empty_seats:\n for index, _ in enumerate(guests):\n seats[empty_seat_list[index]] = guests[index]\n\n return seats", "def get_sorted_suit_list(self):\n return [x[0] for x in sorted(self.suit_dict.items(), key=lambda x: x[1], reverse=True)]", "def clients_sorted_by_rentals(self):\n rentals = self.get_list()\n number_of_rented_movies = dict.fromkeys([rental.client for rental in rentals], 0)\n for rental in rentals:\n number_of_rented_movies[rental.client] += 1\n items = sorted(number_of_rented_movies.items(), key = lambda item: item[1], reverse=True)\n return [ClientDTO(item[0], item[1]) for item in items]", "def spades(self):\n return sorted(tuple([v for v in self if v.suit == 'spades']), reverse=True)", "def sorted_availabilities(self, day=None):\r\n if day is not None:\r\n availabilities = [availability for availability in self.availabilities if availability.day == day]\r\n else:\r\n availabilities = self.availabilities\r\n return sorted(availabilities, key=lambda x: (x.day, x.start))", "def get_ordered_slots(scheduled_slots, vols):\n vol_cnts = {}\n for s_slot in scheduled_slots:\n s_key = \"{}-{}-{}\".format(s_slot.day, s_slot.time_period, s_slot.type)\n vol_cnts[s_key] = 0\n for vol in vols:\n for a_slot in vol.available_slots:\n a_key = \"{}-{}\".format(a_slot.day, a_slot.time_period)\n if a_key == s_key:\n vol_cnts[s_key] += 1\n\n sorted_vol_cnts = sorted(vol_cnts.items(), key=lambda x: x[1])\n 
#print(\"ordered slots: {}\".format(sorted_vol_cnts))\n return sorted_vol_cnts", "def aggregate_trust(self):\n AC = []\n peers = [peer for peer in self.router]\n x = len(peers)\n if x / 5:\n x = x / 5\n elif x / 2:\n x = x / 2\n for i in range(x):\n AC.append(peers[i:i+x])\n return AC", "def _population_limiting(self, space: Space) -> List[Agent]:\n\n candidate = []\n\n for i, _ in enumerate(space.agents):\n if self.age[i] > self.life_time:\n agent = space.agents.pop(i)\n self.age.pop(i)\n\n candidate.append(agent)\n\n space.agents, self.age = map(\n list, zip(*sorted(zip(space.agents, self.age), key=lambda x: x[0].fit))\n )\n\n if len(space.agents) > self.area_limit:\n candidate += space.agents[self.area_limit :]\n\n space.agents = space.agents[: self.area_limit]\n self.age = self.age[: self.area_limit]\n\n return candidate" ]
[ "0.7138782", "0.6403223", "0.639168", "0.5833872", "0.57861584", "0.5698178", "0.5633167", "0.5604765", "0.5448866", "0.5396747", "0.52694094", "0.52560455", "0.5186727", "0.51847446", "0.5177663", "0.5152866", "0.5070866", "0.5058316", "0.50500154", "0.50088996", "0.49835032", "0.49757075", "0.49498388", "0.49484062", "0.4939307", "0.4920323", "0.4914108", "0.49065676", "0.48819575", "0.4864445" ]
0.8360633
0
Distributes the total number of seats per party by 2nd votes.
def final_seat_assignment(): parties = get_sorted_parties() allocated_seats = get_sorted_allocated_seats() #list(zip(parties, allocated_seats)) #pandas.concat([parties, allocated_seats], axis=1) distributed_seats = [] for i in range(0, len(parties)): list_votes2 = get_sorted_votes2(["state"], parties[i]) list_min_seats = get_sorted_min_seats(["state"], parties[i]) list_ueberhang = get_sorted_ueberhang(["state"], parties[i]) seats2dist = allocated_seats[i] - sum(list_ueberhang) print(parties[i]) distributed_seats.append((parties[i] , max(distributeSeats(seats2dist, list_votes2, False, 100) , list_min_seats) )) # adding tuples return distributed_seats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distributeSeats(total_seats, votes, debug=False, steps=1):\n seats=[0]*len(votes)\n total_votes = np.array(votes).sum() \n d = round(total_votes/total_seats, 0)\n \n assigned_seats = 0\n while assigned_seats != total_seats: \n i = 0\n for e in votes:\n seats[i] = round(e/d, 0) \n if debug: print(\"e=\" + str(e) + \" seats[i]=\" + str(seats[i]))\n i=i+1\n \n assigned_seats = np.array(seats).sum()\n \n if assigned_seats < total_seats:\n d=(d-steps)+d%steps\n elif assigned_seats > total_seats:\n d=(d+steps)-d%steps\n else:\n break\n \n if debug: print(\"d=\" + str(d) + \"; assigned_seats=\" + str(assigned_seats))\n if debug: print(\"\")\n \n print(\"FINAL: d=\" + str(d) + \"; assigned_seats=\" + str(assigned_seats) + \"; total votes: \" + str(total_votes))\n return seats", "def get_sorted_allocated_seats():\n return list(dfSeatsPerPartyBy2ndVotes.sort_values(by=[\"party\"])[\"allocated_seats\"])", "def count_party_votes(votes):\r\n vote_count = {'Pineapple Pizza Party': 0, 'Pronounced Jiff Union': 0, 'Socks and Crocs Reform League': 0}\r\n for person in votes:\r\n vote_count[votes[person]] += 1\r\n return vote_count", "def p(party, vote_count, s):\n return t(party, vote_count) / d(s)", "def _compute_seats(self):\n # initialize fields to 0 + compute seats availability\n for ticket in self:\n ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'\n ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0\n # aggregate registrations by ticket and by state\n if self.ids:\n state_field = {\n 'draft': 'seats_unconfirmed',\n 'open': 'seats_reserved',\n 'done': 'seats_used',\n }\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n # compute seats_available\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)", "def recalculate_popularity(self):\n self.voters = 0\n for x in self.votes:\n self.voters += 1\n if x.good:\n self.popularity += 1\n else:\n self.popularity -= 1", "def get_vote_tally(self):\r\n voters = []\r\n tally = {}\r\n for b in reversed(self.blocks):\r\n if b.user_id not in voters and type(b) == VoteBlock:\r\n voters.append(b.user_id)\r\n if b.choice in tally.keys():\r\n tally[b.choice] += 1\r\n else:\r\n tally[b.choice] = 1\r\n result = []\r\n for key in tally:\r\n d = {}\r\n d['name'] = key\r\n d['count'] = tally[key]\r\n result.append(d)\r\n return result", "def count_party_votes(votes: dict) -> dict:\r\n vote_count = {'Pineapple Pizza Party': 0, 'Pronounced Jiff Union': 0, 'Socks and Crocs Reform League': 0}\r\n for person in votes:\r\n vote_count[votes[person]] += 1\r\n return vote_count", "def get_party_votes(self, party_name):\n votes = 0\n for candidate, outcome in self.candidate_outcomes.iteritems():\n if candidate.party == party_name:\n votes += outcome.num_votes\n return votes", "def i_e_c():\r\n parties = {}\r\n \r\n print(\"Independent Electoral Commission\")\r\n print(\"--------------------------------\")\r\n party = input(\"Enter the names of parties (terminated by DONE):\\n\")\r\n \r\n while party != 'DONE':\r\n if party:\r\n if not(party in parties):\r\n parties[party] = 1\r\n else:\r\n 
parties[party] += 1\r\n \r\n party = input('')\r\n \r\n parties2 = sorted(list(parties.keys())) \r\n \r\n if len(parties) > 0:\r\n print(\"\\nVote counts:\")\r\n \r\n for i in parties2:\r\n print(i.ljust(10) + ' -', parties[i])", "async def applyVote(self, votes):\n voteCount = {vote: 0 for vote in self.getMembersName()}\n voteCount[None] = 0\n for vote in votes.values():\n voteCount[vote] += 1\n\n if voteCount[None] != 0:\n await self.textChannel.send(\n \"Attention, des joueurs n'ont pas votรฉ / ont mal รฉcrit, les votes peuvent รชtre faussรฉs.\")\n del voteCount[None]\n\n playerOrder = sorted(voteCount.items(), key=lambda x: x[1], reverse=True)\n print(\"playerOrder\", playerOrder)\n if playerOrder[0][1] == 0: # Nobody vote\n await self.textChannel.send(\"`Partie non valide`, personne n'a votรฉ.\")\n\n elif playerOrder[0][1] == 1: # People think nobody is a werewolf\n await self.textChannel.send(\"Le village pense qu'il n'y a pas de loups-garou ? Vรฉrification ...\")\n werewolves = self.getWolves()\n if len(werewolves) == 0:\n await self.textChannel.send(\"Le village a raison, il n'y a pas de loups-garous parmis eux.\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNร‰```\")\n else:\n await self.textChannel.send(\"Malheuresement, il y avait```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNร‰-```\")\n\n else: # Classic vote\n werewolves = self.getWolves()\n deaths = []\n for i in range(len(playerOrder)):\n player = self.getMemberFromName(name=playerOrder[i][0])\n isDead = await player.isDead(channel=self.textChannel)\n if isDead:\n deaths += await player.death(channel=self.textChannel, members=self.players)\n print(\"voteCount :\", voteCount)\n\n # Get player name with same number of vote against them\n playerEqualVote = []\n for p in playerOrder:\n if p[1] == playerOrder[i][1] and p[0] != playerOrder[i][0]:\n playerEqualVote.append(self.getMemberFromName(name=p[0]))\n print(\"Other players with equals number of vote :\", playerEqualVote)\n for otherPlayer in playerEqualVote:\n isDead = await otherPlayer.isDead(channel=self.textChannel)\n if isDead:\n deaths += await otherPlayer.death(channel=self.textChannel, members=self.players)\n break\n\n for i in range(len(deaths)):\n if deaths[i] is None:\n del deaths[i]\n\n if len(deaths) == 0: # No one die\n if len(werewolves) == 0: # No Werewolves\n await self.textChannel.send(\"Il n'ya pas eu de mort et il n'y a aucun Loup-Garou !\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNร‰```\")\n else: # Werewolves among players\n await self.textChannel.send(\n \"Il n'y a pas eu de mort mais```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNร‰-```\")\n\n elif len(deaths) == 1:\n if deaths[0].lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rรชveur\"]: # Werewolf die\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNร‰```\")\n elif deaths[0].lastRole in [\"Tanneur\"]: # Tanner died\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNร‰#```\")\n if len(werewolves) > 0: # Wolves in game\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT ร‰GALEMENT GAGNร‰```\")\n else: # Villager died\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNร‰-```\")\n\n else: # more than 2 deaths\n rolesDead = []\n for dead in deaths:\n if dead.lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rรชveur\"]:\n 
rolesDead.append(\"Loup-Garou\")\n elif dead.lastRole in [\"Tanneur\"]:\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNร‰#```\")\n else:\n rolesDead.append(\"Villageois\")\n print(\"rolesDead :\", rolesDead)\n rolesDead = list(dict.fromkeys(rolesDead))\n print(\"rolesDead unique :\", rolesDead)\n if \"Loup-Garou\" in rolesDead:\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNร‰```\")\n else:\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNร‰-```\")", "def get_sorted_parties():\n return list(dfSeatsPerPartyBy2ndVotes.sort_values(by=[\"party\"])[\"party\"])", "def rank_candidates(table):\n ranking = []\n\n # get list of all candidates who received a vote\n full_list = elim_dupe([name for vote in table for name in vote])\n # print full_list\n \n while len(ranking) < len(full_list):\n \n # All unranked candidates are considered eligible\n eligible = [name for name in full_list if name not in ranking]\n \n while True:\n \n # Remove ineligible and eliminated candidates from votes\n temp_ballots = [[name for name in vote if name in eligible] for vote in table]\n \n # If no candidates on the ballot are eligible and the ballot does not have\n # \"no confidence\" written on it, the ballot is discarded and not considered a vote.\n temp_ballots = [vote for vote in temp_ballots if len(vote) > 0]\n\n total_votes = len(temp_ballots)\n\n if total_votes == 0:\n return ranking\n\n top_choices = [vote[0] for vote in temp_ballots]\n \n # All ballots are considered to be a vote for the\n # highest-ranked eligible candidate on the ballot.\n vote_count = {name: top_choices.count(name) for name in eligible}\n print vote_count\n winner = [k for k in vote_count if (vote_count[k]*2) > total_votes]\n\n if len(winner) > 0:\n # If a single candidate has a majority of the\n # votes, they receive the next highest ranking\n if winner[0] == NO_CONFIDENCE:\n return ranking\n \n ranking += winner\n \n break;\n\n vote_count.pop(NO_CONFIDENCE, None)\n\n # If no single candidate has a majority of the votes,\n # then one will be deemed ineligible.\n\n min_votes = vote_count[min(vote_count, key=vote_count.get)]\n \n least_voted = {k:vote_count[k] for k in vote_count if vote_count[k] == min_votes}\n \n # If a single candidate has the least amount of votes, they become ineligible,\n while len(least_voted) > 1:\n temp_ballots = [vote[1:] for vote in temp_ballots if len(vote[1:]) > 0]\n if len(temp_ballots) == 0:\n return ranking\n next_choices = [vote[0] for vote in temp_ballots if vote[0] in least_voted]\n least_voted = {name: (next_choices.count(name) + least_voted[name]) for name in least_voted}\n min_votes = least_voted[min(least_voted, key=least_voted.get)]\n least_voted = {k: least_voted[k] for k in least_voted if least_voted[k] == min_votes}\n \n remove = least_voted.keys()[0]\n eligible = [name for name in eligible if name != remove]\n\n\n return ranking", "def update_passengers_counts(persons):\n passengers_dict = defaultdict(int)\n for person in persons:\n if person['seated_pos'] != 'NONE':\n veh_id = person['veh_id']\n passengers_dict[veh_id] += 1\n for veh_id, num in passengers_dict.iteritems():\n db_api.vehicle.set_field(veh_id, 'passenger_count', num)", "def part_two(rucksacks: list) -> int:\n summ = 0\n for i in range(0, len(rucksacks), 3):\n first_group = set(rucksacks[i])\n second_group = set(rucksacks[i + 1])\n third_group = set(rucksacks[i + 2])\n badge = first_group.intersection(second_group).intersection(third_group)\n badge = list(badge)[0] # extract item id from 
set\n summ += PRIORITY.get(badge, 0)\n return summ", "def compute_spa_revenue(bidders, anonymous_reserve):\r\n revenue = 0.0\r\n values = [np.array(bidder.values) for bidder in bidders]\r\n probs = [np.array(bidder.prob) for bidder in bidders]\r\n # enumerate the top two bids\r\n for i in range(len(bidders)):\r\n for j in range(i + 1, len(bidders)):\r\n for i_value_idx in range(len(bidders[i].values)):\r\n if values[i][i_value_idx] < anonymous_reserve:\r\n continue\r\n for j_value_idx in range(len(bidders[j].values)):\r\n if values[j][j_value_idx] < anonymous_reserve:\r\n continue\r\n if values[i][i_value_idx] <= values[j][j_value_idx]:\r\n # break ties lexicographically j > i, so j is the winner\r\n second_bid = values[i][i_value_idx]\r\n second_bidder = i\r\n else:\r\n second_bid = values[j][j_value_idx]\r\n second_bidder = j\r\n\r\n p = probs[i][i_value_idx] * probs[j][j_value_idx]\r\n for k in range(len(bidders)):\r\n if p == 0.0:\r\n break\r\n if k != i and k != j:\r\n # k should have a bid smaller than both i and j\r\n losing_prob = []\r\n for n in range(len(values[k])):\r\n if values[k][n] < second_bid or (values[k][n] == second_bid and k < second_bidder):\r\n losing_prob.append(probs[k][n])\r\n if len(losing_prob) == 0:\r\n p = 0.0\r\n break\r\n else:\r\n p *= sum(losing_prob)\r\n revenue += p * second_bid\r\n # only one bid is above reserve\r\n for i in range(len(bidders)):\r\n for i_value_idx in range(len(bidders[i].values)):\r\n if values[i][i_value_idx] < anonymous_reserve:\r\n continue\r\n p = probs[i][i_value_idx]\r\n for j in range(len(bidders)):\r\n if j == i:\r\n continue\r\n j_idx = values[j] < anonymous_reserve\r\n p *= sum(probs[j][j_idx])\r\n revenue += p * anonymous_reserve\r\n\r\n return revenue", "def allocate_seats(constit, party_seats):\n constituencies = dict(constit)\n constituency_seats = {}\n for constituency, _ in constituencies.items():\n constituency_seats[constituency] = ''\n sorted_seats = sort_parties_by_seats(party_seats)\n for party, seats in sorted_seats:\n allocated = 0\n sorted_constituencies = sort_constituencies_by_party_popularity(\n constituencies, party)\n for constituency in sorted_constituencies:\n if allocated == seats:\n break\n constituency_seats[constituency] = party\n constituencies.pop(constituency)\n allocated += 1\n return constituency_seats", "def setupDistribution(tournamentsWon1):\n timesWon = np.sort(np.unique(tournamentsWon1))\n numberTimesWon = np.zeros_like(timesWon)\n for i in range (len(timesWon)):\n numberTimesWon[i] = count(tournamentsWon1, timesWon[i])\n return timesWon, numberTimesWon", "def tally_votes(precinct_votes):\n county_cumulative_votes = {}\n county_cumulative_percentages = {}\n for pv in precinct_votes:\n c.execute(\n \"SELECT Choice, Total_Votes from v where Contest_Name=? and County=? and Precinct=? 
order by Choice ASC\",\n pv[0:3])\n precinct_choices = {}\n for pc in c.fetchall():\n precinct_choices[pc[0]] = pc[1]\n if pv[0:2] not in county_cumulative_votes:\n county_cumulative_votes[pv[0:2]] = {}\n county_cumulative_percentages[pv[0:2]] = {}\n cumulative_choices = precinct_choices\n else:\n cumulative_choices = {}\n for choice, vote in precinct_choices.items():\n cumulative_choices[choice] = vote + county_cumulative_votes[pv[0:2]][choice][-1]\n append_map_array(county_cumulative_votes[pv[0:2]], cumulative_choices)\n append_map_array(county_cumulative_percentages[pv[0:2]], normalize(cumulative_choices))\n\n return (county_cumulative_votes, county_cumulative_percentages)", "def vote_of_citizens():\n\tglobal vote_first_candidate\n\tglobal vote_second_candidate\n\tglobal blank_vote\n\t\n\tfor i in range(NUMBER_OF_CITIZENS):\n\t\tvote = random.randint(1,10)\n\n\t\tif(vote <= 3):\n\t\t\tvote_first_candidate+=1\n\t\telif(vote > 3 and vote <= 6):\n\t\t\tvote_second_candidate+=1\n\t\telse:\n\t\t\tblank_vote+=1", "def twos_points(dice_list):\n return dice_list.count(2) * 2", "def twos(dice):\n return sum([x for x in dice if x == 2])", "def calc_pool(players):\n players = [str(x) for x in players]\n n = len(players)\n for player in players:\n nopool = payoff_nopool(p=percentages[player])\n print(nopool)\n p = {i: percentages[key] for i, key in zip([x for x in range(2, n+1)],\n [x for x in players if x != player])}\n p[1] = percentages[player]\n pool = payoff_n_p(p=p, n=n)\n print(pool)", "def get_session_voting(year, house):\n current_vote = 2\n base_url = \"https://le.utah.gov/DynaBill/svotes.jsp?sessionid={}GS&voteid=\".format(year)\n end_url = \"&house={}\".format(house)\n cont_reading = True\n members = get_members(base_url, end_url)\n votes = []\n while cont_reading:\n reps, content_len, bill_info, vote_names = get_next_page(base_url, end_url, current_vote)\n if 'bill_name' in bill_info and 'yeas' in vote_names:\n votes.append({'bill': bill_info['bill_name'], 'yeas': bill_info['Yeas'], 'nays': bill_info['Nays'],\n 'absent': bill_info['Absent'], 'vote_yea': vote_names['yeas'], 'vote_nay': vote_names['nays'],\n 'vote_absent': vote_names['absent']})\n if content_len < 10:\n cont_reading = False\n current_vote += 1\n save_csv_data(members, votes, house, year)", "def SecondPart():\n return countAllBagsIn(targetBag, organizedBags)", "def _calculate_vote_fractions(candidate_to_vote_count):\n total_votes = sum(candidate_to_vote_count.values()) or 1\n return {\n candidate: vote_count / total_votes\n for candidate, vote_count\n in candidate_to_vote_count.items()\n }", "def count_seats_for_party(party_id: PartyID) -> int:\n return DbSeat.query \\\n .join(DbArea) \\\n .filter(DbArea.party_id == party_id) \\\n .count()", "def index_for_party(party_id):\n party = _get_party_or_404(party_id)\n\n seat_count = seat_service.count_seats_for_party(party.id)\n area_count = seating_area_service.count_areas_for_party(party.id)\n category_count = ticketing_category_service.count_categories_for_party(\n party.id\n )\n group_count = seat_group_service.count_seat_groups_for_party(party.id)\n\n return {\n 'party': party,\n 'seat_count': seat_count,\n 'area_count': area_count,\n 'category_count': category_count,\n 'group_count': group_count,\n }", "def get_sorted_votes2(sort_keys, party_filter=None):\n if (party_filter==None):\n return list(dfVotesPerStateAndParty.sort_values(by=sort_keys)[\"votes2\"])\n else:\n return 
list(dfVotesPerStateAndParty[dfVotesPerStateAndParty[\"party\"]==party_filter].sort_values(by=sort_keys)[\"votes2\"])", "def count_occupied_seats_for_party(party_id: PartyID) -> int:\n return DbSeat.query \\\n .join(DbTicket) \\\n .join(DbTicketCategory) \\\n .filter(DbTicket.revoked == False) \\\n .filter(DbTicketCategory.party_id == party_id) \\\n .count()" ]
[ "0.62307465", "0.56033283", "0.5483724", "0.54284436", "0.53296125", "0.5314998", "0.5310338", "0.5305902", "0.5232298", "0.51984274", "0.5194866", "0.51302296", "0.51231205", "0.50765413", "0.5059216", "0.5047439", "0.5006322", "0.4984975", "0.4977831", "0.49506813", "0.48996133", "0.4836292", "0.48098662", "0.4793494", "0.47872177", "0.47561115", "0.47542042", "0.4752494", "0.47411108", "0.47239074" ]
0.6118161
1
Create a mock object suitable for replacing confluent_kafka.Consumer which has a list_topics method which acts as if a predetermined set of topics exist.
def make_mock_listing_consumer(topics=[]): def get_topics(topic=None, timeout=None): nonlocal topics result = {} if topic is None: for topic in topics: result[topic] = MagicMock() # don't care much what value is result[topic].error = None # but it should claim no errors elif topic in topics: result[topic] = MagicMock() result[topic].error = None # wrap in a fake metadata object metadata = MagicMock() metadata.topics = result return metadata consumer = MagicMock() consumer.list_topics = get_topics return MagicMock(return_value=consumer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_kafka_consumer(\n kafka_fixture_name: str,\n kafka_topics: Optional[List[str]] = None,\n seek_to_beginning: bool = False,\n scope: str = 'function',\n **consumer_kwargs\n) -> Callable[..., KafkaConsumer]:\n if kafka_topics is None:\n kafka_topics = []\n\n @pytest.fixture(scope=scope) # type: ignore\n def kafka_consumer(request: 'SubRequest') -> KafkaConsumer:\n \"\"\"\n Get a connected Kafka consumer.\n\n Will consume from the beginning and with a timeout, so ``list(consumer)`` can be used.\n \"\"\"\n _, kafka_port = request.getfixturevalue(kafka_fixture_name)\n\n used_consumer_kwargs = consumer_kwargs.copy()\n used_consumer_kwargs.setdefault('consumer_timeout_ms', DEFAULT_CONSUMER_TIMEOUT_MS)\n used_consumer_kwargs.setdefault('bootstrap_servers', 'localhost:{}'.format(kafka_port))\n\n consumer = KafkaConsumer(\n *kafka_topics,\n **used_consumer_kwargs,\n )\n\n if seek_to_beginning:\n assert kafka_topics, (\n 'In order to be able to seek to beginning, we must have some partitions assigned '\n 'for which we need to subscribe to topics.')\n\n def partitions_assigned():\n consumer.poll(timeout_ms=20)\n return len(consumer.assignment()) > 0\n\n _wait_until(partitions_assigned)\n\n consumer.seek_to_beginning()\n return consumer\n\n return kafka_consumer", "def test_get_all_topics(mock_send_message_json):\n assert OranDmaap.get_all_topics_url == f\"{BASE_URL}/topics/listAll\"", "def test_wiki_topics(self):\n t1 = TopicFactory(slug='doesnotexist')\n t2 = TopicFactory(slug='extant')\n t3 = TopicFactory(slug='tagged')\n\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.topics.add(t2)\n RevisionFactory(document=doc, is_approved=True)\n\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.topics.add(t2)\n doc.topics.add(t3)\n RevisionFactory(document=doc, is_approved=True)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n ([t2.slug, t3.slug], 1),\n )\n\n qs = {'a': 1, 'w': 1, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])", "def test_topics_for_products(self):\n desktop_topics = topics_for(product=self.desktop)\n eq_(len(desktop_topics), 3)\n\n mobile_topics = topics_for(product=self.mobile)\n eq_(len(mobile_topics), 2)", "async def create_unconsumed_topics():\n # ################################################ #\n # TODO: remove these once there is someone consuming the topics\n unconsumed_topics = ['dummy']\n\n logger.warning(\n f'Creating topics on the publisher: {unconsumed_topics} due to lack of consumers. 
'\n 'Remove them once there are consumers'\n )\n for topic in unconsumed_topics:\n await kafka.topic(topic).maybe_declare()\n\n # ################################################ #", "def test_topic_viewset_list(self):\n TopicFactory()\n TopicFactory(title='Test Title2',\n body='Test body',\n description='Test description',\n section=Topic.CONVERSATION)\n TopicFactory(title='Test Title3',\n body='Test body',\n description='Test description',\n section=Topic.CONVERSATION)\n data = {'section': Topic.CONVERSATION}\n response = self.client.get(reverse('api:topics-by-section'), data=data)\n self.assertTrue(response.status_code == status.HTTP_200_OK)\n self.assertEqual(len(response.data), 3)\n data = {'section': Topic.IDEAS}\n response = self.client.get(reverse('api:topics-by-section'), data)\n self.assertTrue(response.status_code == status.HTTP_200_OK)\n self.assertEqual(len(response.data), 0)", "async def test_modify_topics(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await mqtt_mock_entry()\n calls1 = []\n\n @callback\n def record_calls1(*args):\n \"\"\"Record calls.\"\"\"\n calls1.append(args)\n\n calls2 = []\n\n @callback\n def record_calls2(*args):\n \"\"\"Record calls.\"\"\"\n calls2.append(args)\n\n sub_state = None\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\n \"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls1},\n \"test_topic2\": {\"topic\": \"test-topic2\", \"msg_callback\": record_calls2},\n },\n )\n await async_subscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n assert len(calls1) == 1\n assert len(calls2) == 0\n\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n assert len(calls1) == 1\n assert len(calls2) == 1\n\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\"test_topic1\": {\"topic\": \"test-topic1_1\", \"msg_callback\": record_calls1}},\n )\n await async_subscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n assert len(calls1) == 1\n assert len(calls2) == 1\n\n async_fire_mqtt_message(hass, \"test-topic1_1\", \"test-payload\")\n assert len(calls1) == 2\n assert calls1[1][0].topic == \"test-topic1_1\"\n assert calls1[1][0].payload == \"test-payload\"\n assert len(calls2) == 1\n\n async_unsubscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1_1\", \"test-payload\")\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n\n assert len(calls1) == 2\n assert len(calls2) == 1", "def test_get_topics(self):\n\n for m in self.models:\n\n topics = m.topics\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertEqual(topics.num_rows(), 25)\n self.assertEqual(topics.num_columns(), 2)\n z = m.topics[\"topic_probabilities\"]\n for k in range(m.num_topics):\n self.assertTrue(\n abs(sum(z.vector_slice(k)) - 1) < DELTA,\n \"Returned probabilities do not sum to 1.\",\n )\n\n # Make sure returned object is an SFrame of the right size\n topics = m.get_topics()\n self.assertTrue(isinstance(topics, turicreate.SFrame))\n self.assertTrue(\n topics.num_columns() == 3,\n \"Returned SFrame should have a topic, word, and probs.\",\n )\n\n # Make sure that requesting a single topic returns only that topic\n num_words = 8\n topics = m.get_topics([5], num_words=num_words)\n self.assertTrue(\n all(topics[\"topic\"] == 5), 
\"Returned topics do not have the right id.\"\n )\n self.assertEqual(topics.num_rows(), num_words)\n topics = m.get_topics([2, 4], num_words=num_words)\n self.assertEqual(set(list(topics[\"topic\"])), set([2, 4]))\n self.assertEqual(topics.num_rows(), num_words + num_words)\n\n # Make sure the cumulative probability of the returned words is\n # is less than the cutoff we provided.\n # A cutoff of 1.0 should return num_words for every topic.\n cutoff = 1.0\n topics = m.get_topics(cdf_cutoff=cutoff, num_words=len(m.vocabulary))\n totals = topics.groupby(\n \"topic\", {\"total_score\": turicreate.aggregate.SUM(\"score\")}\n )\n self.assertTrue(\n all(totals[\"total_score\"] <= (cutoff + DELTA)),\n \"More words were returned than expected for this cutoff.\",\n )\n\n # Make sure we raise errors for bad input\n with self.assertRaises(ValueError):\n m.get_topics([-1])\n with self.assertRaises(ValueError):\n m.get_topics([10000])\n with self.assertRaises(ToolkitError):\n topics = m.get_topics(output_type=\"other\")\n\n # Test getting topic_words\n topic_words = m.get_topics(output_type=\"topic_words\", num_words=5)\n self.assertEqual(type(topic_words), turicreate.SFrame)\n\n # Test words are sorted correctly for the first topic\n # TODO: Make this more deterministic.\n\n # topic_probs = m.get_topics(num_words=5)\n # expected = [w for w in topic_probs['word'][:5]]\n # observed = topic_words['words'][0]\n # self.assertEqual(observed[0], expected[0])", "def test_author_sorted_topics(self):\n\n self.make_test('topics', TopicListSerializer, 'author:topics')", "def test_question_topics(self):\n p = ProductFactory()\n t1 = TopicFactory(slug='doesnotexist', product=p)\n t2 = TopicFactory(slug='cookies', product=p)\n t3 = TopicFactory(slug='sync', product=p)\n\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t3)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n )\n\n qs = {'a': 1, 'w': 2, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])", "def __init__(self, topics=None):\n self.topics = topics or []", "def test_topic_retrieval_authenticated(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.free_token.key)\n response = self.client.get('/topics/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data['count'],3)\n self.assertTrue({'name': 'Topic 1', 'description': 'The first topic.'} in data['results'])\n self.assertTrue({'name': 'Topic 2', 'description': 'The second topic.'} in data['results'])", "def test_topic_list_view_authenticated(self):\n self.assertTrue(self.client.login(username=\"test\", password=\"test\"))\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context[\"topics\"]), 4)", "def _setup_consumer(self):\n # <WTF> https://github.com/dpkp/kafka-python/issues/601\n self.available_topics = self.client.topics()\n # </WTF>\n\n # might as well use it\n assert self.topic in self.available_topics\n\n if (self.start_params is None) != (self.end_params is None):\n raise ValueError(\"Both start and end params must be set or both must be None\")\n\n if self.start_params is None:\n # setup partitions to read through\n # TODO not checked with multiple partitions since inheriting from foxglove\n # An offset is assigned to 
make repeatability (via a locking file) possible later on.\n # and it's easier to terminate the fetch loop this way.\n p_id = self.client.partitions_for_topic(self.topic)\n topic_partitions = [TopicPartition(topic=self.topic, partition=p) for p in list(p_id)]\n starts = self.client.beginning_offsets(topic_partitions)\n ends = self.client.end_offsets(topic_partitions)\n\n self.start_p_offsets = {\n tp: OffsetAndTimestamp(offset=offset, timestamp=None)\n for tp, offset in starts.items()\n }\n self.end_p_offsets = {\n tp: OffsetAndTimestamp(offset=offset - 1, timestamp=None)\n for tp, offset in ends.items()\n }\n\n else:\n # TODO - this code was inherited from Foxglove and hasn't be checked through\n # setup start and end partitions and offsets\n # self.client.seek_to_beginning()\n # datetime is only start/end implemented\n assert isinstance(self.start_params, datetime) and isinstance(self.end_params, datetime)\n start = int(self.start_params.timestamp() * 1000)\n end = int(self.end_params.timestamp() * 1000)\n\n partitions = self.client.partitions_for_topic(self.topic)\n tx = {TopicPartition(topic=self.topic, partition=p): start for p in list(partitions)}\n self.start_p_offsets = self.client.offsets_for_times(tx)\n\n # if you give a timestamp after the last record it returns None\n for tp, offset_details in self.start_p_offsets.items():\n if offset_details is None:\n raise ValueError(\"Start date outside of available messages\")\n\n tx = {TopicPartition(topic=self.topic, partition=p): end for p in list(partitions)}\n self.end_p_offsets = self.client.offsets_for_times(tx)\n\n # as above - out of range, for end offset give something useful\n for tp, offset_details in self.end_p_offsets.items():\n if offset_details is None:\n # go to last message. I'm not 100% sure this is correct\n end_offsets = self.client.end_offsets([tp])\n offset = end_offsets[tp] - 1\n self.end_p_offsets[tp] = OffsetAndTimestamp(offset=offset, timestamp=None)", "def test_get_full_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_full_topic_courses(topic_id, course_id)", "def test_topic_list_view_unauthenticated(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context[\"topics\"]), 3)", "def test_create_topic_viewset(self):\n\n data = {\n 'title': 'Test Topic',\n 'description': 'Test topic description',\n 'body': 'Test topic body',\n 'section': 'CONVERSATION',\n 'tags': 'test'\n }\n response = self.client.post(reverse('api:topics-list'), data)\n self.assertTrue(response.status_code == status.HTTP_201_CREATED)\n created_topic = Topic.objects.last()\n self.assertTrue(created_topic)\n self.assertEqual(created_topic.title, data['title'])\n self.assertEqual(created_topic.description, data['description'])\n self.assertEqual(created_topic.body, data['body'])\n self.assertEqual(created_topic.section, data['section'])", "async def test_subscribe_topics(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await mqtt_mock_entry()\n calls1 = []\n\n @callback\n def record_calls1(*args):\n \"\"\"Record calls.\"\"\"\n calls1.append(args)\n\n calls2 = []\n\n @callback\n def record_calls2(*args):\n \"\"\"Record calls.\"\"\"\n calls2.append(args)\n\n sub_state = None\n sub_state = async_prepare_subscribe_topics(\n hass,\n sub_state,\n {\n \"test_topic1\": {\"topic\": \"test-topic1\", \"msg_callback\": record_calls1},\n 
\"test_topic2\": {\"topic\": \"test-topic2\", \"msg_callback\": record_calls2},\n },\n )\n await async_subscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload1\")\n assert len(calls1) == 1\n assert calls1[0][0].topic == \"test-topic1\"\n assert calls1[0][0].payload == \"test-payload1\"\n assert len(calls2) == 0\n\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload2\")\n assert len(calls1) == 1\n assert len(calls2) == 1\n assert calls2[0][0].topic == \"test-topic2\"\n assert calls2[0][0].payload == \"test-payload2\"\n\n async_unsubscribe_topics(hass, sub_state)\n\n async_fire_mqtt_message(hass, \"test-topic1\", \"test-payload\")\n async_fire_mqtt_message(hass, \"test-topic2\", \"test-payload\")\n\n assert len(calls1) == 1\n assert len(calls2) == 1", "def __init__(self, topics_to_test):\n super().__init__()\n self.topics_to_test = topics_to_test", "def test_suggested_topic_get_all(self):\n with mock.patch('suggestedtopics.models.cache') as mock_cache:\n with mock.patch('suggestedtopics.models.pickle') as mock_pickle:\n mock_cache. __contains__.return_value = True\n mock_pickle.load.return_value = True\n\n actual_suggested_topic = SuggestedTopics.get_all()", "def test_retrieve_topic_viewset(self):\n\n topic = TopicFactory(author=self.user)\n response = self.client.get(reverse('api:topics-detail', kwargs={'topic_id': topic.id}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('title'), topic.title)", "def check_exists(cls, topics):\n\t\tresult = []\n\t\tfor known_feed in cls.get([cls.create_key(url) for url in set(topics)]):\n\t\t\tif known_feed is not None:\n\t\t\t\tresult.append(known_feed.topic)\n\t\treturn result", "def test_create_topic(mock_send_message):\n OranDmaap.create_topic(TOPIC)\n mock_send_message.assert_called_once_with('POST',\n 'Create Dmaap Topic',\n (f\"{BASE_URL}/topics/create\"),\n data=TOPIC,\n headers=HEADER)", "def test_get_single_topic_courses(self):\r\n course_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_single_topic_courses(topic_id, course_id)", "def test_topic_notification_list_show_private_topic(self):\n TopicNotification.objects.all().delete()\n\n topic_a = utils.create_private_topic(user=self.user)\n topic_notif = TopicNotification.objects.create(\n user=self.user, topic=topic_a.topic,\n comment=self.comment, is_active=True, action=COMMENT)\n\n utils.login(self)\n response = self.client.get(reverse('spirit:topic:notification:index'))\n self.assertEqual(\n list(response.context['notifications']),\n [topic_notif, ])\n\n # list unread should behave the same\n response = self.client.get(\n reverse('spirit:topic:notification:index-unread'))\n self.assertEqual(list(response.context['page']), [topic_notif, ])\n\n # ajax list should behave the same\n response = self.client.get(\n reverse('spirit:topic:notification:index-ajax'),\n HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n res = json.loads(response.content.decode('utf-8'))\n self.assertEqual(len(res['n']), 1)", "async def test_availability_without_topic(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_availability_without_topic(\n hass, mqtt_mock_entry, select.DOMAIN, DEFAULT_CONFIG\n )", "def topics(ctx):\n pass", "def test_get_full_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.get_full_topic_groups(group_id, topic_id)", "def get_topics(self):\n return 
self.client.cluster.topics()", "def stress_test_consumer():\n consumer = kafka_manager.get_kafka_consumer()\n for message in consumer:\n message_content = json.loads(message.value.decode())\n message_topic = message.topic\n print(\"received:\")\n print(message_topic)\n print(message_content)" ]
[ "0.6605228", "0.6418212", "0.6234094", "0.61997974", "0.61825895", "0.6142635", "0.6128908", "0.61017543", "0.6045486", "0.60094476", "0.6002151", "0.5979141", "0.59018415", "0.58984995", "0.58328545", "0.5803527", "0.5799088", "0.5792315", "0.57541776", "0.57241386", "0.5716168", "0.56887794", "0.5683442", "0.56674206", "0.5573501", "0.55424696", "0.55317265", "0.5529059", "0.55207044", "0.5519168" ]
0.85897976
0
decorator that raises an error if a fn is run used to mark sections of code as "legacy"
def legacy_code(func): @wraps(func) def wrapper(*args, **kwargs): msg = "{0} is legacy code".format(func.__name__) raise QError(msg) return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obsolete(func, fail=True):\n\n def inner(*args, **kwargs):\n if not fail:\n logging.error('Called obsolete function %s' % func.__name__)\n return func(*args, **kwargs)\n raise ObsoleteError('Tried to call function %s but it is marked as obsolete' % func.__name__)\n\n return inner", "def __call__(self, fn):\n\n @wraps(fn)\n def wrapper(*args, **kwargs):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", self.category)\n return fn(*args, **kwargs)\n\n return wrapper", "def test_tolerate_decorated_function_fail_silently():\n def test_function():\n raise Exception()\n fn = tolerate()(test_function)\n fn()", "def test_tolerate_decorated_function_raise_if_disabled():\n def test_function():\n raise AttributeError()\n fn = tolerate()(test_function)\n # disable\n tolerate.disabled = True\n fn()", "def passit(func):\n @functools.wraps(func)\n def wrapper_makepass(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n # We're not here for the good code, are we?\n except Exception: # pylint: disable=broad-except\n return True\n return wrapper_makepass", "def warn_undefined(func):\r\n\r\n def wrapped(self, *args, **kwargs):\r\n print(\"Lexicon [{0}] did not define API method: {1}\"\r\n .format(self.__class__.__name__,\r\n func.__name__))\r\n return func(self, *args, **kwargs)\r\n\r\n return wrapped", "def test_tolerate_decorated_function_raise_if_switch_fail():\n def test_function():\n raise AttributeError()\n def test_switch(*args, **kwargs):\n return False, args, kwargs\n fn = tolerate(switch=test_switch)(test_function)\n fn()", "def test_tolerate_decorated_function_fail_silently_if_exception_is_found():\n def test_function():\n raise AttributeError()\n fn = tolerate(exceptions=[AttributeError])(test_function)\n fn()", "def _canDisable(func):\n def wrapper(*args, **kwargs):\n if _DISABLE_ASSERTIONS == 0:\n return func(*args, **kwargs)\n return wrapper", "def _func_only(func):\n if inspect.isfunction(func):\n return\n else:\n raise Exception(\"Only functions can be tasks\")", "def checkrun(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n wrapper.didrun = True\n return f(*args, **kwargs)\n wrapper.didrun = False\n return wrapper", "def safe_call(func):\r\n\r\n @wraps(func)\r\n def _func(*args, **kwargs):\r\n try:\r\n return func(*args, **kwargs)\r\n except GAEError, e:\r\n raise DatabaseError, DatabaseError(str(e)), sys.exc_info()[2]\r\n return _func", "def _disable_decorator(msg):\n def decorator(func):\n @functools.wraps(func)\n def _wrapper(self, *args, **kwargs):\n raise RuntimeError(msg.format(func.__name__))\n _wrapper.__doc__ = None\n return _wrapper\n return decorator", "def test_tolerate_decorated_function_raise_if_exception_is_not_found():\n def test_function():\n raise AttributeError()\n fn = tolerate(exceptions=[KeyError])(test_function)\n fn()", "def _not_a_fixture_decorator(func):\n raise StepError(\"Cannot be used as a decorator when the fixture is specified\")", "def important(func):\n\n def decorated(*args, **kwargs):\n \"\"\"Decorated method.\"\"\"\n runLog.important(func(*args, **kwargs))\n\n return decorated", "def decorator(func):\n\n pass", "def func(self):\r\n ##Warn: W0201\r\n self._func = 42", "def test_limit_as_runs_with_spawn_raises() -> None:\n with pytest.raises(ValueError):\n\n @restricted(name=\"hello\", context=\"spawn\")\n def limited_func_with_decorator_spawn() -> None:\n \"\"\"A restricted function\"\"\"\n pass", "def run_strict(f, *args, **kwargs):\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\")\n return 
f(*args, **kwargs)", "def wrapit(fn):\n def inside(dummy, *args):\n try:\n return fn(*args)\n except Exception as e:\n print(\"Error in XSLT extension: %s\" % e)\n raise\n return inside", "def wrapper(*args, **kwargs):\n logger.warn(\"Deprecated function {0}. Please use '{1}' instead.\".format(func.__name__, use_instead))\n return func(*args, **kwargs)", "def warn(func):\n\n def decorated(*args, **kwargs):\n \"\"\"Decorated method.\"\"\"\n runLog.warning(func(*args, **kwargs))\n\n return decorated", "def test_ignore_lack_of_metadata():\n\n def original(something, dispatcher, intent):\n \"\"\"Original!\"\"\"\n pass\n\n new_func = partial(original, \"something\")\n original.attr = 1\n wrapped = do(new_func)\n assert wrapped.__name__ == \"do_wrapper\"", "def __call__(self, func):\n @wraps(func)\n def suppressed_func(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return suppressed_func", "def _fixup_func_caller(cmd, **kwargs):\n def wrapper():\n result = _run(cmd, **kwargs)\n if result.returncode not in (None, 0):\n return result.stdout\n return None\n return wrapper", "def deprecated_inner(func):\n def wrapper(*args, **kwargs):\n \"\"\" wrapper for deprecated decorator\n \"\"\"\n logger.warn(\"Deprecated function {0}. Please use '{1}' instead.\".format(func.__name__, use_instead))\n return func(*args, **kwargs)\n wrapper.__name__ = func.__name__\n wrapper.__doc__ = func.__doc__\n wrapper.__dict__.update(func.__dict__)\n return wrapper", "def func():\n pass", "def dummy_wrapper(func):\n return func", "def exsafe(func):\n error_msg_template=\"{{}} executing function '{}':\".format(func.__name__)\n @func_utils.getargsfrom(func,hide_outer_obj=True) # PyQt slots don't work well with bound methods\n def safe_func(*args, **kwargs):\n with exint(error_msg_template=error_msg_template):\n return func(*args,**kwargs)\n return safe_func" ]
[ "0.67392856", "0.6506785", "0.6431633", "0.6427536", "0.64046365", "0.63920295", "0.63743603", "0.63304853", "0.6317589", "0.62874895", "0.6287107", "0.62396675", "0.6211268", "0.6200986", "0.6190534", "0.6165075", "0.61492383", "0.6097565", "0.6093927", "0.6078514", "0.60504395", "0.6045561", "0.60318446", "0.59866714", "0.5964382", "0.59572977", "0.5938917", "0.5930918", "0.5925445", "0.5915245" ]
0.7149867
0
set a Django field widget attribute
def set_field_widget_attributes(field, widget_attributes): for key, value in widget_attributes.items(): field.widget.attrs[key] = value if key == "class": # djangular overwrites widget classes using the built-in "widget_css_classes" attribute # so be sure to re-set it here field.widget_css_classes = value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def TCFieldWidget(field, request):\n widget = FieldWidget(field, TCWidget(request))\n # widget.label = u'' # don't show the label twice\n return widget", "def XMLTextFieldWidget(field, request):\n return FieldWidget(field, XMLTextWidget(request))", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def __setattr__(self, name: str, value: Any) -> None:\n if self._initialized:\n for widget in self._list:\n if name == widget.name:\n raise AttributeError(\n \"Cannot set attribute with same name as a widget\\n\"\n \"If you are trying to change the value of a widget, use: \"\n f\"`{self.__class__.__name__}.{name}.value = {value}`\",\n )\n object.__setattr__(self, name, value)", "def DatasetDictFieldWidget(field, request):\n return FieldWidget(field, DatasetDictWidget(request))", "def FutureDatasetsFieldWidget(field, request):\n return FieldWidget(field, FutureDatasetsWidget(request))", "def text_field(self, value):\n self.set_property(\"TextField\", value)", "def update_field_widget_attributes(field, widget_attributes):\n for key, value in widget_attributes.items():\n try:\n current_attributes = field.widget.attrs[key]\n field.widget.attrs[key] = \"%s %s\" % (current_attributes, value)\n except KeyError:\n field.widget.attrs[key] = value\n if key == \"class\":\n # djangular overwrites widget classes using the built-in \"widget_css_classes\" attribute\n # so be sure to re-set it here\n try:\n current_widget_css_classes = field.widget_css_classes\n field.widget_css_classes = \"%s %s\" % (current_widget_css_classes, value)\n except AttributeError:\n field.widget_css_classes = value", "def setWidgetProperty(self, QWidget, p_str, Any): # real signature unknown; restored from __doc__\n pass", "def XPathFieldWidget(field, request):\n return FieldWidget(field, XPathWidget(request))", "def on_widget_edited(self, value): # this is a slot\n # note this is exactly the same as @value.setter...\n self.value = value", "def __init__(self, *args, **kwargs):\n super(CustomAuthenticationForm, self).__init__(*args, **kwargs)\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs) \n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def set_attribute(self, name, value):\n\n pass", "def _set_field(self, instrument_name, parameter_name, field, value, force_update):\n if self.verbose >= 2:\n print('_set_field: %s %s: %s' % (instrument_name, parameter_name, str(value)))\n tree_widget = self._itemsdict[instrument_name][parameter_name]['widget']\n double_box = self._itemsdict[instrument_name][parameter_name]['double_box']\n\n field_index = self._fields.index(field)\n\n double_value = False\n if field_index == 0 and double_box is not None:\n double_value = True\n if not double_value:\n tree_widget.setText(field_index + 1, str(value))\n else:\n # update a float value\n try:\n update_value = np.abs(tree_widget.value() - value) > 1e-9\n except Exception as ex:\n logging.debug(ex)\n update_value = True\n if update_value or force_update:\n if not double_box.hasFocus(): # do not update when editing\n logging.debug('update %s to %s' % (parameter_name, value))\n try:\n oldstate = double_box.blockSignals(True)\n double_box.setValue(value)\n double_box.blockSignals(oldstate)\n except Exception as ex:\n 
logging.debug(ex)", "def ButtonFieldWidget(field, request): # pylint: disable=invalid-name\n button = FieldWidget(field, ButtonWidget(request))\n button.value = field.title\n return button", "def XPSetWidgetProperty(inWidget, inProperty, inValue):\n pass", "def field(self, field):\n\n self._field = field", "def DateTimeFieldWidget(field, request):\n return z3c.form.widget.FieldWidget(field, DateTimeWidget(request))", "def guiField(self, value):\n return None", "def MonthYearFieldWidget(field, request):\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))", "def add_js_attribute(self, name, value):\n self.set(name, value.generate_action(None, self.renderer))", "def set(self, attrname, value):\n setattr(self, attrname, value)\n self.dirty = True", "def register_qt_field(self, name, widget):\n self._wid.registerField(name, widget)", "def __setattr__(self, attr_k, val):\n # Dynamically setting the value of the Field\n try:\n attr = object.__getattribute__(self, attr_k)\n except AttributeError:\n attr = None\n if issubclass(attr.__class__, Field):\n attr.value = val\n else:\n return object.__setattr__(self, attr_k, val)", "def update_widget_attribute(self, pyte_widget: Pyted_Widget_Type, attr: str,\n new_value: Union[str, bool, List[str]],\n init=False) -> Union[None, tuple]:\n\n old_value = getattr(pyte_widget, attr)\n\n if not init:\n setattr(pyte_widget, attr, new_value)\n\n try:\n tk_widget = pyte_widget.tk_name\n except AttributeError:\n tk_widget = None\n\n attr_template = pyte_widget.get_code_template(attr)\n\n if attr_template == pyted_widget_types.CONFIG_CODE:\n tk_widget[attr] = getattr(pyte_widget, attr)\n\n elif attr_template == pyted_widget_types.TITLE_CODE:\n return\n\n elif attr_template == pyted_widget_types.GRID_CODE:\n if init:\n # when user form is drawn grid placement will be handled by user form initialisation code\n return\n try:\n old_position = {'row': tk_widget.grid_info()['row'], 'column': tk_widget.grid_info()['column']}\n new_position = {'row': tk_widget.grid_info()['row'], 'column': tk_widget.grid_info()['column']}\n except KeyError:\n # widget has remove set true so no need to update tk_widget\n return\n new_attr_val = getattr(pyte_widget, attr)\n new_position[attr] = new_attr_val\n if (int(new_position['row']) >= int(self.widgets.find_pyte_parent(pyte_widget).number_rows) or\n int(new_position['column']) >= int(self.widgets.find_pyte_parent(pyte_widget).number_columns)):\n # pyte_widget.row = old_position['row']\n # pyte_widget.column = old_position['column']\n pyte_widget.remove = True\n pyte_widget.tk_name.grid_remove()\n self.handles.remove_selected_widget_handles()\n self.user_form.new_filler_label(self.widgets.find_tk_parent(pyte_widget),\n old_position['column'], old_position['row'])\n messagebox.showwarning('Widget being moved off grid',\n 'Row or column greater than grid size. Widget has been removed. 
'\n 'To get widget back move back onto grid and set remove to false')\n else:\n\n filler_widget = self.widgets.find_tk_parent(pyte_widget).grid_slaves(row=new_position['row'],\n column=new_position['column'])[0]\n if filler_widget not in self.user_form.filler_labels and filler_widget != pyte_widget.tk_name:\n # trying to move widget onto existing widget\n pyte_widget.remove = True\n pyte_widget.tk_name.grid_remove()\n self.handles.remove_selected_widget_handles()\n self.user_form.new_filler_label(self.widgets.find_tk_parent(pyte_widget),\n old_position['column'], old_position['row'])\n messagebox.showwarning('Widget being moved onto existing widget',\n 'Row and column the same as another widget. Widget has been removed. '\n 'To get widget back move back onto empty slot and set remove to false')\n return\n filler_widget.grid(row=old_position['row'], column=old_position['column'])\n tk_widget.grid({attr: new_attr_val})\n self.handles.place_selected_widget_handles(pyte_widget.tk_name)\n\n elif attr_template == pyted_widget_types.GRID_SIZE_CODE:\n if init:\n # when user form is drawn the widget parent will be handled by user form initialisation code\n return\n self.user_form.empty_tk_container_widget(pyte_widget)\n self.user_form.fill_tk_container_frame(pyte_widget)\n self.handles.place_selected_widget_handles(pyte_widget.tk_name)\n\n elif attr_template == pyted_widget_types.ROW_CONFIGURE or attr_template == pyted_widget_types.COLUMN_CONFIGURE:\n # row and column configuration handled elsewhere in program\n pass\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and attr == 'remove':\n if init:\n # when user form is drawn grid_remove will be handled by user form initialisation code\n return\n\n tk_widget_in_grid = not(len(pyte_widget.tk_name.grid_info()) == 0)\n if getattr(pyte_widget, 'remove'):\n if tk_widget_in_grid:\n widget_to_hide = pyte_widget\n self.user_form.new_filler_label(self.widgets.find_tk_parent(widget_to_hide), widget_to_hide.column,\n widget_to_hide.row)\n widget_to_hide.tk_name.grid_remove()\n self.handles.remove_selected_widget_handles()\n else:\n # remove attribute is false, if widget not displayed then try to display it\n if not tk_widget_in_grid:\n # check that the widget is on the grid\n if (int(pyte_widget.row) >= int(self.widgets.find_pyte_parent(pyte_widget).number_rows) or\n int(pyte_widget.column) >= int(self.widgets.find_pyte_parent(pyte_widget).number_columns)):\n messagebox.showwarning('Widget off grid',\n 'Row or column greater than grid size. '\n 'To get widget back move back onto grid and set remove to false')\n setattr(pyte_widget, 'remove', True)\n return\n # check that there is not a widget already visible\n filler_widget = self.widgets.find_tk_parent(pyte_widget).grid_slaves(row=pyte_widget.row,\n column=pyte_widget.column)[0]\n if filler_widget not in self.user_form.filler_labels:\n pyte_widget.remove = True\n pyte_widget.tk_name.grid_remove()\n # self.remove_selected_widget_handles()\n messagebox.showwarning('Existing widget at grid location',\n 'Row and column the same as another widget. 
'\n 'To get widget back move onto empty slot and set remove to false')\n return\n # remove filler label and show user widget\n filler_widget = self.widgets.find_tk_parent(pyte_widget).grid_slaves(row=pyte_widget.row,\n column=pyte_widget.column)[0]\n filler_widget.grid_forget()\n filler_widget.destroy()\n pyte_widget.tk_name.grid(row=pyte_widget.row, column=pyte_widget.column)\n self.handles.place_selected_widget_handles(pyte_widget.tk_name)\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and attr == 'name':\n if init:\n # when user form is drawn the widget name will be handled by user form initialisation code\n return\n # check name is really changed\n if new_value == old_value:\n return\n # check name is not already taken\n for i_pyte_widget in self.widgets.widget_list:\n if i_pyte_widget != pyte_widget:\n if pyte_widget.name == i_pyte_widget.name:\n # can't messagebox here as this would move focus out of entry box and cause binding to run again\n # messagebox.showwarning('Renaming problem',\n # 'Name already exists for another widget and Name not changed')\n setattr(pyte_widget, attr, old_value)\n return 'Renaming problem', 'Name already exists for another widget and Name not changed'\n for i_pyte_widget in self.widgets.widget_list:\n if i_pyte_widget.parent == old_value:\n i_pyte_widget.parent = new_value\n # self.update_navigator_tree()\n self.navigator_tree_obj.navigator_tree_change_item_name(pyte_widget, old_value)\n # raise Exception(f'renaming widget not yet implemented')\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and (attr == 'comment'):\n if init:\n # when user form is drawn the tk_name will be handled by user form initialisation code\n return\n return\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and (attr == 'win_close'):\n if init:\n # when user form is drawn the tk_name will be handled by user form initialisation code\n return\n return\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and (attr == 'tab_text'):\n if init:\n # when user form is drawn the tk_name will be handled by user form initialisation code\n return\n tk_parent = self.widgets.find_tk_parent(pyte_widget)\n if isinstance(tk_parent, ttk.Notebook):\n tk_parent.tab(pyte_widget.tk_name, text=new_value)\n # self.widgets.find_tk_parent(pyte_widget).tab(pyte_widget.tk_name, text=new_value)\n return\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and attr == 'tk_name':\n if init:\n # when user form is drawn the tk_name will be handled by user form initialisation code\n return\n raise Exception(f'renaming tk_name for widget should not occur')\n\n elif attr_template == pyted_widget_types.BESPOKE_CODE and attr == 'parent':\n # not used as parent attribute not shown in attribute edit frame\n if init:\n # when user form is drawn the widget parent will be handled by user form initialisation code\n return\n raise Exception(f'renaming widget parent not yet implemented')\n\n elif attr_template == pyted_widget_types.VAR_SET_CODE:\n setattr(pyte_widget, pyted_widget_types.VAR_SET_CODE, new_value)\n\n elif attr_template.startswith('<'):\n if init:\n # when user form is drawn the widget parent will be handled by user form initialisation code\n return\n return\n\n else:\n raise Exception(f'attr_template \"{attr_template}\" for \"{attr}\" not yet configured')\n # print(f'attr_template {attr_template} not yet implemented for {attr}')", "def widget(self, field):\n if not field.available(self.form):\n return None\n mode = str(getValue(field, 'mode', self.form))\n return 
component.getMultiAdapter(\n (field, self.form, self.request),\n interfaces.IWidget,\n name=mode)", "def LocationFieldWidget(field, request):\n return FieldWidget(field, LocationWidget(request))", "def init_widget(self):", "def set_attribute(self, name, value):\n setattr(self, '%s__' % name, value_or_none(value))" ]
[ "0.65111285", "0.62745863", "0.6249279", "0.6230223", "0.6206397", "0.6205999", "0.61867696", "0.6174277", "0.612251", "0.60981774", "0.59975064", "0.5996624", "0.5969774", "0.59523904", "0.58594465", "0.5841845", "0.5819242", "0.5793698", "0.57847726", "0.57839906", "0.5771275", "0.57436913", "0.5739679", "0.5738422", "0.5730001", "0.5715046", "0.57149446", "0.56586546", "0.56182027", "0.56066483" ]
0.6904253
0
If this is a LazyObject, then get the actual object rather than the lazy indirection. Written in support of 523 to cope with pickling changes _after_ serializing a session (recall that I cache stuff on the session).
def evaluate_lazy_object(obj): wrapped_obj = getattr(obj, LAZY_OBJECT_NAME, None) if wrapped_obj is None: # if it isn't a lazy object then just return the original object... return obj if wrapped_obj is uninitialized_lazy_object: # if it is a lazy object but, hasn't been initialized yet # then initialize it & return it obj._setup() return getattr(obj, LAZY_OBJECT_NAME) # return the lazy object... return wrapped_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obj(self):\n if not self._obj:\n self._get()\n return self._obj", "def itemFromProxy(obj):\n return object.__getattribute__(obj, '_sharedItem')", "def __self__(self):\n if self._ob is not None:\n return self._ob()", "def obj(self):\r\n return self._obj", "def _get_cached_instance(self):\n\n try:\n identifier = self._get_identifier()\n except (ValueError, ObjectDoesNotExist) as error:\n if self._fail_silently:\n return None\n raise LazyModelObjectError(exc=error) from error\n\n # Get the cache key, basically just namespacing the identifier\n cache_key = model_cache_key(identifier)\n\n cache, timeout = self._cache\n cace: BaseCache\n if cache_key in cache:\n instance = cache.get(cache_key)\n else:\n instance = self._get_instance(identifier)\n cache.set(cache_key, instance, timeout=timeout)\n\n if instance is None and not self._fail_silently:\n raise LazyModelObjectError(f'{identifier} not found.')\n return instance", "def get_object(self, *args, **kwargs):\n cache_key = \"_cache_get_object\"\n if not hasattr(self, cache_key):\n setattr(self, cache_key, super(PageDetailsMixin, self).get_object(*args, **kwargs))\n return getattr(self, cache_key)", "def get_object(self, name):\n return weakref.proxy(self._get_real_object(name))", "def obj(self):\n return self._obj", "def get_by_name(self, class_name, object_name, session):", "def __getitem__(self, objectId: str):\n return super()._getitem(\n objectId, f=lambda x: registry.getObject(x, self.session)\n )", "def load_from_session(cls, serializer: URLSafeSerializer, session):\n return cls.load(serializer, session.get(SESSION_STORE_KEY, None))", "def lookup_obj(self,):\n return self._lookup_obj", "def __getstate__(self):\n # construct a list of unpicklable entties and exclude them from pickling\n nope = ['_divisionClassifier', '_assembledObjects']\n d = dict((key, val) for key, val in self.__dict__.items() if key not in nope) # deepcopy needed\n return d", "def _get_instance(self):", "def _get_instance(self):", "def get_object(self):\n if getattr(self, 'current_instance', None):\n ret = self.current_instance\n else:\n ret = super().get_object()\n return ret", "def current_object(proxy):\n return proxy.__current_object__()", "def __getstate__(self):\n with self.override_evaluator(None):\n loaded_from = self.__loaded_from\n try:\n self.__loaded_from = None\n return prepare_dict(self.__dict__)\n finally:\n self.__loaded_from = loaded_from", "def _deserialize_session_stored(self, session, deserialize=pickle.loads):\n _session_id = session.session_id\n _session_data = session.redis.store[_session_id]\n _session_deserialized = deserialize(_session_data)\n return _session_deserialized", "def orig_obj(self):\n return self._orig_obj", "def orig_obj(self):\n return self._orig_obj", "def orig_obj(self):\n return self._orig_obj", "def get_object(self):\n return self._object", "def __current_object__(self):\n return self.__lookup()", "def _get_current_object(self):\n loc = object.__getattribute__(self, '_Proxy__local')\n if not hasattr(loc, '__release_local__'):\n return loc(*self.__args, **self.__kwargs)\n try:\n return getattr(loc, self.__name__)\n except AttributeError:\n raise RuntimeError('no object bound to {0.__name__}'.format(self))", "def get_obj(self, key):\n obj = self.get(key)\n try:\n obj = pickle.loads(obj)\n except TypeError:\n obj = None\n return obj", "def get(self, obj):", "def __init__(self, session):\n self._session = session", "def get_object ( self, object ):\n return object", "def _get_current_object(self):\n if not 
hasattr(self.__local, '__release_local__'):\n return self.__local()\n try:\n return getattr(self.__local, self.__name__)\n except AttributeError:\n raise RuntimeError('no object bound to %s' % self.__name__)" ]
[ "0.64511096", "0.6139487", "0.60631067", "0.59895074", "0.5965974", "0.58726513", "0.5867599", "0.5861102", "0.5825755", "0.57879114", "0.57749474", "0.5773256", "0.5772455", "0.5748728", "0.5748728", "0.57446975", "0.5741606", "0.5738076", "0.571345", "0.56780285", "0.56780285", "0.56780285", "0.567505", "0.56423324", "0.56191415", "0.56188476", "0.56138575", "0.56116825", "0.5606612", "0.5605543" ]
0.63453275
1
Validator function to use with fileFields. Ensures the file being uploaded matches a set of allowed extensions.
def validate_file_extension(value, valid_extensions): if not value.name.split(".")[-1] in valid_extensions: raise ValidationError("Invalid File Extension.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_extension(value):\n if (not value.name.endswith('.png') and\n not value.name.endswith('.jpeg') and\n not value.name.endswith('.gif') and\n not value.name.endswith('.bmp') and\n not value.name.endswith('.jpg')):\n\n text = _(\"Files allowed\")\n files = \".jpg, .jpeg, .png, .gif, .bmp\"\n raise ValidationError(text + ': ' + files)", "def test_extensions(self):\n field = TypedFileField(required=False, ext_whitelist=self.good_extensions)\n\n for ext in self.good_extensions:\n name = 'somefooname.%s' % ext\n file = UploadedFile(name=name, size=1)\n assert field.clean(file) is file\n\n for ext in self.bad_extensions:\n name = 'somefooname.%s' % ext\n file = UploadedFile(name=name, size=1)\n with pytest.raises(forms.ValidationError):\n field.clean(file)", "def test_allowed_file(self):\r\n u = Uploader()\r\n for ext in u.allowed_extensions:\r\n # Change extension to uppercase to check that it works too\r\n filename = 'test.%s' % ext.upper()\r\n err_msg = (\"This file: %s should be allowed, but it failed\"\r\n % filename)\r\n assert u.allowed_file(filename) is True, err_msg\r\n\r\n err_msg = \"Non allowed extensions should return false\"\r\n assert u.allowed_file('wrong.pdf') is False, err_msg", "def validate_file_extension(self):\n extension = os.path.splitext(self.name)[1] # [0] returns path+filename\n if extension.lower() in settings.CONTENT_TYPES:\n if self.size > int(settings.MAX_UPLOAD_SIZE):\n raise ValidationError(_(f'Veliฤina fajl-a mora da bude ispod'\n f' {filesizeformat(settings.MAX_UPLOAD_SIZE)}.'\n f' Trenutna veliฤina je {filesizeformat(self.size)}'))\n else:\n raise ValidationError('Nije podrลพan ovaj tip fajl-a. Mora biti .pdf formata!')", "def check_file_extensions(fname, extensions):\n if fname is None:\n return\n assert isinstance(extensions, tuple), \"The 'extensions' must be a tuple.\"\n if not fname.endswith(extensions):\n raise ValueError(\"Invalid file extension (%s). Must be one of %s\" % extensions)", "def test_11_is_allowed_file_correct_ext(self):\n\n for ext in list(ALLOWED_EXTENSIONS):\n filename = f\"somename.{ext}\"\n is_allowed = utils.is_allowed_file(filename)\n self.assertTrue(is_allowed)", "def check_file(file: UploadFile) -> bool:\n # accept all image, video and audio types\n mimetype = mimetypes.guess_type(file.filename)[0]\n if mimetype is not None and mimetype.split(\"/\")[0] in {\"image\", \"audio\", \"video\"}:\n return True\n # if not, only accept whitelisted file extensions\n ext = os.path.splitext(file.filename)[1]\n if ext not in settings.FILE_EXTENSION_WHITELIST:\n raise FileValidationError(f\"{file.filename} is an invalid file type\")\n return True", "def sane_file_extensions(files, extensions):\n for file in files:\n filename, extension = os.path.splitext(file)\n if extension not in extensions:\n print(\"Invalid extension: \" + file)\n return False\n return True", "def check_allowed_extension(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def test_10_is_allowed_file_wrong_ext(self):\n\n filename = \"somename.pdf\"\n is_allowed = utils.is_allowed_file(filename)\n self.assertFalse(is_allowed)", "def _file_check(self, file_path, ext=None):\n\n errors = []\n if not file_path or file_path.isspace():\n errors.append(\n VerifierError(\n subject=self,\n local_error=\"Gromacs file name is white space.\",\n global_error=(\n \"Gromacs file not specified.\"\n ),\n )\n )\n\n if ext is not None:\n if not file_path.endswith('.{}'.format(ext)):\n errors.append(\n VerifierError(\n subject=self,\n local_error=\"File extension does not match required.\",\n global_error=(\n \"File is not a valid Gromacs file type.\"\n ),\n )\n )\n\n return errors", "def check_file_name_extensions(self, file_name, input_output):\n file_type = FileTypes ()\n extension_types = file_type.get_extension_types ()\n for extension in extension_types:\n if file_name.endswith (extension):\n if input_output == 'input':\n self._input_file = file_type.get_file_type (extension)\n else:\n self._output_file = file_type.get_file_type (extension)\n return True\n print (\"File name must end with:\")\n for extension in extension_types:\n print (extension)\n return False", "def validate_ext(file: PathLike, extension: Union[str, tuple]) -> None:\n if isinstance(extension, str):\n extension = (extension,)\n elif isinstance(extension, tuple):\n if not all([isinstance(element, str) for element in extension]):\n raise TypeError(\n \"Must specify all valid extensions as strings, but value was \\n\"\n f\"'{extension}' with types: {[type(element) for element in extension]}\"\n )\n else:\n raise TypeError(f\"Extension must be str or tuple but type was {type(extension)}\")\n\n if not (isinstance(file, str) or isinstance(file, PurePath)):\n raise TypeError(f\"File must be a str or a pathlib.Path, but type of file was {type(file)}.\\n\" f\"File: {file}\")\n\n # we need to use `endswith` instead of\n # e.g. comparing with `pathlib.Path.suffix`\n # because suffix won't work for \"multi-part\" extensions like '.not.mat'\n if not any([str(file).endswith(ext) for ext in extension]):\n raise ValueError(f\"Invalid extension for file: {file}.\\n\" f\"Valid extension(s): '{extension}'\")", "def file_allowed(self):\n if self._allowed_ext:\n if self.get_ext() not in self._allowed_ext:\n return False\n \n return True", "def allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "def check_file_type(fname):\n ext = path.splitext(fname)[1]\n return ext in allowed_extensions", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS", "def allowed_file(filename):\r\n return '.' 
in filename and \\\r\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "def allowed_file(filename, allowed_extensions):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in allowed_extensions", "def allowed_file(self,filename):\n \n return '.' in filename and filename.rsplit('.', 1)[1].lower() in self.allowed_extensions", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS" ]
[ "0.7897772", "0.7662936", "0.7593943", "0.7567712", "0.75481987", "0.7404324", "0.73468167", "0.72607017", "0.71938854", "0.707316", "0.701128", "0.70094186", "0.6994248", "0.69769514", "0.69355", "0.69355", "0.6934976", "0.69339526", "0.69339526", "0.69339526", "0.69339526", "0.69339526", "0.69339526", "0.69212854", "0.6921241", "0.69064885", "0.6894103", "0.6894103", "0.6894103", "0.6894103" ]
0.8191283
0
Generates a list of all positions that the Queen can attack. Alternatively, the more elegant diagonal summation/difference checks could be used.
def generateQueenAttacks(boardsize, pos): assert isinstance(pos, Position) and validatePosition(boardsize, pos) attackList = [] startPos = Position(pos.x, pos.y) def addAttackList(pos): for attacked in attackList: if pos.compare(attacked): return attackList.append(Position(pos.x, pos.y)) #positive x while pos.x < boardsize: addAttackList(Position(pos.x, pos.y)) pos.x = pos.x + 1 pos.x = startPos.x pos.y = startPos.y #positive y while pos.y < boardsize: addAttackList(Position(pos.x, pos.y)) pos.y = pos.y + 1 pos.x = startPos.x pos.y = startPos.y #negative x while pos.x >= 0: addAttackList(Position(pos.x, pos.y)) pos.x = pos.x - 1 pos.x = startPos.x pos.y = startPos.y #negative y while pos.y >= 0: addAttackList(Position(pos.x, pos.y)) pos.y = pos.y - 1 pos.x = startPos.x pos.y = startPos.y #diagonal -x +y left bottom while pos.x >= 0 and pos.y < boardsize: addAttackList(Position(pos.x, pos.y)) pos.x = pos.x - 1 pos.y = pos.y + 1 pos.x = startPos.x pos.y = startPos.y #diagonal -x -y left top while pos.x >= 0 and pos.y >= 0: addAttackList(Position(pos.x, pos.y)) pos.x = pos.x - 1 pos.y = pos.y - 1 pos.x = startPos.x pos.y = startPos.y #diagonal +x +y right bottom while pos.x < boardsize and pos.y < boardsize: addAttackList(Position(pos.x, pos.y)) pos.x = pos.x + 1 pos.y = pos.y + 1 pos.x = startPos.x pos.y = startPos.y #diagonal +x -y right top while pos.x < boardsize and pos.y >= 0: addAttackList(Position(pos.x, pos.y)) pos.x = pos.x + 1 pos.y = pos.y - 1 pos.x = startPos.x pos.y = startPos.y return attackList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moves(self):\n move_list = list()\n for i in range(self.n):\n row = self.queens[i][0]\n col = self.queens[i][1]\n for rd in [-1,0,1]:\n for cd in [-1,0,1]:\n if (rd == 0) and (cd == 0):\n continue\n new_pos = [row+rd, col+cd]\n if (new_pos[0] >= 0) and (new_pos[0] < self.n) and (new_pos[1] >= 0) and (new_pos[1] < self.n):\n if not new_pos in self.queens: \n move_list.append([i, new_pos])\n\n return move_list", "def find_moves(self):\n\n from itertools import product\n free_position = self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]", "def available_moves(self):\n available_moves = []\n for i in range(self.quadrants_count):\n quadrant_positions = self.play_area[i].available_positions()\n for p in quadrant_positions:\n position = p + i * 9\n for j in range(self.quadrants_count):\n move1 = [str(position), str(j + 1), \"l\"]\n move2 = [str(position), str(j + 1), \"r\"]\n available_moves.append(\" \".join(move1))\n available_moves.append(\" \".join(move2))\n return available_moves", "def get_legal_moves(self):\n moves = []\n if self.player_locations[self.whose_turn] is None:\n return self.get_blank_locations()\n matrix = [(1,0), (-1,0), (0,1), (0,-1), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\n for dx, dy in matrix:\n x,y = self.player_locations[self.whose_turn]\n while x+dx <= xdim and x+dx >= 0 and y+dy <= ydim and y+dy >= 0:\n x = x+dx\n y = y+dx\n if self.board[x][y] : break\n moves.append((x,y))\n return moves", "def get_all_moves(self):\n # 2d matrix of true/false, true if something can be placed\n legal_move_board = []\n possible_move_list = []\n for row in range(self.size):\n move_row = []\n for col in range(self.size):\n empty = self.board[row][col].state == PegState.EMPTY\n move_row.append(empty)\n if empty:\n possible_move_list.append((row, col))\n legal_move_board.append(move_row)\n \n # every position where something can be placed (list of tuples) (Combined with above)\n \"\"\" possible_move_list = []\n for row in range(self.size):\n for col in range(self.size):\n if legal_move_board[row][col] == True:\n possible_move_list.append((row, col))\n \"\"\"\n return legal_move_board, possible_move_list", "def available_moves(self):\n moves = []\n for x, y in self.available_boards:\n moves.extend([self.to_position(x, y, i, j) for (i, j)\n in self.boards[x][y].empty_squares])\n return moves", "def free_positions(self):\n positions = []\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n if self.grid[i][j] == 0:\n positions.append((i, j))\n if positions == []:\n raise GameException('Game Over. 
No free position left.')\n return positions", "def get_legal_moves(self):\n # for each square in the castle figure out if an moves can occur from it.\n moves = []\n allowed = [self.turn]\n if self.turn == DEFENDER:\n allowed.extend((KING, CASTLE_OCCUPIED))\n it = np.nditer(self.board_state, flags=['multi_index'])\n while not it.finished:\n index = it.multi_index\n curr_loc = it[0]\n if curr_loc in allowed:\n moves.extend(self.get_legal_move_piece(curr_loc, index))\n it.iternext()\n return moves", "def generate_possible_moves(self):\r\n\t\t# Moves:\r\n\t\t# 0 - North\r\n\t\t# 1 - East\r\n\t\t# 2 - South\r\n\t\t# 3 - West\r\n\r\n\t\tmoves = []\r\n\r\n\t\tif self.x != 0:\r\n\t\t\tmoves.append(0)\r\n\t\tif self.y != self.n-1:\r\n\t\t\tmoves.append(1)\r\n\t\tif self.x != self.n-1:\r\n\t\t\tmoves.append(2)\r\n\t\tif self.y != 0:\r\n\t\t\tmoves.append(3)\r\n\r\n\t\treturn moves", "def get_valid_moves(self):\n if self.king:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1],\n [self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n else:\n if self.player == 1:\n valid_moves = [[self.row + 1, self.col + 1],\n [self.row + 1, self.col - 1]]\n else:\n valid_moves = [[self.row - 1, self.col - 1],\n [self.row - 1, self.col + 1]]\n return valid_moves", "def get_all_moves(self, board, player):\n result = []\n for startx in range(8):\n for starty in range(8):\n for destx in range(8):\n for desty in range(8):\n if self.is_legal_move(board, [startx, starty], [destx, desty], player):\n result.append([[startx, starty], [destx, desty]])\n return result", "def possible_moves(self): \n return [a + 1 for a, b in enumerate(self.board) if b == 0]", "def moves(self):\n\n # define a full range, which we can compare against columns,\n # rows, or blocks. 
they're all the same when stored as sets.\n line = set(range(1, 10))\n moves = []\n\n # iterate every cell on the board\n for row in range(0, 9):\n for col in range(0, 9):\n\n # ignore this cell if it's already filled\n i = self._index(col, row)\n if self.data[i] is not None:\n continue\n\n # fetch the adjacent cells\n row_values = set(self._row(row))\n col_values = set(self._column(col))\n bck_values = set(self._block(col, row))\n\n # subtract the values present in the adjacent cells\n # (since this cell *can't* be of any of those values),\n # to leave the list of possibilities for this cell\n missing = line.difference(row_values, col_values, bck_values)\n\n # if there's only *one* possibility, we've found the\n # solution to this cell\n if len(missing) == 1:\n moves.append((col, row, missing.pop()))\n\n return moves", "def get_all_positions(board, white_turn):\n list = []\n for row in range(8):\n for col in range(8):\n # White\n if white_turn and white_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_white((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_white((row, col), board):\n list.append(((row, col), valid_pos))\n # Black\n elif (not white_turn) and black_piece_on_pos((row, col), board):\n obj = board[row][col]\n if type(obj) is Pawn:\n for valid_pos in valid_positions_pawn_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Tower:\n for valid_pos in valid_positions_tower_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Bishop:\n for valid_pos in valid_positions_bishop_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Horse:\n for valid_pos in valid_positions_horse_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is Queen:\n for valid_pos in valid_positions_queen_black((row, col), board):\n list.append(((row, col), valid_pos))\n elif type(obj) is King:\n for valid_pos in valid_positions_king_black((row, col), board):\n list.append(((row, col), valid_pos))\n return list", "def GenerateMoves(position):\n return [move for move in POSSIBLE_MOVES if move <= position]", "def guarded_places(self):\n guarded = []\n for x in range(8):\n for y in range(8):\n if self.squares[x][y].piece and self.squares[x][y].piece.color != self.turn:\n squares = self.squares[x][y].piece.actions(self, (x, y), True)\n if self.squares[x][y].piece.name != 'pawn': # pawns capture in different areas than they move\n guarded.extend(squares[0])\n guarded.extend(squares[1])\n return guarded", "def available_positions(self):\n available_positions = []\n for i in range(self.positions_count):\n if self.board[i] == 0:\n available_positions.append(i+1)\n return available_positions", "def available_positions(self):\n if len([x for x in self.grid.values() if x[0] != None]) < 13:\n return [x for x in assignable_positions 
if self.grid[x][1] == \"---\"]\n else:\n return []", "def get_goat_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_goat():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def get_queen_moves(state, coord):\n queen_moves = []\n if state.active_color == cc.WHITE_ACTIVE:\n for vector in cc.QUEEN_VECTORS:\n queen_moves.extend(get_direction_moves(state, cc.W_QUEEN, coord, vector))\n elif state.active_color == cc.BLACK_ACTIVE:\n for vector in cc.QUEEN_VECTORS:\n queen_moves.extend(get_direction_moves(state, cc.B_QUEEN, coord, vector))\n else:\n raise Exception(\"GameState: Invalid Active Color\")\n return queen_moves", "def get_available_moves(self):\n available = []\n row, col = tuple(self.current_pos)\n if row - 1 >= 0 and self.maze[row - 1][col] != 'x':\n available.append('n')\n if row + 1 < len(self.maze) and self.maze[row + 1][col] != 'x':\n available.append('s')\n if col - 1 >= 0 and self.maze[row][col - 1] != 'x':\n available.append('w')\n if col + 1 < len(self.maze[row]) and self.maze[row][col + 1] != 'x':\n available.append('e')\n return available", "def getQOfPosition(self,a, b, c, d):\n '''\n the initial list of qOfPosition given by the paper \"The Secrets of Notakto\" Figure 6,\\\n the first element is the board position and second is corresponding value\n '''\n qOfPosition_List = \\\n [ #\"\"\"row 1\"\"\"\n #row 1 column 1\n [[False, False, False, False, False, False, False, False, False],c],\\\n #row 1 column 2\n [[True, False, False, False, False, False, False, False, False],1],\\\n #row 1 column 3\n [[False, True, False, False, False, False, False, False, False],1],\\\n #row 1 column 4\n [[False, False, False, False, True, False, False, False, False],c**2],\\\n #row 1 column 5\n [[True, True, False, False, False, False, False, False, False],a*d],\\\n #row 1 column 6\n [[True, False, True, False, False, False, False, False, False],b],\\\n #row 1 column 7\n [[True, False, False, False, True, False, False, False, False],b],\\\n #row 1 column 8\n [[True, False, False, False, False, True, False, False, False],b],\\\n #row 1 column 9\n [[True, False, False, False, False, False, False, False, True],a],\\\n #\"\"\"row 2\"\"\"\n #row 2 column 1\n [[False, True, False, True, False, False, False, False, False],a],\\\n #row 2 column 2\n [[False, True, False, False, True, False, False, False, False],b],\\\n #row 2 column 3\n [[False, True, False, False, False, False, False, True, False],a],\\\n #row 2 column 4 is a dead board, we will skip it \n #row 2 column 5\n [[True, True, False, True, False, False, False, False, False],b],\\\n #row 2 column 6\n [[True, True, False, False, True, False, False, False, False],a*b],\\\n #row 2 column 7\n [[True, True, False, False, False, True, False, False, False],d],\\\n #row 2 column 8\n [[True, True, False, False, False, False, True, False, False],a],\\\n #row 2 column 9\n [[True, True, False, False, False, False, False, True, False],d],\\\n #\"\"\"row 3\"\"\"\n #row 3 column 1\n [[True, True, False, False, False, False, False, False, True],d],\\\n #row 3 column 2\n [[True, False, True, False, True, False, False, False, False],a],\\\n #row 3 column 3\n [[True, False, True, False, False, False, True, False, False],a*b],\\\n #row 3 column 4\n [[True, False, True, False, False, False, False, True, False],a],\\\n #row 3 column 5\n [[True, False, False, False, True, True, False, False, False],a],\\\n #row 3 column 6 is a dead board, we 
will skip it \n #row 3 column 7\n [[True, False, False, False, False, True, False, True, False],1],\\\n #row 3 column 8\n [[False, True, False, True, True, False, False, False, False],a*b],\\\n #row 3 column 9\n [[False, True, False, True, False, True, False, False, False],b],\\\n #\"\"\"row 4\"\"\"\n #row 4 column 1, 2, 3, 4, 5 are all dead boards, we will skip them \n #row 4 column 6 \n [[True, True, False, True, True, False, False, False, False],a],\\\n #row 4 column 7\n [[True, True, False, True, False, True, False, False, False],a],\\\n #row 4 column 8\n [[True, True, False, True, False, False, False, False, True],a],\\\n #row 4 column 9\n [[True, True, False, False, True, True, False, False, False],b],\\\n #\"\"\"row 5\"\"\"\n #row 5 column 1\n [[True, True, False, False, True, False, True, False, False],b],\\\n #row 5 column 2,3 are dead boards\n #row 5 column 4\n [[True, True, False, False, False, True, True, False, False],b],\\\n #row 5 column 5\n [[True, True, False, False, False, True, False, True, False],a*b],\\\n #row 5 column 6 \n [[True, True, False, False, False, True, False, False, True],a*b],\\\n #row 5 column 7\n [[True, True, False, False, False, False, True, True, False],b],\\\n #row 5 column 8\n [[True, True, False, False, False, False, True, False, True],b],\\\n #row 5 column 9\n [[True, True, False, False, False, False, False, True, True],a],\\\n #\"\"\"row 6\"\"\"\n #row 6 column 1 is a dead board\n #row 6 column 2\n [[True, False, True, False, True, False, False, True, False],b],\\\n #row 6 column 3\n [[True, False, True, False, False, False, True, False, True],a],\\\n #row 6 column 4\n [[True, False, False, False, True, True, False, True, False],b],\\\n #row 6 column 5 is a dead board \n #row 6 column 6 \n [[False, True, False, True, False, True, False, True, False],a],\\\n #row 6 column 7,8,9 are dead boards\n #\"\"\"row 7\"\"\" \n #row 7 column 1-8 are dead boards\n #row 7 column 9 \n [[True, True, False, True, False, True, False, True, False],b],\\\n #\"\"\"row 8\"\"\"\n #row 8 column 1\n [[True, True, False, True, False, True, False, False, True],b],\\\n #row 8 column 2\n [[True, True, False, False, True, True, True, False, False],a],\\\n #row 8 column 3-7 are dead boards\n #row 8 column 8\n [[True, True, False, False, False, True, True, True, False],a],\\\n #row 8 column 9\n [[True, True, False, False, False, True, True, False, True],a],\\\n #\"\"\"row 9 are all dead boards\"\"\"\n #\"\"\"row 10\"\"\"\n #row 10 column 1-6, 8, 9 are dead boards \n #row 10 column 7\n [[True, True, False, True, False, True, False, True, True],a]\n #the rest are all dead boards\n ]#boundary of qOfPosition_List\n '''\n the above list doesn't consider all the situations, we need to add\\\n the rotated and reflected versions into them. 
we use a dictionary \\\n to store the (board, value) pair\n '''\n qOfPosition_Dict = {}\n for boardAndValue in qOfPosition_List:\n board = boardAndValue[0]\n boardValue = boardAndValue[1]\n #get the reflected version, the second one is the same as the initial one\n for i in range(2):\n board = self.reflectBoard(board)\n #get the rotated version, we need to call rotateBoard function four times\n #the fourth one is the same as initial one\n for j in range(4):\n #update the board by rotating\n board = self.rotateBoard(board)\n #transfer the board to a string for making a key\n boardKey = self.transferBoardToString(board)\n #check if the key has already existed\n if boardKey in qOfPosition_Dict:\n pass\n else:\n #if not exist, store the (boardKey, boardValue into the qOfPosition_Dict)\n qOfPosition_Dict.update({boardKey: boardValue})\n #print(len(qOfPosition_Dict))\n return qOfPosition_Dict", "def get_move_list(self):\n return [\n tuple(x) for x in np.argwhere(self.board == HexBoard.EMPTY).tolist()\n ]", "def positions_to_play(self):\r\n\r\n positions = []\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"0\":\r\n # Add [row, column] to the list\r\n positions.append([i, j])\r\n \r\n return positions", "def queen_diagonal_attack_4(x1, y1):\n return []", "def find_valid_posse(board: 'List') -> 'List':\n for i, a in enumerate(board):\n for j, b in enumerate(board):\n if j != i:\n for k, c in enumerate(board):\n if k not in (i, j) and \\\n is_valid_posse((a, b, c)):\n # print((i, j, k))\n return [a, b, c]", "def get_tiger_possible_moves(self) -> List:\n moves = []\n for pos in self.get_all_positions():\n if pos.is_tiger():\n addr_from = pos.address\n for addr_to in pos.piece.get_valid_moves():\n moves.append((addr_from, addr_to))\n\n return moves", "def checkPossibleMoves(self):\n possibleMovesArray = []\n\n for j in range(self.nrOfCars):\n minMaxChange = self.gridSize - self.length[j] + 1\n possibleMoves = []\n\n for i in range(1,minMaxChange):\n if self.checkMove(j, i) == 0:\n possibleMoves.append(i)\n else:\n break\n for i in range(1,minMaxChange):\n if self.checkMove(j, -i) == 0:\n possibleMoves.append(-i)\n else:\n break\n\n possibleMovesArray.append(possibleMoves)\n\n return possibleMovesArray", "def filled_positions(self):\n return [x for x in assignable_positions if self.grid[x][0]]", "def possible_moves(self, board):\n\n coordinate_list = []\n algebraic_from = JanggiGame.translate_to_algebraic(self._location)\n for i, col in enumerate(board):\n for j, row in enumerate(col):\n algebraic_to = JanggiGame.translate_to_algebraic([i,j])\n if self.validate_move(algebraic_from,algebraic_to,board) is True:\n coordinate_list.append([i,j])\n\n return coordinate_list" ]
[ "0.75085545", "0.7238031", "0.72162753", "0.69097525", "0.68830097", "0.6834835", "0.6642358", "0.66276836", "0.6615933", "0.6517043", "0.6509062", "0.6507567", "0.6499198", "0.649662", "0.64962876", "0.6493829", "0.64880407", "0.6486416", "0.64808154", "0.6464777", "0.6401266", "0.63926727", "0.6366346", "0.6330355", "0.6305566", "0.6302528", "0.6300954", "0.6290923", "0.6284138", "0.62814903" ]
0.74447596
1
Tests whether pos is in valid_positions using the compare function in the Position class, which allows two distinct Position objects to be treated as equal. If it is, return the matching position object. Otherwise, return False.
def isInList(valid_positions, pos): assert isinstance(pos, Position) for position in valid_positions: if pos.compare(position): return position return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_position_exists(self, pos=None):\n if pos is None:\n pos = self.draw.position\n return (pos[0] in self.range_of_valid_coordinates) and (pos[1] in self.range_of_valid_coordinates)", "def move_is_valid(self, pos):\n\n if (not isinstance(pos, tuple) or len(pos) != 2 or \n not isinstance(pos[0], int) or not isinstance(pos[1], int)):\n return False\n y, x = pos\n if (y >= 0 and y < self.size and x >= 0 and x < self.size and \n self.board[pos] == HexBoard.EMPTY):\n return True\n else:\n return False", "def valid_position(self, new_coords: tuple) -> bool:\n x, y = new_coords\n min_allowed_value = self.offset\n max_allowed_value = 10 - self.offset\n\n # If the value is outside of the board on the left or up, return false\n if x < min_allowed_value or y < min_allowed_value:\n return False\n # If the value is outside of the board on the right or down sides, return false\n if x > max_allowed_value or y > max_allowed_value:\n return False\n\n # If the position is taken by any piece, return false\n if self.board[y][x] != 0:\n print(\"Error: Position taken by %d\" % self.board[y][x])\n return False\n return True", "def is_pos_valid(self, pos):\n if pos is None:\n pos = (0, 0)\n assert isinstance(pos, tuple)\n\n if self.grid_map[pos[0], pos[1]] in [self.empty_value, 0.7]:\n return True\n else:\n return False", "def validate_pos(game: TowerDefenceSolver, position: Tuple[int, int], purchases_list: Purchases) -> bool:\n if (\n position[0] < 0\n or position[1] < 0\n or position[0] >= game.map_height\n or position[1] >= game.map_width\n or position in game.path\n ):\n return False\n\n for purchase in purchases_list:\n if purchase[\"coords\"] == position:\n return False\n\n return True", "def __is_valid(self, pos):\n return 0 <= pos[0] < self._n and 0 <= pos[1] < self._n", "def valid(self, pos):\n\t\tpos = Point(pos)\n\t\treturn 0 <= pos.x < self.dims.width and 0 <= pos.y < self.dims.height", "def is_pos_valid(pos, shape):\n x, y = pos\n is_valid = x >= 0 and x < shape[0] and y >= 0 and y < shape[1]\n return is_valid", "def _position_validity_checker(position, start, n_elements):\n _assert_shape(position, (MaxDimension.value(), n_elements + 1), \"position\")\n\n # Check if the start position of the rod and first entry of position array are the same\n assert_allclose(\n position[..., 0],\n start,\n atol=Tolerance.atol(),\n err_msg=str(\n \"First entry of position\" + \" (\" + str(position[..., 0]) + \" ) \"\n \" is different than start \" + \" (\" + str(start) + \" ) \"\n ),\n )", "def check_position_for_same_occupancy(self, position1, position2):\n return self.board.board[position1] == self.board.board[position2]", "def _is_at_position(pose_1, pose_2, atol):\n # type: (typing.Union[PoseStamped, PositionTarget, Waypoint], typing.Union[PoseStamped, PositionTarget, Waypoint], float) -> bool\n\n pos = [np.zeros(3), np.zeros(3)]\n for i, pose in enumerate((pose_1, pose_2)):\n if isinstance(pose, PoseStamped):\n pos[i][:] = np.array([pose.pose.position.x, pose.pose.position.y, pose.pose.position.z])\n elif isinstance(pose, PositionTarget):\n pos[i][:] = np.array([pose.position.x, pose.position.y, pose.position.z])\n elif isinstance(pose, Waypoint):\n pos[i][:] = np.array([pose.x_lat, pose.y_long, pose.z_alt])\n else:\n raise Warning(\"Wrong type\")\n\n return all(np.isclose(pos[0], pos[1], atol=atol))", "def check_place(self, positions):\n return self.size == len(set(positions[i] + i for i in range(self.size))) == len(\n set(positions[i] - i for i in range(self.size)))", "def has_position(self):\n if 
'position' not in self.attrs:\n return False\n\n pos = self.position()\n if pos is None:\n return False\n\n if isinstance(pos, SharedPosition) and pos.value is None:\n return False\n\n return True", "def check_position_free(self, pos=None):\n if pos is None:\n pos = self.draw.position\n return self.board.board[pos] == 0", "def _is_same_position(pos1, pos2, position_tolerance):\n return np.isclose(_pos_distance(pos1, pos2), 0, atol=position_tolerance)", "def _is_valid_position(self, position):\n if type(position) is not int:\n raise TypeError\n\n if position > 9 or position < 1:\n raise ValueError\n\n #confirm position is open\n try:\n int(self.grid[position - 1])\n except ValueError:\n return False\n\n return True", "def validatePosition(boardsize, pos):\n return pos.x in range(0, boardsize) and pos.y in range(0,boardsize)", "def __posCheck ( self, posList ):\n\n #-- 1 --\n # [ if is-pos-list-valid(self.posSpecs) ->\n # self.__optx := as invariant\n # self.__repx := as invariant\n # self.__minPos := as invariant\n # self.__maxPos := as invariant\n # else ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution ]\n self.__validatePosList ( )\n\n #-- 2 --\n # [ if posList is a valid sequence of positionals as\n # specified by self.posSpec ->\n # self.posMap := as invariant from posList\n # else ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution ]\n self.__storePositionals ( posList )", "def find_valid_position(self, position: pygame.math.Vector2) -> bool:\n\n window_rect = self.ui_manager.get_root_container().rect\n\n if window_rect.contains(pygame.Rect(int(position[0]), int(position[1]), 1, 1)):\n self.rect.left = int(position.x)\n self.rect.top = int(position.y + self.hover_distance_from_target[1])\n\n if window_rect.contains(self.rect):\n self.relative_rect = self.rect.copy()\n self.text_block.set_position(self.rect.topleft)\n return True\n else:\n if self.rect.bottom > window_rect.bottom:\n self.rect.bottom = int(position.y - self.hover_distance_from_target[1])\n if self.rect.right > window_rect.right:\n self.rect.right = window_rect.right - self.hover_distance_from_target[0]\n if self.rect.left < window_rect.left:\n self.rect.left = window_rect.left + self.hover_distance_from_target[0]\n\n if window_rect.contains(self.rect):\n self.relative_rect = self.rect.copy()\n self.text_block.set_position(self.rect.topleft)\n return True\n else:\n self.relative_rect = self.rect.copy()\n warnings.warn(\"Unable to fit tool tip on screen\")\n return False\n else:\n self.relative_rect = self.rect.copy()\n warnings.warn(\"initial position for tool tip is off screen,\"\n \" unable to find valid position\")\n return False", "def can_see_position(life, pos, distance=True, block_check=False, strict=False, get_path=False, ignore_z=False):\n\tif tuple(life['pos'][:2]) == tuple(pos[:2]):\n\t\treturn [pos]\n\t\n\tif not ignore_z and len(pos) == 3:\n\t\tif not life['pos'][2] == pos[2]:\n\t\t\treturn []\n\t\n\tif get_path or not 'player' in life:\n\t\treturn _can_see_position(life['pos'], pos, max_length=get_vision(life), block_check=block_check, strict=strict, distance=distance)\n\n\tif is_in_fov(life, pos):\n\t\treturn True\n\t\n\treturn False", "def check_positions(d, positions, player):\n contents = [d[y][x] for x, y in positions]\n contents = ''.join(contents) # e.g. 
'XXO.'\n if contents == player * 4:\n return True", "def valid(self, pos):\n return self.m.shape[0] > pos[0] >= 0 and self.m.shape[1] > pos[1] >= 0 and self.m[pos] == 0", "def __equals__(self, to_compare):\n try:\n # Try to compare - this likely fails when it is compared to a non\n # Position object\n return \\\n (self.latitude_deg == to_compare.latitude_deg) and \\\n (self.longitude_deg == to_compare.longitude_deg) and \\\n (self.absolute_altitude_m == to_compare.absolute_altitude_m) and \\\n (self.relative_altitude_m == to_compare.relative_altitude_m)\n\n except AttributeError:\n return False", "def has_positions(self):\n return self.positions.exists()", "def position_is_valid(x1, y1, z1, x2, y2, z2, degXY_1, degYZ_1, degXY_2, degYZ_2, user_rand):\n\n # return max X,Y,Z locations from all the atoms in vecs\n def get_max_XYZ(vecs):\n return max(vecs, key=lambda v: v[0])[0], max(vecs, key=lambda v: v[1])[1], max(vecs, key=lambda v: v[2])[2]\n\n # return min X,Y,Z locations from all the atoms in vecs\n def get_min_XYZ(vecs):\n return min(vecs, key=lambda v: v[0])[0], min(vecs, key=lambda v: v[1])[1], min(vecs, key=lambda v: v[2])[2]\n\n # get the atoms of the first protein after moving it in x1,y1,z1\n vecs1 = get_atoms('media/files/' + user_rand + '/' + '_1_.pdb')\n translate_vecs(x1, y1, z1, vecs1)\n rotate_molecular(x1, y1, z1, degXY_1, degYZ_1, vecs1)\n\n # get the atoms of the second protein after moving it in x2,y2,z2\n vecs2 = get_atoms('media/files/' + user_rand + '/' + '_2_.pdb')\n translate_vecs(x2, y2, z2, vecs2)\n rotate_molecular(x2, y2, z2, degXY_2, degYZ_2, vecs2)\n\n maxX1, maxY1, maxZ1 = get_max_XYZ(vecs1)\n maxX2, maxY2, maxZ2 = get_max_XYZ(vecs2)\n\n minX1, minY1, minZ1 = get_min_XYZ(vecs1)\n minX2, minY2, minZ2 = get_min_XYZ(vecs2)\n\n dist = 1\n\n # check overlap in axis X, axis Y and axis Z\n resultX = (maxX1 + dist) >= minX2 and (maxX2 + dist) >= minX1\n resultY = (maxY1 + dist) >= minY2 and (maxY2 + dist) >= minY1\n resultZ = (maxZ1 + dist) >= minZ2 and (maxZ2 + dist) >= minZ1\n\n # check overlap of whole \"boxes\" of proteins\n isOverlap = resultX and resultY and resultZ\n\n return not isOverlap", "def pos_updated(self,next_pos):\n #if (int(self.oldx) == int(self.x) and int(self.oldy) == int(self.y)):\n if (int(next_pos[0]) == int(self.x) and int(next_pos[1]) == int(self.y)):\n return False\n else:\n return True", "def isPositionValid(self, x, y):\n if x >= self._width:\n return False\n if y >= self._height:\n return False\n if x < 0:\n return False\n if y < 0:\n return False\n return not (x, y) in self._invalidPositions", "def calc_is_new_position(self, game_state: dict):\n current_position = game_state['self'][3]\n if current_position in self.positions:\n return False\n else:\n return True", "def placeQueen(valid_positions, pos, boardsize):\n if isInList(valid_positions, pos): #IS a valid position!\n invalid_positions = generateQueenAttacks(boardsize, pos)\n #update valid_positions\n for pos in invalid_positions:\n temp = isInList(valid_positions, pos)\n if temp:\n valid_positions.remove(temp)\n return True\n return False", "def check_position_for_same_colour(self, position1, position2):\n return (not self.check_position_free(position1)) and self.check_position_for_same_occupancy(position1, position2)" ]
[ "0.6746193", "0.66301435", "0.6584429", "0.65756834", "0.6544668", "0.65213627", "0.64284897", "0.64258605", "0.6366852", "0.63338536", "0.62852615", "0.62646544", "0.62159127", "0.6182148", "0.61781657", "0.6178161", "0.6168304", "0.61011946", "0.6100861", "0.6086811", "0.60799366", "0.6071242", "0.6063363", "0.60435814", "0.6033652", "0.6022894", "0.5985254", "0.5981748", "0.59768414", "0.5971566" ]
0.67221254
1
Tries to place a queen in the position that is passed. Calls isInList to help it decide. Returns True if the queen was successfully placed, False otherwise.
def placeQueen(valid_positions, pos, boardsize): if isInList(valid_positions, pos): #IS a valid position! invalid_positions = generateQueenAttacks(boardsize, pos) #update valid_positions for pos in invalid_positions: temp = isInList(valid_positions, pos) if temp: valid_positions.remove(temp) return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve(board, N):\n # when all the N queens are placed \n if N == 0:\n return True\n\n # cal dimension of board \n n = len(board[0])\n for i in range(n):\n for j in range(n):\n attacked = is_attacked(i, j, board) # checking if (i,j) is safe to place the queen \n if attacked == False:\n board[i][j] = 1 # place the queen at location (i,j)\n if solve(board, N-1): # proceed ahead with placing other queens \n return True\n else:\n board[i][j] = 0 # if could not place all the queens, changing our decision and\n # moving to other positions \n return False", "def recursiveQueen(row, validList, queensLeft, boardsize, sol_stack):\n #Internal helper functions\n def cleanup(pushed):\n for i in range(0,pushed):\n sol_stack.pop()\n def print_solution():\n printingList = sol_stack[-1*boardsize:] #slice for last 5 solutions\n print \", \".join(str(printingList[i]) for i in range(0, boardsize))\n fancyprint(printingList, boardsize)\n\n #Are we even on the board? Do we even have squares left to test?\n if row > boardsize:\n return False\n elif len(validList) == 0:\n return False\n\n #Save board state \n copiedList = copy.deepcopy(validList)\n pushed = 0\n for col in range(0, boardsize):\n if placeQueen(copiedList, Position(row, col), boardsize):\n sol_stack.append(Position(row,col))\n pushed += 1\n queensLeft = queensLeft - 1\n if queensLeft == 0: \n print \"Solution:\"\n print_solution()\n cleanup(pushed)\n return True\n else: #keep going\n failed = recursiveQueen(row+1, copiedList, queensLeft, boardsize, sol_stack)\n if not failed: #reset list to last case, try next row\n copiedList = copy.deepcopy(validList)\n queensLeft += 1\n pushed -= 1\n sol_stack.pop()\n # do we still have leftovers? clean them up\n cleanup(pushed)\n return False", "def add_queen(self, somerow, somecol):\n if self.is_valid_move(somerow, somecol):\n new_state = copy.deepcopy(self.board)\n new_state[somerow][somecol] = 1\n self.num_queens_placed += 1\n self.board = new_state", "def queen():\n game_queen = Queen(Color.BLACK)\n game_queen.coords = Coords(x=7, y=4)\n return game_queen", "def solve_util(self, board, col):\n try:\n if col == self.N:\n self.print_sol(board)\n return True\n\n # Trying to place this queen in all rows one by one\n res = False\n for i in range(self.N):\n if self.is_safe(board, i, col):\n board[i][col] = 1\n res = self.solve_util(board, col + 1) or res\n if type(res) == dict:\n return res\n board[i][col] = 0 # Backtracking...\n\n # if queen cannot be placed in any row in this col, then alas\n # we return false..\n return res\n except KeyboardInterrupt:\n print('Keyboard Interrupted!')\n return self.Outputs", "def queue(self, irc, msg, args, notice):\n pos = self._find_in_queue(msg.nick)\n QUEUE_SLOTS = self.registryValue('queueSlots')\n if pos < 0:\n if QUEUE_SLOTS >= 0 and self._count >= QUEUE_SLOTS:\n irc.reply(\"Sorry, but the queue is out of slots\")\n return\n self._queue.append((msg.nick, notice))\n self._count += 1\n irc.reply(\"I queued you at position %s in the queue\" % len(self._queue))\n self._dump_queue()\n elif self._queue[pos][1] != notice:\n self._queue[pos] = (msg.nick, notice)\n irc.reply(\"You're queued at position %s already, I've updated \"\\\n \"notice to '%s'\" % (pos + 1, notice))\n self._dump_queue()\n else:\n irc.reply(\"You're already in the queue at position %s.\" % (pos+1))", "def enQueue(self, value: int) -> bool:\n q, k, front, rear, empty = self.q, self.k, self.front, self.rear, self.empty\n\n if self.isFull():\n return False\n\n if front == rear and self.empty: # if the 
queue is full now, front will match rear\n empty = False\n\n q[rear] = value\n rear = (rear + 1) % k\n\n self.rear, self.empty = rear, empty\n return True", "def solveNQUtil(board, col, n):\n\n if col >= n:\n return True\n\n \"\"\" Iter by col tryng to place the Queen row by row\"\"\"\n for i in range(n):\n if isSafe(board, i, col):\n board[i][col] = 1\n if solveNQUtil(board, col+1) == True:\n return True\n board[i][col] = 0\n return False", "def _can_place(self, loc):\n non_blocks = [_AIR, _WATER, _LAVA]\n player = [self._pos, self._pos + _Vec3(0, 1, 0)]\n for dir_ in _adj_dirs + [_Vec3(0, 1, 0), _Vec3(0, -1, 0)]:\n new_loc = loc + dir_\n if new_loc not in player and self._get_block(new_loc) \\\n not in non_blocks:\n return True\n return False", "def is_queen_attacked(x1, y1, x2, y2):\n attacked_fields = queen_attack(x1, y1)\n second_queen_position = [x2, y2]\n if second_queen_position in attacked_fields:\n return True\n return False", "def generateQueenAttacks(boardsize, pos):\n assert isinstance(pos, Position) and validatePosition(boardsize, pos)\n attackList = []\n startPos = Position(pos.x, pos.y)\n \n def addAttackList(pos):\n for attacked in attackList:\n if pos.compare(attacked):\n return\n attackList.append(Position(pos.x, pos.y))\n\n #positive x\n while pos.x < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #positive y\n while pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #negative x\n while pos.x >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #negative y\n while pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal -x +y left bottom\n while pos.x >= 0 and pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal -x -y left top\n while pos.x >= 0 and pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x - 1\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal +x +y right bottom\n while pos.x < boardsize and pos.y < boardsize:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.y = pos.y + 1\n pos.x = startPos.x\n pos.y = startPos.y\n \n #diagonal +x -y right top\n while pos.x < boardsize and pos.y >= 0:\n addAttackList(Position(pos.x, pos.y))\n pos.x = pos.x + 1\n pos.y = pos.y - 1\n pos.x = startPos.x\n pos.y = startPos.y\n\n return attackList", "def is_queen_move_valid(self, from_row, from_col, to_row, to_col):\n # if not on same colored diagonal\n if abs(from_row - to_row) != abs(from_col - to_col):\n # if on same col? 
(like rook)\n if from_row != to_row and (from_col == to_col):\n dc = 0\n dr = 1 if to_row - from_row > 0 else -1\n # elif on same row?\n elif from_col != to_col and (from_row == to_row):\n dr = 0\n dc = 1 if to_col - from_col > 0 else -1\n else:\n # if not on same col or row\n return False\n else:\n # on same colored diagonal (moves like bishop)\n dr = 1 if to_row - from_row > 0 else -1\n dc = 1 if to_col - from_col > 0 else -1\n\n # check if any pieces are in the way of destination\n dm = abs(to_row - from_row)\n return self._any_piece_in_way(from_row, from_col, dr, dc, dm, toRow=to_row, toCol=to_col)", "def is_valid_move(self, somerow, somecol):\n bool_1 = self.board[somerow][somecol] != 1\n bool_2 = self.num_queens_placed < self.size \n bool_3 = self.attack(somerow, somecol)\n return bool_1 and bool_2 and bool_3", "def enQueue(self, value):\n if self.rear - self.front < self.size:\n self.queue.append(value)\n self.rear += 1\n return True\n else:\n return False", "def can_attack(self, aq: object) -> bool:\n if self.row == aq.row and self.column == aq.column:\n raise ValueError(\"Same queen\")\n return (self.row == aq.row\n or self.column == aq.column\n or self.row - self.column == aq.row - aq.column\n or self.row + self.column == aq.row + aq.column)", "def _place(player, size, length, number):\n\n if number == 0:\n return\n name = player.name\n ship = player.board.fleet.name_ship(length)\n print \"\\n%s, please place your %s. (Length: %s)\\n\" % (name, ship, length)\n\n player.board.display(True)\n\n coord = man_coord(size)\n x = coord[0]\n y = coord[1]\n direct = man_dir()\n\n if player.board.check(x, y, length, direct) is True:\n name = player.fleet.name_ship(length)\n player.fleet.add_ship(name, x, y, length, direct)\n return _place(player, size, length, number - 1)\n print \"\\nSorry, that ship won't fit, please try again.\"\n return _place(player, size, length, number)", "def place_marker(self, player, position):\n\n assert player in [1,2]\n assert position[0] in range(10)\n assert position[1] in range(10)\n\n if position in self.board.keys():\n return False\n \n self.board[position] = player\n return True", "def test_comp_place_piece():\n board = Board(640, 640, 8)\n black_piece = GamePiece(0, 0, BLACK, 0)\n white_piece = GamePiece(0, 0, WHITE, 0)\n board.start_game()\n\n board.game_pieces[3][3] = None\n board.game_pieces[3][4] = None\n board.game_pieces[4][3] = None\n board.game_pieces[4][4] = None\n board.comp_place_piece()\n assert board.previous_no_moves is True and board.current_color == BLACK\n\n board.game_pieces[3][3] = white_piece\n board.game_pieces[3][4] = black_piece\n board.game_pieces[4][3] = black_piece\n board.game_pieces[4][4] = white_piece\n num_pieces = len(board.used_points)\n board.comp_place_piece()\n assert board.previous_no_moves is False and len(board.used_points) == num_pieces + 1", "def test_place_piece():\n board = Board(640, 640, 4)\n board.start_game()\n board.place_piece(board.SPACE_SIZE/2, 0)\n assert len(board.used_points) == 4", "async def train_queen(self):\n if (\n self.structures(UnitTypeId.SPAWNINGPOOL).ready\n and len(self.units(UnitTypeId.QUEEN)) < len(self.townhalls)\n and self.already_pending(UnitTypeId.QUEEN) < len(self.townhalls.ready)\n ):\n self.train(UnitTypeId.QUEEN)", "def push(self, element):\n if not self.full():\n heapq.heappush(self.queue, element)\n self.size += 1\n return True\n else:\n if element >= self.queue[0]:\n heapq.heapreplace(self.queue, element)\n return True\n else:\n return False", "def enQueue(self, value):\n if 
not self.isFull():\n self.queue.append(value)\n return True\n else:\n return False", "def __can_enter(self, position, traversed):\n row, col = position\n # Check index values\n if row < 0 or col < 0:\n return False\n if row >= self.__row_count or col >= self.__col_count:\n return False\n # Check if already traversed\n if traversed[row][col]:\n return False\n # Check if blocked\n if self.__grid[row][col].blocked:\n return False\n return True", "def place(self, val):\n row, col = self.selected\n if self.cubes[row][col].value == 0:\n self.cubes[row][col].set_value(val)\n self.update_model()\n\n if valid_move(self.model, val, (row, col)) and solve(self.model):\n return True\n else:\n self.cubes[row][col].set_value(0)\n self.cubes[row][col].set_temp(0)\n self.update_model()\n return False", "def enQueue(self, value):\r\n if (len(self.queue) >= self.maxlen):\r\n return False\r\n else:\r\n self.queue.append(value)\r\n return True", "def enQueue(self, value):\n \n if not self.isFull():\n if self.start == -1:\n self.start = 0\n self.end = (self.end + 1) % self.max_length\n self.queue[self.end] = value\n return True\n else:\n return False", "def queencalc(queen):\n for x in range(size):\n \"\"\"horizontal board positions per queen\"\"\"\n nextx = 0\n for y in range(queen):\n qx = queens[y]\n if x == qx or x + queen == qx + y or x - queen == qx - y:\n nextx = 1\n break\n if nextx == 1:\n nextx == 0\n continue\n if queen != size - 1:\n queens[queen + 1] = 0\n queens[queen] = x\n queencalc(queen + 1)\n else:\n queens[queen] = x\n printsolution(queens)", "def solve_nqueens(n, board, col):\n status = False\n if col == n:\n print_solutions(board)\n return True\n for row in range(n):\n if valid(n, board, row, col):\n board[row][col] = 1\n status = solve_nqueens(n, board, col + 1) or status\n board[row][col] = 0\n return status", "def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n self.rear += 1\n return True\n else:\n return False", "def can_move_up(self, index):\n # If the index of the '0' tile is in the top-row then we cannot do the action\n if index in range(0, self.puzzle_width):\n return False\n return True" ]
[ "0.6420456", "0.6292266", "0.6042101", "0.57444483", "0.5711553", "0.569694", "0.5641004", "0.5552675", "0.55362207", "0.54819834", "0.53560907", "0.53097683", "0.52483106", "0.5243161", "0.5211949", "0.519052", "0.516914", "0.51623195", "0.51597553", "0.5157929", "0.51412994", "0.5133853", "0.512805", "0.5103961", "0.5088049", "0.50874406", "0.50807965", "0.5078273", "0.50762886", "0.5052415" ]
0.73797727
0
The function returns a pandas dataframe for the input channel artifacts. In File mode, AWS SageMaker copies all files from the S3 bucket into the train, test, and validation "channels" when starting your container. This function takes the channel name and merges all CSV files in that channel into a single dataframe for reading.
def input_as_dataframe(self, channel='training'): data_directories = { 'training', 'validation', 'testing' } if channel in data_directories: csv_files = glob.glob(os.path.join(f'/opt/ml/input/data/{channel}/*.csv')) print(f'Files in {channel} directory: {csv_files}') # loop over the list of csv files fileBytes = [] for f in csv_files: # read the csv file df = pd.read_csv(f) fileBytes.append(df) frame = pd.concat(fileBytes, axis=0, ignore_index=True) return frame else: raise ValueError('Incorrect data channel type. Options are training, validation, and testing.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_train_data(spark_session, sample_size, channels, sample_prob=1, normalize_class_distribution=False, seed=42):\n filename = \"train_{}{}{}_updated.parquet\"\n train_df = read_data(spark_session, filename, sample_size, channels, sample_prob, normalize_class_distribution, seed)\n return train_df", "def loader():\n bucket = data_load_variables[\"bucket\"]\n\n if data_load_variables[\"use_lite_dataset\"]:\n dataset_name = data_load_variables[\"lite_dataset_name\"]\n else:\n dataset_name = data_load_variables[\"dataset_name\"]\n\n s3 = boto3.client('s3')\n\n obj = s3.get_object(Bucket=bucket, Key=dataset_name)\n # get object and file (key) from bucket\n\n df = pd.read_csv(obj['Body'])\n return df", "def _read_csv(self) -> pd.DataFrame:\n\n return pd.concat(\n [\n pd.read_csv(f, usecols=[1, 2, 3, 4, 5])\n for f in self.temp_path.iterdir()\n if f.name.endswith(\".csv\")\n ]\n )", "def data_frame_creator(self):\n\n rgb_dir = [\n self.dataset_address + sequence_f + rgb_f\n for rgb_f in self.rgb_folder for sequence_f in self.sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_address + sequence_f + depth_f\n for depth_f in self.depth_folder\n for sequence_f in self.sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_address + sequence_f + segmentation_f\n for segmentation_f in self.segmentation_folder\n for sequence_f in self.sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1)\n\n return pd.DataFrame(dataset)", "def run_pipeline(directory):\n\n # io = IO(path)\n # df = io.load_cleaned_file(download_always=False)\n # df = add_choke_events(df)\n\n # Add calls to features.Xxx here\n\n #directory = main_directory\n site=os.listdir(directory)\n site_dicom={}\n site_dicom_sub={}\n site_sub_files={}\n i,k,j=0,0,0\n for filename in site:\n site_dicom[i]=directory+'/'+filename+'/DICOM-raw'\n temporary_path=os.listdir(site_dicom[i])\n\n for another_file in temporary_path:\n site_dicom_sub[j]=site_dicom[i]+'/'+another_file+'/scans'\n temporary_path_1 = os.listdir(site_dicom_sub[j])\n for another_file_1 in temporary_path_1:\n site_sub_files[k]=site_dicom_sub[j]+'/'+another_file_1+'/'\n k=k+1\n j = j + 1\n i=i+1\n splitted={}\n output_mif={}\n for i in range (len(site_sub_files)):\n splitted[i]=site_sub_files[i].split('/')\n output_mif[i]=directory+'/'+splitted[i][5]+'/MIF-raw/'+splitted[i][5]+'_'+splitted[i][7]+'_'+splitted[i][9]+'.mif'\n\n\n # save (or return) dataframe here?\n return site_sub_files,output_mif", "def format_streamflows(data_dir):\n\n search_expr = data_dir + \"/*.json\"\n\n df = pd.DataFrame()\n\n for json_file in glob.glob(search_expr):\n\n with open(json_file, 'r') as fn:\n data = json.load(fn)\n\n try:\n data = json2dataframe(data)\n\n new_df = data[0]\n new_df = new_df.drop(columns=['dateTime', 'qualifiers'])\n new_df = new_df.rename(columns={'value': data[1]})\n df = pd.concat([df, new_df], axis=1)\n\n except IndexError, e:\n print 'Error:', e\n continue\n\n return df", "def convert_to_dataframe(target_dir):\n\n def _parse_source_option(soruce: str):\n \"\"\"\n parse source option like follows\n 
fbdb_metric-balance_norm-l2_basis-0031_size-0032_cls-0496_standard\n \"\"\"\n parsed_source = dict()\n split_by_underbar = soruce.split('_')\n\n if len(split_by_underbar) == 0:\n return pd.Series()\n\n for term in split_by_underbar:\n split_by_dash = term.split('-')\n if split_by_dash[0] == 'metric':\n parsed_source['source.metric'] = split_by_dash[1]\n elif split_by_dash[0] == 'basis':\n parsed_source['source.num_basis'] = int(split_by_dash[1])\n\n parsed_source['source.aug'] = split_by_underbar[-1]\n\n return pd.Series(parsed_source)\n\n def _parse_config(test_path: str):\n \"\"\"\n \"\"\"\n DROP_CONFIGS = [\n \"normalize\",\n \"num_workers\",\n \"gpus\",\n \"prefix\",\n \"savedir\",\n \"num_nodes\",\n \"distributed_backend\",\n \"checkpoint_monitor\",\n \"checkpoint_mode\",\n \"unfreeze_params\",\n \"online_logger.name\",\n \"online_logger.activate\",\n \"dataset.mean\",\n \"dataset.std\",\n \"dataset.num_classes\",\n \"dataset.input_size\",\n \"optimizer.name\",\n \"optimizer.lr\",\n \"optimizer.momentum\",\n \"optimizer.weight_decay\",\n \"scheduler.name\",\n \"scheduler.milestones\",\n \"scheduler.gamma\",\n \"resume_ckpt_path\"\n ]\n\n train_config_path = os.path.join(test_path, \"..\", \"train\", \"config.yaml\")\n transfer_config_path = os.path.join(test_path, \"..\", \"transfer\", \"config.yaml\")\n\n if os.path.exists(train_config_path):\n loaded_cofig = OmegaConf.load(train_config_path)\n elif os.path.exists(transfer_config_path):\n loaded_cofig = OmegaConf.load(transfer_config_path)\n else:\n print('\"config.yaml\" for \"{}\" is not found.'.format(test_path))\n return None\n\n series_config = pd.Series()\n for k, v in OmegaConf.to_container(loaded_cofig).items():\n\n if type(v) == dict:\n for kk, vv in v.items():\n if type(vv) == dict:\n raise ValueError(\"Nest of config is too deep.\")\n else:\n series_config[\"{k}.{kk}\".format(k=k, kk=kk)] = vv\n else:\n series_config[k] = v\n\n if (k == 'source') and (v is not None):\n series_config = series_config.append(_parse_source_option(v))\n\n return series_config.drop(labels=DROP_CONFIGS, errors='ignore')\n\n def _parse_acc(test_path: str):\n \"\"\"\n \"\"\"\n DROP_CONFIGS = ['advacc1', 'advacc5', 'time stamp']\n acc_path = os.path.join(test_path, \"acc\", \"local_log.csv\")\n\n if os.path.exists(acc_path):\n loaded_acc = pd.read_csv(acc_path, index_col=0)\n else:\n print('\"{}/acc/local_log.csv\" is not found.'.format(test_path))\n return None\n\n return loaded_acc.drop(DROP_CONFIGS, axis=1)\n\n def _parse_corruption(test_path: str):\n \"\"\"\n \"\"\"\n corruption_path = os.path.join(test_path, \"corruption\", \"corruption_result.csv\")\n corruption_img_path = os.path.join(test_path, \"corruption\", \"plot_result.png\")\n\n if os.path.exists(corruption_path):\n loaded_acc = pd.read_csv(corruption_path, index_col=0)\n mean_acc = pd.DataFrame([loaded_acc.mean()]).rename(columns={'accuracy': 'coracc'})\n print(mean_acc)\n else:\n print('\"{}/corruption/corruption_result.csv\" is not found.'.format(test_path))\n return None\n\n if os.path.exists(corruption_img_path):\n mean_acc['corruption'] = os.path.abspath(corruption_img_path)\n else:\n print('\"{}/corruption/plot_result.png\" is not found.'.format(test_path))\n return None\n\n return mean_acc\n\n def _parse_fourier(test_path: str):\n \"\"\"\n \"\"\"\n fourier_path = os.path.join(test_path, \"fourier\", \"fhmap.png\")\n\n if os.path.exists(fourier_path):\n parsed_fhmap = pd.DataFrame(\n [pd.Series([os.path.abspath(fourier_path)], index=[\"fhmap\"])]\n )\n else:\n 
print('\"{}/fourier/fhmap.png\" is not found.'.format(test_path))\n return None\n\n return parsed_fhmap\n\n def _parse_spacial(test_path: str):\n \"\"\"\n \"\"\"\n spacial_path = os.path.join(test_path, \"spacial\", \"plot.png\")\n\n if os.path.exists(spacial_path):\n parsed_spacial = pd.DataFrame(\n [pd.Series([os.path.abspath(spacial_path)], index=[\"spacial\"])]\n )\n else:\n print('\"{}/spacial/plot.png\" is not found.'.format(test_path))\n return None\n\n return parsed_spacial\n\n def _parse_layer(test_path: str):\n \"\"\"\n \"\"\"\n layer_path = os.path.join(test_path, \"layer\", \"first_layer_weight.png\")\n\n if os.path.exists(layer_path):\n parsed_layer = pd.DataFrame(\n [pd.Series([os.path.abspath(layer_path)], index=[\"layer\"])]\n )\n else:\n print('\"{}/layer/first_layer_weight.png\" is not found.'.format(test_path))\n return None\n\n return parsed_layer\n\n def _parse_sensitivity(test_path: str):\n \"\"\"\n \"\"\"\n map_paths = glob.glob(os.path.join(test_path, 'sensitivity', '*000.png'))\n\n if len(map_paths) >= 1:\n parsed_sensitivity = pd.DataFrame(\n [pd.Series([os.path.abspath(map_paths[0])], index=[\"sensitivity\"])]\n )\n else:\n print('\"{}/sensitivity/*000.png\" is not found.'.format(test_path))\n return None\n\n return parsed_sensitivity\n\n # main function from HERE\n if not os.path.exists(target_dir):\n raise FileNotFoundError('target_dir: \"{}\" is not found'.format(target_dir))\n\n test_paths = sorted(glob.glob(os.path.join(target_dir, \"**\", \"test\"), recursive=True))\n df = pd.DataFrame()\n\n for test_path in test_paths:\n df_test = pd.DataFrame()\n\n # parse config\n parsed_config = _parse_config(test_path)\n if parsed_config is not None:\n df_test = df_test.append([parsed_config], ignore_index=True, sort=False)\n\n # parse acc\n parsed_acc = _parse_acc(test_path)\n if parsed_acc is not None:\n df_test = pd.concat([df_test, parsed_acc], axis=1)\n\n # parse corruption\n parsed_corruption = _parse_corruption(test_path)\n if parsed_corruption is not None:\n df_test = pd.concat([df_test, parsed_corruption], axis=1)\n\n # parse fourier\n parsed_fhmap = _parse_fourier(test_path)\n if parsed_fhmap is not None:\n df_test = pd.concat([df_test, parsed_fhmap], axis=1)\n\n # parse spacial\n parsed_spacial = _parse_spacial(test_path)\n if parsed_spacial is not None:\n df_test = pd.concat([df_test, parsed_spacial], axis=1)\n\n # parse fist layer\n parsed_layer = _parse_layer(test_path)\n if parsed_layer is not None:\n df_test = pd.concat([df_test, parsed_layer], axis=1)\n\n # parse sensitivity\n parsed_sensitivity = _parse_sensitivity(test_path)\n if parsed_sensitivity is not None:\n df_test = pd.concat([df_test, parsed_sensitivity], axis=1)\n\n # append to global df\n df = df.append(df_test, ignore_index=True, sort=False)\n\n return df", "def process(self, inputs):\n df = cudf.read_csv(self.conf['path'])\n # extract the year, month, day\n ymd = df['DTE'].astype('str').str.extract(r'(\\d\\d\\d\\d)(\\d\\d)(\\d\\d)')\n # construct the standard datetime str\n df['DTE'] = ymd[0].str.cat(ymd[1],\n '-').str.cat(ymd[2],\n '-').astype('datetime64[ms]')\n df = df[['DTE', 'OPEN', 'CLOSE', 'HIGH', 'LOW', 'SM_ID', 'VOLUME']]\n df['VOLUME'] /= 1000\n # change the names\n df.columns = ['datetime', 'open', 'close',\n 'high', 'low', \"asset\", 'volume']\n return df", "def process_group(directory: str, files: dict, channel: str, year: str) -> dict:\n if len(files) == 0:\n raise Exception('empty file list for directory {}'.format(directory)) + 1\n\n dataframes = {}\n for name, ifile in 
files.items():\n # equivalent of hadding\n update_dfs = uproot.pandas.iterate(ifile, f'{channel}_tree')\n current_dfs = []\n for update_df in update_dfs:\n update_df.fillna(-999, inplace=True)\n current_dfs.append(update_df)\n \n if len(current_dfs) > 0:\n dataframes[name] = pd.concat(current_dfs)\n\n dataframes['metadata'] = pd.DataFrame({'channel': [channel], 'year': [year]})\n return dataframes", "def all_channel_video_data(self, channel_list, limit=50, vid_part='snippet', output_path='./', \\\n error_file_name='../status/errors.txt'):\n \n # chnl_details_file = open(output_path+\"channel_details.csv\", \"a\")\n # os.makedirs(output_path, exist_ok=True)\n dnload = partial(self.single_channel_video_data, limit, vid_part, output_path)\n l = mp.Lock()\n pool = mp.Pool(initializer=self.init, initargs=(l, ), processes=4)\n pool.map(dnload, channel_list)\n pool.close()\n # pool.join()\n\n # for i, chanlid in enumerate(channel_list):\n # print(\"index: \", i, \" : \", chanlid)\n # all_result={}\n # print(\"finding vidids\")\n # result = self.playlist([chanlid],limit)\n # print(\"finding channel meta\")\n # all_result.update({chanlid: self.get_video_details(result[chanlid], part=vid_part)})\n # print(\"doing json dump\")\n # json.dump(all_result, chnl_details_file)\n # chnl_details_file.write(\"\\n\")\n # return all_result", "def Get_datasets(**kwargs):\n from .utils import option_printer, get_conn, get_param_dict, get_logger_instance\n from .cohort_tables import make_target_comp_tables\n from .table2rawseq import table_to_rawseq\n from .rawseq2multihot import rawseq_to_multihot\n from .multihot2datasets import multihot_to_datasets\n import os, logging\n from importlib import reload\n \n ## get params\n param_dict = get_param_dict(kwargs['DS_PARAMS_FILE_NAME'], kwargs['CONFIG_FOLDER_PATH'])\n param_dict.update(kwargs)\n if not os.path.exists(param_dict['DATA_FOLDER_PATH']): os.makedirs(param_dict['DATA_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['CDM_DB']\n \n param_dict['DUMPING_PATH'] = os.path.join(param_dict['RESULT_FOLDER_PATH'], \n param_dict['PROJECT_NAME'], \n param_dict['CDM_DB_NAME'])\n if not os.path.exists(param_dict['DUMPING_PATH']): \n os.makedirs(param_dict['DUMPING_PATH'])\n \n if param_dict['PIPELINE_START_LEVEL']<3:\n param_dict['DB_CONN'], CDM_DB_NAME, RESULT_DB_NAME = get_conn(param_dict['DB_CONN_FILENAME'], \n param_dict['CONFIG_FOLDER_PATH'])\n param_dict['CDM_DB_NAME'] = CDM_DB_NAME\n param_dict['RESULT_DB_NAME'] = RESULT_DB_NAME\n else:\n param_dict['RESULT_DB_NAME'] = get_param_dict(kwargs['DB_CONN_FILENAME'], kwargs['CONFIG_FOLDER_PATH'])['RESULT_DB']\n \n ## logger\n logging.shutdown()\n reload(logging)\n main_logger = get_logger_instance(logger_name='ds_pipeline', \n DUMPING_PATH=param_dict['DUMPING_PATH'], \n parent_name=False,\n stream=True)\n \n ## print params\n main_logger.info(\"\\n (params) \\n\")\n try: option_printer(main_logger, param_dict['DB_CONN'], **param_dict)\n except: pass\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [1] Make_target_comp_tables\n if param_dict['PIPELINE_START_LEVEL']<=1:\n main_logger.info(\"\\n[Level 1] Make_TARGET_COMP_tables\\n\")\n make_target_comp_tables(**param_dict)\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [2] Table to rawSeq\n if param_dict['PIPELINE_START_LEVEL']<=2:\n main_logger.info(\"\\n[Level 2] Table to rawSeq\\n\")\n table_to_rawseq(param_dict['DUMPING_PATH'], \n param_dict['DB_CONN'], param_dict['CDM_DB_NAME'], \n 
param_dict['DATA_FOLDER_PATH'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [3] rawSeq to multihot\n if param_dict['PIPELINE_START_LEVEL']<=3:\n main_logger.info(\"\\n[Level 3] Convert to multihot\\n\")\n rawseq_to_multihot(param_dict['DUMPING_PATH'], \n param_dict['DATA_FOLDER_PATH'], param_dict['MAX_TIME_STEP'], \n param_dict['DX_ONLY'])\n main_logger.info(\"=\"*100 + \"\\n\")\n \n ## [4] Multihot to Dataset\n if param_dict['PIPELINE_START_LEVEL']<=4:\n main_logger.info(\"\\n[Level 4] Multihot to Dataset\\n\")\n datasets = multihot_to_datasets(param_dict['DUMPING_PATH'], \n param_dict['DATA_FOLDER_PATH'], param_dict['TR_RATIO'])\n \n #add info\n if param_dict['PIPELINE_START_LEVEL']<3: \n datasets.info['DB_CONN'] = param_dict['DB_CONN']\n datasets.info['CONFIG_FOLDER_PATH'] = param_dict['CONFIG_FOLDER_PATH']\n datasets.info['DATA_FOLDER_PATH'] = param_dict['DATA_FOLDER_PATH']\n datasets.info['RESULT_FOLDER_PATH'] = param_dict['RESULT_FOLDER_PATH']\n datasets.info['DB_CONN_FILENAME'] = param_dict['DB_CONN_FILENAME']\n datasets.info['DS_PARAMS_FILE_NAME'] = param_dict['DS_PARAMS_FILE_NAME']\n datasets.info['CDM_DB_NAME'] = param_dict['CDM_DB_NAME']\n datasets.info['RESULT_DB_NAME'] = param_dict['RESULT_DB_NAME']\n \n main_logger.info(\"\\n[Datasets Info.]\\n\")\n main_logger.info(\"{0:>26} {1:}\".format('[OPTION]', '[VALUE]'))\n for k in sorted(datasets.info.keys()):\n main_logger.info(\" {0:>23}: {1:}\".format(k, datasets.info[k]))\n \n #print(\"\\nALL DONE!!\")\n main_logger.info(\"\\n[ALL DONE!!]\\n\\n\")\n for h in list(main_logger.handlers):\n main_logger.removeHandler(h)\n h.flush()\n h.close()\n return datasets", "def directory_to_df(path,filetype = '.csv',ignore_index = True):\n filenames = []\n file_column = []\n frames = []\n test_index = 1\n for filename in os.listdir(path):\n if filetype in filename:\n curr_df = pd.read_csv(path+filename)\n frames.append(curr_df)\n filenames.append(filename.replace(filetype,''))\n for i in range(curr_df.shape[0]):\n file_column.append(test_index)\n test_index+=1\n \n df = pd.concat(frames,ignore_index = ignore_index)\n df['files'] = file_column\n return df, filenames", "def get_data(config_path):\n config = read_params(config_path)\n data_path = config[\"data_source\"][\"s3_source\"]\n df = pd.read_json(data_path, lines=True, orient='str')\n return df", "def data_frame_creator(self):\n sequence_folder = [\n '/SEQ1', '/SEQ2', '/SEQ3', '/SEQ4', '/SEQ5', '/SEQ6'\n ]\n rgb_folder = ['/RGBLeft/', '/RGBRight/']\n depth_folder = ['/DepthLeft/', '/DepthRight/']\n segmentation_folder = ['/GTLeft/', '/GTright/']\n rgb_dir = [\n self.dataset_dir + sequence_f + rgb_f for rgb_f in rgb_folder\n for sequence_f in sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_dir + sequence_f + depth_f\n for depth_f in depth_folder\n for sequence_f in sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_dir + sequence_f + segmentation_f\n for segmentation_f in segmentation_folder\n for sequence_f in sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1, random_state=123)\n\n return pd.DataFrame(dataset)", 
"def init_file_dataframe():\n \n row_names_link = [\"NA\"] * MAX_NUM_OF_FILES\n row_names_name = [\"NA\"] * MAX_NUM_OF_FILES\n row_names_down = [\"NA\"] * MAX_NUM_OF_FILES\n \n for i in range(MAX_NUM_OF_FILES):\n row_names_link[i] = \"link_\" + str(i + 1)\n row_names_name[i] = \"name_\" + str(i + 1)\n row_names_down[i] = \"down_\" + str(i + 1)\n \n df = pd.DataFrame(columns = row_names_link + row_names_name + row_names_down)\n \n return df, row_names_link, row_names_name, row_names_down", "def get_weather_as_df(keys):\n s3 = boto3.client('s3')\n bucket_name = 'trailreportdata'\n files = b''\n for key in keys:\n response = s3.get_object(Bucket=bucket_name, Key=key)\n body = response['Body']\n csv = body.read()\n files += csv\n f = BytesIO(files)\n csv = pd.read_csv(f)\n return csv", "def read_val_data(spark_session, sample_size, channels, sample_prob=1, normalize_class_distribution=False, seed=42):\n filename = \"val_{}{}{}_updated.parquet\"\n train_df = read_data(spark_session, filename, sample_size, channels, sample_prob, normalize_class_distribution, seed)\n return train_df", "def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df", "def get_data(nrows=10000):\n # Add Client() here\n client = storage.Client()\n path = \"gs://{}/{}\".format(BUCKET_NAME, PATH_INSIDE_BUCKET)\n df = pd.read_csv(path, nrows=nrows)\n return df", "def read(self, idx: (int, str) = 0, key: str = None, **kwargs) -> pd.DataFrame:\n path = self._get_object_path(key)\n if self.format == 'CSV':\n return pd.read_csv(path, index_col=idx, **kwargs)\n return s3.read(path, **kwargs)", "def collect_data(input_folder, ratio):\n # TODO implement ratio\n data = pd.DataFrame()\n\n folderpaths = [os.path.normpath((os.path.join(input_folder, x)))\n for x in os.listdir(input_folder) if not x.endswith('.gitkeep')]\n # for folder in folderpaths:\n for folder in folderpaths:\n filepaths = [os.path.normpath((os.path.join(folder, x)))\n for x in os.listdir(folder) if not x.endswith('.gitkeep')]\n for file in filepaths:\n df = pd.read_pickle(file)\n df = df[df['is_feas'] == 1]\n data = data.append(df[['frames', 'label']], ignore_index=True)\n\n return data.rename(columns={'frames': 'x', 'label': 'y'})", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def get_existing_data_for_ticker(ticker):\n filename = get_filename_for_ticker(ticker)\n logger.debug(f'Processing {filename}')\n df_ticker_data = pd.DataFrame()\n try:\n df_ticker_data = 
pd.read_csv(filename, index_col='Date')\n df_ticker_data.index = pd.to_datetime(df_ticker_data.index)\n except FileNotFoundError:\n logger.error(f'Error in opening {filename}')\n except Exception as e:\n logging.error(f'Error {e} while accessing existing data')\n return df_ticker_data", "def get_vetted_sample(self):\n list_of_files = glob.glob(self.final_path)\n latest_file = max(list_of_files, key=os.path.getctime)\n df = pd.read_csv(latest_file)\n return df", "def process_stocks(s3_working_bucket: str, date: tuple) -> DataFrame:\n\n logging.debug(\"Start reading stocks csv.\")\n df_stocks = stdm.read_csv(spark, s3_working_bucket, date, \"stocks\")\n\n logging.debug(\"Calling gmt_unix_to_datetime function.\")\n df_stocks = stp.gmt_unix_to_datetime(df_stocks, \"timestamp\")\n\n logging.debug(\"Calling order_by_col function.\")\n df_stocks = stp.order_by_col(df_stocks, \"datetime\")\n\n return df_stocks", "def df_builder(path):\n\n ###CHANGE FILE ENDING (.csv or .csv.gz)\n all_files = glob.glob(\n os.path.join(path, \"probe_data_I210.201710*.csv\")) # advisable to use os.path.join as this makes concatenation OS independent\n df_from_each_file = (pd.read_csv(f) for f in all_files)\n return pd.concat(df_from_each_file, ignore_index=True)", "def create_input_sample_files(self, input_files: List[Path]) -> pd.DataFrame:\n assemblies = {}\n reads = {}\n sample_names = set()\n data = []\n\n # Initial pass of files to break up into assemblies/reads\n for file in input_files:\n sf = SequenceFile(file)\n sample_name = sf.get_genome_name(exclude_paired_end_indicators=True)\n if sf.is_assembly():\n if sample_name in sample_names:\n if sample_name in assemblies:\n previous_files = [assemblies[sample_name]]\n else:\n previous_files = reads[sample_name]\n raise Exception(f'Duplicate sample with name [{sample_name}]. current_file=[{file}], '\n f'previous_file(s)={previous_files}')\n else:\n sample_names.add(sample_name)\n assemblies[sample_name] = file\n elif sf.is_reads():\n if sample_name in assemblies:\n previous_files = assemblies[sample_name]\n raise Exception(f'Duplicate sample with name [{sample_name}]. current_file=[{file}], '\n f'previous_file(s)={previous_files}')\n elif sample_name in reads:\n if len(reads[sample_name]) != 1:\n raise Exception(f'Invalid number of files for sample with name [{sample_name}]. '\n f'current_file=[{file}], previous_files={reads[sample_name]}')\n else:\n reads[sample_name].append(file)\n else:\n reads[sample_name] = [file]\n\n sample_names.add(sample_name)\n else:\n logger.warning(f'Input file [{file}] with unknown file type (not assembly or reads). 
Ignoring.')\n\n # Now we iterate over samples to insert into an array to create the final dataframe\n for sample in assemblies:\n data.append([sample, assemblies[sample], pd.NA, pd.NA])\n\n # Iterate over reads to insert into array for final dataframe\n for sample in reads:\n if len(reads[sample]) == 1:\n data.append([sample, pd.NA, reads[sample][0], pd.NA])\n elif len(reads[sample]) == 2:\n file1 = SequenceFile(reads[sample][0])\n file2 = SequenceFile(reads[sample][1])\n\n file1_differences = file1.name_differences(file2)\n file2_differences = file2.name_differences(file1)\n\n if len(file1_differences) != 1 or len(file2_differences) != 1:\n raise Exception(\n f'Files [{reads[sample]}] do not have exactly one difference between names, cannot determine'\n f' paired structure.')\n else:\n f1d = file1_differences[0].lower()\n f2d = file2_differences[0].lower()\n\n if f1d == '1' and f2d == '2':\n forward = file1\n reverse = file2\n elif f1d == 'f' and f2d == 'r':\n forward = file1\n reverse = file2\n elif f2d == '1' and f1d == '2':\n reverse = file1\n forward = file2\n elif f1d == 'r' and f2d == 'f':\n reverse = file1\n forward = file2\n else:\n raise Exception(f'Cannot determine pair structure for files [{reads[sample]}]')\n\n data.append([sample, pd.NA, forward.file, reverse.file])\n else:\n raise Exception(f'Invalid number of files for sample [{sample}], files={reads[sample]}')\n\n return pd.DataFrame(data, columns=self.INPUT_SAMPLE_FILE_COLUMNS)", "def readFilesIntoDataFrame(nameTemplate, numOfFiles):\n #https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65\n\n list_of_dfs = []\n for i in range(numOfFiles):\n print ('Processing {0} out of {1} files'.format(i, numOfFiles))\n\n fileToProcess = fileLocation + nameTemplate.format(i)\n print 'fileToProcess=', fileToProcess\n \n if 'feather' in nameTemplate:\n read_df = feather.read_feather(fileToProcess)\n elif 'parquet' in nameTemplate:\n read_df = pd.read_parquet(fileToProcess)\n else:\n print 'This should not happen, nameTemplate is wrong, please check it is in parquet or feather format or that the template correctly describes the existing files, exiting...'\n sys.exit(1)\n\n print read_df.info(memory_usage='deep')\n print '-'*50\n print read_df.describe()\n list_of_dfs.append(read_df)\n \n print 'Start concatenating dataframes, it may take some time'\n comb_df = pd.concat(list_of_dfs, ignore_index=True)\n return comb_df", "def _read_kernels(save_name, periods):\n kernels = pd.DataFrame([])\n\n for period in periods:\n kernelfile = '{0}_cvfrechet_{1:.1f}s'.format(save_name, period)\n kf = pd.read_csv(kernelfile, sep='\\s+', header=None)\n kf.columns = ['r', 'vsv', 'vpv', 'vsh', 'vph', 'eta', 'rho']\n kf['z'] = 6371 - kf['r'] * 1e-3\n kf['period'] = period\n kf = kf[::-1]\n kernels = kernels.append(kf)\n\n kernels = kernels[['z', 'period', 'vsv', 'vpv', 'vsh', 'vph', 'eta', 'rho']]\n kernels.reset_index(drop=True, inplace=True)\n\n return kernels", "def read_csv(self, bucket, key, with_prefix=False, file_extension=None, **kwargs):\n if with_prefix:\n files = self.get_prefix_object(bucket=bucket,\n key=key,\n file_extension=file_extension)\n df = pd.concat([pd.read_csv(self.get_file_buffer(bucket=bucket, key=key), **kwargs) for key in files])\n\n else:\n df = pd.read_csv(self.get_file_buffer(bucket=bucket, key=key), **kwargs)\n\n return df" ]
[ "0.56002134", "0.5580284", "0.5487554", "0.54729915", "0.54723316", "0.53689516", "0.53628457", "0.53199345", "0.52421874", "0.5238083", "0.52218586", "0.5214587", "0.51922494", "0.5191581", "0.51565295", "0.5144537", "0.51300454", "0.5121978", "0.51176524", "0.5095419", "0.50850296", "0.5023616", "0.5000034", "0.49905702", "0.4983106", "0.49792832", "0.49380672", "0.4927652", "0.49014384", "0.48979688" ]
0.74998736
0
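
A minimal runnable sketch of the channel-merging pattern documented in the record above, assuming the standard SageMaker File-mode layout under /opt/ml/input/data/<channel>/; the function name, the base_path parameter, and the __main__ demo are illustrative assumptions rather than values from the dataset, and the unreachable `return null` that ends the stored snippet is omitted.

import glob
import os

import pandas as pd

VALID_CHANNELS = {'training', 'validation', 'testing'}


def channel_as_dataframe(channel='training', base_path='/opt/ml/input/data'):
    """Merge every CSV file in one SageMaker input channel into a single dataframe."""
    # Assumption: File mode has already copied the channel's objects from S3
    # into base_path/<channel>/ before the container starts.
    if channel not in VALID_CHANNELS:
        raise ValueError('Incorrect data channel type. '
                         'Options are training, validation, and testing.')
    csv_files = glob.glob(os.path.join(base_path, channel, '*.csv'))
    if not csv_files:
        raise FileNotFoundError(f'No CSV files found for channel {channel!r}')
    frames = [pd.read_csv(f) for f in csv_files]
    return pd.concat(frames, axis=0, ignore_index=True)


if __name__ == '__main__':
    train_df = channel_as_dataframe('training')
    print(train_df.shape)
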
Checks whether there are active recommendations for each category for the given marketplace, and if there are, returns the time when recommendations were last updated for each category.
def get_last_updated_time_for_recommendations(self, marketplace_id): return self.make_request( "GetLastUpdatedTimeForRecommendations", {"MarketplaceId": marketplace_id}, method="POST", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_ok_to_update(self):\n current_time = int(time.time())\n last_refresh = self.last_refresh\n if last_refresh is None:\n last_refresh = 0\n if current_time >= (last_refresh + self.refresh_rate):\n return True\n return False", "def list_recommendations(\n self, marketplace_id=None, recommendation_category=None, next_token=None\n ):\n return self.make_request(\n \"ListRecommendations\",\n {\n \"MarketplaceId\": marketplace_id,\n \"RecommendationCategory\": recommendation_category,\n },\n method=\"POST\",\n )", "async def get_last_recommended_recipes(\n time_period: int = 3600, db_path: Path = DB_PATH\n) -> dict:\n recipes = await get_query_results(\n \"SELECT recipe_name, last_recommended FROM recipes\", db_path=db_path\n )\n\n current_time = int(time())\n cutoff_point = current_time - time_period\n\n recommended_recipes = []\n for recipe_name, last_recommended in recipes:\n if last_recommended > cutoff_point:\n recommended_recipes.append(recipe_name)\n\n recommended_recipes.sort()\n\n return {\"last_recommended_recipes\": recommended_recipes}", "def should_update_currency_rates(self):\n if time.time() - self.currency_rates_last_update > self.CURRENCY_RATES_THRESHOLD:\n return True\n\n return False", "def __time_update(user):\n\n feeds = Feed.objects.filter(user=user)\n\n for feed in feeds:\n # Last time updated more than 5 minutes ago\n if (datetime.now() - feed.time) > timedelta(0, 300, 0):\n __update_feed(feed)", "def check_if_up_to_date():\n last_daily = get_latest_dl_date()\n last_trading_day = get_last_open_trading_day()", "def check_api_use_rate():\n with open('api_use.csv', 'r') as api_use_file:\n csv_reader = csv.reader(api_use_file)\n last_date_used_unparsed, times_used_since_last_reset_unparsed = next(csv_reader)\n\n month, day, year, hour, minute = [int(item)\n for item in last_date_used_unparsed.split(\"/\")\n ]\n\n last_time_used = datetime.datetime(year, month, day, hour, minute)\n times_used_since_last_reset = int(times_used_since_last_reset_unparsed)\n\n current_time = datetime.datetime.now()\n\n time_since_last_use = current_time - last_time_used\n seconds_since_last_use = time_since_last_use.seconds\n\n # if it hasn't been ten minutes since the last time you used it\n if seconds_since_last_use < 460:\n # if it hasn't been used more than 8 times\n if times_used_since_last_reset < 9:\n # update last time use and times used\n times_used_since_last_reset += 1\n last_time_used = current_time\n print(\"You can use the api\")\n print(\"You have {} uses remaining and {} minutes before the reset\".format(\n 10 - times_used_since_last_reset, (460 - seconds_since_last_use) / 60.0\n ))\n update_tracker(last_time_used, times_used_since_last_reset)\n return True\n # if it has been used 8 times in the last ten minutes\n elif times_used_since_last_reset >= 9:\n print(\"Warning you have used the api {} times in 10 minutes.\".format(\n times_used_since_last_reset))\n return False\n # if it has been more than 9 minutes you are good to go\n elif seconds_since_last_use >= 460:\n # okay to use. reset current time and times used\n times_used_since_last_reset = 1\n last_time_used = current_time\n print(\"It's been more than 9 minutes since last use. 
You are good to go\")\n update_tracker(last_time_used, times_used_since_last_reset)\n return True", "def find_years_needing_update(self, product_name: str) -> List[int]:\n updated_months = TIME_OVERVIEW.alias(\"updated_months\")\n years = TIME_OVERVIEW.alias(\"years_needing_update\")\n product = self.get_product_summary(product_name)\n\n # Years that have already been summarised\n summarised_years = {\n r[0].year\n for r in self._engine.execute(\n select([years.c.start_day])\n .where(years.c.period_type == \"year\")\n .where(\n years.c.product_ref == product.id_,\n )\n )\n }\n\n # Empty product? No years\n if product.dataset_count == 0:\n # check if the timeoverview needs cleanse\n if not summarised_years:\n return []\n else:\n return summarised_years\n\n # All years we are expected to have\n expected_years = set(\n range(\n product.time_earliest.astimezone(timezone).year,\n product.time_latest.astimezone(timezone).year + 1,\n )\n )\n\n missing_years = expected_years.difference(summarised_years)\n\n # Years who have month-records updated more recently than their own record.\n outdated_years = {\n start_day.year\n for [start_day] in self._engine.execute(\n # Select years\n select([years.c.start_day])\n .where(years.c.period_type == \"year\")\n .where(\n years.c.product_ref == product.id_,\n )\n # Where there exist months that are more newly created.\n .where(\n exists(\n select([updated_months.c.start_day])\n .where(updated_months.c.period_type == \"month\")\n .where(\n func.extract(\"year\", updated_months.c.start_day)\n == func.extract(\"year\", years.c.start_day)\n )\n .where(\n updated_months.c.product_ref == product.id_,\n )\n .where(\n updated_months.c.generation_time > years.c.generation_time\n )\n )\n )\n )\n }\n return sorted(missing_years.union(outdated_years))", "def time_stats(df):\r\n\r\n print('\\nFetching The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n # Displays the most common month\r\n popular_month = df['month'].mode()[0]\r\n print('The Most Common Month is:', popular_month)\r\n \r\n\r\n # Displays the most common day of week\r\n popular_day = df['day_of_week'].mode()[0]\r\n print('The Most Common Day is:', popular_day)\r\n \r\n # Displays the most common start hour\r\n df['hour'] = df['Start Time'].dt.hour\r\n popular_hour = df['hour'].mode()[0]\r\n print('The Most Common hour is:', popular_hour)\r\n\r\n print(\"\\nThis process took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def needs_update(self):\n now = time.time()/60\n return (self.last_update_time_in_minutes+self.timeout) < now", "def update_consumption_times(since):\n # any consumption value that was touched potentially needs to have its\n # time_needing_data updated\n consumptions_to_update = CalculatedConsumption.objects.filter(update_date__gte=since)\n count = consumptions_to_update.count()\n print('updating %s consumption objects' % count)\n for i, c in enumerate(consumptions_to_update.iterator()):\n if i % 500 == 0:\n print('%s/%s consumptions updated' % (i, count))\n # if they supply the product it is already set in update_consumption, above\n if not c.supply_point.supplies(c.product):\n c.time_needing_data = c.time_with_data\n c.save()", "def compare_with_historical_m3_forecast(configs: Configs, data_loader: DataLoader):\n\n # Historical forecasts\n granularity_week = [\n n.FIELD_YEARWEEK,\n n.FIELD_LEAD_SKU_ID,\n n.FIELD_CUSTOMER_GROUP,\n n.FIELD_PLANT_ID\n ]\n granularity_month = [n.FIELD_YEARMONTH] + granularity_week[1:]\n\n relevant_columns = [\n 
n.FIELD_YEARMONTH,\n n.FIELD_YEARWEEK,\n n.FIELD_LEAD_SKU_ID,\n n.FIELD_CUSTOMER_GROUP,\n n.FIELD_PLANT_ID,\n Evaluator.GROUND_TRUTH_COLUMN_APO_FINAL_FORECAST_M3,\n 'forecast_volume_m-3'\n ]\n data_loader.load_sell_in_forecast_accuracy_comparison_data(Evaluator.GROUND_TRUTH_APO_FORECAST_ACCURACY_REPORT_M3)\n\n historical_forecasts = data_loader.df_fc_acc.filter(relevant_columns)\n in_period = ((historical_forecasts[n.FIELD_YEARWEEK] >= configs.evaluation_start) &\n (historical_forecasts[n.FIELD_YEARWEEK] <= configs.evaluation_end))\n historical_forecasts = historical_forecasts[in_period]\n\n # Group to get one value per SKU, CPG, Plant, Week\n historical_forecasts = (historical_forecasts\n .groupby(granularity_week)\n .agg({n.FIELD_YEARMONTH: 'first',\n Evaluator.GROUND_TRUTH_COLUMN_APO_FINAL_FORECAST_M3: 'sum',\n 'forecast_volume_m-3': 'sum'})\n .reset_index()\n )\n\n # Predictions\n apo_week_month_list = (historical_forecasts[[n.FIELD_YEARMONTH, n.FIELD_YEARWEEK]]\n .sort_values([n.FIELD_YEARMONTH, n.FIELD_YEARWEEK])\n .drop_duplicates(n.FIELD_YEARWEEK))\n apo_week_month_list = build_m3_target_predictions_table(\n apo_week_month_list, configs.evaluation_start, configs.evaluation_end)\n\n # list contains all (SKU, CPG, Plant) combinations that appear at least once in a given month\n # TODO - cross join to get all existing (material, plant, cpg) combinations\n df_predictions_list = historical_forecasts[granularity_month].drop_duplicates()\n df_predictions_list = df_predictions_list.merge(apo_week_month_list, on=n.FIELD_YEARMONTH, how='left')\n predictions = predict_multiple_horizons(df_predictions_list, configs, data_loader)\n\n # scope historical forecasts table to keep exactly the same weeks as the predicted ones (before aggregating)\n historical_forecasts = historical_forecasts.merge(\n predictions.rename(columns={'date_to_predict': n.FIELD_YEARWEEK}).filter(granularity_week).drop_duplicates(),\n on=granularity_week, how='inner'\n )\n\n # Comparison ML forecast vs historical forecast\n # predictions_output = pd.read_csv('predictions_m3.csv')\n aggs = {Evaluator.GROUND_TRUTH_COLUMN_APO_FINAL_FORECAST_M3: 'sum', 'forecast_volume_m-3': 'sum'}\n historical_forecasts_month = historical_forecasts.groupby(granularity_month).agg(aggs).reset_index()\n predictions_month = predictions.groupby(granularity_month)[n.FIELD_PREDICTION].sum().reset_index()\n forecasts_apo_vs_ml_m3 = historical_forecasts_month.merge(\n predictions_month,\n on=granularity_month, how='left'\n )\n\n # Add is_on trade information\n tmp = data_loader.df_cpg_trade_channel[[n.FIELD_CUSTOMER_GROUP, 'trade_type']]\n tmp['is_on_trade'] = (tmp['trade_type'] == 'is_on_trade')\n tmp = tmp[[n.FIELD_CUSTOMER_GROUP, 'is_on_trade']]\n forecasts_apo_vs_ml_m3 = forecasts_apo_vs_ml_m3.merge(tmp, on=n.FIELD_CUSTOMER_GROUP, how='left')\n\n forecasts_apo_vs_ml_m3 = forecasts_apo_vs_ml_m3[forecasts_apo_vs_ml_m3[n.FIELD_PLANT_ID] != 'F015']\n evaluator = Evaluator(\n data=data_loader,\n granularity=granularity_month,\n start_periods_eval=configs.evaluation_start,\n end_periods_eval=configs.evaluation_end,\n forecasting_horizon=3,\n granularity_eval='forecast_volume_m-3',\n is_sell_in_model=configs.is_sell_in_model,\n is_weekly_forecast=False\n )\n granularities = replace_week_granularity_by_month(configs.granularity_evaluator.values())\n evaluator.report(forecasts_apo_vs_ml_m3, granularities)\n evaluator.detailed_report(forecasts_apo_vs_ml_m3, granularities)", "def available(self):\n return self.coordinator.last_update_success", "def 
available(self):\n return self.coordinator.last_update_success", "def available(self):\n return self.coordinator.last_update_success", "def available(self):\n return self.coordinator.last_update_success", "def available(self):\n return self.coordinator.last_update_success", "def available(self):\n return self.coordinator.last_update_success", "def available(self):\n return self.coordinator.last_update_success", "def find_categories_used(request):\n categories_used = []\n\n for item_index, item in enumerate(all_shopping_items(request)):\n if item_index == 0:\n categories_used.append(item.category)\n else:\n add_category = True\n\n for list_item in categories_used:\n if list_item.category == item.category.category:\n add_category = False\n \n if add_category:\n categories_used.append(item.category)\n\n return categories_used", "def check_feeds():\n\n session = oercloud.Session()\n\n # load the entry point handlers for different feed types\n handlers = aggregator.handlers.get()\n\n for feed in session.query(oercloud.Feed):\n\n if (time.time() - feed.last_import) > feed.update_interval:\n\n # this feed needs updated -- call the appropriate handler\n aggregator.LOG.info(\"Updating %s\" % feed)\n\n if feed.feed_type in handlers:\n handlers[feed.feed_type].load()(feed)\n else:\n # no handler... log a warning\n aggregator.LOG.warning(\"No handler for feed type %s\" % \n feed.feed_type)", "def find_categories_used_dict(request):\n categories_used = []\n\n for item_index, item in enumerate(all_shopping_items(request)):\n category_dict = {\n 'category': item.category.category,\n }\n if item_index == 0:\n categories_used.append(category_dict)\n else:\n add_category = True\n\n for list_item in categories_used:\n \n if list_item['category'] == item.category.category:\n add_category = False\n \n if add_category:\n categories_used.append(category_dict)\n\n return categories_used", "def time_between_updates(self):\r\n if 'last_updated' not in self._original:\r\n return 0\r\n last_update = self._original['last_updated']\r\n this_update = self.last_updated\r\n return this_update - last_update", "def available(self):\n return self._coordinator.last_update_success", "def time_stats(df, city):\r\n print(\"\\n\"*2+'*' * 20)\r\n print('Calculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n \r\n # TO DO: display the most common month\r\n try:\r\n fav_month_num = df['Start Time'].dt.month.mode()[0]\r\n fav_month = VALID_MONTHS[fav_month_num-1].title()\r\n print('Most frequent month for ', city.title(), 'is:', fav_month.title())\r\n except Exception as e:\r\n print('An exception has been occurred while displaying most common month : {}'.format(e))\r\n\r\n # TO DO: display the most common day of week\r\n try:\r\n fav_day = df['day_of_week'].mode()[0]\r\n print('Most frequent weekday for ', city.title(), 'is:',fav_day.title())\r\n except Exception as e:\r\n print('An exception has been occurred while displaying most common moth day of week: {}'.format(e))\r\n\r\n\r\n # TO DO: display the most common start hour\r\n try:\r\n fav_hour = df['hour'].mode()[0]\r\n print('Most frequent starthour for ', city.title(), 'is:',fav_hour)\r\n except Exception as e:\r\n print('An exception has been occurred while displaying most common start hour: {}'.format(e))\r\n \r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('*'*20)", "def needs_extent_refresh(self, product_name: str) -> bool:\n existing_product_summary = self.get_product_summary(product_name)\n if not 
existing_product_summary:\n # Never been summarised. So, yes!\n return True\n\n most_recent_change = self.find_most_recent_change(product_name)\n has_new_changes = most_recent_change and (\n most_recent_change > existing_product_summary.last_refresh_time\n )\n\n _LOG.debug(\n \"product.last_extent_changes\",\n product_name=product_name,\n last_refresh_time=existing_product_summary.last_refresh_time,\n most_recent_change=most_recent_change,\n has_new_changes=has_new_changes,\n )\n return has_new_changes", "def measure(self, recommender):\n items_shown = recommender.items_shown\n if items_shown.size == 0:\n # at beginning of simulation, there are no recommendations,\n # so we log a `None` value\n self.observe(None)\n return\n\n recommended_item_attr = recommender.items_hat.value[:, items_shown]\n\n afsr = np.mean(\n recommended_item_attr.max(axis=(0, 2)) - recommended_item_attr.min(axis=(0, 2))\n )\n\n self.observe(afsr)", "def check_if_actuall(self) -> bool:\n\n return self.last_date >= self.get_last_image_date()", "def is_active(self):\n return (datetime.now() - self.updated).days < 100", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n print('-'*40)\n start_time = time.time()\n\n most_frequent_month_count = df[df['month'] == df['month'].mode()[0]]['month'].count()\n most_frequent_month = df['month'].mode()[0]\n print('\\nThe most popular month is {}, which has {} total counts.'.format(most_frequent_month, most_frequent_month_count))\n\n most_frequent_weekday_count = df[df['weekday'] == df['weekday'].mode()[0]]['weekday'].count()\n most_frequent_weekday = df['weekday'].mode()[0]\n print('\\nThe most popular weekday is {}, which has {} total counts.'.format(most_frequent_weekday, most_frequent_weekday_count))\n\n most_frequent_hour_count = df[df['hour'] == df['hour'].mode()[0]]['hour'].count()\n most_frequent_hour = df['hour'].mode()[0]\n print('\\nThe most popular hour is {}, which has {} total counts.'.format(most_frequent_hour, most_frequent_hour_count))\n\n print(\"\\nThis took %s seconds. \" % (time.time() - start_time))\n print('-'*40)" ]
[ "0.48689032", "0.4762724", "0.47439244", "0.47338334", "0.4668525", "0.46228564", "0.4600863", "0.45566443", "0.45439604", "0.45418546", "0.45240474", "0.4514353", "0.45044154", "0.45044154", "0.45044154", "0.45044154", "0.45044154", "0.45044154", "0.45044154", "0.45004466", "0.44932762", "0.4490391", "0.4453329", "0.44433692", "0.444202", "0.44338626", "0.44316697", "0.44263208", "0.44144633", "0.4404343" ]
0.600832
0
Returns your active recommendations for a specific category or for all categories for a specific marketplace. Pass `next_token` to call "ListRecommendationsByNextToken" instead.
def list_recommendations( self, marketplace_id=None, recommendation_category=None, next_token=None ): return self.make_request( "ListRecommendations", { "MarketplaceId": marketplace_id, "RecommendationCategory": recommendation_category, }, method="POST", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_recommendations_by_next_token(self, token):\n return self.list_recommendations(next_token=token)", "def recommendations(self, **kwargs):\n\n path = self._get_movie_id_path('recommendations')\n resp = self._get_method(path, kwargs)\n return resp", "def get_recommendations(self):\n endpoints = '/user/recs'\n return self.get_request(endpoints)", "def getRecommendationSonglist(self, limit = 20, offset = 0, total = True):\n currAPIVersion = self.config['apiVersion']\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n currDict = {\n 'limit' : limit,\n 'offset': offset,\n 'total' : total\n }\n\n currC, currR = self._mySubmit(currAPIURL, currDict)\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n\n return currR, currAPIURL[2]", "def get_pocket_recommendations(today, recommended_days=6):\n\n with urlopen(f\"https://www.getpocket.com/@{POCKET_USERNAME}\") as res:\n html = res.read()\n\n items = parse(html, today=today)\n news = filter(\n lambda i: i[\"pocket_recommended_at\"]\n >= (today - timedelta(days=recommended_days)),\n items,\n )\n\n return news", "def ListRecommendations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_recommendations(self):\n\n try:\n recommendations = Recommendations.objects.get(user_id=self.id)\n except DoesNotExist:\n print \"No recommendation object found. Creating one now.\"\n recommendations = Recommendations(user_id=self.id)\n recommendations.save()\n\n return recommendations", "def GetRecommendation(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def ListRecommendations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def _recommend_movie(user_id, movie_id, number_movies):\n #get the recommended movie ids from pickled model\n rec_movies = sparkrecommender.movie_recomendation(user_id,movie_id,number_movies)\n #get a list of movies ids used in model\n moviesdf = pd.read_csv('movies.csv',index_col='movieId')\n \n #build list of lists with [[imdb ID, movie title, post img link]]\n rec_movies_list = []\n for movie_id in rec_movies:\n temp_list = []\n imdbid_ = str(get_imdbId(movie_id))\n temp_list.append(imdbid_)\n temp_list.append(moviesdf.loc[movie_id,'title'])\n temp_list.append('http://img.omdbapi.com/?apikey=ae550a04&i=tt'+str(imdbid_))\n rec_movies_list.append(temp_list)\n return rec_movies_list", "def make_recommendation(model_knn, data, mapper, fav_parks, n_recommendations):\n # fit\n model_knn.fit(data)\n # get input park index\n #print('You have input movie:', fav_parks)\n idx = fuzzy_matching(mapper, fav_parks, verbose=True)\n \n #print('Recommendation system start to make inference')\n #print('......\\n')\n distances, indices = model_knn.kneighbors(data[idx], n_neighbors=n_recommendations+1)\n \n raw_recommends = \\\n sorted(list(zip(indices.squeeze().tolist(), distances.squeeze().tolist())), key=lambda x: x[1])[:0:-1]\n # get reverse mapper\n reverse_mapper = {v: k for k, v in 
mapper.items()}\n # print recommendations\n allrecs = []\n for i, (idx, dist) in enumerate(raw_recommends):\n allrecs.append([reverse_mapper[idx], dist])\n return allrecs", "def get_recommendations(self, payload):\n\n session = Session()\n\n event_category_vector = self._load_interest_vector(payload['user_id'],\n 'event_categories')\n event_types_vector = self._load_interest_vector(payload['user_id'],\n 'event_types')\n\n categories_sum = sum(event_category_vector.values())\n types_sum = sum(event_types_vector.values())\n\n all_events_query = session.query(Event).filter(\n Event.event_time >= datetime.utcnow()\n )\n total_events_num = all_events_query.count()\n\n result_query = session.query.filter(False)\n\n for category, category_score in event_category_vector.items():\n category_percent = category_score / categories_sum\n\n category_events_query = all_events_query.filter(\n Event.categories.any(EventCategory.id == category)\n )\n\n for event_type, event_type_score in event_types_vector.items():\n type_percent = event_type_score / types_sum\n limit = total_events_num * category_percent * type_percent\n\n filtered_query = category_events_query.filter(\n Event.event_type_id == event_type\n ).limit(limit)\n\n result_query = result_query.union(filtered_query)\n\n result_query = result_query.order_by(Event.event_time)\n\n Session.remove()\n\n # TODO: check if it will be sent over RabbitMQ\n return result_query", "def list_offerings(nextToken=None):\n pass", "def _get_recommend(self, user):\n return self.user_cf.calculate(target_user_id=user, user_n=self.user_n,\n item_n=self.item_n, type=2)", "def recommendations(self):\n return [user for user in self.tags.similar_objects() if user.status == UserStatus.APPROVED]", "def search(bearer_token, price, location, categories, radius, openat):\n\n RESTAURANT_LIMIT = 3\n\n url_params = {\n 'term': 'restaurants',\n 'location': location.replace(' ', '+'),\n 'limit': RESTAURANT_LIMIT,\n 'open_at': openat,\n 'price': price,\n 'categories': categories,\n 'radius': radius\n }\n return request(API_HOST, SEARCH_PATH, bearer_token, url_params=url_params)", "def build_recommendations(sc, myRatings, model):\n #myRatedMovieIds = set([x[1] for x in myRatings])\n uid = get_uid_from_ratings(myRatings)\n #print \"uid:\", uid\n myRatedMovieIds = set([x[1] for x in myRatings.collect()])\n #print \"myRatedMovieIds:\", myRatedMovieIds\n candidates = sc.parallelize([m for m in movies if m not in myRatedMovieIds]).cache()\n #print candidates\n predictions = model.predictAll(candidates.map(lambda x: (uid, x))).collect()\n #print predictions\n recommendations = sorted(predictions, key = lambda x: x.product)\n return recommendations", "def recommend(self, request):\n log.debug(\"Received recommendation request data (request.data): {}\".format(request.data))\n # validate request serializer\n serializer = ActivityRecommendationRequestSerializer(\n data=request.data,\n )\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # get learner (creation of learner is supported here if learner does not already exist)\n learner, created = Learner.objects.get_or_create(**serializer.data['learner'])\n\n # get collection\n collection = serializer.validated_data['collection']\n\n # parse sequence data\n sequence_data = serializer.validated_data['sequence']\n sequence = []\n for activity_data in sequence_data:\n try:\n sequence.append(Activity.objects.get(url=activity_data['url']))\n except Activity.DoesNotExist:\n log.error(\"Unknown 
activity found in sequence data: {}\".format(activity_data))\n log.debug(\"Parsed sequence: {}\".format(sequence))\n # get recommendation from engine\n recommended_activity = get_engine().recommend(learner, collection, sequence)\n\n # construct response data\n if recommended_activity:\n recommendation_data = ActivityRecommendationSerializer(recommended_activity).data\n recommendation_data['complete'] = False\n else:\n # Indicate that learner is done with sequence\n recommendation_data = dict(\n collection=collection.collection_id,\n url=None,\n complete=True,\n )\n\n return Response(recommendation_data)", "def movie_recommendations(age, gender):\r\n url = \"https://streaming-availability.p.rapidapi.com/search/basic\"\r\n movies_list = []\r\n\r\n if age == \"0-2\" or age == \"4-6\" or age == \"8-12\":\r\n genre = [\"14\", \"16\"]\r\n\r\n elif age == \"15-20\":\r\n\r\n if gender == \"Male\":\r\n genre = [\"3\", \"5\", \"18\", \"27\", \"28\", \"80\"]\r\n\r\n else: # female\r\n genre = [\"10749\", \"10764\"]\r\n\r\n else: # age >20:\r\n\r\n if gender == \"Male\":\r\n genre = [\"1\", \"5\", \"27\", \"28\", \"80\", \"10752\", \"10763\", \"10767\"]\r\n\r\n else: # female\r\n genre = [\"4\", \"14\", \"18\", \"35\", \"10749\", \"10751\", \"10764\", \"10763\", \"10767\"]\r\n\r\n for i in range(7):\r\n\r\n querystring = {\"country\": \"us\", \"service\": \"netflix\", \"type\": \"movie\", \"genre\": genre, \"page\": str(i + 1),\r\n \"language\": \"en\"}\r\n headers = {\r\n 'x-rapidapi-host': \"streaming-availability.p.rapidapi.com\",\r\n 'x-rapidapi-key': \"key\"\r\n }\r\n\r\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\r\n age_split = age.split('-')\r\n\r\n if int(age_split[0]) >= 18:\r\n age_split[0] = 18\r\n age_split[1] = 100\r\n\r\n for j in response.json()[\"results\"]:\r\n if int(age_split[0]) <= (j[\"age\"]) <= int(age_split[1]):\r\n movies_list.append(j[\"posterURLs\"][\"original\"])\r\n\r\n return movies_list", "def recommend_by_keywords(self, key_words_list=None):\n pass", "async def search_by_product_or_category(\n conn, cursor, product: str = \"\", category: str = \"\"\n) -> List[str]:\n\n if (not product) and (not category):\n filter_term = \"\"\n elif product and category:\n filter_term = (\n f\"\\n WHERE product = '{product}' AND category = '{category}'\"\n )\n elif product:\n filter_term = f\"\\n WHERE product = '{product}'\"\n else:\n filter_term = f\"\\n WHERE category = '{category}'\"\n\n statement = f\"\"\"\n SELECT product.name as product,\n product.description as description,\n product.category as category,\n supplier_product.price as price,\n supplier_product.supplier as supplier,\n supplier_product.price as price,\n product.rating as product_rating,\n supplier.rating as supplier_rating,\n ROUND(((product.rating + supplier.rating)/2),2) as combined_rating,\n product.last_updated as last_updated \n FROM product \n INNER JOIN supplier_product\n ON product.name = supplier_product.product\n INNER JOIN supplier \n ON supplier_product.supplier = supplier.name {filter_term}\n ORDER BY (product.rating + supplier.rating) DESC\n \"\"\"\n await cursor.execute(statement)\n categories = await cursor.fetchall()\n return categories", "def recommendation_genre_seeds(self, **kwargs):\n return self._get(API.RECOMMENDATIONS_GENRES.value, **kwargs)", "def popular_recommend(row):\n actual = new_purchase_row(row)\n return f1(actual, popular_products)", "def recommended_list(self, request, instance):\n if request.user.is_authenticated:\n viewed_item = 
self.filter_by_model(\n instance=instance).filter(user=request.user)\n elif request.user.is_anonymous:\n ip_address = get_client_ip(request)\n viewed_item = self.filter_by_model(\n instance=instance).filter(user__isnull=True, ip_address=ip_address)\n same_category = list()\n for item in viewed_item:\n # category e post haei ke user dide\n same_category = item.content_object.category.values_list(\n 'id', flat=True)\n\n queryset = instance.__class__.objects.filter(\n category__id__in=same_category)\n # .annotate(count=models.Count('likes__likedislike')).order_by('-count')[:3]\n # self.same_cate += list(queryset.values_list('pk', flat=True))\n try:\n \"\"\"ye session e listi misaze ke Model o bar assasse category e bala filter mikone\"\"\"\n request.session['same_categories'] += list(queryset.values_list(\n 'pk', flat=True)) # bareye in az values_list o pk estefade kardim ke dg niaz be serialize kardan nabshe\n\n except:\n # request.session['same_categories'] = list()\n request.session['same_categories'] = list(\n queryset.values_list('pk', flat=True))\n\n # same_item = instance.__class__.objects.filter(\n # pk__in=self.same_cate)\n\n # sessioni ke bala sakhte shu o mirize to ye moteghayer o bar migardoone\n same_item = instance.__class__.objects.filter(\n pk__in=request.session.get('same_categories'))\n\n return same_item", "def get_recommendations(name, data):\r\n #sorts preferences in alphabetical order\r\n #do this to make it easier to compare\r\n for key in data:\r\n data[key] = selection_sort(data[key])\r\n most_similar_key = \"\"\r\n max_matches = 0\r\n for key in data:\r\n if not(key[-1] == \"$\" or data[key] == data[name]):\r\n \"\"\"if the person is not private or does not have the same data\"\"\"\r\n matches = num_matches(data[key], data[name])\r\n if matches > max_matches:\r\n most_similar_key = key\r\n max_matches = matches\r\n if most_similar_key == \"\":\r\n print(\"No recommendations available at this time\")\r\n return 1\r\n else:\r\n final_recommendations = []\r\n for x in data[most_similar_key]:\r\n if x not in data[name]:\r\n final_recommendations += [x]\r\n return final_recommendations", "def start_recommender(self, numRecs=None):\n if numRecs is not None:\n self._num_recommendations = numRecs\n self.__useritem = self.__itemuser.T.tocsr()", "def start_recommender(self, numRecs=None):\n if numRecs is not None:\n self._num_recommendations = numRecs\n self.__useritem = self.__itemuser.T.tocsr()", "def test_get_scored_recommendations_post(self):\n pass", "def query_top_recommended(cls,N=10):\n brkey = 'BooksMostRecommended'\n bks = from_cache(brkey)\n if not bks:\n bks = map(lambda e:str(e.id()), SuiBook.all(keys_only=True).order('-recommends').fetch(N))\n to_cache(brkey,bks)\n return bks", "def artist_recommendations(self, seed_artists, chunk_size=5, country=None, limit=20, **params):\n url = '/recommendations'\n params['seed_artists'] = ','.join(get_ids('artists', seed_artists))\n params['limit'] = limit\n return self._get(url, **params)" ]
[ "0.6757458", "0.59316045", "0.53259814", "0.5148535", "0.49028853", "0.47453037", "0.4716145", "0.47134882", "0.46941304", "0.46536207", "0.46397585", "0.46383676", "0.46056136", "0.45473558", "0.45473436", "0.45289943", "0.45214188", "0.45171258", "0.4443659", "0.43732527", "0.43723357", "0.4361831", "0.43328008", "0.43317947", "0.43311602", "0.43128118", "0.43128118", "0.4301004", "0.42821428", "0.42797515" ]
0.7668175
0
Alias for `list_recommendations(next_token=token)`.
def list_recommendations_by_next_token(self, token): return self.list_recommendations(next_token=token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_recommendations(\n self, marketplace_id=None, recommendation_category=None, next_token=None\n ):\n return self.make_request(\n \"ListRecommendations\",\n {\n \"MarketplaceId\": marketplace_id,\n \"RecommendationCategory\": recommendation_category,\n },\n method=\"POST\",\n )", "def next_token(self, context, token):", "def list_offerings(nextToken=None):\n pass", "def next_page_token(self) -> global___Snippet.PaginatedResponseHandling.NextPageToken:", "def recommendations(self, **kwargs):\n\n path = self._get_movie_id_path('recommendations')\n resp = self._get_method(path, kwargs)\n return resp", "def getRecommendationSonglist(self, limit = 20, offset = 0, total = True):\n currAPIVersion = self.config['apiVersion']\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n currDict = {\n 'limit' : limit,\n 'offset': offset,\n 'total' : total\n }\n\n currC, currR = self._mySubmit(currAPIURL, currDict)\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n\n return currR, currAPIURL[2]", "def get_next_page_token(\n self, response: requests.Response, previous_token: Optional[Any]\n ) -> Optional[Any]:\n if (\n previous_token\n and self.MAX_RESULTS_LIMIT\n and (\n cast(int, previous_token) * self.MAX_PER_PAGE >= self.MAX_RESULTS_LIMIT\n )\n ):\n return None\n\n resp_json = response.json()\n if isinstance(resp_json, list):\n results = resp_json\n else:\n results = resp_json.get(\"items\")\n\n if results:\n # Paginate as long as the response has items\n return (previous_token or 1) + 1\n\n return None", "def list_offering_promotions(nextToken=None):\n pass", "def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None:\n if token.is_list_start:\n self.__list_stack.append(token)\n if token.is_ordered_list_start:\n list_style, last_known_number = self.__match_first_item(context, token)\n self.__ordered_list_stack.append((list_style, last_known_number))\n elif token.is_list_end:\n del self.__list_stack[-1]\n if token.is_ordered_list_end:\n del self.__ordered_list_stack[-1]\n elif token.is_new_list_item and self.__list_stack[-1].is_ordered_list_start:\n list_style, last_known_number = self.__ordered_list_stack[-1]\n list_style, last_known_number = self.__match_non_first_items(\n context, token, list_style, last_known_number\n )\n self.__ordered_list_stack[-1] = (list_style, last_known_number)", "def test_get_report_request_list_by_next_token_alias(self, api_instance: Reports):\n params = api_instance.get_report_request_list_by_next_token(\"0hytxbkaOb\")\n self.assert_common_params(params, action=\"GetReportRequestListByNextToken\")\n assert params[\"NextToken\"] == \"0hytxbkaOb\"", "def get_recommendations(self):\n endpoints = '/user/recs'\n return self.get_request(endpoints)", "def test_get_scored_recommendations_post(self):\n pass", "def ListRecommendations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ListRecommendations(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def _candidates(self, token):", "def _recommend_movie(user_id, movie_id, 
number_movies):\n #get the recommended movie ids from pickled model\n rec_movies = sparkrecommender.movie_recomendation(user_id,movie_id,number_movies)\n #get a list of movies ids used in model\n moviesdf = pd.read_csv('movies.csv',index_col='movieId')\n \n #build list of lists with [[imdb ID, movie title, post img link]]\n rec_movies_list = []\n for movie_id in rec_movies:\n temp_list = []\n imdbid_ = str(get_imdbId(movie_id))\n temp_list.append(imdbid_)\n temp_list.append(moviesdf.loc[movie_id,'title'])\n temp_list.append('http://img.omdbapi.com/?apikey=ae550a04&i=tt'+str(imdbid_))\n rec_movies_list.append(temp_list)\n return rec_movies_list", "def find_song_recommendations(access_token, tracks, target, n, params):\n track_string = '%2C'.join(tracks[:5])\n response = spotify.get_recommendations(access_token, 50, track_string, params)\n\n song_recommendation = response['tracks']\n recommendations = {song['id']: {'name': song['name']} for song in song_recommendation}\n\n moods = get_features_moods(recommendations)\n\n return order_songs(moods, target, n)", "def next_token(self) -> str:\n return pulumi.get(self, \"next_token\")", "def next_page_token(self, next_page_token):\n\n self._next_page_token = next_page_token", "def next_page_token(self, next_page_token):\n\n self._next_page_token = next_page_token", "def get_next_page_token(\n self, response: requests.Response, previous_token: Optional[dict]\n ) -> Optional[dict]:\n # Get the date of the previous request\n request_body_json = json.loads(response.request.body)\n previous_start_date = self.str_to_date(\n request_body_json[\"period\"][\"p1\"][0][\"start\"]\n )\n\n # Find out if the previous month is still returning data (which means it may have pages left)\n data_feed = response.json()[\"DataFeed\"]\n rows = data_feed[\"Rows\"]\n if len(rows) > 0:\n # get the page-num of the previous request\n previous_page = request_body_json[\"page-num\"]\n return {\n \"year_month\": (previous_start_date.year, previous_start_date.month),\n \"page_num\": previous_page + 1,\n }\n\n # Find out if we should use the next month\n next_year, next_month = get_next_month(\n previous_start_date.year, previous_start_date.month\n )\n if datetime.date(next_year, next_month, 1) <= datetime.date.today():\n return {\n \"year_month\": (next_year, next_month),\n \"page_num\": 1,\n }\n return None", "def similar_search(tmdb_obj, media_id):\n res = tmdb_obj.get_recommendations(media_id)\n if not res:\n return []\n return res", "async def get_next(continuation_token=None):\n if not continuation_token:\n return {\"nextLink\": \"page2\", \"value\": [\"value1.0\", \"value1.1\"]}\n else:\n return {\"nextLink\": None, \"value\": [\"value2.0\", \"value2.1\"]}", "async def get_next(continuation_token=None):\n if not continuation_token:\n return {\"nextLink\": \"page2\", \"value\": [\"value1.0\", \"value1.1\"]}\n else:\n return {\"nextLink\": None, \"value\": [\"value2.0\", \"value2.1\"]}", "def test_get_report_request_list_by_next_token(self, api_instance: Reports):\n params = api_instance.get_report_request_list(next_token=\"RXmLZ2bEgE\")\n self.assert_common_params(params, action=\"GetReportRequestListByNextToken\")\n assert params[\"NextToken\"] == \"RXmLZ2bEgE\"", "def set_NextToken(self, value):\n super(DescribeEvaluationsInputSet, self)._set_input('NextToken', value)", "def get_recommendations(name, data):\r\n #sorts preferences in alphabetical order\r\n #do this to make it easier to compare\r\n for key in data:\r\n data[key] = selection_sort(data[key])\r\n 
most_similar_key = \"\"\r\n max_matches = 0\r\n for key in data:\r\n if not(key[-1] == \"$\" or data[key] == data[name]):\r\n \"\"\"if the person is not private or does not have the same data\"\"\"\r\n matches = num_matches(data[key], data[name])\r\n if matches > max_matches:\r\n most_similar_key = key\r\n max_matches = matches\r\n if most_similar_key == \"\":\r\n print(\"No recommendations available at this time\")\r\n return 1\r\n else:\r\n final_recommendations = []\r\n for x in data[most_similar_key]:\r\n if x not in data[name]:\r\n final_recommendations += [x]\r\n return final_recommendations", "def get_recommendations(df,song_title, similarity_score, num_recommends = 5):\r\n indices = pd.Series(df.index, index = df['track_name']).drop_duplicates()\r\n idx = indices[song_title]\r\n sim_scores = list(enumerate(similarity_score[idx]))\r\n sim_scores = sorted(sim_scores, key = lambda x: x[1],reverse = True)\r\n top_scores = sim_scores[1:num_recommends+1]\r\n song_indices = [i[0] for i in top_scores]\r\n return df[\"track_name\"].iloc[song_indices]", "def get_recommendations(artists = tuple(), genres = tuple(), limit = 20, features = True, client = None):\n\n recs = client.recommendations(seed_artists = artists, seed_genres = genres, limit = limit)\n tracks = recs['tracks']\n\n # TODO: need a compose function...\n to_keep = (\n 'album_name', 'artist_name', 'name', 'popularity', 'duration_ms',\n 'explicit', 'id'\n )\n rows = list(map(row_filter(to_keep, False), map(_hoist_track_info, tracks)))\n out = pd.DataFrame(rows)\n\n track_ids = [row['id'] for row in rows]\n if features:\n extra_cols = ['uri', 'type', 'duration_ms', 'analysis_url', 'track_href']\n return out.merge(\n get_track_features(track_ids).drop(columns = extra_cols),\n on = \"id\"\n )\n\n return out", "def prep_token(**kwargs):\n token = kwargs.get('token')\n if not token:\n token = oauth2_wrappers.gen_token()\n return token" ]
[ "0.6227083", "0.5611457", "0.55928564", "0.5550036", "0.5543547", "0.54899335", "0.54398435", "0.5348996", "0.52498764", "0.5179839", "0.5169776", "0.5147826", "0.51265526", "0.5100554", "0.50321686", "0.4914144", "0.49053377", "0.48637497", "0.4819782", "0.4819782", "0.48040318", "0.47749698", "0.47622415", "0.47622415", "0.47598174", "0.47271174", "0.4709744", "0.47057176", "0.46928084", "0.46891463" ]
0.8355562
0
Open a tunnel to the Redis server and start the sender/receiver tasks.
async def start(self): with SSHTunnelForwarder(**self.tunnel_config) as tunnel: address = tunnel.local_bind_address aredis = await aioredis.create_redis_pool(address, encoding="utf-8") logger.info(f"bridge connected: {aredis.address}") try: await asyncio.gather( self._receiver(aredis), self._sender(aredis), ) except asyncio.CancelledError: logger.info(f"bridge start {self.pattern}: cancelled") except Exception as e: logger.info(f'bridge start {self.pattern}: exception {e} {type(e)}') aredis.close() await aredis.wait_closed()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n\n assert SSH_HOST is not None, 'SSH_HOST not set. Please configure.'\n\n\n def connect():\n port = find_open_port(SSH_HOST)\n if init_tunnel(SSH_HOST, port):\n print 'Tunnel initialized, pid:', PID\n return {'ssh tunnel entry': 'ssh://{}:{}'.format(SSH_HOST, port)}\n return {}\n\n def is_pid_alive(pid):\n processes = subprocess.check_output(['ps', '-fx'])\n for line in processes.splitlines():\n lpid = line.split()[0]\n if lpid == pid:\n return True\n return False\n\n def find_open_port(host, start_port=22222):\n i = 0\n while i < 1000:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((host, start_port + i))\n if result == 0:\n print \"Port is already used: \", start_port + i\n i += 1\n else:\n return start_port + i\n \n\n \n\n if PID is None:\n return connect()\n else:\n # check if process is still alive\n if is_pid_alive(PID):\n print 'Tunnel still active. Not doing anything.'\n else:\n return connect()", "def _connect_to_redis(self):\n for name, config in settings.STREAM_REDIS_CONFIG.items():\n self._redis_client = tornadoredis.Client(host=config['host'],\n port=config['port'],\n password=config['password'],\n connection_pool=pool)\n self._redis_client.connect()", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def connect(self):\n\t\tself.printed_sub = False\n\t\tself.client.connect(BROKER)\n\t\tself.client.loop_forever()", "def __init__(self, host, redis_port, ssh_user, use_ssh=True):\n\n if use_ssh:\n forwarder = create_tunnel(host=host, port=redis_port, ssh_user=ssh_user)\n self.connection = redis.StrictRedis(host=forwarder.bind_address, port=forwarder.bind_port, db=0)\n else:\n self.connection = redis.StrictRedis(host=host, port=redis_port, db=0)", "def run(self):\n self.connect()\n self.run_forever()", "def start_task():\n get_results_from_message_queue()\n test_all_servers_connection()", "def run(self):\n\n listen_port = DEBUGGER_PORT if \"RENPY_DEBUGGER_PORT\" not in os.environ else os.environ[\"RENPY_DEBUGGER_PORT\"]\n\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind((\"0.0.0.0\", listen_port))\n server.listen(0)\n\n while True:\n client, client_address = server.accept()\n self.attach_one_client(client)", "def connect(self):\n self.connection = redis.Redis(\n host=self.host,\n port=self.port,\n socket_connect_timeout=self.timeout,\n socket_timeout=self.timeout\n )", "async def server_main(loop, proxy_config, server_config):\n\n controller = Controller(\n MessageProxy(proxy_config),\n hostname=server_config['listen']['addr'],\n port=server_config['listen']['port'],\n )\n controller.start()", "def run(self):\n logging.info(\"task manager started!\")\n t = TCPManager()\n s = t.set_cmd_connect()\n while True:\n sock, addr = s.accept()\n t = threading.Thread(target=self.manage_task, args=(sock, addr))\n t.start()", "def run(self):\n HOST = 'localhost' # Symbolic name meaning all available interfaces\n PORT = 54123 # Arbitrary non-privileged port\n \n \n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n while(self.running):\n s.listen(1)\n conn, addr = s.accept()\n self.listen_to_connection(conn)\n conn.close()\n s.close()", "def run(self):\n client = ProcessorClient()\n try:\n client.connect(self.address)\n except Exception as e:\n self.error = e\n logging.error(e)\n else:\n self.clients[self.name] = client", "def connect_server(self):\n redis_host = \"localhost\"\n redis_port = 6379\n 
redis_password = \"\"\n # step 3: create the Redis Connection object\n try:\n\n # The decode_repsonses flag here directs the client to convert the responses from Redis into Python strings\n # using the default encoding utf-8. This is client specific.\n self.r = redis.StrictRedis(host=redis_host, port=redis_port,\n password=redis_password, decode_responses=True)\n\n # step 4: Set the hello message in Redis\n self.r.set(\"msg:hello\", \"Hello World!!!\")\n\n # step 5: Retrieve the hello message from Redis\n msg = self.r.get(\"msg:hello\")\n print(msg)\n\n except Exception as e:\n print(e)", "async def connect(self):\n self.client = await asyncio_redis.Connection.create(\n host=self.host,\n port=self.port,\n db=self.database,\n auto_reconnect=self.reconnect,\n password=self.password,\n )", "def run_server(self):\n self.establish_connection()\n while True:\n self.receive_data(self.conn)", "def start(self):\n\n address = (socket.gethostbyname(self.hostname), self.port)\n logger.info(\"Connecting to %r\" % (address,))\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(address)\n self._start_processors()\n return self", "def run(self):\n self.node_id = CONFIG.node_id\n self.running = Event()\n if not CONFIG.master_pub or not CONFIG.master_repl:\n print colors.yellow(\"Master IP:port is not set in config file (%s)\"\n % CONFIG._fn)\n master_pub = raw_input(\"Enter Master PUB uri (IP or IP:port):\")\n if \":\" in master_pub:\n ip, _, port = master_pub.rpartition(\":\")\n else:\n ip = master_pub\n port = 5551\n CONFIG.update(\"General\", \"master_pub\", \"%s:%s\" % (ip,\n port))\n master_repl = raw_input(\"Enter Master REPLY uri (IP or IP:port), \"\n \"hit ENTER for default(%s:5552):\" % ip)\n if not master_repl:\n port = 5552\n elif \":\" in master_repl:\n ip, _, port = master_repl.rpartition(\":\")\n else:\n ip = master_repl\n port = 5552\n CONFIG.update(\"General\", \"master_repl\", \"%s:%s\" % (ip,\n port))\n CONFIG.reload()\n\n if not validate_address(CONFIG.master_pub) or \\\n not validate_address(CONFIG.master_repl):\n LOG.error('Server IP not present in config or is not valid.\\n'\n 'Check your config')\n exit(1)\n\n if not self.node_id:\n LOG.error(\"The node id not set in config. 
\"\n \"Run program with config option first\")\n exit(1)\n\n self.backend = self.transport_class.from_config(\n CONFIG, **vars(self.args))\n load_plugins(CONFIG)\n self.sessions = {}\n self.matcher = Matcher(self.node_id, self.backend.meta())\n\n LOG.info(\"Starting node\")\n self.details()\n self._sig_int = signal.getsignal(signal.SIGINT)\n self._sig_term = signal.getsignal(signal.SIGTERM)\n\n if os.name == 'nt':\n # Use Ctrl+C to invoke clean on Windows\n import win32api\n win32api.SetConsoleCtrlHandler(self.clean, True)\n else:\n signal.signal(signal.SIGINT, self._handle_terminate)\n signal.signal(signal.SIGTERM, self._handle_terminate)\n\n # Invoke clean for sessions\n signal.signal(signal.SIGHUP, self.clean)\n\n if not self.backend.prepare():\n LOG.info(\"Cannot start transport backend\")\n self._handle_terminate()\n exit(1)\n\n def request_processor():\n req_queue = self.backend.consume_queue('requests',\n ident=\"DISPATCHER\")\n poller = self.backend.create_poller(req_queue)\n while not self.running.is_set():\n try:\n ready = poller.poll(200)\n if not ready:\n continue\n if req_queue in ready:\n message = req_queue.recv()[0]\n if not message:\n continue\n job = JobTarget.build(message)\n if job:\n self.target_match(job)\n except ConnectionError:\n break\n except Exception:\n continue\n req_queue.close()\n\n Thread(target=request_processor).start()\n\n self.backend.loop()\n\n LOG.info(\"Node exited\")", "def run(self):\n print(\"[CONNEXION_MANAGER] start connecting\")\n while True:\n self.connexion_init()", "def tcp_start(self, flow: mitmproxy.tcp.TCPFlow):", "def run(self):\n self._socket = _get_socket(self.opts)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n _set_tcp_keepalive(self._socket, self.opts)\n self._socket.setblocking(1)\n self._socket.bind(_get_bind_addr(self.opts, \"ret_port\"))\n self._socket.listen(self.backlog)\n\n while True:\n try:\n # Wait for a connection to occur since the socket is\n # blocking.\n connection, address = self._socket.accept()\n # Wait for a free slot to be available to put\n # the connection into.\n # Sockets are picklable on Windows in Python 3.\n self.socket_queue.put((connection, address), True, None)\n except OSError as e:\n # ECONNABORTED indicates that there was a connection\n # but it was closed while still in the accept queue.\n # (observed on FreeBSD).\n if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:\n continue\n raise", "def run_worker():\n listen = ['default']\n conn = Redis(host=app.config['RQ_DEFAULT_HOST'],\n port=app.config['RQ_DEFAULT_PORT'],\n db=0,\n password=app.config['RQ_DEFAULT_PASSWORD'])\n\n with Connection(conn):\n worker = Worker(map(Queue, listen))\n worker.work()", "def run(self):\n redis_servers = settings.get_redis_servers()\n\n for redis_server in redis_servers:\n redis_password = redis_server.get(\"password\")\n self.ping(redis_server[\"server\"], redis_server[\"port\"], redis_password)\n\n if len(self.failedList) > 0:\n self.sendMail()", "def __init__(self, *args, **kvargs):\n self.proxy_host = kvargs.get('proxy_host')\n self.proxy_user = kvargs.get('proxy_user')\n self.proxy_password = kvargs.get('proxy_password')\n self.proxy_port = kvargs.get('proxy_port')\n self.proxy_ssh_key_file = kvargs.get('proxy_ssh_key')\n self.proxy_connection = False\n self.host = kvargs.get('host')\n self.user = kvargs.get('user')\n self.password = kvargs.get('password')\n self.port = kvargs.get('port')\n self.dest_connection = False\n\n try:\n # Add host key policy\n if self.proxy_port is 
None:\n self.proxy_port = 22\n self.transport = paramiko.Transport((self.proxy_host, self.proxy_port))\n self.transport.start_client()\n if self.proxy_ssh_key_file:\n self.proxy_ssh_key = paramiko.RSAKey.from_private_key_file(self.proxy_ssh_key_file)\n conn_result = self.transport.auth_publickey(username=self.proxy_user, key=self.proxy_ssh_key)\n else:\n conn_result = self.transport.auth_password(username=self.proxy_user, password=self.proxy_password)\n if len(conn_result) == 0:\n self.proxy_connection = True\n else:\n logging.error('Unable to connect to proxy host. Authentication failed.')\n raise TobyException('Unable to connect to proxy host. Authentication failed.')\n except Exception as exp:\n logging.error('Unable to connect to proxy host: %s' % exp)\n raise TobyException('Unable to connect to proxy host: %s' % exp)\n\n try:\n if self.port is None:\n self.port = 22\n self.tunnel = paramiko.Transport(self.transport.open_channel(\n kind='direct-tcpip',\n dest_addr=(self.host, self.port),\n src_addr=('127.0.0.1', 0)))\n self.tunnel.start_client()\n conn_result = self.tunnel.auth_password(username=self.user, password=self.password)\n if len(conn_result) == 0:\n self.dest_connection = True\n else:\n logging.error('Unable to connect to destination host. Authentication failed.')\n raise TobyException('Unable to connect to destination host. Authentication failed.')\n except Exception as exp:\n logging.error('Unable to connect to destination host: %s' % exp)\n raise TobyException('Unable to connect to destination host: %s' % exp)\n\n try:\n self.handle = self.tunnel.open_session(20)\n self.handle.get_pty(width=160, height=0)\n self.handle.invoke_shell()\n self.handle.set_combine_stderr(True)\n self.handle.settimeout(60)\n tnh = self.handle\n got = []\n while True:\n _rd, _wr, _err = select([tnh], [], [], 10)\n if _rd:\n data = tnh.recv(1024)\n data = data.decode(\"utf-8\")\n got.append(data)\n if re.search('> ', data):\n tnh.send(b' start shell\\n')\n data = tnh.recv(1024)\n data = data.decode(\"utf-8\")\n if re.search(r'(\\$|>|#|%)[\\s\\t]?', data):\n break\n except Exception as exp:\n logging.error(\n 'Unable to fetch the prompt on destination host: %s' % exp)\n raise TobyException(\n 'Unable to fetch the prompt on destination host: %s' % exp)", "def start(self):\n if self.is_alive:\n self.logger.warning('Already started!')\n return\n self._create_tunnels()\n if not self.is_active:\n self._raise(BaseSSHTunnelForwarderError,\n reason='Could not establish session to SSH gateway')\n for _srv in self._server_list:\n thread = threading.Thread(\n target=self._serve_forever_wrapper,\n args=(_srv, ),\n name='Srv-{0}'.format(address_to_str(_srv.local_port))\n )\n thread.daemon = self.daemon_forward_servers\n thread.start()\n self._check_tunnel(_srv)\n self.is_alive = any(self.tunnel_is_up.values())\n if not self.is_alive:\n self._raise(HandlerSSHTunnelForwarderError,\n 'An error occurred while opening tunnels.')", "def ssh_tunnel():\n tunnel = SSHTunnelForwarder(\n config.dbreddit['ssh_host_ip'],\n remote_bind_address=('localhost', 5432),\n # local_bind_address=('localhost', 5432),\n ssh_username=config.dbreddit['user'],\n ssh_password=config.dbreddit['password'],\n )\n # Start the SSH tunnel\n print(tunnel)\n tunnel.start()\n return tunnel", "def start(self):\n self.protocol.makeConnection(self.transport)", "def start(self):\n self.protocol.makeConnection(self.transport)", "def __runRemoteListeningSockets(self, address, localHostName):\n ## Get the local machine name and the remote nodes one\n 
remoteNodesIP = self.__getLocalAndRemoteMachineNames()\n\n ## Strip out the nodes' names\n availableNodes = [node.strip() for node in self.runInfoDict['Nodes']]\n\n ## Get unique nodes\n uniqueNodes = list(set(availableNodes) - set([localHostName]))\n servers = []\n self.remoteServers = {}\n if len(uniqueNodes) > 0:\n ## There are remote nodes that need to be activated\n ## Modify the python path used by the local environment\n localEnv = os.environ.copy()\n pathSeparator = os.pathsep\n if \"PYTHONPATH\" in localEnv and len(localEnv[\"PYTHONPATH\"].strip()) > 0:\n previousPath = localEnv[\"PYTHONPATH\"].strip()+pathSeparator\n else:\n previousPath = \"\"\n localEnv[\"PYTHONPATH\"] = previousPath+pathSeparator.join(sys.path)\n ## Start\n for nodeId in uniqueNodes:\n ## Check how many processors are available in the node\n ntasks = availableNodes.count(nodeId)\n remoteHostName = remoteNodesIP[nodeId]\n\n ## Activate the remote socketing system\n ## let's build the command and then call the os-agnostic version\n if self._parallelLib == ParallelLibEnum.ray:\n self.raiseADebug(\"Setting up RAY server in node: \"+nodeId.strip())\n runScript = os.path.join(self.runInfoDict['FrameworkDir'],\"RemoteNodeScripts\",\"start_remote_servers.sh\")\n command=\" \".join([runScript,\"--remote-node-address\",nodeId, \"--address\",address, \"--num-cpus\",str(ntasks),\" --working-dir \",self.runInfoDict['WorkingDir'],\" --raven-framework-dir\",self.runInfoDict[\"FrameworkDir\"],\"--remote-bash-profile\",self.runInfoDict['RemoteRunCommand']])\n self.raiseADebug(\"command is: \"+command)\n command += \" --python-path \"+localEnv[\"PYTHONPATH\"]\n self.remoteServers[nodeId] = utils.pickleSafeSubprocessPopen([command],shell=True,env=localEnv)\n elif self._parallelLib == ParallelLibEnum.dask:\n remoteServerScript = os.path.join(self.runInfoDict['FrameworkDir'],\n \"RemoteNodeScripts\",\"start_dask.sh\")\n outputFile = os.path.join(self.runInfoDict['WorkingDir'],\"server_debug_\"+nodeId)\n command = ['ssh',nodeId,remoteServerScript,outputFile,\n self.daskSchedulerFile,str(ntasks),\n self.runInfoDict[\"FrameworkDir\"],\n self.runInfoDict['RemoteRunCommand'],\n self.runInfoDict['WorkingDir']]\n self.raiseADebug(\"command is: \"+\" \".join(command))\n command.append(self.__removeLibPythonFromPath(localEnv[\"PYTHONPATH\"]))\n self.remoteServers[nodeId] = utils.pickleSafeSubprocessPopen(command, env=localEnv)\n ## update list of servers\n servers.append(nodeId)\n if self._parallelLib == ParallelLibEnum.ray or self._parallelLib == ParallelLibEnum.dask:\n #wait for the servers to finish starting (prevents zombies)\n for nodeId in uniqueNodes:\n self.remoteServers[nodeId].wait()\n self.raiseADebug(\"server \"+str(nodeId)+\" result: \"+str(self.remoteServers[nodeId]))\n\n return servers", "async def main():\n xknx = XKNX()\n gatewayscanner = GatewayScanner(xknx)\n gateways = await gatewayscanner.scan()\n\n if not gateways:\n print(\"No Gateways found\")\n return\n\n gateway = gateways[0]\n\n if not gateway.supports_tunnelling:\n print(\"Gateway does not support tunneling\")\n return\n\n udp_transport = UDPTransport((gateway.local_ip, 0), (gateway.ip_addr, gateway.port))\n\n await udp_transport.connect()\n local_hpai = HPAI(*udp_transport.getsockname())\n\n for i in range(255):\n conn_state = ConnectionState(\n udp_transport, communication_channel_id=i, local_hpai=local_hpai\n )\n\n await conn_state.start()\n\n if conn_state.success:\n print(\"Disconnecting \", i)\n disconnect = Disconnect(\n udp_transport, 
communication_channel_id=i, local_hpai=local_hpai\n )\n\n await disconnect.start()\n\n if disconnect.success:\n print(\"Disconnected \", i)" ]
[ "0.65094125", "0.636926", "0.6272023", "0.6137986", "0.61329263", "0.61177987", "0.61006737", "0.6098699", "0.60555124", "0.59931874", "0.5980451", "0.59593314", "0.5934894", "0.59215707", "0.5857913", "0.58315027", "0.5825562", "0.5810092", "0.5768256", "0.5762866", "0.5756546", "0.57423633", "0.5729432", "0.5726936", "0.57253474", "0.5724491", "0.5719618", "0.5719618", "0.5717671", "0.57145065" ]
0.70374256
0
A test runner for the app's Click commands.
def runner(app): return app.test_cli_runner()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runner() -> CliRunner:\n return click.testing.CliRunner()", "def runner(app):\n\n return app.test_cli_runner()", "def test_runner(self):\n run(HelloWorldApi, [\"/bin/test_api.py app_root/bin/libs\", \"app_root/bin/libs\"], sys.stdin.buffer,\n sys.__stdout__.buffer)", "def runner():\n return CliRunner()", "def runTests(self):\n \n pass", "def fixture_runner():\n return CliRunner()", "def runner() -> CliRunner:\n return CliRunner()", "def runner() -> CliRunner:\n return CliRunner()", "def main():\n click.echo(\"Hello, world!\")", "def main():\n run_test_all()", "def setUp(self):\n super().setUp()\n self.runner = CliRunner()", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def cli_runner(app):\n\n LOG.info(\"cli_runner.app: %s\", app)\n cli_runner = app.test_cli_runner()\n\n LOG.info(\"cli_runner.app.config: %s\", app.config)\n\n LOG.info(\"cli_runner.app.cli_runner: %s\", cli_runner)\n return cli_runner", "def test_run_command(self):\n self.build()\n self.data_formatter_commands()", "def runtest(self):", "def run(ctx, test_plan, only):\n\n handler = ValidateCommandHandler(ctx.obj['qa_dir'])\n if handler.validate():\n handler = RunCommandHandler(ctx.obj['qa_dir'],\n vcs_adapter=__vcs_factory__.create_cvs_adapter(ctx.obj['vcs']),\n test_plan=test_plan,\n report_dir=ctx.obj['report_dir'],\n debug=ctx.obj['debug'])\n\n handler.run_test_cases(only=only)\n\n else:\n exit(1)", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def test(tasks, clargs, args=None, kwargs=None, **kw):\n from click.testing import CliRunner\n runner = CliRunner()\n\n obj = _get_obj(tasks,\n () if args is None else args,\n {} if kwargs is None else kwargs)\n return runner.invoke(_app, clargs, obj=obj, **kw)", "def task_test():\n return {\n 'actions': ['py.test tests/'],\n }", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def main():\n tng.api.runner()", "def test_script(self) -> None:\n main()", "def startTestRun(self):", "def test(ctx):\n click.echo('testing')\n ctx.test()\n click.echo('done')", "def __main() :\n launchTests()", "def run(self, args): # pylint: disable=too-many-return-statements\n # safely log all args to tests.log\n self._log_args(args)\n\n # get a configured instance of the App\n self.app = self.app_init(args)\n\n # Setup\n exit_code = self.run_app_method(self.app, 'setup')\n if exit_code != 0:\n return exit_code\n\n # Run\n exit_code = self.run_app_method(self.app, 'run')\n if exit_code != 0:\n return exit_code\n\n # Teardown\n exit_code = self.run_app_method(self.app, 'teardown')\n if exit_code != 0:\n return exit_code\n\n try:\n # call exit for message_tc output, but don't exit\n self.app.tcex.playbook.exit(msg=self.app.exit_message)\n except SystemExit:\n pass\n\n return self._exit(self.app.tcex.exit_code)", "def test():\n test_app()\n test_pagebrowser()", "def test_app():\n pass", "def run_test_suite():\n local('. 
fabric_factory/ve/bin/activate; fabric_factory/src/project/manage.py test')", "def main(self, options):\n module_names = options.module_names or []\n\n os.environ[str('RB_TEST_MODULES')] = force_str(','.join(module_names))\n\n os.chdir(options.tree_root)\n os.environ[str('RB_RUNNING_TESTS')] = str('1')\n\n from django import setup\n from django.apps import apps\n from django.conf import settings\n\n if not apps.ready:\n setup()\n\n installed_apps = list(settings.INSTALLED_APPS)\n\n # If an explicit extension is specified, then we'll want to grab its\n # list of apps.\n extension_class_name = options.extension_class\n\n if extension_class_name:\n module_name, class_name = extension_class_name.rsplit('.', 1)\n\n try:\n extension_class = getattr(import_module(module_name),\n class_name)\n except AttributeError:\n console.error('The provided extension class \"%s\" could not be '\n 'found in %s'\n % (class_name, module_name))\n return 1\n except ImportError:\n console.error('The provided extension class module \"%s\" '\n 'could not be found'\n % module_name)\n return 1\n\n installed_apps += (extension_class.apps or\n [module_name.rsplit('.', 1)[0]])\n\n if options.app_names:\n installed_apps += options.app_names\n\n if installed_apps != list(settings.INSTALLED_APPS):\n settings.INSTALLED_APPS = installed_apps\n apps.set_installed_apps(installed_apps)\n\n from reviewboard.test import RBTestRunner\n\n use_pytest = options.pytest or os.path.exists('conftest.py')\n\n if not use_pytest:\n console.note(\n 'Tests are running using the legacy nose test runner. Review '\n 'Board 7 will switch to a pytest-based runner. To opt in to '\n 'the new behavior, run with --pytest or create a conftest.py '\n 'file.')\n\n test_runner = RBTestRunner(\n test_packages=module_names,\n cover_packages=module_names,\n verbosity=1,\n needs_collect_static=False,\n use_pytest=use_pytest)\n\n # Don't use +=, as we don't want to modify the list on the class.\n # We want to create a new one on the instance.\n test_runner.nose_options = \\\n test_runner.nose_options + (options.test_options or [])\n\n failures = test_runner.run_tests(options.tests)\n\n if failures:\n return 1\n else:\n return 0" ]
[ "0.7855431", "0.77106535", "0.68480515", "0.67668295", "0.6669386", "0.6475", "0.64695936", "0.64695936", "0.63834995", "0.6270718", "0.6234035", "0.6231622", "0.6230506", "0.6221766", "0.6186083", "0.61765647", "0.61760014", "0.61495066", "0.6146101", "0.6142871", "0.6096929", "0.60357636", "0.59933853", "0.59783614", "0.5966395", "0.59589887", "0.59153986", "0.59120625", "0.5909754", "0.58837837" ]
0.77260715
1
A test marker named "smoke" is used to run only a subset of tests. Feel free to create your own markers. Add them to your tests with a "@pytest.mark.mymarker" decorator and run only those tests with 'pytest -m mymarker'.
def pytest_configure(config): config.addinivalue_line( "markers", "smoke" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pytest_pilot_markers():", "def case(*ids):\n return pytest.mark.testrail(ids=ids)", "def runtests(ctx):\n run('pytest -s tests', pty=pty_available)\n run('flake8 --ignore E265,E266,E501 --exclude src, lib', pty=pty_available)", "def spec_tests():\n pass", "def test_markers(f):\n @wraps(f)\n def wrapper(*args, **kwds):\n Print._test_start(f.__name__)\n ret = f(*args, **kwds)\n Print._test_stop(f.__name__)\n return ret\n return wrapper", "def test_marks(data, request):\n\n current_marks = get_pytest_marks_on_item(request._pyfuncitem)\n assert len(current_marks) == 1\n if data == 1:\n assert current_marks[0].name == \"fast\"\n elif data == 2:\n assert current_marks[0].name == \"slow\"\n else:\n raise AssertionError()", "def no_run(obj):\n return pytest.mark.usefixtures(\"no_run\")(obj)", "def test_single_test_case():\n pass", "def smoke_tests():\n from cla_common.smoketest import smoketest\n from cla_public.apps.checker.tests.smoketests import SmokeTests\n\n return jsonify(smoketest(SmokeTests))", "def pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"serial: Tests that will not execute with more than 1 MPI process\")\n config.addinivalue_line(\"markers\",\n \"gpu: Tests that should only run on the gpu.\")\n config.addinivalue_line(\n \"markers\",\n \"cupy_optional: tests that should pass with and without CuPy.\")\n config.addinivalue_line(\"markers\", \"cpu: Tests that only run on the CPU.\")\n config.addinivalue_line(\"markers\", \"gpu: Tests that only run on the GPU.\")", "def test_main():\n # Setup\n # Exercise\n # Verify", "def pytest_runtest_setup(item):\n marker = item.get_closest_marker(name=\"hoverfly\")\n if not marker:\n return\n\n ensure_simulation_dir(item.config)\n\n stateful = marker.kwargs.pop(\"stateful\", False)\n record = marker.kwargs.pop(\"record\", False)\n\n if set(marker.kwargs) - {\"name\"}:\n raise RuntimeError(f\"Unknown argments passed to @hoverfly: {marker.kwargs}\")\n\n if record:\n item.fixturenames.append(\"_stateful_simulation_recorder\" if stateful else \"_simulation_recorder\")\n else:\n item.fixturenames.append(\"_simulation_replayer\")", "def test_skip():\n pytest.skip('for a reason!')", "def test_ignores_all(self, testdir):\n testdir.makeini(\"\"\"\n [pytest]\n markers = flake8\n flake8-ignore = E203\n *.py E300\n tests/*.py ALL E203 # something\n \"\"\")\n testdir.tmpdir.ensure(\"xy.py\")\n testdir.tmpdir.ensure(\"tests/hello.py\")\n result = testdir.runpytest(\"--flake8\", \"-s\")\n result.assert_outcomes(passed=1)\n result.stdout.fnmatch_lines([\n \"*collected 1*\",\n \"*xy.py .*\",\n \"*1 passed*\",\n ])", "def test(session) -> None:\n session.install(\".[test]\")\n session.run(\"pytest\", \"-n\", \"auto\", *session.posargs)", "def pytest_configure(config):\n config.addinivalue_line(\"markers\", \"format_sql: mark format_sql tests.\")", "def pytest_collection_modifyitems(config, items):\n if config.getoption(\"--slurm\"):\n return ## run all\n skip_slurm = pytest.mark.skip(reason=\"need --slurm option to run\")\n for item in items:\n if \"slurm\" in item.keywords:\n item.add_marker(skip_slurm)\n if config.getoption(\"--long\"):\n return\n skip_long = pytest.mark.skip(reason=\"need --long option to run\")\n for item in items:\n if \"long\" in item.keywords:\n item.add_marker(skip_long)\n # no --long no --slurm or only --long", "def test_dummy_test():\n pass", "def test_skip_in_test():\n pytest.skip()", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def tests():\n 
api.local('nosetests')", "def test_pytest():\n assert True", "def test_pytest():\n assert True", "def test():\n for cmd in [\n \"pytest --verbose --cov pike/ --cov-report term --cov-report html tests/\",\n ]:\n _run_in_venv(shlex.split(cmd))\n for linter in [[\"black\", \"--check\"], [\"flake8\"], [\"isort\", \"--check\"]]:\n _run_in_venv(linter + TEST_FILES)\n\n _run_in_venv(\n [\"mypy\", \"pike/\", \"tests/\", \"setup.py\", \"pikefile.py\", \"--show-error-codes\"]\n )\n _run_in_venv([\"mypy\", \"examples/\"])\n _run_in_venv([\"bandit\", \"-r\", \"pike/\"])", "def test_normal(testdir, monkeypatch):\n for examinator in pytest_vw.EXAMINATORS:\n monkeypatch.delenv(examinator, raising=False)\n testdir.makepyfile(\"\"\"\n def test_environmental_impact_compliance():\n emissions = 12000\n legal_limit = 300\n assert emissions < legal_limit\n \"\"\")\n result = testdir.runpytest()\n result.assert_outcomes(passed=0, failed=1)", "def test_fixture_simple_patch(testdir):\n\n # create a temporary pytest test module\n testdir.makepyfile(\n \"\"\"\n import requests\n\n def test_simple(requests_mock):\n with requests_mock.patch('/api/test') as patch:\n patch.returns = requests_mock.good('hello')\n response = requests.get('https://test.api/api/test')\n assert response.text == 'hello'\n assert patch.was_called_once()\n \"\"\"\n )\n\n # run pytest with the following cmd args\n result = testdir.runpytest(\"-v\")\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines([\"*::test_simple PASSED*\"])\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0", "def tests():", "def test():\n nose.run()", "def test(extra_args: list[str] | None = None, run_doctests: bool = False) -> None:\n pytest = import_optional_dependency(\"pytest\")\n import_optional_dependency(\"hypothesis\")\n cmd = [\"-m not slow and not network and not db\"]\n if extra_args:\n if not isinstance(extra_args, list):\n extra_args = [extra_args]\n cmd = extra_args\n if run_doctests:\n cmd = [\n \"--doctest-modules\",\n \"--doctest-cython\",\n f\"--ignore={os.path.join(PKG, 'tests')}\",\n ]\n cmd += [PKG]\n joined = \" \".join(cmd)\n print(f\"running: pytest {joined}\")\n sys.exit(pytest.main(cmd))", "def pytest_runtest_setup(item):\n if 'slow' in item.keywords and not item.config.getoption(\"--runslow\"):\n pytest.skip(\"need --runslow option to run\")" ]
[ "0.680616", "0.62240934", "0.6152068", "0.6072265", "0.60588026", "0.6049441", "0.5862266", "0.5828549", "0.57791716", "0.5772465", "0.5752862", "0.57491755", "0.57372534", "0.56901443", "0.56876606", "0.56696975", "0.56610435", "0.56579846", "0.5653875", "0.5643033", "0.5625987", "0.56146365", "0.56146365", "0.56120515", "0.5609939", "0.5592674", "0.5576736", "0.5570218", "0.554044", "0.5535362" ]
0.71748304
0
Return the _id field of the current class. We cannot use the "_id" property (i.e. trip._id) because valid properties cannot start with an underscore. We can access it using trip["_id"] but that is cumbersome to use and can throw if the _id field does not exist (which may or may not be what we want). Let's abstract it out since this is a common use case.
def get_id(self): return self["_id"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_id(self):\n return self.id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id" ]
[ "0.71313035", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754", "0.71026754" ]
0.7405886
0
Computes the output shape after padding.
def compute_output_shape(self, input_shape): output_shape = [0] * self.rank for d in range(self.rank): output_shape[d] = sum(self.paddings[d]) + input_shape[d] return tf.TensorShape(output_shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 90\n shape_3 = input_shape[2]\n\n return (shape_1, shape_2, shape_3)", "def compute_output_shape(self, s):\n return s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3]", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_output = weights[-1]\n dilations = np.asarray(self.dilation_rate)\n if (isinstance(self.padding, list) or isinstance(self.padding, tuple)) and len(self.padding) == 2:\n output_size = np.asarray(\n np.ceil((input_size + 2 * np.asarray(self.padding) - kernels - (kernels - 1) * (\n dilations - 1)) / strides + 1),\n dtype=np.int)\n else:\n output_size = np.asarray(\n np.ceil(input_size / strides) if self.padding == \"SAME\" or self.padding == \"ZEROPAD\" else np.ceil(\n (input_size - (kernels - 1) * dilations) / strides), dtype=np.int)\n \n output_shape = self.incoming_shape[:]\n output_shape[-3:-1] = output_size.tolist()\n output_shape[-1] = num_output\n return output_shape", "def determine_padding(self, input_shape: int, output_shape: int) -> int:\n padding = (((output_shape - 1) * self.stride) + 1 - input_shape + (self.dilation * (self.kernel_size - 1)))\n\n # integer division\n padding = padding // 2\n assert output_shape == l_out(\n input_shape, padding, self.dilation, self.kernel_size, self.stride\n ) and padding >= 0, f\"Input and output of {input_shape} and {output_shape} with \" \\\n f\"kernel {self.kernel_size}, dilation {self.dilation}, stride {self.stride} \" \\\n f\"are incompatible for a Conv1D network.\"\n return padding", "def padding(self):\n\t\treturn self.paddings_shape_param('W')", "def compute_output_shape(self, input_shape):\r\n return input_shape", "def conv_output_shape(\n h_w: Tuple[int, int],\n kernel_size: int = 1,\n stride: int = 1,\n pad: int = 0,\n dilation: int = 1,\n ):\n h = floor(\n ((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n w = floor(\n ((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n return h, w", "def compute_output_shape(self, input_shape):\n if tf.keras.backend.image_data_format() == 'channels_first':\n return (input_shape[0][0], input_shape[0][1]) + input_shape[1][2:4]\n\n return (input_shape[0][0],) + input_shape[1][1:3] + (input_shape[0][-1],)", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n \n if type(h_w) is not tuple:\n h_w = (h_w, h_w)\n \n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n \n if type(stride) is not tuple:\n stride = (stride, stride)\n \n if type(pad) is not tuple:\n pad = (pad, pad)\n \n h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1)// stride[0] + 1\n w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1)// stride[1] + 1\n \n return h, w", "def reshape_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 384\n return(shape_1, shape_2)", "def compute_output_shape(self,input_shape):\n return (input_shape[0][0])", "def output_shape(self):\n raise NotImplementedError", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = floor(((h_w[0] + (2 * pad) - (dilation *\n (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad) - (dilation *\n (kernel_size[1] - 1)) - 
1) / stride) + 1)\n return h, w", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n h = math.floor( ((h_w[0] + (2 * pad) - ( dilation * (kernel_size[0] - 1)\n ) - 1 )/ stride) + 1)\n w = math.floor( ((h_w[1] + (2 * pad) - ( dilation * (kernel_size[1] - 1)\n ) - 1 )/ stride) + 1)\n return h, w", "def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1):\n from math import floor\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n\n if type(pad) is not tuple:\n pad = (pad, pad)\n\n h = floor(((h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) / stride) + 1)\n w = floor(((h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) / stride) + 1)\n return h, w", "def compute_output_shape(self, input_shape):\n return (input_shape[0][0], input_shape[0][1] + 2)", "def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def slice_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 80\n shape_3 = input_shape[2]\n return (shape_1, shape_2, shape_3)", "def _eval_output_shape(lhs_shape, rhs_shape, padding, window_strides):\n output_shape = [lhs_shape[0]]\n for i in range(1, len(lhs_shape) - 1):\n if padding == 'SAME':\n output_shape.append((lhs_shape[i] - 1) * window_strides[i-1] + rhs_shape[i])\n if padding == 'VALID':\n output_shape.append((lhs_shape[i] - 1) * window_strides[i-1])\n output_shape.append(lhs_shape[-1])\n return tf.constant(output_shape)", "def __calc_padding(self, input_shape, kernel_size, stride=1):\n # default of pytorch for input_size = (C_in, H_in, W_in)\n if len(input_shape) == 3:\n if stride != (1,1):\n raise ValueError(\"calc padding only works for stride=(1,1)\")\n padding = (0,0)\n if kernel_size[0]%2 == 0 or kernel_size[1]%2 == 0:\n raise ValueError(\"the kernel size: {} is incompatible with CnnHighway. 
With this kernel, the conv output shape will not equal the input shape\".format(kernel_size))\n padding_height = int((kernel_size[0] - 1)/2)\n padding_width = int((kernel_size[1] - 1)/2)\n return (padding_height, padding_width)\n if len(input_shape) == 2:\n if stride != 1:\n raise ValueError(\"calc padding only works for stride=(1)\")\n padding = int((kernel_size -1)/2)\n return padding", "def paddings_shape_param(self, param):\n\t\tindex = self.variables['paddings_format'].index(param)\n\t\treturn self.variables['paddings'].shape[index]", "def pad(img, shape):#pad_size=32):\n\n if shape == 0:\n return img\n pad_shape = np.int16(np.ceil((np.array(shape) - np.array(img.shape[:2]))))\n height, width = img.shape[:2]\n\n # if height % shape == 0:\n # y_min_pad = 0\n # y_max_pad = 0\n # else:\n y_pad = pad_shape[0]\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n\n # if width % pad_size == 0:\n # x_min_pad = 0\n # x_max_pad = 0\n # else:\n x_pad = pad_shape[1]\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n\n # img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_REFLECT_101)\n img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_CONSTANT, value=0)\n\n return img, (x_min_pad, y_min_pad, x_max_pad, y_max_pad)", "def output_shape(self) ->torch.Size:\n input_shape = self.input_shape\n if self._reduce_mode in {None, 'none', 'None'}:\n return input_shape\n elif self._reduce_mode == 'concat':\n if len(input_shape) > 1:\n return input_shape[:-2] + (input_shape[-1] * input_shape[-2],)\n return input_shape\n else:\n return input_shape[1:]", "def output_shape_conv_and_pool_layer(rows: int,\n columns: int,\n kernel: int,\n stride: int = 1,\n padding: int = 0,\n dilatation: float = 1.) -> Tuple[int, int]:\n return (\n int((rows + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n int((columns + 2 * padding - dilatation * (kernel - 1) - 1) / stride + 1),\n )", "def set_output_shape(self):\n self.output_shape = (reduce(mul, self.input_shape),)", "def get_output_shape(self):\n return self.incoming_shapes[0][:-1] + [sum([s[-1] for s in self.incoming_shapes])]", "def compute_output_shape(self, input_shape):\n return (input_shape[0][0], input_shape[0][1] + 1)", "def compute_output_shape(self, input_shape):\n return (input_shape[0][0], input_shape[0][1] + 1)", "def padding_width(self):\n\t\treturn self.paddings_shape_param('W')", "def set_output_shape(self):\n self.output_shape = ((self.input_shape[0] // self.stride[0],\n self.input_shape[1] // self.stride[1],\n self.input_shape[2]\n ))" ]
[ "0.76141727", "0.71460515", "0.7127075", "0.70725465", "0.69714063", "0.6915575", "0.68842566", "0.6832676", "0.68002874", "0.6773313", "0.67546433", "0.6744123", "0.6714085", "0.6710998", "0.6694745", "0.66736597", "0.6663072", "0.66281855", "0.6554645", "0.6497201", "0.64810586", "0.648056", "0.646498", "0.64522207", "0.641608", "0.6414549", "0.640966", "0.640966", "0.6380466", "0.6377556" ]
0.7320594
1
Custom implementation of the tf layer build method. Sets the shape of the random noise along the specified axis.
def build(self, input_shape): shape = np.ones(len(input_shape), dtype=np.int32) shape[self._axis] = input_shape[self._axis] self._rand_shape = tf.constant(shape, dtype=tf.dtypes.int32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randn(*shape, **kwargs):\n return Tensor(np.random.randn(*shape), **kwargs)", "def build(self,input_shape):\n\n self.w = self.add_weight(shape=(input_shape[-1],self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)", "def build(self, input_shape: tf.Tensor):\n self.dense = tf.keras.layers.Dense(self.channels, input_shape=input_shape)\n self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)", "def glorot_init(shape):\n\n return tf.random_normal(shape=shape, stddev=tf.sqrt(2. / (shape[0] + shape[1])))", "def __call__(self, inputs_shape):\n assert not self._achieve_init\n self.W = 2 * np.random.randn(self._units, inputs_shape) / np.sqrt(inputs_shape)\n self.b = np.zeros((self._units, 1))\n super(Dense, self).__call__()", "def build_layer(self) :\n inputsWithBias = self.input_count + 1\n self.weights = np.random.rand(inputsWithBias, self.node_count)\n self.weights_and_activations = (self.weights, self.activations)", "def build(self, input_shape):\r\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units), name='kernel')\r\n if self.recurrent:\r\n self.recurrent_kernel = self.add_weight(shape=(self.units, self.units), name='recurrent_kernel')\r\n self.bias = self.add_weight(shape=(self.units,), initializer='ones', name='bias')\r\n self.built = True", "def build(self, input_shape):\n if hasattr(self, \"timesteps\") and self.timesteps is not None:\n self.timestep_dim = self.timesteps\n else:\n self.timestep_dim = 1 # input_shape[0]\n\n self.input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"kernel\",\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n )\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 4),\n name=\"recurrent_kernel\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n # add attention kernel\n self.attention_kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"attention_kernel\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n # add attention weights\n # weights for attention model\n self.attention_weights = self.add_weight(\n shape=(self.input_dim, self.units),\n name=\"attention_W\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n self.attention_recurrent_weights = self.add_weight(\n shape=(self.units, self.units),\n name=\"attention_U\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n if self.use_bias:\n if self.unit_forget_bias:\n\n def bias_initializer(shape, *args, **kwargs):\n return K.concatenate(\n [\n self.bias_initializer((self.units,), *args, **kwargs),\n initializers.Ones()((self.units,), *args, **kwargs),\n self.bias_initializer(\n (self.units * 2,), *args, **kwargs\n ),\n ]\n )\n\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(\n shape=(self.units * 4,),\n name=\"bias\",\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_bias = self.add_weight(\n shape=(self.units,),\n name=\"attention_b\",\n 
initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_recurrent_bias = self.add_weight(\n shape=(self.units, 1),\n name=\"attention_v\",\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n else:\n self.bias = None\n self.attention_bias = None\n self.attention_recurrent_bias = None\n\n self.kernel_i = self.kernel[:, : self.units]\n self.kernel_f = self.kernel[:, self.units : self.units * 2]\n self.kernel_c = self.kernel[:, self.units * 2 : self.units * 3]\n self.kernel_o = self.kernel[:, self.units * 3 :]\n\n self.recurrent_kernel_i = self.recurrent_kernel[:, : self.units]\n self.recurrent_kernel_f = self.recurrent_kernel[\n :, self.units : self.units * 2\n ]\n self.recurrent_kernel_c = self.recurrent_kernel[\n :, self.units * 2 : self.units * 3\n ]\n self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3 :]\n\n self.attention_i = self.attention_kernel[:, : self.units]\n self.attention_f = self.attention_kernel[:, self.units : self.units * 2]\n self.attention_c = self.attention_kernel[:, self.units * 2 : self.units * 3]\n self.attention_o = self.attention_kernel[:, self.units * 3 :]\n\n if self.use_bias:\n self.bias_i = self.bias[: self.units]\n self.bias_f = self.bias[self.units : self.units * 2]\n self.bias_c = self.bias[self.units * 2 : self.units * 3]\n self.bias_o = self.bias[self.units * 3 :]\n else:\n self.bias_i = None\n self.bias_f = None\n self.bias_c = None\n self.bias_o = None\n\n self.built = True", "def build(self, input_shape):\n if self.use_scale:\n self.scale = self.add_weight(\n name='scale',\n shape=(),\n initializer='ones',\n dtype=self.dtype,\n trainable=True)\n else:\n self.scale = None\n super(HypLuongAttention, self).build(input_shape)", "def build(self,unused):\n # (word_embeddings_shape, _) = input_shapes\n # width = word_embeddings_shape.as_list()[-1]\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"token_type_embeddings\",\n shape=[self.token_type_vocab_size, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=LAYER_NORM_NAME, axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super().build(unused)", "def layer_weight_init(self, size):\n # TODO: make smarter init\n return np.random.uniform(size=size)", "def build(self, input_shape):\n self.nb_samples = input_shape[0]\n self.nb_filter = input_shape[self.axis_filter]\n self.rows = input_shape[self.axis_row]\n self.cols = input_shape[self.axis_col]\n\n # Calculate covariance axis\n if self.cov_mode == 'channel' or self.cov_mode == 'mean' or self.cov_mode == 'pmean':\n self.cov_dim = self.nb_filter\n else:\n self.cov_dim = self.rows * self.cols\n\n # Set out_dim accordingly.\n if self.cov_mode == 'mean' or self.cov_mode == 'pmean':\n self.out_dim = self.cov_dim + 1\n else:\n self.out_dim = self.cov_dim\n\n if self.cov_mode == 'pmean':\n self.mean_p = self.cov_beta\n self.name += '_pm_{}'.format(self.mean_p)\n print(\"use parametric non_trainable 
{}\".format(self.mean_p))\n\n if self.robust:\n print('use robust estimation with cov_alpha {}'.format(self.cov_alpha))\n self.name += '_rb'\n\n if self.cov_regulairzer == 'Fob':\n self.C_regularizer = FrobNormRegularizer(self.out_dim, self.cov_alpha)\n self.activity_regularizer = self.C_regularizer\n elif self.cov_regulairzer == 'vN':\n self.C_regularizer = VonNeumannDistanceRegularizer(self.out_dim, self.cov_alpha, self.eps)\n self.activity_regularizer = self.C_regularizer\n\n # add the alpha\n # self.alpha = self.add_weight(\n # shape=d\n # )\n self.built = True", "def _build(self):\n fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)\n self(fake_mels)", "def __init__(self, x_axis, y_axis, input_dimension, sigma, learning_rate, random_seed=3):\n # Creating a random generator for random values; for initializing weights\n self.random_generator = np.random.RandomState(random_seed)\n \n self.x_axis = x_axis\n self.y_axis = y_axis\n self.input_dimension = input_dimension\n self.sigma = sigma\n self.learning_rate = learning_rate\n self.weights = np.array([[[0 for x in range(self.input_dimension)] for x in range(self.x_axis)] for x in range(y_axis)], dtype=float)", "def _basic_build(self, inputs_shape):\n\n d = inputs_shape[-1]\n h = self._real_units\n s = self._slots\n\n self._erase_W = self.add_variable(\n name=\"_erase_W\", shape=[d + h, h], initializer=self._kernel_initializer\n )\n self._erase_b = self.add_variable(\n name=\"_erase_b\",\n shape=[h],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n\n self._reset_W = self.add_variable(\n name=\"_reset_W\", shape=[d + h, 1], initializer=self._kernel_initializer\n )\n self._reset_b = self.add_variable(\n name=\"_reset_b\",\n shape=[1],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n\n self._add_W = self.add_variable(\n name=\"_add_W\", shape=[d + h, h], initializer=self._kernel_initializer\n )\n self._add_b = self.add_variable(\n name=\"_add_b\",\n shape=[h],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n self.heads = self.add_variable(\n name=\"_heads\", shape=[s, d], initializer=self._kernel_initializer\n )\n\n self._beta = self.add_variable(\n name=\"_beta_no_reg\",\n shape=(),\n initializer=tf.compat.v1.constant_initializer(\n np.array([1.02]), dtype=np.float32\n ),\n )\n self._alpha = self.add_variable(\n name=\"_alpha_no_reg\",\n shape=(),\n initializer=tf.compat.v1.constant_initializer(\n np.array([0.98]), dtype=np.float32\n ),\n )", "def build(self, input_shape: tf.Tensor):\n self.conv = tf.keras.layers.Conv2D(\n self.channels, (1, 1), input_shape=input_shape)\n self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)", "def build(self, input_shape):\n pass", "def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. 
/ n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)", "def initializer(init, shape):\n if init == \"zero\":\n # The generated tensor has only zero-valued elements\n return tf.zeros(shape)\n elif init == \"he\":\n # fan_in gives the number of element in the tensor\n fan_in = np.prod(shape[0:-1])\n std = 1 / np.sqrt(fan_in)\n # The generated values follow a uniform distribution in the range [-stt,std)\n return tf.random_uniform(shape, minval=-std, maxval=std)", "def build(self, input_shape):\n hidden_dim = input_shape[2]\n self.W = self.add_weight(\n name='{}_W'.format(self.name),\n shape=(hidden_dim, hidden_dim,),\n initializer='uniform',\n trainable=True)\n self.b = self.add_weight(\n name='{}_b'.format(self.name),\n shape=(hidden_dim,),\n initializer='zeros',\n trainable=True)\n self.u = self.add_weight(\n name='{}_u'.format(self.name),\n shape=(hidden_dim,),\n initializer='uniform',\n trainable=True)\n super(AttentionLayer, self).build(input_shape)", "def build(self, input_shape):\n node_embed_shape = input_shape.node_embed\n edge_embed_shape = input_shape.edge_embed\n\n with tf.name_scope('node'):\n with tf.name_scope('U'):\n self.U = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.U.build(node_embed_shape)\n\n with tf.name_scope('V'):\n self.V = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.V.build(node_embed_shape)\n\n with tf.name_scope('norm'):\n self.norm_h = {\n \"batch\": tf.keras.layers.BatchNormalization(),\n \"layer\": tf.keras.layers.LayerNormalization()\n }.get(self.normalization, None)\n if self.norm_h:\n self.norm_h.build(node_embed_shape)\n\n with tf.name_scope('edge'):\n with tf.name_scope('A'):\n self.A = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.A.build(edge_embed_shape)\n \n with tf.name_scope('B'):\n self.B = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.B.build(node_embed_shape)\n\n with tf.name_scope('C'):\n self.C = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.C.build(node_embed_shape)\n\n with tf.name_scope('norm'):\n self.norm_e = {\n 'batch': tf.keras.layers.BatchNormalization(),\n 'layer': tf.keras.layers.LayerNormalization(axis=-1)\n }.get(self.normalization, None)\n if self.norm_e:\n self.norm_e.build(edge_embed_shape)\n \n super().build(input_shape)", "def __init__(self, shape, dtype=tf.float32, name=None):\n super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name))", "def sample_noise(batch_size, dim):\n\n return tf.random_uniform([batch_size, dim], minval=-1, maxval=1)", "def glorot(shape, name=None):\n if len(shape) == 2:\n init_range = np.sqrt(6.0 / (shape[0] + shape[1]))\n elif len(shape) == 1:\n init_range = np.sqrt(6.0 / shape[0])\n initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)\n return tf.get_variable(initializer=initial, name=name)", "def __init__(self, incoming, axis=-1, name='MultiplicativeNoiseLayer'):\n super(ArgMaxOneHot, self).__init__()\n \n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n self.axis = axis\n self.out = None\n self.name = name", "def glorot(shape, name=None, scale=1.):\n init_range = np.sqrt(6.0/(shape[-1]+shape[-2])) * scale\n initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)\n return tf.Variable(initial, name=name)", "def __init__(self, obs_dim, *, seed=None):\n # TODO: apply 
jax.jit() to everything in sight\n net_init, self._net_apply = self.make_stax_model()\n if seed is None:\n # oh well\n seed = np.random.randint((1 << 63) - 1)\n rng = jrandom.PRNGKey(seed)\n out_shape, self._net_params = net_init(rng, (-1, obs_dim))\n self._net_grads = jax.grad(self._net_apply)\n # output shape should just be batch dim, nothing else\n assert out_shape == (-1,), \"got a weird output shape %s\" % (out_shape,)", "def build(self, input_shape):\n assert len(input_shape) == 3\n assert input_shape[1] == input_shape[2]\n self.out_dim = input_shape[2]\n # self.b = K.eye(self.out_dim, name='strange?')\n self.built = True", "def build(self, input_shape):\n assert len(input_shape) == 3\n assert input_shape[1] == input_shape[2]\n self.out_dim = input_shape[2]\n # self.b = K.eye(self.out_dim, name='strange?')\n self.built = True", "def build(self, input_shape):\n assert len(input_shape) == 3\n assert input_shape[1] == input_shape[2]\n self.out_dim = input_shape[2]\n # self.b = K.eye(self.out_dim, name='strange?')\n self.built = True" ]
[ "0.6374517", "0.63439924", "0.6240142", "0.6123517", "0.60665405", "0.6052357", "0.59816384", "0.5958945", "0.5925781", "0.5909455", "0.59018105", "0.58936787", "0.5872386", "0.58613", "0.5829131", "0.58168006", "0.57844967", "0.57826954", "0.57500845", "0.5744017", "0.5712865", "0.5683608", "0.56702006", "0.56649685", "0.566421", "0.56582737", "0.56573826", "0.5650552", "0.5650552", "0.5650552" ]
0.7089877
0
Assert that the shape of the input tensor is the expected 5D spatiotemporal shape
def _check_shape(input_shape):
    msg = ('Input to FlattenAxis must be 5D with dimensions: '
           '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '
           'n_features), but received shape: {}'.format(input_shape))
    assert len(input_shape) == 5, msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_shape(input_shape):\n msg = ('Input to SpatioTemporalExpansion must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg", "def test_shape(self):\n M = simulation.StateMonitor(self.G, ['a', 'v'])\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n nsteps = int_r(self.t_max/self.dt)\n self.assertEqual(M.v.shape, (self.N, nsteps))\n self.assertEqual(M.a.shape, (2, nsteps))", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def assert_shape(tensor, shape, name):\n real_shape = tensor.get_shape().as_list()\n same_rank = len(real_shape) == len(shape)\n all_equal = all([(s == r or s == -1) for s, r in zip(shape, real_shape)])\n if not same_rank or not all_equal:\n raise tf.errors.InvalidArgumentError(\n 'Error: Expected tensor %s to have shape %s, but it had shape %s.' %\n (name, str(shape), str(real_shape)))", "def test_flatten3D():\n with tf.Session().as_default():\n tensor2d = tf.constant([[1, 2, 3]], dtype=tf.float32)\n with pytest.raises(AssertionError):\n output = flatten3D(tensor2d)\n\n tensor3d = tf.constant([[[1, 2, 3]]], dtype=tf.float32)\n assert tensor3d == flatten3D(tensor3d)\n\n init_shape = (3, 17, 23, 3, 5)\n expected_shape = (3, 17, 23*3*5)\n\n tensor5d = tf.constant(np.arange(0, np.prod(init_shape)).reshape(init_shape), tf.int32)\n assert tensor5d.eval().shape == init_shape\n output = flatten3D(tensor5d)\n assert output.eval().shape == expected_shape", "def test_shape(self):\n M = simulation.EventMonitor(self.G)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertEqual(len(M.t), len(M.i))", "def _check_tensor_shapes(tensors):\n for tensor in tensors:\n tensor = tf.convert_to_tensor(value=tensor)\n tensor.get_shape().assert_has_rank(2)\n tensor.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(value=tensors[0]).get_shape())", "def assert_shape(tensor: tf.Tensor, expected: Tuple[Optional[int],\n ...]) -> Tuple[int, ...]:\n actual = tensor.shape\n match = len(actual) == len(expected) and all(\n y is None or x == y for x, y in zip(actual, expected))\n assert match, f\"wrong shape, expected {expected}, actual {actual}\"\n return actual", "def test_shape_of_nsp_head_output(batch_size: int):\n hidden_size = 10\n\n nsp_head = BertNSPHead()\n\n pooled_output = tf.random.uniform((batch_size, hidden_size))\n outputs = nsp_head(pooled_output)\n\n assert outputs.shape == (batch_size, 2)", "def test_generate_shapes(times, test_ndim, rng):\n mocktrans = MockTransition(dim=test_ndim)\n initrv = randvars.Constant(np.random.rand(test_ndim))\n proc = randprocs.markov.MarkovProcess(\n initarg=times[0], initrv=initrv, transition=mocktrans\n )\n states, obs = randprocs.markov.utils.generate_artificial_measurements(\n rng, prior_process=proc, measmod=mocktrans, times=times\n )\n\n assert states.shape[0] == len(times)\n assert states.shape[1] == test_ndim\n assert obs.shape[0] == len(times)\n assert obs.shape[1] == test_ndim", "def _check_shape(input_shape):\n msg = ('Input to SpatialExpansion must be 4D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_features), '\n 'but received shape: {}'.format(input_shape))\n assert len(input_shape) == 4, msg", "def test_track_shape_error():\n track = np.random.randn(50,1)\n try:\n bad_arm = survey.get_spiral_slice(track = track)\n except TypeError:\n assert True\n else:\n assert False", "def 
test_get_integration_time_shape():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n inttime_array = utils.get_integration_time(test_uv, reds=baseline_array)\n test_shape = (test_uv.Nbls, test_uv.Ntimes)\n assert test_shape == inttime_array.shape", "def test_shape_Constant_tensor(self):\r\n as_t = T.as_tensor_variable\r\n self.validate((as_t(3), as_t(2), as_t(7), as_t(5)), (5, 2,\r\n 2, 3), 'valid')\r\n self.validate(as_t([3, 2, 7, 5]), (5, 2, 2, 3), 'valid')\r\n self.validate(as_t((3, 2, 7, 5)), (5, 2, 2, 3), 'valid')\r\n self.validate((3, 2, 7, 5), (as_t(5), as_t(2), as_t(2),\r\n as_t(3)), 'valid')\r\n self.validate((3, 2, 7, 5), as_t([5, 2, 2, 3]), 'valid')\r\n self.validate((3, 2, 7, 5), as_t((5, 2, 2, 3)), 'valid')\r\n self.validate(as_t([3, 2, 7, 5]), as_t([5, 2, 2, 3]), 'full')", "def test_infer_target_shape(self):\n t = Identity()\n assert t.infer_target_shape((5,)) == (5,)", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def test_IODimensions(self):\n tasks = [(1,1,100,True),(10,1,100,True),(1,10,100,True),(10,10,100,True),\n (1,1,100,False),(10,1,100,False),(1,10,100,False),(10,10,100,False)]\n for t in tasks:\n N_in ,N_out, N_samples, tf = t\n X = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n y = np.random.randn(N_samples,N_out) if N_out > 1 else np.random.randn(N_samples)\n Xp = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n esn = ESN(N_in,N_out,teacher_forcing=tf)\n prediction_tr = esn.fit(X,y)\n prediction_t = esn.predict(Xp)\n self.assertEqual(prediction_tr.shape,(N_samples,N_out))\n self.assertEqual(prediction_t.shape,(N_samples,N_out))", "def test_noise_shape():\n test_sample = np.ones((2, 13, 21)) * 3\n test_noise = utils.generate_noise(test_sample)\n assert test_sample.shape == test_noise.shape", "def test_vector_shape(self):\n model = PoincareModel(self.data, size=20)\n self.assertEqual(model.kv.syn0.shape, (7, 20))", "def testQuestionTwo(self):\n self.assertEqual(AnswerQuestionTwo().shape, (5,5), \"Question two's output is not one dimension.\")", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def _AssertShapesMatch(op_name, in_tensor, out_tensor):\n in_shape = in_tensor.get_shape()\n out_shape = out_tensor.get_shape()\n\n if not in_shape.is_compatible_with(out_shape):\n raise ValueError('%s should not change tensor shape: input %s, '\n 'output %s' % (op_name, in_shape, out_shape))", "def _test_sampsize(t):\n return t.shape[1] != len(t.ids(axis='sample'))", "def _test_obsmdsize(t):\n md = t.metadata(axis='observation')\n return t.shape[0] != len(md) if md is not None else False", "def test_unet_verify_output_shape(simple_unet_data):\n unet = models.UNet()\n output = unet(simple_unet_data)\n print(\"Input shape:\", simple_unet_data.shape)\n print(\"Output shape:\", output.shape)\n assert simple_unet_data.shape == output.shape", "def _test_obsdup(t):\n return t.shape[0] != len(set(t.ids(axis='observation')))", "def test_infer_target_shape(self):\n t = Quantize()\n assert t.infer_target_shape((5,)) == (5,)", "def shape(tensor):\n raise NotImplementedError", "def test_dimension_size_infer(self, nt=100):\n i, j, k = dimify('i j k')\n shape = tuple([d.size for d in [i, j, k]])\n a = DenseData(name='a', shape=shape).indexed\n b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed\n eqn = Eq(b[time, x, 
y, z], a[x, y, z])\n op = Operator(eqn)\n\n _, op_dim_sizes = op.arguments()\n assert(op_dim_sizes[time.name] == nt)", "def _assert_same_size(outputs, output_size):\n nest.assert_same_structure(outputs, output_size)\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n\n for (output, size) in zip(flat_output, flat_output_size):\n if isinstance(size, tf.TensorShape):\n if output.shape == size:\n pass\n elif output[0].shape != tf.TensorShape(size):\n raise ValueError(\n \"The output size does not match the the required output_size\")" ]
[ "0.767875", "0.6563718", "0.64985377", "0.6418538", "0.64130336", "0.6398726", "0.63728774", "0.630607", "0.6295184", "0.629249", "0.6280885", "0.62366974", "0.62299126", "0.61740613", "0.61740327", "0.6142785", "0.6126741", "0.61242807", "0.6100432", "0.60915726", "0.6084971", "0.6078476", "0.60690975", "0.60629725", "0.60483766", "0.5998025", "0.596065", "0.59117246", "0.59073675", "0.5899857" ]
0.6978557
1
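Illustrative aside (not a dataset row): a 5D guard like the one documented in the row above is typically called from a custom Keras layer's build() so that a wrong input rank fails immediately. The sketch below is a minimal, assumed usage — the FlattenAxisSketch name, the temporal-axis flattening in call(), and the example shapes are all assumptions; only the assertion itself mirrors the documented function.

import tensorflow as tf


class FlattenAxisSketch(tf.keras.layers.Layer):
    """Toy layer: folds the temporal axis into the observation axis."""

    @staticmethod
    def _check_shape(input_shape):
        # same 5D guard as the documented function above
        msg = ('Input must be 5D with dimensions: (n_observations, '
               'n_spatial_0, n_spatial_1, n_temporal, n_features), '
               'but received shape: {}'.format(input_shape))
        assert len(input_shape) == 5, msg

    def build(self, input_shape):
        self._check_shape(input_shape)

    def call(self, x):
        s = tf.shape(x)
        # move the temporal axis next to the observation axis, then merge them
        x = tf.transpose(x, (0, 3, 1, 2, 4))
        return tf.reshape(x, tf.stack([s[0] * s[3], s[1], s[2], s[4]]))


layer = FlattenAxisSketch()
out = layer(tf.zeros((2, 4, 4, 6, 3)))
print(out.shape)  # (12, 4, 4, 3)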
Assert that the shape of the input tensor is the expected 4D spatiotemporal shape
def _check_shape(input_shape):
    msg = ('Input to SpatialExpansion must be 4D with dimensions: '
           '(n_observations, n_spatial_0, n_spatial_1, n_features), '
           'but received shape: {}'.format(input_shape))
    assert len(input_shape) == 4, msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_shape(input_shape):\n msg = ('Input to SpatioTemporalExpansion must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg", "def test_shape(self):\n M = simulation.StateMonitor(self.G, ['a', 'v'])\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n nsteps = int_r(self.t_max/self.dt)\n self.assertEqual(M.v.shape, (self.N, nsteps))\n self.assertEqual(M.a.shape, (2, nsteps))", "def assert_shape(tensor: tf.Tensor, expected: Tuple[Optional[int],\n ...]) -> Tuple[int, ...]:\n actual = tensor.shape\n match = len(actual) == len(expected) and all(\n y is None or x == y for x, y in zip(actual, expected))\n assert match, f\"wrong shape, expected {expected}, actual {actual}\"\n return actual", "def _check_shape(input_shape):\n msg = ('Input to FlattenAxis must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg", "def test_flatten3D():\n with tf.Session().as_default():\n tensor2d = tf.constant([[1, 2, 3]], dtype=tf.float32)\n with pytest.raises(AssertionError):\n output = flatten3D(tensor2d)\n\n tensor3d = tf.constant([[[1, 2, 3]]], dtype=tf.float32)\n assert tensor3d == flatten3D(tensor3d)\n\n init_shape = (3, 17, 23, 3, 5)\n expected_shape = (3, 17, 23*3*5)\n\n tensor5d = tf.constant(np.arange(0, np.prod(init_shape)).reshape(init_shape), tf.int32)\n assert tensor5d.eval().shape == init_shape\n output = flatten3D(tensor5d)\n assert output.eval().shape == expected_shape", "def assert_shape(tensor, shape, name):\n real_shape = tensor.get_shape().as_list()\n same_rank = len(real_shape) == len(shape)\n all_equal = all([(s == r or s == -1) for s, r in zip(shape, real_shape)])\n if not same_rank or not all_equal:\n raise tf.errors.InvalidArgumentError(\n 'Error: Expected tensor %s to have shape %s, but it had shape %s.' 
%\n (name, str(shape), str(real_shape)))", "def _check_tensor_shapes(tensors):\n for tensor in tensors:\n tensor = tf.convert_to_tensor(value=tensor)\n tensor.get_shape().assert_has_rank(2)\n tensor.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(value=tensors[0]).get_shape())", "def test_shape(self):\n M = simulation.EventMonitor(self.G)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertEqual(len(M.t), len(M.i))", "def test_track_shape_error():\n track = np.random.randn(50,1)\n try:\n bad_arm = survey.get_spiral_slice(track = track)\n except TypeError:\n assert True\n else:\n assert False", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def test_get_integration_time_shape():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n inttime_array = utils.get_integration_time(test_uv, reds=baseline_array)\n test_shape = (test_uv.Nbls, test_uv.Ntimes)\n assert test_shape == inttime_array.shape", "def test_vector_shape(self):\n model = PoincareModel(self.data, size=20)\n self.assertEqual(model.kv.syn0.shape, (7, 20))", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def test_generate_shapes(times, test_ndim, rng):\n mocktrans = MockTransition(dim=test_ndim)\n initrv = randvars.Constant(np.random.rand(test_ndim))\n proc = randprocs.markov.MarkovProcess(\n initarg=times[0], initrv=initrv, transition=mocktrans\n )\n states, obs = randprocs.markov.utils.generate_artificial_measurements(\n rng, prior_process=proc, measmod=mocktrans, times=times\n )\n\n assert states.shape[0] == len(times)\n assert states.shape[1] == test_ndim\n assert obs.shape[0] == len(times)\n assert obs.shape[1] == test_ndim", "def test_noise_shape():\n test_sample = np.ones((2, 13, 21)) * 3\n test_noise = utils.generate_noise(test_sample)\n assert test_sample.shape == test_noise.shape", "def test_shape_Constant_tensor(self):\r\n as_t = T.as_tensor_variable\r\n self.validate((as_t(3), as_t(2), as_t(7), as_t(5)), (5, 2,\r\n 2, 3), 'valid')\r\n self.validate(as_t([3, 2, 7, 5]), (5, 2, 2, 3), 'valid')\r\n self.validate(as_t((3, 2, 7, 5)), (5, 2, 2, 3), 'valid')\r\n self.validate((3, 2, 7, 5), (as_t(5), as_t(2), as_t(2),\r\n as_t(3)), 'valid')\r\n self.validate((3, 2, 7, 5), as_t([5, 2, 2, 3]), 'valid')\r\n self.validate((3, 2, 7, 5), as_t((5, 2, 2, 3)), 'valid')\r\n self.validate(as_t([3, 2, 7, 5]), as_t([5, 2, 2, 3]), 'full')", "def test_shape_of_nsp_head_output(batch_size: int):\n hidden_size = 10\n\n nsp_head = BertNSPHead()\n\n pooled_output = tf.random.uniform((batch_size, hidden_size))\n outputs = nsp_head(pooled_output)\n\n assert outputs.shape == (batch_size, 2)", "def shape(tensor):\n raise NotImplementedError", "def _AssertShapesMatch(op_name, in_tensor, out_tensor):\n in_shape = in_tensor.get_shape()\n out_shape = out_tensor.get_shape()\n\n if not in_shape.is_compatible_with(out_shape):\n raise ValueError('%s should not change tensor shape: input %s, '\n 'output %s' % (op_name, in_shape, out_shape))", "def test_infer_target_shape(self):\n t = Identity()\n assert t.infer_target_shape((5,)) == (5,)", "def _test_sampsize(t):\n return t.shape[1] != len(t.ids(axis='sample'))", "def testFunction(self, input_shape, stride, kernel_shape, padding,\n output_shape):\n self.assertEqual(conv._default_transpose_size(input_shape, stride,\n kernel_shape=kernel_shape,\n padding=padding),\n tuple(output_shape))", 
"def testQuestionTwo(self):\n self.assertEqual(AnswerQuestionTwo().shape, (5,5), \"Question two's output is not one dimension.\")", "def test_unet_verify_output_shape(simple_unet_data):\n unet = models.UNet()\n output = unet(simple_unet_data)\n print(\"Input shape:\", simple_unet_data.shape)\n print(\"Output shape:\", output.shape)\n assert simple_unet_data.shape == output.shape", "def test_dimension_size_infer(self, nt=100):\n i, j, k = dimify('i j k')\n shape = tuple([d.size for d in [i, j, k]])\n a = DenseData(name='a', shape=shape).indexed\n b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed\n eqn = Eq(b[time, x, y, z], a[x, y, z])\n op = Operator(eqn)\n\n _, op_dim_sizes = op.arguments()\n assert(op_dim_sizes[time.name] == nt)", "def test_contains_shape(self):\n dim = Dimension(None, \"uniform\", -3, 4, shape=(4, 4))\n\n with pytest.raises(NotImplementedError):\n assert dists.uniform.rvs(-3, 4, size=(4, 4)) in dim", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def test_shape_error(self):\n raise unittest.SkipTest(\"Failing after fixing Poly unsoundness #4878\")\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n self.CheckShapePolymorphism(\n lambda x, y: x + y,\n input_signature=[tf.TensorSpec([None]), tf.TensorSpec([4])],\n in_shapes=[\"(v,)\", \"(4,)\"],\n expected_output_signature=tf.TensorSpec([None]))\n\n four_ones = np.ones((4,))\n # We get the error even if we use correct actual arguments\n with self.assertRaisesRegex(TypeError,\n re.escape(\"add got incompatible shapes for broadcasting: (v,), (4,)\")):\n jax2tf.convert(lambda x, y: x + y,\n in_shapes=[\"(v,)\", \"(4,)\"])(four_ones, four_ones)\n\n with self.assertRaisesRegex(TypeError,\n re.escape(\"dot_general requires contracting dimensions to have the same shape, got [4] and [v].\")):\n jax2tf.convert(lambda x: jnp.matmul(x, x),\n in_shapes=[\"(v, 4)\"])(np.ones((4, 4)))\n\n # TODO: this is an opportunity to improve the translation, should not error\n with self.assertRaisesRegex(TypeError,\n \"Only integers, .* tensors are valid indices, got 0\"):\n jax2tf.convert(lambda x: jnp.split(x, 2),\n in_shapes=[\"(2*v,)\"])(four_ones)", "def test_IODimensions(self):\n tasks = [(1,1,100,True),(10,1,100,True),(1,10,100,True),(10,10,100,True),\n (1,1,100,False),(10,1,100,False),(1,10,100,False),(10,10,100,False)]\n for t in tasks:\n N_in ,N_out, N_samples, tf = t\n X = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n y = np.random.randn(N_samples,N_out) if N_out > 1 else np.random.randn(N_samples)\n Xp = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n esn = ESN(N_in,N_out,teacher_forcing=tf)\n prediction_tr = esn.fit(X,y)\n prediction_t = esn.predict(Xp)\n self.assertEqual(prediction_tr.shape,(N_samples,N_out))\n self.assertEqual(prediction_t.shape,(N_samples,N_out))", "def _test_obsmdsize(t):\n md = t.metadata(axis='observation')\n return t.shape[0] != len(md) if md is not None else False" ]
[ "0.71911967", "0.67564714", "0.670049", "0.65799737", "0.6575327", "0.6517123", "0.64665014", "0.64350545", "0.64287883", "0.6360004", "0.6293485", "0.6286067", "0.62737346", "0.62045807", "0.6179963", "0.6166289", "0.6125939", "0.6125699", "0.61026037", "0.6096365", "0.6082352", "0.60540557", "0.60368145", "0.59963787", "0.5988291", "0.59875053", "0.59728616", "0.594353", "0.5939804", "0.59370553" ]
0.68198144
1
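Illustrative aside (not a dataset row): a quick pytest-style exercise of a 4D guard like the one documented in the preceding row, checking that an image-like batch passes and a 5D spatiotemporal batch is rejected. The local _check_shape_4d re-declaration and the test shapes are assumptions for the sketch.

import pytest


def _check_shape_4d(input_shape):
    msg = ('Input must be 4D with dimensions: (n_observations, n_spatial_0, '
           'n_spatial_1, n_features), but received shape: {}'
           .format(input_shape))
    assert len(input_shape) == 4, msg


def test_check_shape_4d():
    _check_shape_4d((8, 10, 10, 2))            # valid 4D image batch passes
    with pytest.raises(AssertionError):
        _check_shape_4d((8, 10, 10, 24, 2))    # 5D spatiotemporal batch fails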
Assert that the shape of the input tensor is the expected 5D spatiotemporal shape
def _check_shape(input_shape):
    msg = ('Input to SpatioTemporalExpansion must be 5D with dimensions: '
           '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '
           'n_features), but received shape: {}'.format(input_shape))
    assert len(input_shape) == 5, msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_shape(input_shape):\n msg = ('Input to FlattenAxis must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg", "def test_shape(self):\n M = simulation.StateMonitor(self.G, ['a', 'v'])\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n nsteps = int_r(self.t_max/self.dt)\n self.assertEqual(M.v.shape, (self.N, nsteps))\n self.assertEqual(M.a.shape, (2, nsteps))", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def assert_shape(tensor, shape, name):\n real_shape = tensor.get_shape().as_list()\n same_rank = len(real_shape) == len(shape)\n all_equal = all([(s == r or s == -1) for s, r in zip(shape, real_shape)])\n if not same_rank or not all_equal:\n raise tf.errors.InvalidArgumentError(\n 'Error: Expected tensor %s to have shape %s, but it had shape %s.' %\n (name, str(shape), str(real_shape)))", "def test_flatten3D():\n with tf.Session().as_default():\n tensor2d = tf.constant([[1, 2, 3]], dtype=tf.float32)\n with pytest.raises(AssertionError):\n output = flatten3D(tensor2d)\n\n tensor3d = tf.constant([[[1, 2, 3]]], dtype=tf.float32)\n assert tensor3d == flatten3D(tensor3d)\n\n init_shape = (3, 17, 23, 3, 5)\n expected_shape = (3, 17, 23*3*5)\n\n tensor5d = tf.constant(np.arange(0, np.prod(init_shape)).reshape(init_shape), tf.int32)\n assert tensor5d.eval().shape == init_shape\n output = flatten3D(tensor5d)\n assert output.eval().shape == expected_shape", "def test_shape(self):\n M = simulation.EventMonitor(self.G)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n\n self.assertEqual(len(M.t), len(M.i))", "def _check_tensor_shapes(tensors):\n for tensor in tensors:\n tensor = tf.convert_to_tensor(value=tensor)\n tensor.get_shape().assert_has_rank(2)\n tensor.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(value=tensors[0]).get_shape())", "def assert_shape(tensor: tf.Tensor, expected: Tuple[Optional[int],\n ...]) -> Tuple[int, ...]:\n actual = tensor.shape\n match = len(actual) == len(expected) and all(\n y is None or x == y for x, y in zip(actual, expected))\n assert match, f\"wrong shape, expected {expected}, actual {actual}\"\n return actual", "def test_shape_of_nsp_head_output(batch_size: int):\n hidden_size = 10\n\n nsp_head = BertNSPHead()\n\n pooled_output = tf.random.uniform((batch_size, hidden_size))\n outputs = nsp_head(pooled_output)\n\n assert outputs.shape == (batch_size, 2)", "def test_generate_shapes(times, test_ndim, rng):\n mocktrans = MockTransition(dim=test_ndim)\n initrv = randvars.Constant(np.random.rand(test_ndim))\n proc = randprocs.markov.MarkovProcess(\n initarg=times[0], initrv=initrv, transition=mocktrans\n )\n states, obs = randprocs.markov.utils.generate_artificial_measurements(\n rng, prior_process=proc, measmod=mocktrans, times=times\n )\n\n assert states.shape[0] == len(times)\n assert states.shape[1] == test_ndim\n assert obs.shape[0] == len(times)\n assert obs.shape[1] == test_ndim", "def _check_shape(input_shape):\n msg = ('Input to SpatialExpansion must be 4D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_features), '\n 'but received shape: {}'.format(input_shape))\n assert len(input_shape) == 4, msg", "def test_track_shape_error():\n track = np.random.randn(50,1)\n try:\n bad_arm = survey.get_spiral_slice(track = track)\n except TypeError:\n assert True\n else:\n assert False", "def 
test_get_integration_time_shape():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n inttime_array = utils.get_integration_time(test_uv, reds=baseline_array)\n test_shape = (test_uv.Nbls, test_uv.Ntimes)\n assert test_shape == inttime_array.shape", "def test_infer_target_shape(self):\n t = Identity()\n assert t.infer_target_shape((5,)) == (5,)", "def test_shape_Constant_tensor(self):\r\n as_t = T.as_tensor_variable\r\n self.validate((as_t(3), as_t(2), as_t(7), as_t(5)), (5, 2,\r\n 2, 3), 'valid')\r\n self.validate(as_t([3, 2, 7, 5]), (5, 2, 2, 3), 'valid')\r\n self.validate(as_t((3, 2, 7, 5)), (5, 2, 2, 3), 'valid')\r\n self.validate((3, 2, 7, 5), (as_t(5), as_t(2), as_t(2),\r\n as_t(3)), 'valid')\r\n self.validate((3, 2, 7, 5), as_t([5, 2, 2, 3]), 'valid')\r\n self.validate((3, 2, 7, 5), as_t((5, 2, 2, 3)), 'valid')\r\n self.validate(as_t([3, 2, 7, 5]), as_t([5, 2, 2, 3]), 'full')", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def test_noise_shape():\n test_sample = np.ones((2, 13, 21)) * 3\n test_noise = utils.generate_noise(test_sample)\n assert test_sample.shape == test_noise.shape", "def test_IODimensions(self):\n tasks = [(1,1,100,True),(10,1,100,True),(1,10,100,True),(10,10,100,True),\n (1,1,100,False),(10,1,100,False),(1,10,100,False),(10,10,100,False)]\n for t in tasks:\n N_in ,N_out, N_samples, tf = t\n X = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n y = np.random.randn(N_samples,N_out) if N_out > 1 else np.random.randn(N_samples)\n Xp = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n esn = ESN(N_in,N_out,teacher_forcing=tf)\n prediction_tr = esn.fit(X,y)\n prediction_t = esn.predict(Xp)\n self.assertEqual(prediction_tr.shape,(N_samples,N_out))\n self.assertEqual(prediction_t.shape,(N_samples,N_out))", "def test_vector_shape(self):\n model = PoincareModel(self.data, size=20)\n self.assertEqual(model.kv.syn0.shape, (7, 20))", "def testQuestionTwo(self):\n self.assertEqual(AnswerQuestionTwo().shape, (5,5), \"Question two's output is not one dimension.\")", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def _AssertShapesMatch(op_name, in_tensor, out_tensor):\n in_shape = in_tensor.get_shape()\n out_shape = out_tensor.get_shape()\n\n if not in_shape.is_compatible_with(out_shape):\n raise ValueError('%s should not change tensor shape: input %s, '\n 'output %s' % (op_name, in_shape, out_shape))", "def _test_sampsize(t):\n return t.shape[1] != len(t.ids(axis='sample'))", "def _test_obsmdsize(t):\n md = t.metadata(axis='observation')\n return t.shape[0] != len(md) if md is not None else False", "def test_unet_verify_output_shape(simple_unet_data):\n unet = models.UNet()\n output = unet(simple_unet_data)\n print(\"Input shape:\", simple_unet_data.shape)\n print(\"Output shape:\", output.shape)\n assert simple_unet_data.shape == output.shape", "def _test_obsdup(t):\n return t.shape[0] != len(set(t.ids(axis='observation')))", "def test_infer_target_shape(self):\n t = Quantize()\n assert t.infer_target_shape((5,)) == (5,)", "def shape(tensor):\n raise NotImplementedError", "def test_dimension_size_infer(self, nt=100):\n i, j, k = dimify('i j k')\n shape = tuple([d.size for d in [i, j, k]])\n a = DenseData(name='a', shape=shape).indexed\n b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed\n eqn = Eq(b[time, x, 
y, z], a[x, y, z])\n op = Operator(eqn)\n\n _, op_dim_sizes = op.arguments()\n assert(op_dim_sizes[time.name] == nt)", "def _assert_same_size(outputs, output_size):\n nest.assert_same_structure(outputs, output_size)\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n\n for (output, size) in zip(flat_output, flat_output_size):\n if isinstance(size, tf.TensorShape):\n if output.shape == size:\n pass\n elif output[0].shape != tf.TensorShape(size):\n raise ValueError(\n \"The output size does not match the the required output_size\")" ]
[ "0.697913", "0.65635157", "0.6497553", "0.64191765", "0.6413171", "0.63976014", "0.6373584", "0.6307098", "0.6297198", "0.6292825", "0.62823594", "0.62372994", "0.62291473", "0.61750543", "0.6174745", "0.6141534", "0.61258924", "0.61253655", "0.6100978", "0.6091626", "0.6087043", "0.60791314", "0.60701585", "0.60618603", "0.6049178", "0.59981126", "0.5961154", "0.5912363", "0.590632", "0.5898946" ]
0.7679232
0
Build the SqueezeAndExcitation layer based on an input shape
def build(self, input_shape):
    self._n_channels = input_shape[-1]
    self._dense_units = int(np.ceil(self._n_channels / self._ratio))
    if len(input_shape) == 4:
        pool_layer = tf.keras.layers.GlobalAveragePooling2D()
    elif len(input_shape) == 5:
        pool_layer = tf.keras.layers.GlobalAveragePooling3D()
    else:
        msg = ('SqueezeAndExcitation layer can only accept 4D or 5D data '
               'for image or video input but received input shape: {}'
               .format(input_shape))
        logger.error(msg)
        raise RuntimeError(msg)
    self._hidden_layers = [
        pool_layer,
        tf.keras.layers.Dense(self._dense_units, activation='relu'),
        tf.keras.layers.Dense(self._n_channels, activation='sigmoid'),
        tf.keras.layers.Multiply()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _squeeze( inputs):\n input_channels = int(inputs.shape[-1])\n\n x = GlobalAveragePooling2D()(inputs)\n x = Dense(input_channels, activation='relu')(x)\n x = Dense(input_channels, activation='hard_sigmoid')(x)\n return x", "def SqueezeNet(input_shape=(224, 224, 3)):\n image_input = Input(shape=input_shape)\n\n network = Conv2D(64, (3, 3), strides=(2, 2), padding=\"valid\")(image_input)\n network = Activation(\"relu\")(network)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n\n network = squeezenet_fire_module(\n input=network, input_channel_small=16, input_channel_large=64\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=16, input_channel_large=64\n )\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n\n network = squeezenet_fire_module(\n input=network, input_channel_small=32, input_channel_large=128\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=32, input_channel_large=128\n )\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n\n network = squeezenet_fire_module(\n input=network, input_channel_small=48, input_channel_large=192\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=48, input_channel_large=192\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=64, input_channel_large=256\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=64, input_channel_large=256\n )\n\n # Remove layers like Dropout and BatchNormalization, they are only needed in training\n # network = Dropout(0.5)(network)\n\n network = Conv2D(1000, kernel_size=(1, 1), padding=\"valid\", name=\"last_conv\")(\n network\n )\n network = Activation(\"relu\")(network)\n\n network = GlobalAvgPool2D()(network)\n network = Activation(\"softmax\", name=\"output\")(network)\n\n input_image = image_input\n model = Model(inputs=input_image, outputs=network)\n\n return model", "def _squeeze(inputs):\n input_channels = int(inputs.shape[-1])\n\n x = KL.GlobalAveragePooling2D()(inputs)\n x = KL.Dense(input_channels, activation='relu')(x)\n x = KL.Dense(input_channels, activation='hard_sigmoid')(x)\n x = KL.Reshape((1, 1, input_channels))(x)\n x = KL.Multiply()([inputs, x])\n\n return x", "def add_squeeze(self, input_name, squeeze_dims=[], name=None):\n return self._build_op(\n 'Squeeze', [input_name], name=name, attr={'squeeze_dims': squeeze_dims})", "def _build(self, inputs):\n\n # calculate how many slots we need from the 3 dimensions of the incoming conv layer (filter w/h plus depth)\n dims = inputs.get_shape().as_list()\n new_dim = 1\n for d in dims[1:]: # leave first axis as is (batch)\n new_dim = new_dim * d # multiply 'em up\n return tf.reshape(inputs, [-1, new_dim]) # -1=keep this dimension as is (it could be anything as this is the number of samples) and flatten the others", "def build(self, input_shape):\n if hasattr(self, \"timesteps\") and self.timesteps is not None:\n self.timestep_dim = self.timesteps\n else:\n self.timestep_dim = 1 # input_shape[0]\n\n self.input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"kernel\",\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n )\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 4),\n name=\"recurrent_kernel\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n # add 
attention kernel\n self.attention_kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"attention_kernel\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n # add attention weights\n # weights for attention model\n self.attention_weights = self.add_weight(\n shape=(self.input_dim, self.units),\n name=\"attention_W\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n self.attention_recurrent_weights = self.add_weight(\n shape=(self.units, self.units),\n name=\"attention_U\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n if self.use_bias:\n if self.unit_forget_bias:\n\n def bias_initializer(shape, *args, **kwargs):\n return K.concatenate(\n [\n self.bias_initializer((self.units,), *args, **kwargs),\n initializers.Ones()((self.units,), *args, **kwargs),\n self.bias_initializer(\n (self.units * 2,), *args, **kwargs\n ),\n ]\n )\n\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(\n shape=(self.units * 4,),\n name=\"bias\",\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_bias = self.add_weight(\n shape=(self.units,),\n name=\"attention_b\",\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_recurrent_bias = self.add_weight(\n shape=(self.units, 1),\n name=\"attention_v\",\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n else:\n self.bias = None\n self.attention_bias = None\n self.attention_recurrent_bias = None\n\n self.kernel_i = self.kernel[:, : self.units]\n self.kernel_f = self.kernel[:, self.units : self.units * 2]\n self.kernel_c = self.kernel[:, self.units * 2 : self.units * 3]\n self.kernel_o = self.kernel[:, self.units * 3 :]\n\n self.recurrent_kernel_i = self.recurrent_kernel[:, : self.units]\n self.recurrent_kernel_f = self.recurrent_kernel[\n :, self.units : self.units * 2\n ]\n self.recurrent_kernel_c = self.recurrent_kernel[\n :, self.units * 2 : self.units * 3\n ]\n self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3 :]\n\n self.attention_i = self.attention_kernel[:, : self.units]\n self.attention_f = self.attention_kernel[:, self.units : self.units * 2]\n self.attention_c = self.attention_kernel[:, self.units * 2 : self.units * 3]\n self.attention_o = self.attention_kernel[:, self.units * 3 :]\n\n if self.use_bias:\n self.bias_i = self.bias[: self.units]\n self.bias_f = self.bias[self.units : self.units * 2]\n self.bias_c = self.bias[self.units * 2 : self.units * 3]\n self.bias_o = self.bias[self.units * 3 :]\n else:\n self.bias_i = None\n self.bias_f = None\n self.bias_c = None\n self.bias_o = None\n\n self.built = True", "def build(self, input_shape):\n pass", "def squeeze_excitation(config):\n def f(x):\n p = layers.GlobalAveragePooling2D(dtype=config.policy)(x)\n filters = int(p.shape[1])\n d0 = dense(filters // 4, config)(p)\n d1 = dense(filters, config, activation=layers.Lambda(\n activations.sigmoid, dtype=config.policy))(d0)\n d1 = layers.Reshape((1, 1, -1), dtype=config.policy)(d1)\n return layers.Multiply(dtype=config.policy)([x, d1])\n return f", "def build(self, input_shape):\r\n self.kernel = self.add_weight(shape=(input_shape[-1], 
self.units), name='kernel')\r\n if self.recurrent:\r\n self.recurrent_kernel = self.add_weight(shape=(self.units, self.units), name='recurrent_kernel')\r\n self.bias = self.add_weight(shape=(self.units,), initializer='ones', name='bias')\r\n self.built = True", "def _create_squeeze(cls, onnx_node, inputs, opset_version):\n axes = onnx_node.getattr(\"axes\")\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(axes)", "def build(self, unused_input_shapes):\n self.query_dense = self._projection_dense_layer(\"query\")\n self.key_dense = self._projection_dense_layer(\"key\")\n self.value_dense = self._projection_dense_layer(\"value\")\n self.attention_probs_dropout = tf.keras.layers.Dropout(\n rate=self.attention_probs_dropout_prob)\n super(CustomAttention, self).build(unused_input_shapes)", "def build(self, input_shape):\n assert len(input_shape) == 3\n assert input_shape[1] == input_shape[2]\n self.out_dim = input_shape[2]\n # self.b = K.eye(self.out_dim, name='strange?')\n self.built = True", "def build(self, input_shape):\n assert len(input_shape) == 3\n assert input_shape[1] == input_shape[2]\n self.out_dim = input_shape[2]\n # self.b = K.eye(self.out_dim, name='strange?')\n self.built = True", "def build(self, input_shape):\n assert len(input_shape) == 3\n assert input_shape[1] == input_shape[2]\n self.out_dim = input_shape[2]\n # self.b = K.eye(self.out_dim, name='strange?')\n self.built = True", "def _build(self, inp, is_training):\n x = inp\n orig_x = x\n if self.in_filters is None:\n self.in_filters = x.get_shape().as_list()[-1]\n assert self.in_filters == x.get_shape().as_list()[-1], 'Module was initialised for a different input shape'\n\n pool_op = tf.nn.max_pool if len(x.get_shape().as_list()) == 4 else tf.nn.max_pool3d\n\n # Handle strided convolutions\n kernel_size = self.kernel_size\n if np.prod(self.stride) != 1:\n kernel_size = self.stride\n orig_x = pool_op(orig_x, [1, ] + self.stride + [1, ], [1, ] + self.stride + [1, ], 'VALID')\n\n # Add a convolutional layer\n with tf.variable_scope('sub1'):\n x = BatchNorm()(x, is_training)\n x = leaky_relu(x, self.relu_leakiness)\n x = Convolution(self.out_filters, kernel_size, self.stride)(x)\n\n # Add a convolutional layer\n with tf.variable_scope('sub2'):\n x = BatchNorm()(x, is_training)\n x = leaky_relu(x, self.relu_leakiness)\n x = Convolution(self.out_filters, self.kernel_size)(x)\n\n # Add the residual\n with tf.variable_scope('sub_add'):\n # Handle differences in input and output filter sizes\n if self.in_filters < self.out_filters:\n orig_x = tf.pad(orig_x, [[0, 0]] * (len(x.get_shape().as_list()) - 1) +\n [[int(np.floor((self.out_filters - self.in_filters) / 2.)),\n int(np.ceil((self.out_filters - self.in_filters) / 2.))]])\n elif self.in_filters > self.out_filters:\n orig_x = Convolution(self.out_filters, [1] * len(self.kernel_size), 1)(orig_x)\n\n x += orig_x\n return x", "def build(self, input_shape):\n self.nb_samples = input_shape[0]\n self.nb_filter = input_shape[self.axis_filter]\n self.rows = input_shape[self.axis_row]\n self.cols = input_shape[self.axis_col]\n\n # Calculate covariance axis\n if self.cov_mode == 'channel' or self.cov_mode == 'mean' or self.cov_mode == 'pmean':\n self.cov_dim = self.nb_filter\n else:\n self.cov_dim = self.rows * self.cols\n\n # Set out_dim accordingly.\n if self.cov_mode == 'mean' or self.cov_mode == 'pmean':\n self.out_dim = self.cov_dim + 1\n else:\n self.out_dim = self.cov_dim\n\n if self.cov_mode == 'pmean':\n self.mean_p = 
self.cov_beta\n self.name += '_pm_{}'.format(self.mean_p)\n print(\"use parametric non_trainable {}\".format(self.mean_p))\n\n if self.robust:\n print('use robust estimation with cov_alpha {}'.format(self.cov_alpha))\n self.name += '_rb'\n\n if self.cov_regulairzer == 'Fob':\n self.C_regularizer = FrobNormRegularizer(self.out_dim, self.cov_alpha)\n self.activity_regularizer = self.C_regularizer\n elif self.cov_regulairzer == 'vN':\n self.C_regularizer = VonNeumannDistanceRegularizer(self.out_dim, self.cov_alpha, self.eps)\n self.activity_regularizer = self.C_regularizer\n\n # add the alpha\n # self.alpha = self.add_weight(\n # shape=d\n # )\n self.built = True", "def build(self, input_shape):\n self.embedding = layers.Embedding(MAX_WORDS, 7, input_length=MAX_LEN)\n self.conv_1 = layers.Conv1D(16, kernel_size=5, name=\"conv_1\", activation=\"relu\")\n self.pool_1 = layers.MaxPool1D(name=\"pool_1\")\n self.conv_2 = layers.Conv1D(\n 128, kernel_size=2, name=\"conv_2\", activation=\"relu\"\n )\n self.pool_2 = layers.MaxPool1D(name=\"pool_2\")\n self.flatten = layers.Flatten()\n self.dense = layers.Dense(1, activation=\"sigmoid\")\n super(CnnModel, self).build(input_shape)", "def build(\n self, input_shape\n ):\n\n if isinstance(input_shape, tuple):\n expert_shapes, routing_input_shape = input_shape\n else:\n expert_shapes, routing_input_shape = input_shape, None\n num_experts = len(expert_shapes)\n # num_binary is the number of binary vars required to encode the\n # num_experts choices.\n self._num_binary = math.ceil(math.log2(num_experts))\n # Boolean to check if num_experts is a power of 2.\n self._power_of_2 = (num_experts == 2**self._num_binary)\n if routing_input_shape is None:\n # z_logits is a trainable 3D tensor used for selecting the experts.\n # Axis 0: Number of non-zero experts to select.\n # Axis 1: Dummy axis of length 1 used for broadcasting.\n # Axis 2: Each num_binary-dimensional row corresponds to a \"single-expert\"\n # selector.\n self._z_logits = self.add_weight(\n name=\"z_logits\",\n shape=(self._num_nonzeros, 1, self._num_binary),\n initializer=self._z_initializer,\n trainable=True)\n # w_logits is a trainable tensor used to assign weights to the\n # single-expert selectors. Each element of w_logits is a logit.\n self._w_logits = self.add_weight(\n name=\"w_logits\",\n shape=(self._num_nonzeros, 1),\n initializer=self._w_initializer,\n trainable=True)\n else:\n self._z_logits = tf.keras.layers.Dense(\n self._num_nonzeros * self._num_binary,\n kernel_initializer=self._z_initializer,\n bias_initializer=self._z_initializer)\n self._w_logits = tf.keras.layers.Dense(\n self._num_nonzeros,\n kernel_initializer=self._w_initializer,\n bias_initializer=self._w_initializer)\n # binary_matrix is a (num_experts, num_binary)-matrix used for binary\n # encoding. 
The i-th row contains a num_binary-digit binary encoding of the\n # integer i.\n binary_matrix = np.array([\n list(np.binary_repr(val, width=self._num_binary))\n for val in range(num_experts)\n ]).astype(bool)\n # A constant tensor = binary_matrix, with an additional dimension for\n # broadcasting.\n self._binary_codes = tf.expand_dims(\n tf.constant(binary_matrix, dtype=bool), axis=0)\n self.built = True", "def add_input_and_output_shape(self, input_shape, output_shape):", "def build(self, input_shape):\n hidden_dim = input_shape[2]\n self.W = self.add_weight(\n name='{}_W'.format(self.name),\n shape=(hidden_dim, hidden_dim,),\n initializer='uniform',\n trainable=True)\n self.b = self.add_weight(\n name='{}_b'.format(self.name),\n shape=(hidden_dim,),\n initializer='zeros',\n trainable=True)\n self.u = self.add_weight(\n name='{}_u'.format(self.name),\n shape=(hidden_dim,),\n initializer='uniform',\n trainable=True)\n super(AttentionLayer, self).build(input_shape)", "def build(self, input_shape: tf.Tensor):\n self.dense = tf.keras.layers.Dense(self.channels, input_shape=input_shape)\n self.bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)", "def build_net(self, trainable=True, name=None):\n input_shape = self._env_spec.observation_space.shape\n assert len(input_shape) in [2, 3]\n if len(input_shape) == 2:\n input_shape = (1, ) + input_shape\n\n with tf.variable_scope(name):\n l_in = layers.InputLayer(shape=(None, self._obs_dim), name=\"obs\")\n l_hid = layers.reshape(\n l_in, ([0], ) + input_shape, name=\"reshape_input\")\n\n if self._batch_norm:\n l_hid = layers.batch_norm(l_hid)\n\n for idx, conv_filter, filter_size, stride, pad in zip(\n range(len(self._conv_filters)),\n self._conv_filters,\n self._conv_filter_sizes,\n self._conv_strides,\n self._conv_pads,\n ):\n l_hid = layers.Conv2DLayer(\n l_hid,\n num_filters=conv_filter,\n filter_size=filter_size,\n stride=(stride, stride),\n pad=pad,\n nonlinearity=self._hidden_nonlinearity,\n name=\"conv_hidden_%d\" % idx,\n weight_normalization=self._weight_normalization,\n trainable=trainable,\n )\n if self._pooling:\n l_hid = layers.Pool2DLayer(\n l_hid, pool_size=self._pool_size)\n if self._batch_norm:\n l_hid = layers.batch_norm(l_hid)\n\n l_hid = layers.flatten(l_hid, name=\"conv_flatten\")\n l_action = layers.InputLayer(\n shape=(None, self._action_dim), name=\"actions\")\n\n n_layers = len(self._hidden_sizes) + 1\n if n_layers > 1:\n action_merge_layer = \\\n (self._action_merge_layer % n_layers + n_layers) % n_layers\n else:\n action_merge_layer = 1\n\n for idx, size in enumerate(self._hidden_sizes):\n if self._batch_norm:\n l_hid = batch_norm(l_hid)\n\n if idx == action_merge_layer:\n l_hid = layers.ConcatLayer([l_hid, l_action])\n\n l_hid = layers.DenseLayer(\n l_hid,\n num_units=size,\n nonlinearity=self._hidden_nonlinearity,\n trainable=trainable,\n name=\"hidden_%d\" % (idx + 1))\n\n if action_merge_layer == n_layers:\n l_hid = layers.ConcatLayer([l_hid, l_action])\n\n l_output = layers.DenseLayer(\n l_hid,\n num_units=1,\n nonlinearity=self._output_nonlinearity,\n trainable=trainable,\n name=\"output\")\n\n output_var = layers.get_output(l_output)\n\n f_qval = tensor_utils.compile_function(\n [l_in.input_var, l_action.input_var], output_var)\n output_layer = l_output\n obs_layer = l_in\n action_layer = l_action\n\n return f_qval, output_layer, obs_layer, action_layer", "def SqueezeNet(include_top=False, weights='imagenet',\n input_tensor=None, input_shape=(224,224,3),\n pooling='avg',\n classes=1000):\n \n if 
weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n\n input_shape = _obtain_input_shape(input_shape,\n default_size=227,\n min_size=48,\n data_format=K.image_data_format(),\n require_flatten=include_top)\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n if not K.is_keras_tensor(input_tensor):\n img_input = Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n\n\n x = Convolution2D(64, (3, 3), strides=(2, 2), padding='valid', name='conv1')(img_input)\n x = Activation('relu', name='relu_conv1')(x)\n x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)\n\n x = fire_module(x, fire_id=2, squeeze=16, expand=64)\n x = fire_module(x, fire_id=3, squeeze=16, expand=64)\n x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)\n\n x = fire_module(x, fire_id=4, squeeze=32, expand=128)\n x = fire_module(x, fire_id=5, squeeze=32, expand=128)\n x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)\n\n x = fire_module(x, fire_id=6, squeeze=48, expand=192)\n x = fire_module(x, fire_id=7, squeeze=48, expand=192)\n x = fire_module(x, fire_id=8, squeeze=64, expand=256)\n x = fire_module(x, fire_id=9, squeeze=64, expand=256)\n \n if include_top:\n \n x = Dropout(0.5, name='drop9')(x)\n\n x = Convolution2D(classes, (1, 1), padding='valid', name='conv10')(x)\n x = Activation('relu', name='relu_conv10')(x)\n x = GlobalAveragePooling2D()(x)\n x = Activation('softmax', name='loss')(x)\n else:\n if pooling == 'avg':\n x = Dropout(0.7, name='drop9')(x)\n x = GlobalAveragePooling2D()(x)\n elif pooling=='max':\n x = GlobalMaxPooling2D()(x)\n elif pooling==None:\n pass\n else:\n raise ValueError(\"Unknown argument for 'pooling'=\" + pooling)\n\n\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n\n model = Model(inputs, x, name='squeezenet')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('squeezenet_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models')\n else:\n weights_path = get_file('squeezenet_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models')\n \n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). '\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model", "def __init__(self,\n input_shape,\n filter_shape,\n padding,\n strides=None,\n dilation_rate=None,\n name=None,\n data_format=None,\n num_spatial_dims=None):\n num_batch_dims = None\n filter_shape = tensor_shape.as_shape(filter_shape)\n input_shape = tensor_shape.as_shape(input_shape)\n\n if filter_shape.ndims is not None:\n if (num_spatial_dims is not None and\n filter_shape.ndims != num_spatial_dims + 2):\n raise ValueError(\n \"`filters.shape.rank` must be `num_spatial_dims + 2`. 
Received: \"\n f\"filters.shape={filter_shape} of rank {filter_shape.rank} and \"\n f\"num_spatial_dims={num_spatial_dims}\")\n else:\n num_spatial_dims = filter_shape.ndims - 2\n\n if input_shape.ndims is not None and num_spatial_dims is not None:\n num_batch_dims = input_shape.ndims - num_spatial_dims - 1\n\n if num_spatial_dims is None:\n num_spatial_dims = input_shape.ndims - 2\n else:\n if input_shape.ndims is not None:\n if input_shape.ndims < num_spatial_dims + 2:\n raise ValueError(\n \"`input.shape.rank` must be >= than `num_spatial_dims + 2`. \"\n f\"Received: input.shape={input_shape} of rank {input_shape.rank} \"\n f\"and num_spatial_dims={num_spatial_dims}\")\n else:\n if num_batch_dims is None:\n num_batch_dims = input_shape.ndims - num_spatial_dims - 1\n\n if num_spatial_dims is None:\n raise ValueError(\n \"When `num_spatial_dims` is not set, one of `input.shape.rank` or \"\n \"`filters.shape.rank` must be known. \"\n f\"Received: input.shape={input_shape} of rank {input_shape.rank} and \"\n f\"`filters.shape={filter_shape}` of rank {filter_shape.rank}\")\n\n if num_batch_dims is None:\n num_batch_dims = 1\n\n if num_batch_dims < 1:\n raise ValueError(\n f\"Batch dims should be >= 1, but found {num_batch_dims}. \"\n \"Batch dims was estimated as \"\n \"`input.shape.rank - num_spatial_dims - 1` and `num_spatial_dims` \"\n \"was either provided or estimated as `filters.shape.rank - 2`. \"\n f\"Received: input.shape={input_shape} of rank {input_shape.rank}, \"\n f\"filters.shape={filter_shape} of rank {filter_shape.rank}, and \"\n f\"num_spatial_dims={num_spatial_dims}\")\n\n if data_format is None or not data_format.startswith(\"NC\"):\n input_channels_dim = tensor_shape.dimension_at_index(\n input_shape, num_spatial_dims + num_batch_dims)\n spatial_dims = range(num_batch_dims, num_spatial_dims + num_batch_dims)\n else:\n input_channels_dim = tensor_shape.dimension_at_index(\n input_shape, num_batch_dims)\n spatial_dims = range(\n num_batch_dims + 1, num_spatial_dims + num_batch_dims + 1)\n\n filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)\n if not (input_channels_dim % filter_dim).is_compatible_with(0):\n raise ValueError(\n \"The number of input channels is not divisible by the corresponding \"\n f\"number of output filters. 
Received: input.shape={input_shape} with \"\n f\"{input_channels_dim} channels and filters.shape={filter_shape} \"\n f\"with {filter_dim} output filters.\")\n\n strides, dilation_rate = _get_strides_and_dilation_rate(\n num_spatial_dims, strides, dilation_rate)\n\n self.input_shape = input_shape\n self.filter_shape = filter_shape\n self.data_format = data_format\n self.strides = strides\n self.padding = padding\n self.name = name\n self.dilation_rate = dilation_rate\n self.num_batch_dims = num_batch_dims\n self.num_spatial_dims = num_spatial_dims\n self.conv_op = _WithSpaceToBatch(\n input_shape,\n dilation_rate=dilation_rate,\n padding=padding,\n build_op=self._build_op,\n filter_shape=filter_shape,\n spatial_dims=spatial_dims,\n data_format=data_format,\n num_batch_dims=num_batch_dims)", "def __init__(self, incoming, shape, name='ReshapeLayer'):\n super(ReshapeLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n self.shape = shape\n self.out = tf.zeros(self.get_output_shape())\n self.name = name", "def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = nest.flatten(target_shape)", "def build(self, input_shape):\n dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError(\"Unable to build `Dense` layer with non-floating point \"\n \"dtype %s\" % (dtype,))\n input_shape = tf.TensorShape(input_shape)\n if tf.compat.dimension_value(input_shape[-1]) is None:\n raise ValueError(\"The last dimension of the inputs to `Dense` \"\n \"should be defined. Found `None`.\")\n self.last_dim = tf.compat.dimension_value(input_shape[-1])\n self.input_spec = tf.keras.layers.InputSpec(\n min_ndim=3, axes={-1: self.last_dim})\n # Determines variable shapes.\n if self.backward_compatible:\n kernel_shape = self.compatible_kernel_shape\n bias_shape = self.compatible_bias_shape\n else:\n kernel_shape = self.kernel_shape\n bias_shape = self.bias_shape\n\n self.kernel = self.add_weight(\n \"kernel\",\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n dtype=self.dtype,\n trainable=True)\n if self.use_bias:\n self.bias = self.add_weight(\n \"bias\",\n shape=bias_shape,\n initializer=self.bias_initializer,\n dtype=self.dtype,\n trainable=True)\n else:\n self.bias = None\n super(Dense3D, self).build(input_shape)", "def build(self, input_layer, trainable=True):\n\n with tf.variable_scope(self.name):\n # Determine the size of the input when flattened\n input_layer_shape = input_layer.get_shape()[1:].dims\n flattened_dimension = reduce(lambda x,y: x*y, input_layer_shape, tf.Dimension(1))\n\n # Create the layer\n self.layer = tf.reshape(input_layer, [-1, flattened_dimension.value])\n\n return self.layer, None, None", "def _build_model(self, image_input_source, encoder_input_source, dropout_toggle):\n\t\t# We have to match this output size.\n\t\tbatch, input_height, input_width, input_depth = image_input_source.get_shape().as_list()\n\t\n\t\tfilter_sizes = [64, 64, 64] # Like VGG net, except made by a stupid person.\n\t\n\t\t# Convolutional ops will go here.\n\t\tc0, wc0, bc0 = self._build_conv(image_input_source, [3, 3, input_depth, filter_sizes[0]], [1, 1, 1, 1], activate=False)\n\t\tc1 = self._build_max_pool(c0, [1, 2, 2, 1], [1, 2, 2, 1])\n\t\tc2, wc2, bc2 = self._build_conv(self._build_dropout(c1, dropout_toggle), [3, 3, filter_sizes[0], filter_sizes[1]], [1, 1, 1, 1])\n\t\tc3 = 
self._build_max_pool(c2, [1, 2, 2, 1], [1, 2, 2, 1])\n\t\tc4, wc4, bc4 = self._build_conv(self._build_dropout(c3, dropout_toggle), [3, 3, filter_sizes[1], filter_sizes[2]], [1, 1, 1, 1])\n\t\tc5 = self._build_max_pool(c4, [1, 2, 2, 1], [1, 2, 2, 1])\n\t\tconv_output = c5\n\t\n\t\t# Transition to FC layers.\n\t\tpre_flat_shape = conv_output.get_shape().as_list()\n\t\tflatten = tf.reshape(conv_output, [-1, pre_flat_shape[1]*pre_flat_shape[2]*pre_flat_shape[3]])\n\t\n\t\t# Dense connections\n\t\tfc0, wf0, bf0 = self._build_fc(flatten, 512)\n\t\tfc1, wf1, bf1 = self._build_fc(fc0, 512)\n\t\tfc2, wf2, bf2 = self._build_fc(self._build_dropout(fc1, dropout_toggle), self.REPRESENTATION_SIZE)\n\t\tfc_out = fc2\n\t\n\t\t# Output point and our encoder mix-in.\n\t\tmu_output, wmu, bmu = self._build_fc(fc_out, self.REPRESENTATION_SIZE)\n\t\tz_output, wz, bz = self._build_fc(fc_out, self.REPRESENTATION_SIZE)\n\t\tencoded_output = tf.random_normal(mean=mu_output, stddev=z_output, shape=z_output.get_shape()) #tf.nn.softmax(fc_out)\n\t\tencoded_input = self._build_dropout(encoder_input_source + encoded_output, dropout_toggle) # Mix input and enc.\n\t\tencoded_input.set_shape(encoded_output.get_shape()) # Otherwise we can't ascertain the size.\n\t\n\t\t# More dense connections on the offset.\n\t\tdfc2, dwf2, dbf2 = self._build_fc(encoded_input, 512, weight=tf.transpose(wf2), bias=tf.transpose(bf1))\n\t\tdfc1, dwf1, dbf1 = self._build_fc(dfc2, 512, weight=tf.transpose(wf1), bias=tf.transpose(bf0))\n\t\tdfc0, dwf0, dbf0 = self._build_fc(self._build_dropout(dfc1, dropout_toggle), flatten.get_shape().as_list()[-1], weight=tf.transpose(wf0))\n\t\n\t\t# Expand for more convolutional operations.\n\t\tunflatten = tf.reshape(dfc0, [-1, pre_flat_shape[1], pre_flat_shape[2], pre_flat_shape[3]]) #pre_flat_shape)\n\t\n\t\t# More convolutions here.\n\t\tdc5 = self._build_unpool(unflatten, [1, 2, 2, 1])\n\t\tdc4, wdc4, bdc4 = self._build_deconv(self._build_dropout(dc5, dropout_toggle), c3.get_shape().as_list(), [3, 3, filter_sizes[1], filter_sizes[2]], [1, 1, 1, 1])\n\t\tdc3 = self._build_unpool(dc4, [1, 2, 2, 1])\n\t\tdc2, wdc2, bdc2 = self._build_deconv(self._build_dropout(dc3, dropout_toggle), c1.get_shape().as_list(), [3, 3, filter_sizes[0], filter_sizes[1]], [1, 1, 1, 1])\n\t\tdc1 = self._build_unpool(dc2, [1, 2, 2, 1])\n\t\tdc0, wdc0, bdc0 = self._build_deconv(dc1, [batch, input_height, input_width, input_depth], [3, 3, input_depth, filter_sizes[0]], [1, 1, 1, 1], activate=False)\n\t\tdeconv_output = dc0\n\t\n\t\t# Return result + encoder output\n\t\treturn deconv_output, encoded_output", "def _basic_build(self, inputs_shape):\n\n d = inputs_shape[-1]\n h = self._real_units\n s = self._slots\n\n self._erase_W = self.add_variable(\n name=\"_erase_W\", shape=[d + h, h], initializer=self._kernel_initializer\n )\n self._erase_b = self.add_variable(\n name=\"_erase_b\",\n shape=[h],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n\n self._reset_W = self.add_variable(\n name=\"_reset_W\", shape=[d + h, 1], initializer=self._kernel_initializer\n )\n self._reset_b = self.add_variable(\n name=\"_reset_b\",\n shape=[1],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n\n self._add_W = self.add_variable(\n name=\"_add_W\", shape=[d + h, h], initializer=self._kernel_initializer\n )\n self._add_b = self.add_variable(\n 
name=\"_add_b\",\n shape=[h],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)\n ),\n )\n self.heads = self.add_variable(\n name=\"_heads\", shape=[s, d], initializer=self._kernel_initializer\n )\n\n self._beta = self.add_variable(\n name=\"_beta_no_reg\",\n shape=(),\n initializer=tf.compat.v1.constant_initializer(\n np.array([1.02]), dtype=np.float32\n ),\n )\n self._alpha = self.add_variable(\n name=\"_alpha_no_reg\",\n shape=(),\n initializer=tf.compat.v1.constant_initializer(\n np.array([0.98]), dtype=np.float32\n ),\n )" ]
[ "0.62610316", "0.6183013", "0.6175946", "0.61648756", "0.60825264", "0.60796523", "0.60320926", "0.6001276", "0.5968941", "0.59427345", "0.5900571", "0.5879365", "0.5879365", "0.5879365", "0.58451587", "0.5822561", "0.57939166", "0.57673603", "0.5745453", "0.5732177", "0.57216245", "0.5716792", "0.5712786", "0.56901526", "0.5629075", "0.562215", "0.5615497", "0.56141037", "0.56068087", "0.55577767" ]
0.7372575
0
concatenates a hi-resolution feature to the input tensor x in the middle of a sup3r resolution network.
def call(self, x, hi_res_feature): return tf.concat((x, hi_res_feature), axis=-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_feature(x, x1):\n if x is None:\n x = x1\n else:\n x = np.concatenate((x, x1), axis=1)\n return x", "def forward(self, x_conv_out: torch.Tensor) -> torch.Tensor:\n x_proj = F.relu(self.proj(x_conv_out))\n x_gate = torch.sigmoid(self.gate(x_conv_out))\n x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out\n return x_highway", "def two_scale_forward_high(self, x_1x):\n x_hi = nn.functional.interpolate(\n x_1x,\n scale_factor=1.5,\n align_corners=self.align_corners,\n mode='bilinear')\n\n lo_outs = self.single_scale_forward(x_1x)\n pred_10x = lo_outs['cls_out']\n p_lo = pred_10x\n aux_lo = lo_outs['aux_out']\n logit_attn = lo_outs['logit_attn']\n\n hi_outs = self.single_scale_forward(x_hi)\n pred_15x = hi_outs['cls_out']\n p_hi = pred_15x\n aux_hi = hi_outs['aux_out']\n\n p_lo = p_lo * logit_attn\n aux_lo = aux_lo * logit_attn\n p_hi = scale_as(p_hi, p_lo)\n aux_hi = scale_as(aux_hi, aux_lo)\n\n # combine lo and hi predictions with attention\n joint_pred = p_lo + p_hi * (1 - logit_attn)\n joint_aux = aux_lo + aux_hi * (1 - logit_attn)\n\n output = [joint_pred, joint_aux]\n\n # Optionally, apply supervision to the multi-scale predictions\n # directly.\n scaled_pred_15x = scale_as(pred_15x, p_lo)\n output.extend(output)\n return output", "def forward(self, x: Tuple[Tensor]) -> Tensor:\n inputs = x[self.start_level:self.end_level + 1]\n assert len(inputs) == (self.end_level - self.start_level + 1)\n feature_add_all_level = self.convs_all_levels[0](inputs[0])\n target_h, target_w = feature_add_all_level.size()[2:]\n for i in range(1, len(inputs)):\n input_p = inputs[i]\n x_p = self.convs_all_levels[i](input_p)\n h, w = x_p.size()[2:]\n factor_h = target_h // h\n factor_w = target_w // w\n assert factor_h == factor_w\n feature_per_level = aligned_bilinear(x_p, factor_h)\n feature_add_all_level = feature_add_all_level + \\\n feature_per_level\n\n feature_add_all_level = self.conv_branch(feature_add_all_level)\n feature_pred = self.conv_pred(feature_add_all_level)\n return feature_pred", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.head(x)\n x = x + self.res_blocks(x)\n x = self.upsample(x)\n x = self.tail(x)\n return x", "def forward(self, x):\n #print('output of fetures.children() : %s'%str([i for i in self.features.children()]))\n #print(\"shape of input is %s\" % str(x.size()))\n for layer_no, layer in enumerate(self.features.children()):\n\n if layer_no is 23:\n y = layer(x)\n if layer_no is 33:\n z = layer(x)\n x = layer(x)\n\n #print('debug')\n #print('layer info: %s'%str(layer))\n #print(\"shape of x is %s\" % str(x.size()))\n\n x = self.conv1D_downstream1(x)\n x = self.conv1D_downstream2(x)\n x = self.upsample_1(x)\n\n z = self.conv1D_pool4(z)\n y = self.conv1D_pool3(y)\n #print('debug')\n #print(\"shape of x is %s\"%str(x.size()))\n #print(\"shape of z is %s\" % str(z.size()))\n\n if x.size() is not z.size():\n x = nn.functional.interpolate(x,size = (z.size()[2],z.size()[3]), mode = 'nearest')\n x = x+ z\n x = self.upsample_2(x)\n x = x+y\n x = self.upsample_3(x)\n\n return x", "def transform(self, X: Tensor) -> Tensor:\n expanded_X = X.unsqueeze(dim=-2).expand(\n *X.shape[:-1], self.feature_set.shape[0], -1\n )\n expanded_features = self.feature_set.expand(*expanded_X.shape[:-1], -1)\n appended_X = torch.cat([expanded_X, expanded_features], dim=-1)\n return appended_X.view(*X.shape[:-2], -1, appended_X.shape[-1])", "def modified_flory_huggins(self, x: np.array):\n return flory_huggins(self.r, x, power_factor=(2.0/3.0))", "def forward(self, x: Tensor) -> 
Tensor: # type: ignore\n x = self.backbone(x)\n x = x.view(x.size(0), -1)\n if self.head_layers is not None:\n out = self.imagehead(x)\n return out\n else:\n return x", "def feature_forward(self, x):\n raise NotImplementedError", "def output(self,x=None,h=None,in_features=0,in_features_h=0,reuse=False):\n assert (x is not None or h is not None)\n print('re'*10,reuse , self.name)\n stride=[1, self.cnn_stride, self.cnn_stride, 1]\n with tf.variable_scope(self.name):\n Wxi=self._create_weight([self.cnn_size, self.cnn_size, in_features, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Wxi')\n Whi=self._create_weight([self.cnn_size, self.cnn_size, in_features_h, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Whi')\n Wxf=self._create_weight([self.cnn_size, self.cnn_size, in_features, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Wxf')\n Whf=self._create_weight([self.cnn_size, self.cnn_size, in_features_h, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Whf')\n Wxc=self._create_weight([self.cnn_size, self.cnn_size, in_features, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Wxc')\n Whc=self._create_weight([self.cnn_size, self.cnn_size, in_features_h, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Whc')\n Wxo=self._create_weight([self.cnn_size, self.cnn_size, in_features, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Wxo')\n Who=self._create_weight([self.cnn_size, self.cnn_size, in_features_h, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='conv_Who')\n Wci=self._create_weight([1,self.height , self.width, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='ele_Wci')\n Wcf=self._create_weight([1,self.height , self.width, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='ele_Wcf')\n Wco=self._create_weight([1,self.height , self.width, self.out_features], stddev=self.weight_dev,\n kname=self.weight_init, name='ele_Wco')\n if not reuse and h is None:\n print('xr'*20,x.get_shape().as_list(),in_features,self.out_features,in_features_h)\n h=tf.zeros((1,self.height,self.width,self.out_features))\n self._input=tf.sigmoid(self._conv2d(x,Wxi,stride=stride,pre_name='Wxi')+self._conv2d(h,Whi,use_bias=True,stride=stride,pre_name='Whi')+Wci*self._cell)\n self._forget=tf.sigmoid(self._conv2d(x,Wxf,stride=stride,pre_name='Wxf')+self._conv2d(h,Whf,use_bias=True,stride=stride,pre_name='Whf')+Wcf*self._cell)\n self._cell=self._forget*self._cell+self._input*tf.tanh(self._conv2d(x,Wxc,stride=stride,pre_name='Wxc')+self._conv2d(h,Whc,stride=stride,use_bias=True,pre_name='Whc'))\n self._output=tf.sigmoid(self._conv2d(x,Wxo,stride=stride,pre_name='Wxo')+self._conv2d(h,Who,use_bias=True,stride=stride,pre_name='Who')+Wco*self._cell)\n else:\n # print('x'*10,x.shape,'\\\\n Wxi',Wxi.shape,'\\\\n h ',h.shape,Whi.shape,'\\\\n c ',Wci.shape)\n if h is None:\n # print('x'*20,x.get_shape().as_list(),in_features,out_features)\n self._input=tf.sigmoid(self._conv2d(x,Wxi,stride=stride,pre_name='Wxi',use_bias=True)+Wci*self._cell)\n self._forget=tf.sigmoid(self._conv2d(x,Wxf,stride=stride,pre_name='Wxf',use_bias=True)+Wcf*self._cell)\n self._cell=self._forget*self._cell+self._input*tf.tanh(self._conv2d(x,Wxc,stride=stride,pre_name='Wxc',use_bias=True))\n 
self._output=tf.sigmoid(self._conv2d(x,Wxo,stride=stride,pre_name='Wxo',use_bias=True)+Wco*self._cell)\n elif x is None:\n # print('h'*20,h.get_shape().as_list(),in_features_h,out_features)\n self._input=tf.sigmoid(self._conv2d(h,Whi,use_bias=True,stride=stride,pre_name='Whi')+Wci*self._cell)\n self._forget=tf.sigmoid(self._conv2d(h,Whf,use_bias=True,stride=stride,pre_name='Whf')+Wcf*self._cell)\n self._cell=self._forget*self._cell+self._input*tf.tanh(self._conv2d(h,Whc,stride=stride,use_bias=True,pre_name='Whc'))\n self._output=tf.sigmoid(self._conv2d(h,Who,use_bias=True,stride=stride,pre_name='Who')+Wco*self._cell)\n else:\n # print('xh'*20,x.get_shape().as_list(),in_features,out_features,in_features_h)\n self._input=tf.sigmoid(self._conv2d(x,Wxi,stride=stride,pre_name='Wxi')+self._conv2d(h,Whi,use_bias=True,stride=stride,pre_name='Whi')+Wci*self._cell)\n self._forget=tf.sigmoid(self._conv2d(x,Wxf,stride=stride,pre_name='Wxf')+self._conv2d(h,Whf,use_bias=True,stride=stride,pre_name='Whf')+Wcf*self._cell)\n self._cell=self._forget*self._cell+self._input*tf.tanh(self._conv2d(x,Wxc,stride=stride,pre_name='Wxc')+self._conv2d(h,Whc,stride=stride,use_bias=True,pre_name='Whc'))\n self._output=tf.sigmoid(self._conv2d(x,Wxo,stride=stride,pre_name='Wxo')+self._conv2d(h,Who,use_bias=True,stride=stride,pre_name='Who')+Wco*self._cell)\n h=self._output*tf.tanh(self._cell)\n\n return h", "def forward(self, x):\n x, _ = equiangular_calculator(x, self.ratio)\n x = x.permute(0, 3, 1, 2)\n\n if self.return_indices:\n x, indices = F.max_pool2d(x, self.kernel_size, return_indices=self.return_indices)\n else:\n x = F.max_pool2d(x, self.kernel_size)\n x = reformat(x)\n\n if self.return_indices:\n output = x, indices\n else:\n output = x\n\n return output", "def _data_augment(self, x):\n batch_size = x.size()[0]\n x = x.view(batch_size, -1)\n bias = torch.ones(batch_size, 1).to(self.device)\n x = torch.cat((bias, x), 1)\n\n return x", "def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n padx = int(self.conv2d_guass.shape[2]/2)\r\n pady = int(self.conv2d_guass.shape[3]/2)\r\n \r\n ixx = x[:,0,:,:]\r\n ixx = ixx.unsqueeze(dim=1)\r\n sxx = nn.functional.conv2d(ixx, self.conv2d_guass, padding=(padx, pady))\r\n\r\n iyy = x[:,1,:,:]\r\n iyy = iyy.unsqueeze(dim=1)\r\n syy = nn.functional.conv2d(iyy, self.conv2d_guass, padding=(padx, pady))\r\n\r\n ixy = x[:,2,:,:]\r\n ixy = ixy.unsqueeze(dim=1)\r\n sxy = nn.functional.conv2d(ixy, self.conv2d_guass, padding=(padx, pady))\r\n \r\n output = torch.cat((sxx, syy, sxy), dim=1)\r\n return output", "def forward(self, x, y):\r\n x = torch.div(x, x.norm())\r\n y = torch.div(y, y.norm())\r\n if not self.is_cat:\r\n #x = x.mean(dim=0).unsqueeze(0) # addition over channels, x.shape = (1,256,7,7)\r\n x = torch.sum(x, dim=0).unsqueeze(0)\r\n else:\r\n x = x.reshape(-1) # under construction\r\n x = self.conv1(x)\r\n x = x.view(x.size(0), -1) # Size (1,256*7*7)\r\n\r\n y = self.conv2(y)\r\n y = torch.nn.functional.interpolate(y, size=(7,7), mode=\"bilinear\") #Size(1, 256, 7, 7)\r\n y = y.view(y.size(0), -1) # Size(1, 256*7*7)\r\n x = self.fc1(x) # Size (1,256)\r\n # x = self.bn1(x)\r\n # x = self.bn1(self.fc1(x))\r\n x = self.relu(x) # Size (1, 256)\r\n y = self.fc2(y)\r\n y = self.relu(y)\r\n\r\n # concat 2 features\r\n x = torch.cat((x, y), 1) # Size(1,512)\r\n\r\n x = self.fc3(x) # Size (1, 64)\r\n x = self.relu2(x)\r\n x = self.fc4(x) # Size (1, 4)\r\n return x", "def forward(self, x):\n\n out = F.relu(self.conv1(x))\n out = F.relu(self.conv2(out))\n\n out = 
F.relu(self.resnet_block(out))\n\n # 8 x 8 x 64\n noise = self.sample_noise((out.shape[0], self.noise_dim, out.shape[2], out.shape[3]))\n\n # print(noise.shape)\n # print(out.shape)\n\n out = torch.cat([out, noise], dim=1)\n\n # print(out.shape)\n\n out = F.relu(self.deconv1(out))\n out = F.tanh(self.deconv2(out))\n\n return out", "def h8_noup_1mix_halfattn_spec(x, h=None, init=False, ema=None, dropout_p=0.5, nr_resnet=5, nr_filters=160, nr_logistic_mix=10, resnet_nonlinearity='concat_elu'):\n\n counters = {}\n with arg_scope([nn.conv2d, nn.deconv2d, nn.gated_resnet, nn.dense, nn.nin], counters=counters, init=init, ema=ema, dropout_p=dropout_p):\n\n # parse resnet nonlinearity argument\n if resnet_nonlinearity == 'concat_elu':\n resnet_nonlinearity = nn.concat_elu\n elif resnet_nonlinearity == 'elu':\n resnet_nonlinearity = tf.nn.elu\n elif resnet_nonlinearity == 'relu':\n resnet_nonlinearity = tf.nn.relu\n else:\n raise('resnet nonlinearity ' +\n resnet_nonlinearity + ' is not supported')\n\n with arg_scope([nn.gated_resnet], nonlinearity=resnet_nonlinearity, h=h):\n\n # ////////// up pass through pixelCNN ////////\n xs = nn.int_shape(x)\n background = tf.concat(\n [\n ((tf.range(xs[1], dtype=tf.float32) - xs[1] / 2) / xs[1])[None, :, None, None] + 0. * x,\n ((tf.range(xs[2], dtype=tf.float32) - xs[2] / 2) / xs[2])[None, None, :, None] + 0. * x,\n ],\n axis=3\n )\n # add channel of ones to distinguish image from padding later on\n x_pad = tf.concat([x, tf.ones(xs[:-1] + [1])], 3)\n ul_list = [nn.down_shift(nn.down_shifted_conv2d(x_pad, num_filters=nr_filters, filter_size=[1, 3])) +\n nn.right_shift(nn.down_right_shifted_conv2d(x_pad, num_filters=nr_filters, filter_size=[2, 1]))] # stream for up and to the left\n\n for attn_rep in range(8):\n for rep in range(nr_resnet):\n ul_list.append(nn.gated_resnet(\n ul_list[-1], conv=nn.down_right_shifted_conv2d))\n\n ul = ul_list[-1]\n\n hiers = [1, ]\n hier = hiers[attn_rep % len(hiers)]\n raw_content = tf.concat([x, ul, background], axis=3)\n key, mixin = tf.split(nn.nin(nn.gated_resnet(raw_content, conv=nn.nin), nr_filters * 2 // 2), 2, axis=3)\n raw_q = tf.concat([ul, background], axis=3)\n if hier != 1:\n raw_q = raw_q[:, ::hier, ::hier, :]\n query = nn.nin(nn.gated_resnet(raw_q, conv=nn.nin), nr_filters // 2)\n if hier != 1:\n key = tf.nn.pool(key, [hier, hier], \"AVG\", \"SAME\", strides=[hier, hier])\n mixin = tf.nn.pool(mixin, [hier, hier], \"AVG\", \"SAME\", strides=[hier, hier])\n mixed = nn.causal_attention(key, mixin, query, causal_unit=1 if hier == 1 else xs[2] // hier)\n\n if hier != 1:\n mixed = tf.depth_to_space(tf.tile(mixed, [1, 1, 1, hier * hier]), hier)\n\n ul_list.append(nn.gated_resnet(ul, mixed, conv=nn.nin))\n\n\n x_out = nn.nin(tf.nn.elu(ul_list[-1]), 10 * nr_logistic_mix)\n\n return x_out", "def h8_noup_1mix_halfattn_spec(x, h=None, init=False, ema=None, dropout_p=0.5, nr_resnet=5, nr_filters=160, nr_logistic_mix=10, resnet_nonlinearity='concat_elu'):\n\n counters = {}\n with arg_scope([nn.conv2d, nn.deconv2d, nn.gated_resnet, nn.dense, nn.nin], counters=counters, init=init, ema=ema, dropout_p=dropout_p):\n\n # parse resnet nonlinearity argument\n if resnet_nonlinearity == 'concat_elu':\n resnet_nonlinearity = nn.concat_elu\n elif resnet_nonlinearity == 'elu':\n resnet_nonlinearity = tf.nn.elu\n elif resnet_nonlinearity == 'relu':\n resnet_nonlinearity = tf.nn.relu\n else:\n raise('resnet nonlinearity ' +\n resnet_nonlinearity + ' is not supported')\n\n with arg_scope([nn.gated_resnet], nonlinearity=resnet_nonlinearity, 
h=h):\n\n # ////////// up pass through pixelCNN ////////\n xs = nn.int_shape(x)\n background = tf.concat(\n [\n ((tf.range(xs[1], dtype=tf.float32) - xs[1] / 2) / xs[1])[None, :, None, None] + 0. * x,\n ((tf.range(xs[2], dtype=tf.float32) - xs[2] / 2) / xs[2])[None, None, :, None] + 0. * x,\n ],\n axis=3\n )\n # add channel of ones to distinguish image from padding later on\n x_pad = tf.concat([x, tf.ones(xs[:-1] + [1])], 3)\n ul_list = [nn.down_shift(nn.down_shifted_conv2d(x_pad, num_filters=nr_filters, filter_size=[1, 3])) +\n nn.right_shift(nn.down_right_shifted_conv2d(x_pad, num_filters=nr_filters, filter_size=[2, 1]))] # stream for up and to the left\n\n for attn_rep in range(6):\n for rep in range(nr_resnet):\n ul_list.append(nn.gated_resnet(\n ul_list[-1], conv=nn.down_right_shifted_conv2d))\n\n ul = ul_list[-1]\n\n hiers = [1, ]\n hier = hiers[attn_rep % len(hiers)]\n raw_content = tf.concat([x, ul, background], axis=3)\n key, mixin = tf.split(nn.nin(nn.gated_resnet(raw_content, conv=nn.nin), nr_filters * 2 // 2), 2, axis=3)\n raw_q = tf.concat([ul, background], axis=3)\n if hier != 1:\n raw_q = raw_q[:, ::hier, ::hier, :]\n query = nn.nin(nn.gated_resnet(raw_q, conv=nn.nin), nr_filters // 2)\n if hier != 1:\n key = tf.nn.pool(key, [hier, hier], \"AVG\", \"SAME\", strides=[hier, hier])\n mixin = tf.nn.pool(mixin, [hier, hier], \"AVG\", \"SAME\", strides=[hier, hier])\n mixed = nn.causal_attention(key, mixin, query, causal_unit=1 if hier == 1 else xs[2] // hier)\n\n if hier != 1:\n mixed = tf.depth_to_space(tf.tile(mixed, [1, 1, 1, hier * hier]), hier)\n\n ul_list.append(nn.gated_resnet(ul, mixed, conv=nn.nin))\n\n\n x_out = nn.nin(tf.nn.elu(ul_list[-1]), 10 * nr_logistic_mix)\n\n return x_out", "def forward(self, x):\n h = x\n\n # Get features\n local_feat = self.local_feat_blocks(h) # (N, C, H, W)\n global_feat = self.global_feat_blocks(local_feat)\n global_feat = self.activation(global_feat)\n global_feat = torch.sum(global_feat, dim=(2, 3))\n\n # GAN task output\n output = self.linear(global_feat)\n\n return output, local_feat, global_feat", "def x2hat(self, x):\n x_hat = t.tensor([\n [0, -x[2], x[1]],\n [x[2], 0, -x[0]],\n [-x[1], x[0], 0]\n ], device=self.DEVICE)\n return x_hat", "def reglu(x: Tensor) ->Tensor:\n assert x.shape[-1] % 2 == 0\n a, b = x.chunk(2, dim=-1)\n return a * F.relu(b)", "def forward(self, x):\n flows_forward, flows_backward = self.get_flow(x)\n b, n, _, h, w = x.size()\n\n # backward branch\n out_l = []\n feat_prop = x.new_zeros(b, self.num_feat, h, w)\n for i in range(n - 1, -1, -1):\n x_i = x[:, i, :, :, :]\n if i < n - 1:\n flow = flows_backward[:, i, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.backward_trunk(feat_prop)\n out_l.insert(0, feat_prop)\n\n # forward branch\n feat_prop = torch.zeros_like(feat_prop)\n for i in range(0, n):\n x_i = x[:, i, :, :, :]\n if i > 0:\n flow = flows_forward[:, i - 1, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.forward_trunk(feat_prop)\n\n # upsample\n out = torch.cat([out_l[i], feat_prop], dim=1)\n out = self.lrelu(self.fusion(out))\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n out = self.lrelu(self.conv_hr(out))\n out = self.conv_last(out)\n base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)\n out += base\n out_l[i] = out\n\n 
return torch.stack(out_l, dim=1)", "def __call__(self, x):\n feat = x\n if self.dropout > 0:\n x = ht.dropout_op(x, 1 - self.dropout)\n\n x = ht.csrmm_op(self.mp, x)\n x = ht.matmul_op(x, self.weight)\n x = x + ht.broadcastto_op(self.bias, x)\n if self.activation == \"relu\":\n x = ht.relu_op(x)\n elif self.activation is not None:\n raise NotImplementedError\n return ht.concat_op(x, ht.matmul_op(feat, self.weight2), axis=1)", "def forward(self, x):\n x = self.features(x)\n return x", "def Heff(self, x):\r\n x = x.reshape(self.bond * self.p, -1)\r\n # Interactions between left environment and left site\r\n result = self.HA.reshape(self.bond * self.p, -1) @ x\r\n # Interactions between right environment and right site\r\n result += x @ self.HA.reshape(self.bond * self.p, -1).T\r\n # Interactions between left and right site\r\n x = x.reshape(self.bond, self.p, self.bond, self.p)\r\n result = result.reshape(self.bond, self.p, self.bond, self.p)\r\n result += np.einsum('xyij,lirj->lxry', self.NN_interaction, x)\r\n\r\n return result.ravel()", "def h6_noup_1mix_halfattn_spec(x, h=None, init=False, ema=None, dropout_p=0.5, nr_resnet=5, nr_filters=160, nr_logistic_mix=10, resnet_nonlinearity='concat_elu'):\n\n counters = {}\n with arg_scope([nn.conv2d, nn.deconv2d, nn.gated_resnet, nn.dense, nn.nin], counters=counters, init=init, ema=ema, dropout_p=dropout_p):\n\n # parse resnet nonlinearity argument\n if resnet_nonlinearity == 'concat_elu':\n resnet_nonlinearity = nn.concat_elu\n elif resnet_nonlinearity == 'elu':\n resnet_nonlinearity = tf.nn.elu\n elif resnet_nonlinearity == 'relu':\n resnet_nonlinearity = tf.nn.relu\n else:\n raise('resnet nonlinearity ' +\n resnet_nonlinearity + ' is not supported')\n\n with arg_scope([nn.gated_resnet], nonlinearity=resnet_nonlinearity, h=h):\n\n # ////////// up pass through pixelCNN ////////\n xs = nn.int_shape(x)\n background = tf.concat(\n [\n ((tf.range(xs[1], dtype=tf.float32) - xs[1] / 2) / xs[1])[None, :, None, None] + 0. * x,\n ((tf.range(xs[2], dtype=tf.float32) - xs[2] / 2) / xs[2])[None, None, :, None] + 0. 
* x,\n ],\n axis=3\n )\n # add channel of ones to distinguish image from padding later on\n x_pad = tf.concat([x, tf.ones(xs[:-1] + [1])], 3)\n ul_list = [nn.down_shift(nn.down_shifted_conv2d(x_pad, num_filters=nr_filters, filter_size=[1, 3])) +\n nn.right_shift(nn.down_right_shifted_conv2d(x_pad, num_filters=nr_filters, filter_size=[2, 1]))] # stream for up and to the left\n\n for attn_rep in range(6):\n for rep in range(nr_resnet):\n ul_list.append(nn.gated_resnet(\n ul_list[-1], conv=nn.down_right_shifted_conv2d))\n\n ul = ul_list[-1]\n\n hiers = [1, ]\n hier = hiers[attn_rep % len(hiers)]\n raw_content = tf.concat([x, ul, background], axis=3)\n key, mixin = tf.split(nn.nin(nn.gated_resnet(raw_content, conv=nn.nin), nr_filters * 2 // 2), 2, axis=3)\n raw_q = tf.concat([ul, background], axis=3)\n if hier != 1:\n raw_q = raw_q[:, ::hier, ::hier, :]\n query = nn.nin(nn.gated_resnet(raw_q, conv=nn.nin), nr_filters // 2)\n if hier != 1:\n key = tf.nn.pool(key, [hier, hier], \"AVG\", \"SAME\", strides=[hier, hier])\n mixin = tf.nn.pool(mixin, [hier, hier], \"AVG\", \"SAME\", strides=[hier, hier])\n mixed = nn.causal_attention(key, mixin, query, causal_unit=1 if hier == 1 else xs[2] // hier)\n\n if hier != 1:\n mixed = tf.depth_to_space(tf.tile(mixed, [1, 1, 1, hier * hier]), hier)\n\n ul_list.append(nn.gated_resnet(ul, mixed, conv=nn.nin))\n\n\n x_out = nn.nin(tf.nn.elu(ul_list[-1]), 10 * nr_logistic_mix)\n\n return x_out", "def forward(self, x):\n h = self.l1(x)\n h = h.view(x.shape[0], -1, self.bottom_width, self.bottom_width)\n h = self.block2(h)\n h = self.block3(h)\n h = self.block4(h)\n h = self.block5(h)\n h = self.b6(h)\n h = self.activation(h)\n h = torch.tanh(self.c6(h))\n\n return h", "def h6_shift_small_attn_spec(x, h=None, init=False, ema=None, dropout_p=0.5, nr_resnet=5, nr_filters=160, nr_logistic_mix=10, resnet_nonlinearity='concat_elu'):\n\n counters = {}\n with arg_scope([nn.conv2d, nn.deconv2d, nn.gated_resnet, nn.dense, nn.nin, nn.mem_saving_causal_shift_nin], counters=counters, init=init, ema=ema, dropout_p=dropout_p):\n\n # parse resnet nonlinearity argument\n if resnet_nonlinearity == 'concat_elu':\n resnet_nonlinearity = nn.concat_elu\n elif resnet_nonlinearity == 'elu':\n resnet_nonlinearity = tf.nn.elu\n elif resnet_nonlinearity == 'relu':\n resnet_nonlinearity = tf.nn.relu\n else:\n raise('resnet nonlinearity ' +\n resnet_nonlinearity + ' is not supported')\n\n with arg_scope([nn.gated_resnet], nonlinearity=resnet_nonlinearity, h=h):\n\n # ////////// up pass through pixelCNN ////////\n xs = nn.int_shape(x)\n background = tf.concat(\n [\n ((tf.range(xs[1], dtype=tf.float32) - xs[1] / 2) / xs[1])[None, :, None, None] + 0. * x,\n ((tf.range(xs[2], dtype=tf.float32) - xs[2] / 2) / xs[2])[None, None, :, None] + 0. 
* x,\n ],\n axis=3\n )\n # add channel of ones to distinguish image from padding later on\n x_pad = tf.concat([x, tf.ones(xs[:-1] + [1])], 3)\n ul_list = [nn.causal_shift_nin(x_pad, nr_filters)] # stream for up and to the left\n\n for attn_rep in range(12):\n for rep in range(nr_resnet):\n ul_list.append(nn.gated_resnet(\n ul_list[-1], conv=nn.mem_saving_causal_shift_nin))\n\n ul = ul_list[-1]\n\n hiers = [1, ]\n attn_chns = 32\n hier = hiers[attn_rep % len(hiers)]\n raw_content = tf.concat([x, ul, background], axis=3)\n key, mixin = tf.split(nn.nin(raw_content, attn_chns * 2), 2, axis=3)\n raw_q = tf.concat([ul, background], axis=3)\n if hier != 1:\n raw_q = raw_q[:, ::hier, ::hier, :]\n query = nn.nin(nn.gated_resnet(raw_q, conv=nn.nin), attn_chns)\n if hier != 1:\n key = tf.nn.pool(key, [hier, hier], \"AVG\", \"SAME\", strides=[hier, hier])\n mixin = tf.nn.pool(mixin, [hier, hier], \"AVG\", \"SAME\", strides=[hier, hier])\n mixed = nn.mem_saving_causal_attention(key, mixin, query, causal_unit=1 if hier == 1 else xs[2] // hier)\n\n if hier != 1:\n mixed = tf.depth_to_space(tf.tile(mixed, [1, 1, 1, hier * hier]), hier)\n\n ul_list.append(nn.gated_resnet(ul, mixed, conv=nn.nin))\n\n\n x_out = nn.nin(tf.nn.elu(ul_list[-1]), 10 * nr_logistic_mix)\n\n return x_out", "def forward(self, x):\n x1 = x[:, 0, :, :].reshape((-1, 1, obs_size * 2 + 1, obs_size * 2 + 1))\n x2 = x[:, 1, :, :].reshape((-1, (obs_size * 2 + 1) ** 2))\n if x2.shape[0] == 1:\n x2 = np.tile(x2, (minibatch_size, 1))\n h = F.relu(self.bn1(self.conv1(x)))\n h = F.relu(self.bn2(self.conv2(x)))\n h = F.relu(self.bn3(self.conv3(x)))\n h = self.l(h)\n return DiscreteActionValue(h)", "def forward_tensor(self, x):\n pass" ]
[ "0.6034361", "0.5986766", "0.5870083", "0.58451366", "0.577961", "0.5670602", "0.56492305", "0.5633024", "0.53897166", "0.5371125", "0.53708494", "0.5368953", "0.5363347", "0.53163177", "0.5294121", "0.5290483", "0.5279402", "0.5279402", "0.5259506", "0.525445", "0.5245808", "0.52399755", "0.52297956", "0.5207509", "0.5205685", "0.5199741", "0.5160559", "0.515224", "0.51506925", "0.51443505" ]
0.7315015
0
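For context on the record above, here is a minimal, self-contained sketch of how a concatenation step like the one in that document string can be wrapped as a Keras layer. This is an illustration only: the SkipConcat class name, the toy tensor shapes, and the assumption of TensorFlow 2.x are mine and are not part of the dataset record.

    import tensorflow as tf

    class SkipConcat(tf.keras.layers.Layer):
        # Join a hi-res skip feature onto x along the channel (last) axis.
        def call(self, x, hi_res_feature):
            # Shapes must match on every axis except the last one.
            return tf.concat((x, hi_res_feature), axis=-1)

    # Usage: a (2, 8, 8, 16) activation joined with a (2, 8, 8, 4) skip feature.
    x = tf.zeros((2, 8, 8, 16))
    skip = tf.zeros((2, 8, 8, 4))
    out = SkipConcat()(x, skip)  # out.shape == (2, 8, 8, 20)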
Keep series to be added to the db based on not being in series and having freq
def _cleasing(series: Optional[dict], freq: list) -> dict:
    if series is not None:
        if series["freq"] in freq:
            if series["final"].year == 2021:
                if series["fonte"] in fonte_in:
                    if str(series["number"]) not in remove['tickers']:
                        return series
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ts_check_frequency(self):\n if self.ts_df.index.freq is None:\n self._uvts_cls_logger.info(\"No specific frequency detected.\")\n self._uvts_cls_logger.info(\"Frequency chosen in initialization: \" + str(\n self.freq) + \" enter 'n' and call ts_resample() if you are satisfied with this value.\")\n if input(\"Should a histogram of time deltas be plotted y/n?\").strip().lower() == 'y':\n ff = pd.Series(self.ts_df.index[1:(len(self.ts_df))] - self.ts_df.index[0:(len(self.ts_df) - 1)])\n ff = ff.apply(lambda x: int(x.total_seconds() / (60 * 60)))\n plt.hist(ff, bins=120)\n plt.xlabel(\"Rounded time delta [H]\")\n plt.ylabel(\"Frequency of occurrence\")\n self._uvts_cls_logger.info(ff.value_counts())\n self._uvts_cls_logger.info(\"Should hourly frequency not fit, choose a reasonable frequency and call \"\n \"set_frequency(new_freq)\")\n else:\n pass\n else:\n self._uvts_cls_logger.info(\"Time series frequency: \" + str(self.ts_df.index.freq))", "def freq(self, frequency: Optional[int]):", "def ts_resample(self):\n try:\n ts_freq = pd.DataFrame(\n index=pd.date_range(self.ts_df.index[0], self.ts_df.index[len(self.ts_df) - 1], freq=self.freq),\n columns=['dummy'])\n except ValueError:\n self._uvts_cls_logger.exception(\"Exception occurred, possibly incompatible frequency!\")\n sys.exit(\"STOP\")\n\n if self.fill_method == 'ffill':\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n self.ts_df.y = self.ts_df.y.fillna(method='ffill')\n # if np.isnan ( self.ts_df.y ).any ():\n # self.ts_df.y = self.ts_df.y.fillna ( method='bfill' )\n else: # interp\n xp = np.linspace(0, self.ts_df.size, self.ts_df.size, endpoint=False)\n fp = self.ts_df['y']\n # join\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n # pick new points\n x = np.linspace(0, ts_freq.size, ts_freq.size, endpoint=False)\n x = x[self.ts_df['y'].isna()]\n print(x.size)\n print(x)\n\n # put the values\n self.ts_df.y[self.ts_df['y'].isna()] = np.interp(x, xp, fp)\n\n if np.isnan(self.ts_df.y).any():\n self._uvts_cls_logger.warning(\"Some NaN found, something went wrong, check the data!\")\n sys.exit(\"STOP\")\n\n self._uvts_cls_logger.info(\"Time series resampled at frequency: \" + str(self.ts_df.index.freq) +\n \". 
New shape of the data: \" + str(self.ts_df.shape))\n self._uvts_cls_logger.info(\"Using time series data of range: \" + str(min(self.ts_df.index)) + ' - ' + str(\n max(self.ts_df.index)) + \" and shape: \" + str(self.ts_df.shape))\n\n return self", "def freq(self, freq=None):\n raise NotImplementedError()", "def unique_freq(self):\n unique_freq = []\n for i in self.timestamp:\n if self.state_time_map[i] not in unique_freq and self.state_time_map[i] != self.UNKNOWNSTATE:\n unique_freq.append(self.state_time_map[i])\n for i in self.minimum_frequency_cluster:\n if i not in unique_freq:\n unique_freq.append(i)\n return unique_freq", "def freq():", "def FE_create_time_series_features(dft, ts_column, ts_adds_in=[]):\r\n dtf = copy.deepcopy(dft)\r\n reset_index = False\r\n try:\r\n # ts_column = None assumes that that index is the time series index\r\n reset_index = False\r\n if ts_column is None:\r\n reset_index = True\r\n ts_column = dtf.index.name\r\n dtf = dtf.reset_index()\r\n\r\n ### In some extreme cases, date time vars are not processed yet and hence we must fill missing values here!\r\n null_nums = dtf[ts_column].isnull().sum()\r\n if null_nums > 0:\r\n # missing_flag = True\r\n new_missing_col = ts_column + '_Missing_Flag'\r\n dtf[new_missing_col] = 0\r\n dtf.loc[dtf[ts_column].isnull(),new_missing_col]=1\r\n dtf[ts_column].fillna(method='ffill', inplace=True)\r\n print(' adding %s column due to missing values in data' %new_missing_col)\r\n if dtf[dtf[ts_column].isnull()].shape[0] > 0:\r\n dtf[ts_column].fillna(method='bfill', inplace=True)\r\n\r\n if dtf[ts_column].dtype == float:\r\n dtf[ts_column] = dtf[ts_column].astype(int)\r\n\r\n ### if we have already found that it was a date time var, then leave it as it is. Thats good enough!\r\n items = dtf[ts_column].apply(str).apply(len).values\r\n #### In some extreme cases,\r\n if all(items[0] == item for item in items):\r\n if items[0] == 4:\r\n ### If it is just a year variable alone, you should leave it as just a year!\r\n dtf[ts_column] = pd.to_datetime(dtf[ts_column],format='%Y')\r\n ts_adds = []\r\n else:\r\n ### if it is not a year alone, then convert it into a date time variable\r\n dtf[ts_column] = pd.to_datetime(dtf[ts_column], infer_datetime_format=True)\r\n ### this is where you create the time series features #####\r\n dtf, ts_adds = _create_ts_features(df=dtf, tscol=ts_column)\r\n else:\r\n dtf[ts_column] = pd.to_datetime(dtf[ts_column], infer_datetime_format=True)\r\n ### this is where you create the time series features #####\r\n dtf, ts_adds = _create_ts_features(df=dtf, tscol=ts_column)\r\n if not ts_adds_in:\r\n ts_adds_copy = dtf[ts_adds].select_dtypes(include='number').columns.tolist()\r\n ### drop those columns where all rows are same i.e. zero variance ####\r\n for col in ts_adds_copy:\r\n if dtf[col].std() == 0:\r\n dtf.drop(col, axis=1, inplace=True)\r\n print(' dropping column due to zero variance in %s column' %col)\r\n ts_adds.remove(col)\r\n else:\r\n rem_cols = left_subtract(dtf.columns.tolist(), ts_adds_in)\r\n dtf = dtf[rem_cols+ts_adds_in]\r\n\r\n # If you had reset the index earlier, set it back before returning\r\n # to make it consistent with the dataframe that was sent as input\r\n if reset_index:\r\n dtf = dtf.set_index(ts_column)\r\n elif ts_column in dtf.columns:\r\n dtf.drop(ts_column, axis=1, inplace=True)\r\n else:\r\n pass\r\n except Exception as e:\r\n print(e)\r\n print('Error in Processing %s column for date time features. Continuing...' 
%ts_column)\r\n return dtf, ts_adds", "def prune(self, min_freq):\n new_forward = {}\n new_backward = [\"OOV\"]\n new_freq = [0]\n j = 1\n for i in xrange(1,len(self.backward)):\n f = self.backward[i]\n if self.freq[i] >= min_freq:\n new_forward[f] = j\n new_backward.append(f)\n new_freq.append(self.freq[i])\n j += 1\n self.forward = new_forward\n self.backward = new_backward\n self.freq = new_freq\n self.counter = j", "def asfreq(self, **kwargs): # noqa: PR01\n return DataFrameDefault.register(pandas.DataFrame.asfreq)(self, **kwargs)", "def findClosed(freqSet, freqSup):", "def update_frequencies():\n pass", "def increaseFreq(self, desHz):\n from scipy.interpolate import interp1d\n import time\n from numpy import linspace, floor\n from decimal import getcontext, Decimal\n\n if desHz > 1000: # set max freq here \n raise ValueError('Max Frequency is 1000 (3 decimal places)')\n now = time.asctime(time.localtime(time.time())) \n stamp = ''.join(['%% The following created by alog_manip.MOOSalog.MOOSalog.increaseFreq\\n%% ', now])\n increase_msg = ''.join(['%% Resultant Frequency: ',str(desHz),' Hz'])\n # hiHz = {}\n self.outData = {} # erase pre-existing dict\n self.outData['header'] = [stamp,increase_msg,'%%%%'] + self.srcData['header']\n\n def create_msgs():\n \"\"\" Puts interpolated data into dict outData\n Primary interpolation function for increaseFreq\n Consider using uniaxial spline --> would have one function for all of dictionary dat\n \"\"\"\n getcontext().prec = 3 # will round to 3 decimal places\n orig_times = sorted(dat)\n for n in range(len(dat) - 1):\n linfun = interp1d([orig_times[n], orig_times[n+1]], \\\n [dat[orig_times[n]], dat[orig_times[n+1]]])\n dt = orig_times[n+1] - orig_times[n] # current\n freq = 1/dt # current\n if dt < (1/desHz):\n print('found instance where Freq already at/above desired Freq')\n else:\n new_dt = dt*freq/desHz\n new_times = linspace(orig_times[n],orig_times[n+1],floor(dt/new_dt))\n # print(new_times)\n new_values = linfun(new_times)\n # rounded_values = [float(Decimal(\"%.3f\" % e)) for e in new_values]\n rounded_times = [float(Decimal(\"%.3f\" % e)) for e in new_times]\n for m in range(len(rounded_times)):\n # this_time = int(new_times[m]*100000)/100000 # 5 decimal places in timstamp\n self.outData[sens][meas][rounded_times[m]] = new_values[m]\n\n ## go thru and pull out dictionaries {time: value} then send to interpolation func\n for sens in self.srcData:\n if sens is not 'header':\n self.outData[sens] = {}\n for meas in self.srcData[sens]:\n self.outData[sens][meas] = {}\n dat = self.srcData[sens][meas]\n if len(dat) == 1:\n self.outData[sens][meas] = dat # only 1 data point, no interp\n else:\n create_msgs()", "def ts_fit(series: TimeSeries) -> TimeSeries:\n pass", "def test_freq(self):\n model = BDF(debug=False)\n sid = 101\n freqs = 0.1\n freq = model.add_freq(sid, freqs, comment='freq')\n #print(freq)\n\n freqs = [2.0, 3.0]\n freq = model.add_freq(sid, freqs, comment='freq')\n #print(freq)\n\n f1 = 0.\n df = 2.0\n freq1 = model.add_freq1(sid, f1, df, ndf=5, comment='freq1')\n assert len(freq1.freqs) == 6, 'freqs=%s' % freq1.freqs\n #print(freq1)\n\n f1 = 1.\n f2 = 8.0\n freq2 = model.add_freq2(sid, f1, f2, nf=6, comment='freq2')\n assert len(freq2.freqs) == 7, 'freqs=%s' % freq2.freqs\n assert np.allclose(freq2.freqs.max(), f2), freq2.freqs\n #print(freq2)\n\n freq4 = model.add_freq4(sid, f1, f2, fspread=0.1, nfm=3, comment='freq4')\n #print(model.frequencies[sid])\n #print(freq4)\n\n fractions = [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 
0.9, 1.0]\n freq5 = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')\n\n fractions = np.linspace(0., 1.)\n unused_freq5b = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')\n model.validate()\n\n freq.raw_fields()\n freq.write_card()\n freq.write_card(size=16)\n\n freq1.raw_fields()\n freq1.write_card()\n freq1.write_card(size=16)\n\n freq2.raw_fields()\n freq2.write_card()\n freq2.write_card(size=16)\n\n freq4.raw_fields()\n freq4.write_card()\n freq4.write_card(size=16)\n\n freq5.raw_fields()\n freq5.write_card()\n freq5.write_card(size=16)\n\n bdf_file = StringIO()\n model.write_bdf(bdf_file, close=False)\n unused_out = bdf_file.getvalue()\n bdf_file.seek(0)\n\n model2 = read_bdf(bdf_file, punch=True, debug=False)\n model2.uncross_reference()\n model2.safe_cross_reference()\n model2.uncross_reference()\n save_load_deck(model)", "def set_frequency(self, new_freq):\n self.freq = new_freq\n self.ts_resample()", "def freq(self, freq: Optional[int] = None) -> Optional[int]:\n ...", "def usage_per_period(series, freq,\n on_power_threshold=DEFAULT_ON_POWER_THRESHOLD,\n max_dropout_rate=DEFAULT_MAX_DROPOUT_RATE,\n verbose=False,\n energy_unit='kwh'):\n\n # TODO: replace this evil hack to handle dataframes(!)\n if isinstance(series, pd.DataFrame):\n series = series.icol(0)\n\n assert(0 <= max_dropout_rate <= 1)\n\n period_range, boundaries = _indicies_of_periods(series.index, freq)\n name = str(series.name)\n hours_on_series = pd.Series(index=period_range, dtype=np.float,\n name=name + ' hours on')\n energy_series = pd.Series(index=period_range, dtype=np.float,\n name=name + ' ' + energy_unit)\n\n MAX_SAMPLES_PER_PERIOD = (secs_per_period_alias(freq) / \n get_sample_period(series))\n MIN_SAMPLES_PER_PERIOD = (MAX_SAMPLES_PER_PERIOD *\n (1 - max_dropout_rate))\n\n for period in period_range:\n try:\n period_start_i, period_end_i = boundaries[period]\n except KeyError:\n if verbose:\n print(\"No data available for \",\n period.strftime('%Y-%m-%d'))\n continue\n\n data_for_period = series[period_start_i:period_end_i]\n if data_for_period.size < MIN_SAMPLES_PER_PERIOD:\n if verbose:\n dropout_rate = (1 - (data_for_period.size /\n MAX_SAMPLES_PER_PERIOD))\n print(\"Insufficient samples for \",\n period.strftime('%Y-%m-%d'),\n \"; n samples = \", data_for_period.size,\n \"; dropout_rate = {:.2%}\".format(dropout_rate), sep='')\n print(\" start =\", data_for_period.index[0])\n print(\" end =\", data_for_period.index[-1])\n continue\n\n hours_on_series[period] = hours_on(data_for_period,\n on_power_threshold=on_power_threshold)\n energy_series[period] = energy(data_for_period, unit=energy_unit)\n\n return pd.DataFrame({'hours_on': hours_on_series,\n energy_unit: energy_series})", "def resample(self, freq: str = 'BM'):\n self.tsdf = self.tsdf.resample(freq).last()\n return self", "def _prepare_data_to_aug(\n self, col: pd.Series, freq=0.2\n ) -> Tuple[pd.Series, pd.Series]:", "def list_of_genres_no_political(pd_series):\n\n genres = []\n for genre_list in pd_series:\n for genre in genre_list:\n if genre not in genres:\n genres.append(genre)\n\n return genres", "def dt_freq(self):\n return DateTimeDefault.register(pandas.Series.dt.freq)(self)", "def computefreqstat(freqKind, freqGroupBy, freqDropColumns):\n logecho(\"computing frequency resampling\", level=\"debug\")\n\n if freqDropColumns:\n dropcol_list = list(freqDropColumns.split(','))\n data_df.drop(columns=dropcol_list, inplace=True)\n freq_df = data_df.groupby(freqGroupBy).resample(freqKind).mean()\n stats_name = 
resource_name + '-' + freqKind\n stats_desc = 'Resampled - [%s](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)' % freqKind\n # delete internal _id column\n del freq_df[freq_df.columns[0]]\n\n freq_df.droplevel(0).reset_index(inplace=True)\n\n try:\n freq_df.drop(columns=list(freqGroupBy.split(',')), inplace=True)\n except Exception as e:\n logecho(' WARNING: %s' % e)\n\n freq_df.to_csv(stats_name + '-' + freqKind + '.csv')\n\n freq2_df = pd.read_csv(stats_name + '-' + freqKind + '.csv', na_values='')\n stats_dict = freq2_df.to_dict(orient='records')\n\n fields_dictlist = getFields(freq2_df)\n\n stats_sparse_dict = []\n for stat_record in stats_dict:\n sparse_record = {}\n for (k, v) in stat_record.items():\n if not pd.isnull(v):\n sparse_record[k] = v\n stats_sparse_dict.append(sparse_record)\n\n logecho('STATS DATA: %s' % stats_sparse_dict, level='debug')\n\n if not debug:\n os.remove(stats_name + '-' + freqKind + '.csv')\n\n return [stats_sparse_dict, fields_dictlist, stats_name, stats_desc, primarykey]", "def insert_buoyancy_frequency(self):\n # Extract the z-coordinate\n zs = self.interp_ds.coords[self.ztsp[0]].values\n \n # Compute the buoyancy frequency\n n_vals = self.buoyancy_frequency(zs)\n \n # Insert these data into the dataset\n self.interp_ds['N'] = ((self.ztsp[0]), n_vals)\n self.interp_ds['N'].attrs['units'] = '1/s'\n self.interp_ds['N'].attrs['long_name'] = 'buoyancy frequency'\n \n # Rebuild the interpolator\n self._build_interpolator()", "def resample_asfreq(self, resample_kwargs, fill_value):\n return ResampleDefault.register(pandas.core.resample.Resampler.asfreq)(\n self, resample_kwargs, fill_value\n )", "def get_freqs(self):\r\n return self.df_fix(self.freqs.copy(), 'freqs')", "def apply_freq_filter(self, min_freq):\n self._apply_filter(lambda ng, freq: freq < min_freq)", "def add_freq(idx, freq=None):\n idx = idx.copy()\n if freq is None:\n if idx.freq is None:\n freq = pd.infer_freq(idx)\n else:\n return idx\n idx.freq = pd.tseries.frequencies.to_offset(freq)\n if idx.freq is None:\n raise AttributeError(\n \"no discernible frequency found to `idx`. Specify\"\n \" a frequency string with `freq`.\"\n )\n return idx", "def source_freq(self) -> int:", "def update_mid_series(self):\n self.mids += [self.mid()]", "def reprocessSeries(self, tiltseriesdata):\n\t\treturn None" ]
[ "0.5726502", "0.5679235", "0.5555176", "0.55462915", "0.5373807", "0.53481096", "0.53470653", "0.53257716", "0.5324926", "0.532478", "0.5263858", "0.5252361", "0.52493316", "0.5245782", "0.52219915", "0.51781195", "0.51664275", "0.5162759", "0.5158153", "0.51567346", "0.5148454", "0.51478595", "0.5133122", "0.5126957", "0.5126787", "0.5096857", "0.50763446", "0.5076303", "0.50584066", "0.50520176" ]
0.6278065
0
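To make the filtering logic in the record above easier to follow, below is a small, hedged driver that applies an equivalent predicate over a list of candidate series. The keep() helper, the fonte_in whitelist, the remove['tickers'] blacklist, and the sample records are all invented for illustration and are not drawn from the dataset.

    from datetime import datetime as dt

    fonte_in = {"IBGE"}                # assumed whitelist of sources
    remove = {"tickers": ["999"]}      # assumed blacklist of series numbers

    def keep(series, freq):
        # Mirrors the nested checks above: frequency, final year, source, ticker.
        if series is None:
            return None
        if (series["freq"] in freq
                and series["final"].year == 2021
                and series["fonte"] in fonte_in
                and str(series["number"]) not in remove["tickers"]):
            return series
        return None

    candidates = [
        {"freq": "M", "final": dt(2021, 12, 1), "fonte": "IBGE", "number": 433},
        {"freq": "D", "final": dt(2019, 5, 1), "fonte": "IBGE", "number": 999},
    ]
    kept = [s for s in candidates if keep(s, freq=["M", "Q"]) is not None]
    # kept retains only the first candidate; the second fails the year and ticker checks.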
process resp from the suds response (last observation) and grab information for the series
def _process_info(resp: suds.sudsobject) -> dict:
    last = resp.ultimoValor
    return dict(fonte=str(resp.fonte),
                gestor=str(resp.gestorProprietario),
                freq=str(resp.periodicidadeSigla),
                nome=str(resp.nomeCompleto),
                number=int(resp.oid),
                final=dt(last.ano, last.mes, last.dia))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_serieses(parsed_response: dict) -> list:\n serieses = parsed_response[\"message:GenericData\"][\"message:DataSet\"][\"generic:Series\"]\n if type(serieses) != list:\n serieses = [serieses]\n return serieses", "def get_series(params):\n response = generate_request(params)\n if response:\n serie = response.get('bmx')\n return serie.get('series')\n\n return \"\"", "def get_UNSTAT_meta(series, verbose):\r\n UNSTAT_meta = []\r\n n_series = len(series)\r\n for i, that_series in enumerate(series):\r\n d = that_series['description']\r\n s = that_series['code']\r\n indicator = that_series['indicator'][0] # 'indicator' is a list \r\n if verbose:\r\n print(\"Handling \", s)\r\n # check if series has some diaggregations\r\n if s in dim_aggrs.keys():\r\n # then get dimesions \r\n dims_req = requests.get(\"https://unstats.un.org/SDGAPI/v1/sdg/Series/\"+s+\"/Dimensions\")\r\n dims = json.loads(dims_req.content)\r\n # print(dims)\r\n # generate full list of dims\r\n full_dims_c = []\r\n full_dims_d = []\r\n # look for a dim in dims\r\n for d_a in dim_aggrs[s]:\r\n list_codes = [element for element in dims if element['id'] == d_a]\r\n codes = list_codes[0]['codes']\r\n # create list of codes and list of \r\n loc = [element['code'].rstrip() for element in codes if element['code'].rstrip() not in codes_ignore]\r\n lod = [element['description'].rstrip() for element in codes if element['code'].rstrip() not in codes_ignore]\r\n full_dims_c.append(loc)\r\n full_dims_d.append(lod)\r\n # now let's prepare list of possible combinations\r\n l_dims_c = list(itertools.product(*full_dims_c))\r\n l_dims_d = list(itertools.product(*full_dims_d))\r\n for j, c in enumerate(l_dims_c):\r\n var_name = s + '_' + '_'.join(c)\r\n var_desc = d + ', ' + ', '.join(l_dims_d[j])\r\n UNSTAT_meta.append({'code': var_name,\r\n 'name': var_desc,\r\n 'source': 'UNSTAT Global SDG Indicators Database',\r\n 'metadata': 'https://unstats.un.org/wiki/display/SDGeHandbook/Indicator+' + indicator})\r\n else:\r\n UNSTAT_meta.append({'code': s,\r\n 'name': d,\r\n 'source': 'UNSTAT Global SDG Indicators Database',\r\n 'metadata': 'https://unstats.un.org/wiki/display/SDGeHandbook/Indicator+' + indicator})\r\n if verbose:\r\n print(progress_bar(i+1, n_series, l=15))\r\n return UNSTAT_meta", "def _get_raw_data(self, url, series):\n url = self._get_url(url, series)\n try:\n response = self.http.request(url, headers=self._reqheaders)\n except httplib2.ServerNotFoundError as e:\n raise TVDBConnectError(e.message), None, sys.exc_info()[2]\n rep = response[0]\n log.debug(\n 'http-status:%s,content:%s', \n rep['status'], \n rep['content-type']\n )\n if int(rep['status']) >= 400:\n raise TVDBConnectError(\n 'Failed to get \"%s\" from thetvdb. 
errno:%s' % (\n series, rep['status']),\n rep['status']\n ) \n return response[1]", "def processReadback(resp):\n a = np.fromstring(resp, dtype='<u1')\n return {\n 'build': a[51],\n 'serDAC': a[56],\n 'noPllLatch': bool((a[58] & 0x80) > 0),\n 'ackoutI2C': a[61],\n 'I2Cbytes': a[69:61:-1],\n 'executionCounter': (a[53] << 8) + a[52]\n }", "async def test_series(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/series\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"series.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.series()\n\n assert response\n assert isinstance(response, List)\n\n assert response[0]\n assert isinstance(response[0], models.SeriesItem)\n assert response[0].series\n assert isinstance(response[0].series, models.Series)\n\n assert response[0].seasons\n assert isinstance(response[0].seasons, List)\n\n assert response[0].seasons[0]\n assert isinstance(response[0].seasons[0], models.Season)", "def __extract_values_from_response(self, response_data):\n data = response_data[2:6]\n value_of_2point5micro = None\n value_of_10micro = None\n if len(data) == 4:\n value_of_2point5micro = self.mass2particles(\n 'pm2.5', float(data[0] + data[1] * 256) / 10.0)\n value_of_10micro = self.mass2particles(\n 'pm10', float(data[2] + data[3] * 256) / 10.0)\n self.logger.info(\"{}: get_values successful executed.\".format(self.sensor_name))\n if self.duty_cycle != 0:\n self.__duty_cycle_start = time.time()\n return value_of_10micro, value_of_2point5micro\n elif self.duty_cycle == 0:\n raise ValueError(\"{}: data is missing.\".format(self.sensor_name))", "def response(row):\n return row['response']", "def parse_response(response):\n data = []\n \n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n \n row_count = 0 \n for row in rows:\n #print '\\n\\n', 'ROW_COUNT: ', row_count, '\\n'\n data.append({}) \n\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n #print header + ': ' + dimension\n data[row_count][header[3:]] = dimension\n \n for i, values in enumerate(dateRangeValues):\n #print 'Date range (' + str(i) + ')'\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n #print metricHeader.get('name') + ': ' + value\n data[row_count][metricHeader.get('name')[3:]] = value\n \n row_count += 1 \n \n return data", "def transform_misses(record):\n \n response = {}\n response[\"datasetId\"] = dict(record).get(\"stableId\") \n response[\"internalId\"] = dict(record).get(\"datasetId\")\n response[\"exists\"] = False\n # response[\"datasetId\"] = '' \n response[\"variantCount\"] = 0\n response[\"callCount\"] = 0\n response[\"sampleCount\"] = 0\n response[\"frequency\"] = 0 \n response[\"numVariants\"] = 0 \n response[\"info\"] = {\"access_type\": dict(record).get(\"accessType\")}\n\n return response", "def process_response(self, response: response_domain_model):\n ...", "def labResponseExtract(self):\n if not self.getKeyword('TPL ID')=='PACMAN_cal_Lab_FSUResponse':\n print 'NOT a PACMAN_cal_Lab_FSUResponse file... 
nothing to be done'\n return\n # determine the number of scans:\n Nscans = int(self.raw['IMAGING_DATA_FSUA'].data.field('STEPPING_PHASE').max())\n self.labScans = []\n for n in range(Nscans)[1:]:\n s = {}\n for f in ['A', 'B']:\n w = np.where(self.raw['IMAGING_DATA_FSU'+f].data.field('STEPPING_PHASE')==n)\n tmp = {}\n tmp['TIME']=self.raw['IMAGING_DATA_FSU'+f].data.field('TIME')[w]\n if f=='A':\n tmp['DELTAL']=np.interp(self.raw['IMAGING_DATA_FSU'+f].data.field('TIME')[w],\n self.raw['METROLOGY_DATA'].data.field('TIME'),\n self.raw['METROLOGY_DATA'].data.field('DELTAL'))+\\\n np.interp(self.raw['IMAGING_DATA_FSU'+f].data.field('TIME')[w],\n self.raw['METROLOGY_DATA_FSUB'].data.field('TIME'),\n self.raw['METROLOGY_DATA_FSUB'].data.field('DELTAL'))\n else:\n tmp['DELTAL']=-np.interp(self.raw['IMAGING_DATA_FSU'+f].data.field('TIME')[w],\n self.raw['METROLOGY_DATA_FSUB'].data.field('TIME'),\n self.raw['METROLOGY_DATA_FSUB'].data.field('DELTAL'))\n tmp['A'] = self.raw['IMAGING_DATA_FSU'+f].data.field('DATA1')[w[0],:]\n tmp['B'] = self.raw['IMAGING_DATA_FSU'+f].data.field('DATA2')[w[0],:]\n tmp['C'] = self.raw['IMAGING_DATA_FSU'+f].data.field('DATA3')[w[0],:]\n tmp['D'] = self.raw['IMAGING_DATA_FSU'+f].data.field('DATA4')[w[0],:]\n tmp['GD']= self.raw['IMAGING_DATA_FSU'+f].data.field('GD')[w]\n tmp['GDSNR']= self.raw['IMAGING_DATA_FSU'+f].data.field('GDSNR')[w]\n tmp['PD']= self.raw['IMAGING_DATA_FSU'+f].data.field('PD')[w]\n tmp['PDSNR']= self.raw['IMAGING_DATA_FSU'+f].data.field('PDSNR')[w]\n s['FSU'+f]=tmp\n self.labScans.append(s)\n return", "def get_real_and_pred_data():\n if request.method == 'POST':\n \n entry = json.loads(request.data)\n \n date = entry['date']\n\n res = {}\n\n time_series, time_series_pred = make_predictions(time_series_logs, forecasting_models, \n FORECASTING_TARGETS, date)\n \n for target in FORECASTING_TARGETS:\n \n target_val = list(time_series[target])\n target_pred_val = list(time_series_pred[target])\n res[target] = target_val\n res[target + '_pred'] = target_pred_val\n \n xticks = list(map(str, time_series_pred['date']))\n \n \n \n res['xticks'] = xticks\n \n json_data = json.dumps(res)\n resp = Response(json_data, status=200, mimetype='application/json')\n resp.headers = {'Access-Control-Allow-Origin': '*'}\n\n return resp\n \n else:\n return 'only get request is allowed'", "def parse(self, response):", "def test():\n temp_data = fetch_temp_data(\n (\"https://opendata-download-metobs.smhi.se/api/version/\" +\n \"latest/parameter/1/station/52350/period/latest-day/data.json\"))\n data = temp_series(temp_data)\n print(data)", "def retrieve_time_series(api, series_ID):\r\n #Retrieve Data By Series ID \r\n series_search = api.data_by_series(series=series_ID)\r\n ##Create a pandas dataframe from the retrieved time series\r\n df = pd.DataFrame(series_search)\r\n return df", "def test_get_series_statistics(self):\n msg = \"Response status is not 200\"\n response = self.api.get_series_statistics(self.series_id, self.team_id)\n self.assertEqual(response.status_code, 200, msg)", "def _events_by_responses(stimulus_events, stim_metadata, behavioral_results, load_error_trials):\n\n has_response = 'rt' in behavioral_results\n if has_response:\n rt = behavioral_results['rt']\n resp = behavioral_results['response']\n\n if not load_error_trials:\n ok_trials = list(stim_metadata.correct)\n idx = np.where(ok_trials)\n stimulus_events = stimulus_events[idx[0], :]\n stim_metadata = stim_metadata.iloc[idx[0]]\n rt = rt[idx[0]]\n resp = resp[idx[0]]\n\n response_events = 
stimulus_events.copy()\n response_events = response_events[resp != 0]\n response_events[:, 0] = [response_events[k, 0]+rt[k] for k in range(len(response_events))]\n response_metadata = stim_metadata[resp != 0]\n\n else:\n assert load_error_trials, \"Can't exclude error trials when the metadata does not contain responses\"\n response_events = None\n response_metadata = None\n\n return response_events, response_metadata, stimulus_events, stim_metadata", "def _parse_ts_response(self, response, prompt):\n \n if prompt != SBE37Prompt.COMMAND:\n raise InstrumentProtocolException('ts command not recognized: %s', response)\n \n sample = None\n for line in response.split(SBE37_NEWLINE):\n sample = self._extract_sample(line, True)\n if sample:\n break\n \n if not sample: \n raise SampleException('Response did not contain sample: %s' % repr(response))\n \n return sample", "def parse_response(self):\n pass", "def process_response(self, result):\r\n if len(result) == 3:\r\n data = result[0]\r\n headers = result[2]\r\n if self.HEADER_API_VERSION in headers:\r\n api_version = headers[self.HEADER_API_VERSION]\r\n if (not self.already_printed_version_warning\r\n and not self.is_up_to_date(api_version)):\r\n print('Warning: Looks like you\\'re using an outdated API '\r\n 'Version, please consider updating (server ' +\r\n api_version + ' / client ' + self.__version__ + ')')\r\n self.already_printed_version_warning = True\r\n return data\r\n return result", "def parse_response(response):\n # a result should always have a status\n status = response['status']\n\n # a result _may_ have a results or a reason\n result = response.get('results', [])\n reason = response.get('reason', None)\n\n return status, result, reason", "def decode_timeseries(self, resp_ttb, tsobj,\n convert_timestamp=False):\n if resp_ttb is None:\n return tsobj\n\n self.maybe_err_ttb(resp_ttb)\n\n # NB: some queries return a BARE 'tsqueryresp' atom\n # catch that here:\n if resp_ttb == tsqueryresp_a:\n return tsobj\n\n # The response atom is the first element in the response tuple\n resp_a = resp_ttb[0]\n if resp_a == tsputresp_a:\n return\n elif resp_a == tsgetresp_a or resp_a == tsqueryresp_a:\n resp_data = resp_ttb[1]\n if len(resp_data) == 0:\n return\n elif len(resp_data) == 3:\n resp_colnames = resp_data[0]\n resp_coltypes = resp_data[1]\n tsobj.columns = self.decode_timeseries_cols(\n resp_colnames, resp_coltypes)\n resp_rows = resp_data[2]\n tsobj.rows = []\n for resp_row in resp_rows:\n tsobj.rows.append(\n self.decode_timeseries_row(resp_row, resp_coltypes,\n convert_timestamp))\n else:\n raise RiakError(\n \"Expected 3-tuple in response, got: {}\".format(resp_data))\n else:\n raise RiakError(\"Unknown TTB response type: {}\".format(resp_a))", "def get_udis_series(initial_date: str, end_date:str) -> dict:\n\n url = f\"{BANXICO_URL}/{BANXICO_UDIS_SERIE}/datos/{initial_date}/{end_date}\"\n udis_response = _request_handler.get(url, headers=_headers)\n udis_values_per_day = {}\n response = {}\n if udis_response:\n name = udis_response.get(\"bmx\", {}).get(\"series\", [])[0].get(\"titulo\", \"\")\n dates = udis_response.get(\"bmx\", {}).get(\"series\", [])[0].get(\"datos\", \"\")\n if dates:\n for date in dates:\n udis_values_per_day[date.get(\"fecha\", \"\")] = float(date.get(\"dato\"))\n\n max_udi_value = (max(dates, key=lambda x:float(x.get(\"dato\", -1))))\n min_udi_value = (min(dates, key=lambda x:float(x.get(\"dato\", -1))))\n average_udi = float(sum(float(d['dato']) for d in dates)) / len(dates)\n response= {\n \"name\": name,\n 
\"average_udi_value\": average_udi,\n \"max_udi_value\": {\n \"value\": float(max_udi_value.get(\"dato\", -1)),\n \"date\": max_udi_value.get(\"fecha\", -1)\n },\n \"min_udi_value\":{\n \"value\": float(min_udi_value.get(\"dato\", -1)),\n \"date\": min_udi_value.get(\"fecha\", -1)\n },\n \"dates_udis\": udis_values_per_day\n }\n\n return response\n else:\n return {}", "def _process_response(self, response, marker_elems=None):\r\n body = response.read()\r\n #print body\r\n if '<Errors>' not in body:\r\n rs = ResultSet(marker_elems)\r\n h = handler.XmlHandler(rs, self)\r\n xml.sax.parseString(body, h)\r\n return rs\r\n else:\r\n raise MTurkRequestError(response.status, response.reason, body)", "def getkeytimestats(s,refconvdf):\r\n df=refconvdf[refconvdf.convid==s]\r\n \r\n #first response\r\n firstrsp=df[df.idx_conv==1]\r\n if firstrsp.empty:\r\n firstrsp=None\r\n else: \r\n firstrsp=firstrsp.created_at.iloc[0]\r\n \r\n #closed part types \r\n clsparts=df[df.part_type=='close']\r\n \r\n #first closed\r\n firstcls=clsparts.head(1)\r\n if firstcls.empty:\r\n firstcls=None \r\n else: \r\n firstcls=firstcls.created_at.iloc[0]\r\n \r\n #clast closed\r\n lastcls=clsparts.tail(1)\r\n if lastcls.empty:\r\n lastcls=None\r\n else: \r\n lastcls=lastcls.created_at.iloc[0]\r\n return pd.Series(dict(first_response=firstrsp,first_closed=firstcls,last_closed=lastcls))", "def respuesta(response):\n for report in response.get('reports', []):\n\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get(\n 'metricHeader', {}).get('metricHeaderEntries', [])\n\n return_data = []\n\n for row in report.get('data', {}).get('rows', []):\n\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n pipeline_insert = {}\n for header, dimension in zip(dimensionHeaders, dimensions):\n pipeline_insert[header] = dimension\n\n for i, values in enumerate(dateRangeValues):\n\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n pipeline_insert[metricHeader.get('name')] = value\n return_data.append(pipeline_insert)\n\n return return_data", "def main_response(self, data):", "def main_response(self, data):", "def handle_series_over(self, stats):\n #############################\n #\n #\n # YOUR CODE HERE\n #\n #\n #############################\n print(\"Series ended, these are the stats:\")\n print(str(stats))" ]
[ "0.6158879", "0.60087264", "0.5986832", "0.5605868", "0.5602771", "0.55909127", "0.5586363", "0.55628216", "0.5537049", "0.5475821", "0.5453993", "0.54405534", "0.5413769", "0.54081506", "0.5394293", "0.53770554", "0.53512913", "0.53332907", "0.532174", "0.5290337", "0.52856237", "0.5277273", "0.5246628", "0.52462476", "0.52431315", "0.52307254", "0.52138126", "0.5200647", "0.5200647", "0.5200286" ]
0.61579174
1
Generates the final list of series to be used (inserted) in the database. Raw input (by side effect) comes from file codigos.xlsx
def final_series(): tickers = pd.read_excel(os.path.abspath(os.path.dirname(__file__)) +"./codigos.xlsx", header=[0]).values.flatten() # tickers = pd.read_excel("./codigos.xlsx", # header=[0]).values.flatten() ls = fetch_series(list(set(tickers))) net_series = [s for s in ls if _cleasing(s, ["D", "M"]) is not None] p = os.path.abspath(os.path.dirname(__file__)) with open(p + "/series_bcb", "wb") as f: pickle.dump(net_series, f) # with open("./series_bcb", "wb") as f: # pickle.dump(net_series, f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def series_ingestion(series:List[dict]) -> None:\n for srs in series:\n try:\n add_series(\"BCB.\" + str(srs['number']), \n srs['nome'], \n *gestores[srs['gestor']])\n except:\n logger.error(f\"Unable to add series BCB.{srs['number']}\")", "def prepare_series(self, result_dir):\n output = {}\n output['title'] = self.title\n output['x'] = self.x\n output['y'] = self.y\n output['series'] = []\n for series in self.series:\n idfile = os.path.join(result_dir, 'benchmark_' + str(series['id']) +\n '.json')\n rows = json_from_file(idfile)\n # it is assumed each row has the same names of columns\n keys = rows[0].keys()\n # skip the series if it does not have required keys\n if self.x not in keys or self.y not in keys:\n continue\n points = [[row[self.x], row[self.y]] for row in rows]\n output['series'].append({'label': series['label'], 'points': points})\n # save the series to a file\n series_path = self._series_file(result_dir)\n if os.path.exists(series_path):\n figures = json_from_file(series_path)\n else:\n figures = {}\n figures[self.key] = output\n with open(series_path, 'w') as file:\n json.dump(figures, file, indent=4)\n # mark as done\n self.output['done'] = True", "def process_data(self):\n logging.debug('process_data called')\n\n pd_time_series = pd.read_csv(f'{self.out_dir}docs/downloaded/'\n f'{self.filename}')\n\n pd_time_series = pd_time_series.drop('Lat', axis=1)\n pd_time_series = pd_time_series.drop('Long', axis=1)\n no_of_dates = len(pd_time_series.columns) - 2\n dateindex = pd.date_range(start='1-22-2020',\n periods=no_of_dates,\n freq='D').strftime('%d-%m')\n\n new_cols = ['Province/State', 'Country/Region']\n for index in dateindex:\n new_cols.append(index)\n pd_time_series.columns = new_cols\n\n pd_time_series = pd_time_series.drop('Province/State', axis=1)\n pd_edit_series = pd_time_series.set_index('Country/Region')\n\n pd_edit_series = pd_edit_series.T\n\n return pd_edit_series", "def make_data():\n now = int(time())\n n = 20.0\n series1 = [[i*1000,sin(i/n)] for i in range(now-100, now)]\n series2 = [[i*1000,abs(sin(i/n))**((i%(2*n))/n)] for i in range(now-100, now)]\n series3 = [[i*1000,cos(i/(n+1))*1.5] for i in range(now-100, now)]\n series4 = [[series2[i][0], series2[i][1] * series3[i][1]] for i in range(len(series3))]\n data = [series1, series2, series3,series4]\n return data", "def to_work_series(self, data: pd.Series) -> pd.Series:\n ...", "def generate(start_date, episodes, steps, output_file):\n header = ','.join(FIELDS) + '\\n'\n with open(output_file, 'w') as fd:\n fd.write(header)\n data_arrays = []\n first_dp = generate_data_point(start_date)\n data_arrays.append(first_dp)\n\n interval = int(1440/steps)\n cur_ts = increment_ts(start_date, interval)\n\n while step_diff(start_date, cur_ts, interval) < steps*episodes:\n dp_tmp = generate_data_point(cur_ts)\n data_arrays.append(dp_tmp)\n cur_ts = increment_ts(cur_ts, interval)\n\n for dp in data_arrays:\n row = ','.join(dp) + '\\n'\n fd.write(row)", "def sequence_ingest(self,sequence):\n\t\tdata=self.data\n\t\t\n\t\tcounter=0\n\n\t\tfor item in 
data[sequence]:\n\t\t\tdatestring=item['specimenDate']\n\t\t\tdate=fetchdate(datestring)\n\t\t\trow,created=DailyCases.objects.get_or_create(specimenDate=date,areacode=item['areaCode'])\n\t\t\trow.areaname=item['areaName']\n\t\t\trow.dailyLabConfirmedCases=item['dailyLabConfirmedCases']\n\t\t\trow.totalLabConfirmedCases=item['totalLabConfirmedCases']\n\t\t\trow.changeInDailyCases=item['changeInDailyCases']\n\t\t\trow.dailyTotalLabConfirmedCasesRate=item['dailyTotalLabConfirmedCasesRate']\n\t\t\trow.previouslyReportedDailyCases=item['previouslyReportedDailyCases']\n\t\t\trow.previouslyReportedTotalCases=item['previouslyReportedTotalCases']\n\t\t\trow.changeInTotalCases=item['changeInTotalCases']\n\t\t\trow.save()\n\t\t\tcounter+=1\n\t\tlog.info(f'Processed: {counter} rows')", "def persistence_model(series):\n return [x for x in series]", "def processData(self):\n recordSet = AresChartsService.toMultiSeries(self.vals, self.chartKeys, self.selectedX , self.chartVals, extKeys=self.extKeys)\n self.aresObj.jsGlobal.add(\"data_%s = %s\" % (self.htmlId, json.dumps(recordSet)))", "def Pure_Graph_W_Shu(file_list, airline_list, include_data, processed_direc, rep_num):\n\n \n for airline in airline_list:\n rep_ite = 1\n Total_AC = []\n Total_Cluster_Size = []\n Total_IAPL = []\n for i in range(len(file_list)):\n## initialize the output lists\n Total_AC.append(0)\n Total_Cluster_Size.append(0)\n Total_IAPL.append(0)\n\n## Save the data in csv\n filename1 = \"%s%s_ACR.csv\" %(processed_direc,airline)\n with open(filename1, 'w') as myfile1:\n wr1 = csv.writer(myfile1, quoting=csv.QUOTE_ALL)\n wr1.writerow(file_list)\n\n filename2 = \"%s%s_IAPLR.csv\" %(processed_direc,airline)\n with open(filename2, 'w') as myfile2:\n wr2 = csv.writer(myfile2, quoting=csv.QUOTE_ALL)\n wr2.writerow(file_list)\n\n filename3 = \"%s%s_CSR.csv\" %(processed_direc,airline)\n with open(filename3, 'w') as myfile3:\n wr3 = csv.writer(myfile3, quoting=csv.QUOTE_ALL)\n wr3.writerow(file_list)\n\n while rep_ite < rep_num+1:\n## start the reptition\n\n year_IAPL = []\n year_Cluster_Size = []\n year_AC = []\n for file in file_list:\n## Get the directory path\n script_dir = os.path.dirname(os.getcwd())\n db_local_path = \"data/processed/atn_db.sqlite\"\n\n## df set up from Keshav (NO CHANGE)\n df = pd.DataFrame()\n db_path = os.path.join(script_dir, db_local_path)\n fields = [\"Origin_Airport_Code\", \"Destination_Airport_Code\", \"Can_Status\"]\n #df_net = pd.read_csv(comb_file, usecols=fields)\n df_net = atn_analysis.raw_query(db_path,file,airline)\n\n df[\"Origin_Airport_Code\"] = df_net.Origin_Airport_Code\n df[\"Destination_Airport_Code\"] = df_net.Destination_Airport_Code\n df[\"Can_Status\"] = df_net.Can_Status\n\n by_origin = df.groupby([df.Origin_Airport_Code]).Can_Status.count()\n airport_list = by_origin.index.tolist()\n df = df[df['Destination_Airport_Code'].isin(airport_list)]\n #print(df)\n df_tuple = pd.DataFrame()\n df_weighted = df.groupby([df.Origin_Airport_Code, df.Destination_Airport_Code]).Can_Status.count().reset_index()\n df_tuple[\"Origin\"] = df_weighted.Origin_Airport_Code\n df_tuple[\"Destination\"] = df_weighted.Destination_Airport_Code\n\n if int(file)%4 == 0:\n days = 366\n else:\n days = 365\n\n df_tuple[\"Weight\"] = df_weighted.Can_Status/days\n df_tuple.Weight = 1/df_tuple.Weight\n\n## Output lists initialization:\n\n\n## Graph object initialization\n graph = [tuple(x) for x in df_tuple.to_records(index=False)]\n G = nx.Graph()\n\n G.add_weighted_edges_from(graph)\n NodeNum = G.number_of_nodes()\n 
#print('Weighted Alebraic Connectivity: ', nx.algebraic_connectivity(G))\n year_AC.append(nx.algebraic_connectivity(G))\n\n sum_IAPL = 0\n sum_Cluster_Size = 0\n IAPL_list = []\n Cluster_Size_list = []\n Remove_List = []\n\n for node in G.nodes():\n## Get the list of the airports\n Remove_List.append(node)\n \n## Shuffle the lists\n random.shuffle(Remove_List)\n for l in Remove_List:\n G.remove_node(l)\n if len(G.nodes()) != 0:\n\n## Add up the data after removing each node\n largest_component_b = max(nx.connected_components(G), key=len)\n IAPL_list.append(inv_average_shortest_path_length_W(G))\n Cluster_Size_list.append(len(largest_component_b)/NodeNum) \n sum_IAPL = sum_IAPL + (inv_average_shortest_path_length_W(G))\n sum_Cluster_Size = sum_Cluster_Size + len(largest_component_b)/NodeNum\n \n## Save the data of the year\n year_IAPL.append(sum_IAPL)\n year_Cluster_Size.append(sum_Cluster_Size)\n\n with open(filename1, 'a') as myfile1:\n wr1 = csv.writer(myfile1, quoting=csv.QUOTE_ALL)\n wr1.writerow(year_AC)\n\n with open(filename2, 'a') as myfile2:\n wr2 = csv.writer(myfile2, quoting=csv.QUOTE_ALL)\n wr2.writerow(year_IAPL)\n\n with open(filename3, 'a') as myfile3:\n wr3 = csv.writer(myfile3, quoting=csv.QUOTE_ALL)\n wr3.writerow(year_Cluster_Size)\n\n\n # print('Unweighted Summation of IAPL: ', sum_IAPL)\n # print('Unweighted Summation of Cluster Size: ', sum_Cluster_Size)\n # print('Unweighted IAPL list', IAPL_list)\n for i in range(len(file_list)):\n## Get the sum for the average\n Total_AC[i] = Total_AC[i] + year_AC[i]\n Total_IAPL[i] = Total_AC[i] + year_IAPL[i]\n Total_Cluster_Size[i] = Total_Cluster_Size[i] + year_Cluster_Size[i]\n\n rep_ite = rep_ite + 1\n\n\n for i in range(len(file_list)):\n## Get the average\n Total_AC[i] = Total_AC[i]/rep_num\n Total_IAPL[i] = Total_IAPL[i]/rep_num\n Total_Cluster_Size[i] = Total_Cluster_Size[i]/rep_num\n\n\n## Plotting Command:\n plt.figure(num=1,figsize=(2.8,2.0),dpi=300)\n# line1 = plt.plot(file_list,Total_IAPL, label=\"{}\".format(airline))\n plt.plot(file_list,Total_IAPL, label=\"{}\".format(airline))\n plt.legend()\n \n plt.figure(num=2,figsize=(2.8,2.0),dpi=300)\n# line2 = plt.plot(file_list,Total_Cluster_Size, label=\"{}\".format(airline))\n plt.plot(file_list,Total_Cluster_Size, label=\"{}\".format(airline))\n plt.legend()\n\n plt.figure(num=3,figsize=(2.8,2.0),dpi=300)\n# line3 = plt.plot(file_list,Total_AC, label=\"{}\".format(airline))\n plt.plot(file_list,Total_AC, label=\"{}\".format(airline)) \n plt.legend()\n \n plt.figure(1)\n plt.title(\"IAPL (Random)\")\n plt.xlabel(\"Year\")\n plt.ylabel(\"IAPL\")\n plt.savefig(\"Pure_Graph_IAPLR.png\")\n\n plt.figure(2)\n plt.title(\"Cluster Size (Random)\")\n plt.xlabel(\"Year\")\n plt.ylabel(\"Cluster Size\")\n plt.savefig(\"Pure_Graph_CSR.png\")\n\n plt.figure(3)\n plt.title(\"Algebraic Connectivity (Random)\")\n plt.xlabel(\"Year\")\n plt.ylabel(\"Algebraic Connectivity\")\n plt.savefig(\"Pure_Graph_ACR.png\")\n\n plt.show()", "def data_generate_process():\n\n a = 0.8\n b = 0.4\n c = 0.1\n d = 0.3\n e = 0.7\n y_0 = 0.0\n a_0 = 0.2\n sigma_0 = 0.35\n\n data_len = 10000\n y_series = pd.Series([np.nan] * data_len)\n a_series = pd.Series([np.nan] * data_len)\n sigma_series = pd.Series([np.nan] * data_len)\n\n epsilon_normal = np.random.normal(loc=0.0, scale=1.0, size=data_len)\n\n y_series[0] = y_0\n a_series[0] = a_0\n sigma_series[0] = sigma_0\n\n for idx in range(1, data_len):\n epsilon_t = epsilon_normal[idx]\n sigma_t = np.sqrt(c + d * a_series[idx - 1] ** 2 + e * sigma_series[idx - 
1] ** 2)\n a_t = epsilon_t * sigma_t\n y_series[idx] = a * y_series[idx - 1] + b + a_t\n a_series[idx] = a_t\n sigma_series[idx] = sigma_t\n\n return y_series, a_series, sigma_series", "def result(self):\n\n chart_series = [] # will hold all the series created\n\n # determine the sensor to plot from the sensor selected by the user.\n the_sensor = bmsapp.models.Sensor.objects.get(pk=self.request_params['select_sensor'])\n\n # get the requested averaging interval in hours\n averaging_hours = float(self.request_params['averaging_time'])\n\n # determine the start time for selecting records\n st_ts, end_ts = self.get_ts_range()\n\n # get the database records\n df = self.reading_db.dataframeForOneID(the_sensor.sensor_id, st_ts, end_ts, pytz.timezone(self.timezone))\n\n if not df.empty:\n\n # info needed to create each series (selection list, series name, visible)\n if self.schedule:\n occupied_times = df.ts.apply(self.schedule.is_occupied)\n unoccupied_times = -occupied_times\n\n series_info = [(None, 'All Data', True),\n (occupied_times, 'Occupied Periods', False),\n (unoccupied_times, 'Unoccupied Periods', False)]\n else:\n # no schedule, so just return the 'All Data' series\n series_info = [(None, 'All Data', True)]\n\n for mask, series_name, visibility in series_info:\n if mask is None:\n select_df = df\n else:\n select_df = df[mask]\n\n if averaging_hours:\n select_df = bmsapp.data_util.resample_timeseries(select_df, averaging_hours)\n\n histogram_series = bmsapp.data_util.histogram_from_series(select_df.val)\n\n chart_series.append({'x': [x for x,y in histogram_series],\n 'y': [y for x,y in histogram_series],\n 'type': 'scatter',\n 'mode': 'lines', \n 'name': series_name, \n 'visible': 'true' if visibility else 'legendonly'\n })\n\n opt = self.get_chart_options('plotly')\n opt['data'] = chart_series\n opt['layout']['title'] = the_sensor.title + ' Histogram: ' + self.building.title\n opt['layout']['xaxis']['title'] = the_sensor.unit.label\n opt['layout']['xaxis']['type'] = 'linear'\n opt['layout']['yaxis']['title'] = '% of Readings'\n opt['layout']['yaxis']['rangemode'] = 'tozero'\n\n html = basechart.chart_config.chart_container_html(opt['layout']['title'])\n\n return {'html': html, 'objects': [('plotly', opt)]}", "def test_generate_store_mmp_series(cls):\n\n cls.test_mmp_series_object.setup_mmp_data_for_mms(cls.temp_file_input_csv_confusion.name,\n 'SMILES', 'ID', 'PIC50',\n 3, 0.50001)\n\n cls.test_mmp_series_object.generate_store_mmp_series()\n # print cls.test_mmp_series_object.series_df.to_dict()\n cls.assertEqual(len(cls.test_mmp_series_object.series_df.to_dict()['MOL_ID']), 28)\n\n cls.test_mmp_series_object.generate_store_mmp_series(sgl_or_dbl_or_both='double')\n # print cls.test_mmp_series_object.series_df.to_dict()\n cls.assertEqual(len(cls.test_mmp_series_object.series_df.to_dict()['MOL_ID']), 17)\n\n cls.test_mmp_series_object.generate_store_mmp_series(sgl_or_dbl_or_both='both')\n # print cls.test_mmp_series_object.series_df.to_dict()\n cls.assertEqual(len(cls.test_mmp_series_object.series_df.to_dict()['MOL_ID']), 45)", "def get_sr_series(tables, out_name, max_sample=500):\n\n pt_ct = 0\n for year in YEARS:\n for state in TARGET_STATES:\n\n name_prefix = '{}_{}_{}'.format(out_name, state, year)\n local_file = os.path.join('/home/dgketchum/IrrigationGIS/EE_extracts/to_concatenate',\n '{}.csv'.format(name_prefix))\n if os.path.isfile(local_file):\n continue\n else:\n print(local_file)\n\n roi = ee.FeatureCollection(os.path.join(BOUNDARIES, state))\n\n start = 
'{}-01-01'.format(year)\n d = datetime.strptime(start, '%Y-%m-%d')\n epoch = datetime.utcfromtimestamp(0)\n start_millisec = str(int((d - epoch).total_seconds() * 1000))\n\n table = ee.FeatureCollection(tables)\n table = table.filter(ee.Filter.eq('YEAR', start_millisec))\n table = table.filterBounds(roi)\n table = table.randomColumn('rnd')\n points = table.size().getInfo()\n print('{} {} {} points'.format(state, year, points))\n\n n_splits = int(ceil(points / float(max_sample)))\n ranges = linspace(0, 1, n_splits + 1)\n diff = ranges[1] - ranges[0]\n\n for enum, slice in enumerate(ranges[:-1], start=1):\n slice_table = table.filter(ee.Filter.And(ee.Filter.gte('rnd', slice),\n ee.Filter.lt('rnd', slice + diff)))\n points = slice_table.size().getInfo()\n print('{} {} {} points'.format(state, year, points))\n\n name_prefix = '{}_{}_{}_{}'.format(out_name, state, enum, year)\n local_file = os.path.join('/home/dgketchum/IrrigationGIS/EE_extracts/to_concatenate',\n '{}.csv'.format(name_prefix))\n if os.path.isfile(local_file):\n continue\n else:\n print(local_file)\n\n pt_ct += points\n if points == 0:\n continue\n\n ls_sr_masked = daily_landsat(year, roi)\n stats = ls_sr_masked.sampleRegions(collection=table,\n properties=['POINT_TYPE', 'YEAR', 'LAT_GCS', 'Lon_GCS'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n stats,\n description=name_prefix,\n bucket='wudr',\n fileNamePrefix=name_prefix,\n fileFormat='CSV')\n\n task.start()\n print('{} total points'.format(pt_ct))", "def stream_ingest(df):\n global index\n\n i=0\n coords= []\n datum = collections.OrderedDict()\n for index, row in df.iterrows():\n datum[\"symbol\"]=str(df.iloc[index,0])\n datum[\"spot_price\"]=float(df.iloc[index,1])\n datum[\"option_type\"] = str(df.iloc[index, 4])\n datum[\"exposure\"] = str(df.iloc[index, 6])\n datum[\"strike_price\"] = float(df.iloc[index, 7])\n datum[\"maturity_y\"] = int(df.iloc[index, 8])\n datum[\"maturity_m\"] = int(df.iloc[index, 9])\n datum[\"maturity_d\"] = int(df.iloc[index, 10])\n datum[\"calendar\"] = str(df.iloc[index, 11])\n datum[\"day_count\"] = str(df.iloc[index, 12])\n datum[\"risk_free_rate\"] = float(df.iloc[index, 13])\n datum[\"dividend_rate\"] = float(df.iloc[index, 14])\n datum[\"calc_dt_y\"] = int(df.iloc[index, 15])\n datum[\"calc_dt_m\"] = int(df.iloc[index, 16])\n datum[\"calc_dt_d\"] = int(df.iloc[index, 17])\n datum[\"volatility\"] = float(df.iloc[index, 18])\n coords.append(h_db.encode_datum(my_type, datum))\n\n i= i + 1\n # Pump data in batches\n if i % DATA_PACK == 0:\n response = h_db.insert_records(\n table_name=NEW_TABLE,\n data=coords,\n list_encoding=ENCODING,\n options={})\n coords = []\n time.sleep(INGEST_FREQ)\n print(response)\n\n # Flush the last batch\n if i % DATA_PACK != 0:\n response = h_db.insert_records(\n table_name=NEW_TABLE,\n data=coords,\n list_encoding=ENCODING,\n options={})\n\n # 3 second delay to mimic real time ingest\n time.sleep(INGEST_FREQ)\n print(response)\n return coords", "def pre_process_salesrank(input_path, output_path):\n files = [pos_json for pos_json in os.listdir(input_path) if pos_json.endswith('.json')]\n # files = os.listdir(input_path)\n asins = list(map(lambda each:each.strip(\"_com_norm.json\"), files))\n\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n\n seq = [i for i in range(40, 66760+1, 40)]\n \n big_df = pd.DataFrame(columns=['asin', 'rank'])\n \n for asin, filename in zip(asins, files):\n try:\n df = pd.read_json(os.path.join(input_path, filename), typ='series', 
convert_axes=False)\n df = df.to_frame(name='rank')\n df = df.assign(asin=asin)\n big_df = big_df.append(df, sort=False)\n print(asins.index(asin))\n except:\n print(f\"failed to process {filename}\")\n continue\n if asins.index(asin) in seq:\n big_df.index.name = 'ts'\n big_df.to_csv(f\"{output_path}/{asins.index(asin)}.csv\")\n print(asins.index(asin))\n big_df = pd.DataFrame(columns=['asin', 'rank'])", "def _get_data(self):\n \n print(\"Getting Data...\")\n self.data = sgs.dataframe(self.serie_name, \n start = self.start_date, \n end = self.end_date)\n\n print(f\"Done! {self.data.shape[0]} rows were collected\")\n \n self.data.reset_index(inplace=True)\n self.data.columns = ['date', 'cdi']\n\n return self.data", "def create_series(self):\n series = []\n for timeline_object in self.timeline['results']:\n count = timeline_object[\"count\"]\n series.insert(0, count)\n self.query_total = self.query_total + count\n label = self.query[0:30]\n if len(self.query) > 30:\n label = label + \"...\"\n label = label + \" (\" + str(self.query_total) + \")\"\n series.insert(0, label)\n return series", "def get_series(self):\n ids = self.get_ids()\n i = 0; j = 100\n while i < len(ids):\n curr_ids = ids[i:j]\n ids_with_coms = ','.join(curr_ids)\n id_param = { 'id': ids_with_coms }\n results = API().req_itunes(c.ITUNES_LOOKUP_URL +\n urllib.urlencode(id_param)).json()['results']\n self.series.extend(results)\n i += 100; j += 100\n\n return [Series.from_json(j) for j in self.series]", "def Generating_stock_daily_return_table():\r\n #Getting Names list\r\n Profitfile='pap//CombProfit.csv'\r\n path='D://Doktorat Marek//dane//'\r\n ProfitsFilePath=path+Profitfile\r\n quarterly_profit=pd.read_csv(ProfitsFilePath,index_col=0,header=0,parse_dates=True)\r\n Names_list=quarterly_profit.columns.tolist()\r\n \r\n Stock_returns=pd.DataFrame(index=pd.date_range('19980101','20180918',freq='D'),columns=Names_list)\r\n for name in Names_list:\r\n Stock_returns[name]=1+stock_returns(name)['Return']/100\r\n Stock_returns[name].fillna(1,inplace=True)\r\n \r\n WIG=pd.read_excel('D://Doktorat Marek//dane//notowania//Infostrefa//PL9999999995.xls')\r\n WIG['Date']=pd.to_datetime(WIG['Data'])\r\n WIG.set_index('Date',inplace=True)\r\n Stock_returns['WIG'] = 1+WIG['Zmiana']/100\r\n Stock_returns['WIG'].fillna(1,inplace=True)\r\n Stock_returns['Average']=Stock_returns.mean(1)\r\n \r\n FileReturns='D://Doktorat Marek//dane//Notowania//Stock_returns.csv'\r\n Stock_returns.to_csv(FileReturns,encoding='UTF-8')\r\n return 0", "def _create_phases(self):\n start_dates, end_dates = self._phase_range(self._change_dates)\n pop_list = [self.pop_dict[date] for date in start_dates]\n phase_series = PhaseSeries(\n self.dates[0], self.dates[-1], self.population, use_0th=self._use_0th\n )\n phase_itr = enumerate(zip(start_dates, end_dates, pop_list))\n for (i, (start_date, end_date, population)) in phase_itr:\n phase_series.add(\n start_date=start_date,\n end_date=end_date,\n population=population\n )\n return phase_series", "def compile_data():\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n metasp = pd.DataFrame()\r\n for count, ticker in enumerate(tickers):\r\n df = pd.read_csv('sp500_data\\{}.csv'.format(ticker))\r\n df.set_index('Date', inplace=True)\r\n df.rename(columns={'Adj Close': ticker}, inplace=True)\r\n df.drop(['Open', 'High', 'Low', 'Close', 'Volume'], 1, inplace=True)\r\n if metasp.empty:\r\n metasp = df\r\n else:\r\n metasp = metasp.join(df, how = 'outer')\r\n if count % 10 == 0:\r\n 
print(count)\r\n metasp.to_csv('sp500_meta.csv')", "def generate_final_data(model_names):\n\n for model_name in model_names:\n print(\"Creating fina data for \" + model_name[0])\n\n final_data = {}\n brush_data = common.load_json(\"../steps/\" + model_name[0] + \"/brush_data.json\")\n diff_data = common.load_json(\"../steps/\" + model_name[0] + \"/diff_plot_data.json\")\n distance_data = common.load_json(\"../steps/\" + model_name[0] + \"/distance_data.json\")\n\n final_data[0] = {\n \"step_number\" : 0,\n \"valid\" : brush_data['0'][\"valid\"],\n \"brush_data\" : sanitize_brush_data(brush_data['0']),\n \"diff_data\" : null_diff_data(),\n \"distance_data\" : null_distance_data()\n }\n\n for step_idx in range(1, len(brush_data)):\n print(str(step_idx) + \" \",)\n final_data[step_idx] = {}\n final_data[step_idx][\"step_number\"] = step_idx\n final_data[step_idx][\"valid\"] = brush_data[str(step_idx)][\"valid\"]\n final_data[step_idx][\"brush_data\"] = sanitize_brush_data(brush_data[str(step_idx)])\n final_data[step_idx][\"diff_data\"] = get_diff_data_step(diff_data, step_idx - 1)\n final_data[step_idx][\"distance_data\"] = get_distance_data_step(distance_data, str(step_idx))\n\n common.save_json(final_data, \"../final_data/\" + model_name[0] + \"/final_data.json\", compressed=False)", "def preprocess(dataframe_csvpath, cols_x, cols_y, window_in, window_out, data_div_frac, popu_size):\n \n #Loading .CSV file and creating dataframe\n df = pd.read_csv(dataframe_csvpath) \n len_ser = len(df[df['Series_No'] == 1])\n\n #randomly shuffle different series\n permute = np.random.permutation(range(1, len(set(df['Series_No']))))\n train_series_seq = permute[: int(len(set(df['Series_No'])) * data_div_frac)]\n test_series_seq = permute[int( len(set(df['Series_No'])) * data_div_frac):]\n \n #taking relevent columns from dataframe \n df_x = df[cols_x]\n df_y = df[cols_y]\n \n #Innitialize empty lists which are later to be appended\n train_seq, test_seq = [], []\n x_test = []\n y_true =[]\n \n #Creating time series data\n for series_no in train_series_seq:\n \n #new dataframe variable assignment for particular series drom df_x, df_y\n series_df_x = df_x[df_x['Series_No'] == series_no]\n series_df_y = df_x[df_y['Series_No'] == series_no]\n \n #converting into numpy arrays\n array_x = np.array(series_df_x)\n array_y = np.array(series_df_y)\n \n #for loop to append to x_train y_train arrays according to window_in, window_out\n for idx in range(len(series_df_x) - window_in - window_out + 1): #'len(series_df_x) - window_in - window_out + 1' needs to be checked\n arrayx = array_x.copy()\n x = arrayx [idx:idx + window_in, : len(cols_x) - 1]\n #print(x)\n x[:,0:3] = x[:,0:3] / popu_size\n #print(x)\n arrayy = array_y.copy()\n y = arrayy[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1]\n y = y / popu_size\n train_seq.append((x, y)) #out col_x and col_y has last item 'Series number' so to remove that [, : len(cols_x)]\n #y_train.append(array_y[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1])\n #print(train_seq)\n\n #repeat for test sequence\n for series_no in test_series_seq:\n \n #new dataframe variable assignment for particular series drom df_x, df_y\n series_df_x = df_x[df_x['Series_No'] == series_no]\n series_df_y = df_x[df_y['Series_No'] == series_no]\n \n #converting into numpy arrays\n array_x = np.array(series_df_x)\n array_y = np.array(series_df_y)\n \n #for loop to append to x_train y_train arrays according to window_in, window_out\n for idx in range(len(series_df_x) - 
window_in - window_out + 1): #'len(series_df_x) - window_in - window_out + 1' needs to be checked\n arrayx = array_x.copy()\n x = arrayx[idx:idx + window_in, : len(cols_x) - 1]\n x[:,0:3] = x[:,0:3] / popu_size\n x_test.append(x)\n arrayy = array_y.copy()\n y = arrayy[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1]\n y = y / popu_size\n y_true.append(y)\n test_seq.append((x, y))\n \n \n #test_seq.append((array_x[idx:idx + window_in, : len(cols_x) - 1], array_y[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1])) #out col_x and col_y has last item 'Series number' so to remove that [, : len(cols_x)]\n #y_test.append(array_y[idx + window_in :idx + window_in + window_out, : len(cols_y) - 1])\n\n \n win_len_per_ser = len_ser - window_in - window_out + 1\n \n return np.array(train_seq), np.array(test_seq), len_ser, win_len_per_ser, np.array(x_test), np.array(y_true)", "def generate_store_mmp_series(self, sgl_or_dbl_or_both='single', apply_pre_filter=False):\n\n series_data = []\n\n if sgl_or_dbl_or_both == 'single' or sgl_or_dbl_or_both == 'both':\n series_data_sgl = [(a, b, c, d, e, f) for a, b, c, d, e, f in\n self._iterator_mmp_series_numeric(apply_pre_filter=apply_pre_filter)]\n series_data.extend(series_data_sgl)\n # print series_data\n\n if sgl_or_dbl_or_both == 'double' or sgl_or_dbl_or_both == 'both':\n series_data_dbl = [(a, b, c, d, e, f) for a, b, c, d, e, f in\n self._iterator_mmp_series_numeric(sgl_or_dbl='double',\n apply_pre_filter=apply_pre_filter)]\n series_data.extend(series_data_dbl)\n # print series_data\n\n self.series_df = pd.DataFrame(series_data,\n columns=['SERIES_ID', 'SERIES_SEQ_ID',\n 'CONTEXT_ID', 'FRAG_ID',\n 'MOL_ID', 'ACTIVITY']\n )\n # self.series_df.set_index(['SERIES_ID', 'FRAG_ID'], inplace=True)\n\n # print('Parsed series CSV to dataframe of size %d, %d' % (self.series_df.shape[0], self.series_df.shape[1]))\n self.logger.info('Parsed series CSV to dataframe of size %d, %d' %\n (self.series_df.shape[0], self.series_df.shape[1]))", "def hist_data(self):\n # database address\n jydb_address = \"117.122.223.35\"\n jydb_user_id = \"zpy\"\n jydb_user_pwd = \"Z1pe1y1@zpy\"\n jydb_db_name = \"jydb02\"\n # preallocate data set\n back_testing_data = {}\n \n # on-line mode\n conn = pymysql.connect(jydb_address, jydb_user_id, jydb_user_pwd, jydb_db_name)\n # not using moving average\n if not self.moving_average: \n # iterate on all ism packages\n for _i, ism_pack in enumerate(tqdm(self.ism_data)):\n ismcode = ism_pack[0][0]\n ProRunDate = ism_pack[0][5]\n ProStopDate = ism_pack[0][6]\n MaxStoreSum = ism_pack[0][7]\n MinInvestShare = ism_pack[0][8]\n InnerCode_ls = flatten(list(map(lambda x: [y[0] for y in x[1:]], ism_pack[1:])))\n SecuCode_ls = flatten(list(map(lambda x: [y[1] for y in x[1:]], ism_pack[1:])))\n PriorType_ls = flatten(list(map(lambda x: [y[2] for y in x[1:]], ism_pack[1:])))\n # collect data from source conn\n flag_run = \"SELECT InnerCode, OpenPrice, ClosePrice FROM QT_DailyQuote WHERE \" + \\\n \"InnerCode IN (\" + \",\".join(InnerCode_ls) + \") AND \" + \\\n \"TradingDay=\\'\" + ProRunDate + \"\\'\"\n flag_stop = \"SELECT InnerCode, OpenPrice, ClosePrice FROM QT_DailyQuote WHERE \" + \\\n \"InnerCode IN (\" + \",\".join(InnerCode_ls) + \") AND \" + \\\n \"TradingDay=\\'\" + ProStopDate + \"\\'\"\n run_price = pd.read_sql(flag_run, conn)\n stop_price = pd.read_sql(flag_stop, conn)\n back_testing_data[ismcode] = pd.merge(run_price, stop_price, on='InnerCode', \n suffixes=('_run', '_stop'))\n else: # using moving average \n 
# iterate on all ism packages\n for _i, ism_pack in enumerate(self.ism_data):\n ismcode = ism_pack[0][0]\n ProRunDate = ism_pack[0][5]\n TradingDay_begin = former_market_date(ProRunDate, self.L_shift, conn)\n TradingDay_end = future_market_date(ProRunDate, self.R_shift, conn)\n InnerCode_ls = flatten(list(map(lambda x: [y[0] for y in x[1:]], ism_pack[1:])))\n SecuCode_ls = flatten(list(map(lambda x: [y[1] for y in x[1:]], ism_pack[1:])))\n PriorType_ls = flatten(list(map(lambda x: [y[2] for y in x[1:]], ism_pack[1:])))\n flag = \"SELECT InnerCode, TradingDay, OpenPrice, ClosePrice FROM QT_DailyQuote WHERE \" + \\\n \"InnerCode IN (\" + \",\".join(InnerCode_ls) + \") AND \" + \\\n \"TradingDay BETWEEN \\'\" + TradingDay_begin + \"\\' AND \\'\" + TradingDay_end + \"\\'\"\n back_testing_data[ismcode] = pd.read_sql(flag, conn)\n \n # close sql connection\n conn.close()\n \n return back_testing_data", "def lines_():\n query = f\"\"\"\nSELECT script_l, `name`, episode\nFROM script\nINNER JOIN characters\nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\n\"\"\"\n data = pd.read_sql_query(query, engine)\n return data.to_json(orient=\"records\")", "def factor_exposure(self):\n exp_hs_all = pd.DataFrame([])\n exp_zz_all = pd.DataFrame([])\n for i in range(len(self.weekly_date)):\n date = self.weekly_date.iloc[i,0]\n factor = get_barra_factor_from_sql(date)\n factor['secID'] = factor.index.tolist()\n stocklist = factor.index.tolist()\n \n hs300 = get_index_composition(date,'000300.SH')\n zz500 = get_index_composition(date,'000905.SH')\n hs300['secID'] = hs300.index.tolist()\n zz500['secID'] = zz500.index.tolist()\n \n stocklist_hs300 = list(set(hs300.index.tolist()).intersection(set(stocklist)))\n stocklist_zz500 = list(set(zz500.index.tolist()).intersection(set(stocklist)))\n stocklist_hs300.sort()\n stocklist_zz500.sort()\n \n factor_hs = extract_part_from_all(stocklist_hs300,factor,'secID')\n factor_zz = extract_part_from_all(stocklist_zz500,factor,'secID')\n hs_weight = extract_part_from_all(stocklist_hs300,hs300,'secID')\n zz_weight = extract_part_from_all(stocklist_zz500,zz500,'secID')\n del factor_hs['secID'],factor_zz['secID'],hs_weight['secID'],zz_weight['secID']\n \n \n exp_hs = pd.DataFrame(np.dot(hs_weight.T,factor_hs))\n exp_zz = pd.DataFrame(np.dot(zz_weight.T,factor_zz))\n \n \n exp_hs_all = pd.concat([exp_hs_all,exp_hs], axis = 0)\n exp_zz_all = pd.concat([exp_zz_all,exp_zz], axis = 0) \n print(i)\n exp_hs_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_zz_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_hs_all.index = self.weekly_date.iloc[:,0]\n exp_zz_all.index = self.weekly_date.iloc[:,0]\n return exp_hs_all,exp_zz_all", "def execute_series(self):\n for n in xrange(self.conf[\"n_runs\"]):\n self.runs[n].execute()", "def generate_synth_data(n):" ]
[ "0.63566905", "0.59604704", "0.589756", "0.58577883", "0.5839274", "0.58158976", "0.5808274", "0.5788732", "0.57789326", "0.5719818", "0.56998694", "0.56896603", "0.56371063", "0.557891", "0.55630285", "0.5514822", "0.55077946", "0.5449205", "0.54456735", "0.54253983", "0.5420153", "0.5415774", "0.5412959", "0.5412076", "0.5390942", "0.5362657", "0.53400207", "0.5338921", "0.533588", "0.5327791" ]
0.69550884
0
add series for bcb into the database
def series_ingestion(series:List[dict]) -> None: for srs in series: try: add_series("BCB." + str(srs['number']), srs['nome'], *gestores[srs['gestor']]) except: logger.error(f"Unable to add series BCB.{srs['number']}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_column(self, column_name, series):\n self.data[column_name] = pd.Series(series)", "def add_to_database(dataframe):\r\n \r\n # df.shape returns the number of columns and rows in a dataframe\r\n # So using the first value returned, we can cycle through each row in the dataframe (where each row has information on a specific station)\r\n for i in range(0, (dataframe.shape[0]-1)):\r\n data = dataframe.iloc[i] # df.iloc[] just allows us to access elements via normal indexing of a pandas dataframe\r\n date_time = pd.to_datetime(data[\"last_update\"]/1000,unit='s')\r\n day = str(date_time.day_name())\r\n hour = str(date_time.hour)\r\n # Store all the information from the dataframe in a list\r\n elements = [data.get(\"address\"), int(data.get(\"available_bike_stands\")), int(data.get(\"available_bikes\")), int(data.get(\"banking\")), int(data.get(\"bike_stands\")), int(data.get(\"bonus\")), data.get(\"contract_name\"), float(data.get(\"last_update\")), data.get(\"name\"), int(data.get(\"number\")), data.get(\"position\").get(\"lat\"), data.get(\"position\").get(\"lng\"), data.get(\"status\"),day,str(date_time),str(hour)]\r\n \r\n # Add each of these elements to the table in our database\r\n cursor.execute(\"INSERT INTO seviBikes VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\", elements)\r\n conn.commit()", "def commitToDatabase(self, tiltseriesdata):\n\t\treturn", "def final_series():\n tickers = pd.read_excel(os.path.abspath(os.path.dirname(__file__)) +\"./codigos.xlsx\", \n header=[0]).values.flatten()\n # tickers = pd.read_excel(\"./codigos.xlsx\", \n # header=[0]).values.flatten()\n ls = fetch_series(list(set(tickers)))\n net_series = [s for s in ls if _cleasing(s, [\"D\", \"M\"]) is not None]\n p = os.path.abspath(os.path.dirname(__file__))\n with open(p + \"/series_bcb\", \"wb\") as f:\n pickle.dump(net_series, f)\n # with open(\"./series_bcb\", \"wb\") as f:\n # pickle.dump(net_series, f) ", "def add_bids_vertica(scenario):\n con = DB.VerticaConnection()\n\n for new_row in con.script_cursor(bis_v, scenario=scenario):\n Bid(new_row, is_new=True)\n\n\n for new_row in con.script_cursor(bhs_v, scenario=scenario):\n bid = Bid[new_row.dpg_id]\n if bid:\n bid.add_hour_data(new_row)\n\n\n # h_re = re.compile(r'(?<=_)\\d+')\n for new_row in con.script_cursor(bps_v, scenario=scenario):\n bid = Bid[new_row.dpg_id]\n if bid:\n bid.add_intervals_data(new_row)", "def add_to_database(self, df):\n \n from sqlalchemy import create_engine\n \n engine = create_engine(\"mysql://dublinbus:somepaawsord/researchpracticum\")\n con = engine.connect()\n df.to_sql(con=con, name='TimeTables', if_exists='append')\n con.close()", "def append(self, bts_node: BTSNode):\n pass", "def add_cytobands(self, cytobands):\n LOG.debug(f\"Inserting {len(cytobands)} cytoband intervals into database\")\n result = self.cytoband_collection.insert_many(cytobands)\n LOG.debug(f\"Number of inserted documents:{len(result.inserted_ids)}\")", "def add_data(self, year, month):\n data = _download_to_df(self.url, self.table_name, year, month)\n if 'INTERVENTION' in data.columns:\n data = data[data['INTERVENTION'] == 0]\n data = data.loc[:, self.table_columns]\n with self.con:\n data.to_sql(self.table_name, con=self.con, if_exists='append', index=False)\n self.con.commit()", "def add(self, b):\n if self.t is None:\n self.t = []\n self.chans = {k: [] for k in b.data.keys()}\n self.t.extend(list(b.t))\n for k in b.data.keys():\n self.chans[k].extend(b.data[k])", "def insert_timeseries(pool, timeseries, tms_id, end_date=None):\n new_timeseries 
= []\n for t in [i for i in timeseries]:\n if len(t) > 1:\n # Insert EventId in front of timestamp, value list\n t.insert(0, tms_id)\n new_timeseries.append(t)\n else:\n print('Invalid timeseries data:: %s', t)\n\n if end_date is None:\n end_date = new_timeseries[-1][1]\n\n try:\n\n ts = Timeseries(pool=pool)\n\n ts.insert_data(timeseries=new_timeseries, upsert=True)\n ts.update_end_date(id_=tms_id, end_date=end_date)\n\n except Exception as e:\n traceback.print_exc()\n print(\"Exception occurred while pushing timeseries for tms_id {} to curw_obs\".format(tms_id))", "def add_to_db(self, joined_charts):\n session = Session()\n for index, row in joined_charts.iterrows():\n account = Account(account=index, value=row[\"value\"])\n session.add(account)\n session.commit()", "def create_chart(conf, entries):\r\n serie_index = 0\r\n for serie in conf['series']:\r\n data = []\r\n for entry in entries:\r\n if entry is not None:\r\n data.append(entry.datatolist(str(serie['db'])))\r\n conf['series'][serie_index]['data'] = data\r\n serie_index += 1\r\n \r\n \"\"\" Add PlotBands \"\"\" \r\n plotBands = []\r\n last_entry = len(entries)-1\r\n n = 1\r\n while n < last_entry and\\\r\n entries[n].phase is not None and\\\r\n entries[n] is not None and\\\r\n entries[n].next().phase is not None:\r\n begin = entries[n].dt\r\n phase = entries[n].phase\r\n n += 1\r\n while entries[n] is not None and\\\r\n entries[n].phase is not None and\\\r\n entries[n].phase == phase and\\\r\n n < last_entry:\r\n n += 1\r\n end = entries[n].dt\r\n plotBand = {\r\n 'color': PhaseColor[phase],\r\n 'from': datetime_to_timestamp(begin),\r\n 'to': datetime_to_timestamp(end)\r\n }\r\n plotBands.append(plotBand)\r\n conf['xAxis']['plotBands'] = plotBands\r\n \r\n \"\"\" Add Labels \"\"\" \r\n condition_flag_allumage = '((prec.phase is not None) and (prec.phase is not PHASE_ALLUMAGE))'\r\n condition_next_is_not_maintien = '((next.phase is not None) and (next.phase is not PHASE_MAINTIEN))'\r\n labels = json.loads(json.dumps(ChartLabel)) #make a copy of original object\r\n labels['name'] = 'Labels'\r\n for entry in entries:\r\n if entry is not None and entry.phase is not None:\r\n #Label Allumage \r\n if entry.event is not None:\r\n data = {\r\n \"x\": datetime_to_timestamp(entry.dt),\r\n \"title\": 'Allumage'\r\n }\r\n labels['data'].append(data)\r\n \"\"\"\r\n # Label Combustion \r\n if entry.phase == PHASE_COMBUSTION and\\\r\n entry.prec() is not None and\\\r\n entry.prec().phase is not PHASE_COMBUSTION and\\\r\n entry.all_next_verify_condition(5, condition_next_is_not_maintien):\r\n data = {\r\n \"x\": datetime_to_timestamp(entry.dt),\r\n \"title\": 'Combustion'\r\n }\r\n labels['data'].append(data)\r\n \"\"\"\r\n conf['series'].append(labels)\r\n\r\n \"\"\" Add Subtitle (plotbands legend) \"\"\"\r\n #conf[\"subtitle\"] = ChartLegend\r\n\r\n \"\"\" Add Title (date begin date end) \"\"\"\r\n if len(entries) > 3:\r\n begin = pretty_date(entries[0].dt)\r\n end = pretty_date(entries[len(entries)-1].dt)\r\n #conf[\"title\"][\"text\"] = 'Monitoring Chaudiรจre du {0} au {1}'.format(begin, end)\r\n conf[\"title\"][\"text\"] = 'Monitoring Chaudiรจre'\r\n conf[\"subtitle\"][\"text\"] = ' du {0} au {1}'.format(begin, end)\r\n\r\n else:\r\n conf[\"title\"][\"text\"] = 'Monitoring Chaudiรจre'\r\n\r\n \"\"\" Return new conf \"\"\"\r\n return conf", "def ingest():\n db.delete_dataset_records(DATASET_ID)\n\n db.insert_dataset({\n 'dataset_id': DATASET_ID,\n 'title': 'North American Breeding Bird Survey (BBS)',\n 'version': '2016.0',\n 'url': 
'https://www.pwrc.usgs.gov/bbs/'})\n\n to_taxon_id = insert_taxa()\n to_place_id = insert_places()\n to_event_id = insert_events(to_place_id)\n insert_counts(to_event_id, to_taxon_id)", "def create_new_tickers(tick_scrape):\n #Check if ticker exists, if not add it to the ticker table\n tick_db = sqlaq_to_df(ticker.fetch())\n #add the id to the tick_ftse table\n new_tick = pd.merge(\n tick_scrape,\n tick_db[[\"id\",\"ticker\"]],\n on=[\"ticker\"],\n how=\"left\"\n )\n #find tickers which don't exist\n new_tick = new_tick[new_tick.id.isnull()]\n logger.info(f\"{new_tick.shape[0]} items to add to ticker\")\n #add to db\n ticker.add_df(new_tick)\n #fetch updated table\n tick_db = sqlaq_to_df(ticker.fetch())\n return tick_db", "def book_series(self, key, value):\n val_n = clean_val(\"n\", value, str)\n val_x = clean_val(\"x\", value, str)\n\n _migration = self[\"_migration\"]\n _migration[\"serials\"].append(\n {\n \"title\": clean_val(\"a\", value, str),\n \"volume\": clean_val(\"v\", value, str),\n \"issn\": val_x,\n }\n )\n _migration[\"has_serial\"] = True\n raise IgnoreKey(\"book_series\")", "def commitToDatabase(self, tiltseriesdata):\n\t\tapDisplay.printError(\"you did not create a 'commitToDatabase' function in your script\")\n\t\traise NotImplementedError()", "def add_to_db(self, loglines):\n self.database = self.database.append(loglines, ignore_index=True)", "def add_times(self,df,link):\n \n if link not in self.to_concat:\n self.to_concat[link] = []\n self.to_concat[link].append(df)", "def add_data(self, label, description='', datapath='', samples=[], fibres=[], data_type='', date_created='', verbose = True):\n assert (self.connected)\n assert(type(label) == str)\n assert(type(datapath) == str)\n assert(type(samples) == list and len(samples) <= 4)\n assert(type(fibres) == list and len(fibres) <= 2)\n assert(type(date_created) == str)\n assert('\\n' not in label)\n assert(len(samples) <= 4)\n assert(len(fibres) <= 2)\n \n \n ADD_DATA_COMMAND = (\"INSERT INTO data \"\n \"(label,description, type, data, data_size, data_duration, data_numpoints, sampleId, sampleId2, sampleId3, sampleId4, fibreId, fibreId2, date_created) \"\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\n \n # get binary data from the file path specified\n data = None\n data_size = 0\n num_data_points = 0\n duration = 0 \n if (datapath != ''):\n data = open(datapath, 'rb').read()\n data_size = path.getsize(datapath)\n if verbose: print(\"File uploaded: \" + str(data_size / 1000.0) + \" KB\")\n \n # get metadata from .csv file\n df = pd.read_csv(datapath)\n num_data_points = len(df) \n if (len(df) > 0):\n if ('time' in df):\n duration = df['time'].values[len(df)-1] - df['time'].values[0] \n elif ('Time' in df): \n duration = df['Time'].values[len(df)-1] - df['Time'].values[0]\n else:\n duration = -1 \n \n # fill in today's date,if none was given\n if (date_created == ''):\n date_created = date.today().strftime(\"%Y-%m-%d\")\n \n # Get sample ids \n sampleIds = []\n if (len(samples)>0 and type(samples[0]) == str):\n for s in samples:\n theId = self.get_by_label(s, 'samples')\n sampleIds.append(None if theId==-1 else theId )\n elif (len(samples)>0 and type(samples[0]) == int):\n sampleIds = samples\n # Ensure sample id list if exactly 4 items long\n sampleIds = [ sampleIds[i] if i<len(sampleIds) else None for i in range(4)]\n \n \n # get fibre ids\n fibreIds = []\n if (len(fibres)>0 and type(fibres[0]) == str):\n for f in fibres:\n theId = self.get_by_label(f, 'fibres')\n fibreIds.append(None if 
theId==-1 else theId )\n if (len(fibres)>0 and type(fibres[0]) == int):\n fibreIds = fibres\n # Ensure fibre id list if exactly 2 items long\n fibreIds = [ fibreIds[i] if i<len(fibreIds) else None for i in range(2)]\n \n \n new_data = (label, description, data_type, data, data_size, duration, num_data_points, sampleIds[0], sampleIds[1], sampleIds[2], sampleIds[3], fibreIds[0], fibreIds[1], date_created)\n \n \n \n self.cursor.execute(ADD_DATA_COMMAND, new_data)\n \n self.cnx.commit()\n \n \n if verbose: print(\"Data added successfully\")", "def write_BMS(self,BMS_LIST):\n tmp_n_modules = len(BMS_LIST)\n tmp_time =\"{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}\".format(datetime.datetime.now().year,datetime.datetime.now().month,datetime.datetime.now().day,datetime.datetime.now().hour,datetime.datetime.now().minute,datetime.datetime.now().second)\n\n cursor = self._port.cursor()\n \n tmp_BMS = BMS_LIST\n\n # Preparing SQL query to INSERT a record into the database.\n if tmp_n_modules == 1:\n tmp_sql = \"INSERT INTO pylontech_bms (ts,device_name,soc,voltage,current,temperature,b_status,v_status,c_status,t_status) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n tmp_val = [(tmp_time,'Battery: 1',tmp_BMS[0][0],tmp_BMS[0][1],tmp_BMS[0][2],tmp_BMS[0][3],tmp_BMS[0][4],tmp_BMS[0][5],tmp_BMS[0][6],tmp_BMS[0][7])]\n\n elif tmp_n_modules == 2:\n tmp_sql = \"INSERT INTO pylontech_bms (ts,device_name,soc,voltage,current,temperature,b_status,v_status,c_status,t_status) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n tmp_val = [ (tmp_time,'Battery: 1',tmp_BMS[0][0],tmp_BMS[0][1],tmp_BMS[0][2],tmp_BMS[0][3],tmp_BMS[0][4],tmp_BMS[0][5],tmp_BMS[0][6],tmp_BMS[0][7]),\\\n (tmp_time,'Battery: 2',tmp_BMS[1][0],tmp_BMS[1][1],tmp_BMS[1][2],tmp_BMS[1][3],tmp_BMS[1][4],tmp_BMS[1][5],tmp_BMS[1][6],tmp_BMS[1][7])]\n \n elif tmp_n_modules == 3:\n tmp_sql = \"INSERT INTO pylontech_bms (ts,device_name,soc,voltage,current,temperature,b_status,v_status,c_status,t_status) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n tmp_val = [ (tmp_time,'Battery: 1',tmp_BMS[0][0],tmp_BMS[0][1],tmp_BMS[0][2],tmp_BMS[0][3],tmp_BMS[0][4],tmp_BMS[0][5],tmp_BMS[0][6],tmp_BMS[0][7]),\\\n (tmp_time,'Battery: 2',tmp_BMS[1][0],tmp_BMS[1][1],tmp_BMS[1][2],tmp_BMS[1][3],tmp_BMS[1][4],tmp_BMS[1][5],tmp_BMS[1][6],tmp_BMS[1][7]),\\\n (tmp_time,'Battery: 3',tmp_BMS[2][0],tmp_BMS[2][1],tmp_BMS[2][2],tmp_BMS[2][3],tmp_BMS[2][4],tmp_BMS[2][5],tmp_BMS[2][6],tmp_BMS[2][7])]\n\n elif tmp_n_modules == 4:\n tmp_sql = \"INSERT INTO pylontech_bms (ts,device_name,soc,voltage,current,temperature,b_status,v_status,c_status,t_status) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n tmp_val = [ (tmp_time,'Battery: 1',tmp_BMS[0][0],tmp_BMS[0][1],tmp_BMS[0][2],tmp_BMS[0][3],tmp_BMS[0][4],tmp_BMS[0][5],tmp_BMS[0][6],tmp_BMS[0][7]),\\\n (tmp_time,'Battery: 2',tmp_BMS[1][0],tmp_BMS[1][1],tmp_BMS[1][2],tmp_BMS[1][3],tmp_BMS[1][4],tmp_BMS[1][5],tmp_BMS[1][6],tmp_BMS[1][7]),\\\n (tmp_time,'Battery: 3',tmp_BMS[2][0],tmp_BMS[2][1],tmp_BMS[2][2],tmp_BMS[2][3],tmp_BMS[2][4],tmp_BMS[2][5],tmp_BMS[2][6],tmp_BMS[2][7]),\\\n (tmp_time,'Battery: 4',tmp_BMS[3][0],tmp_BMS[3][1],tmp_BMS[3][2],tmp_BMS[3][3],tmp_BMS[3][4],tmp_BMS[3][5],tmp_BMS[3][6],tmp_BMS[3][7])]\n\n elif tmp_n_modules == 5:\n tmp_sql = \"INSERT INTO pylontech_bms (ts,device_name,soc,voltage,current,temperature,b_status,v_status,c_status,t_status) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n tmp_val = [ (tmp_time,'Battery: 1',tmp_BMS[0][0],tmp_BMS[0][1],tmp_BMS[0][2],tmp_BMS[0][3],tmp_BMS[0][4],tmp_BMS[0][5],tmp_BMS[0][6],tmp_BMS[0][7]),\\\n 
(tmp_time,'Battery: 2',tmp_BMS[1][0],tmp_BMS[1][1],tmp_BMS[1][2],tmp_BMS[1][3],tmp_BMS[1][4],tmp_BMS[1][5],tmp_BMS[1][6],tmp_BMS[1][7]),\\\n (tmp_time,'Battery: 3',tmp_BMS[2][0],tmp_BMS[2][1],tmp_BMS[2][2],tmp_BMS[2][3],tmp_BMS[2][4],tmp_BMS[2][5],tmp_BMS[2][6],tmp_BMS[2][7]),\\\n (tmp_time,'Battery: 4',tmp_BMS[3][0],tmp_BMS[3][1],tmp_BMS[3][2],tmp_BMS[3][3],tmp_BMS[3][4],tmp_BMS[3][5],tmp_BMS[3][6],tmp_BMS[3][7]),\\\n (tmp_time,'Battery: 5',tmp_BMS[4][0],tmp_BMS[4][1],tmp_BMS[4][2],tmp_BMS[4][3],tmp_BMS[4][4],tmp_BMS[4][5],tmp_BMS[4][6],tmp_BMS[4][7])]\n\n elif tmp_n_modules == 6:\n tmp_sql = \"INSERT INTO pylontech_bms (ts,device_name,soc,voltage,current,temperature,b_status,v_status,c_status,t_status) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n tmp_val = [ (tmp_time,'Battery: 1',tmp_BMS[0][0],tmp_BMS[0][1],tmp_BMS[0][2],tmp_BMS[0][3],tmp_BMS[0][4],tmp_BMS[0][5],tmp_BMS[0][6],tmp_BMS[0][7]),\\\n (tmp_time,'Battery: 2',tmp_BMS[1][0],tmp_BMS[1][1],tmp_BMS[1][2],tmp_BMS[1][3],tmp_BMS[1][4],tmp_BMS[1][5],tmp_BMS[1][6],tmp_BMS[1][7]),\\\n (tmp_time,'Battery: 3',tmp_BMS[2][0],tmp_BMS[2][1],tmp_BMS[2][2],tmp_BMS[2][3],tmp_BMS[2][4],tmp_BMS[2][5],tmp_BMS[2][6],tmp_BMS[2][7]),\\\n (tmp_time,'Battery: 4',tmp_BMS[3][0],tmp_BMS[3][1],tmp_BMS[3][2],tmp_BMS[3][3],tmp_BMS[3][4],tmp_BMS[3][5],tmp_BMS[3][6],tmp_BMS[3][7]),\\\n (tmp_time,'Battery: 5',tmp_BMS[4][0],tmp_BMS[4][1],tmp_BMS[4][2],tmp_BMS[4][3],tmp_BMS[4][4],tmp_BMS[4][5],tmp_BMS[4][6],tmp_BMS[4][7]),\\\n (tmp_time,'Battery: 6',tmp_BMS[5][0],tmp_BMS[5][1],tmp_BMS[5][2],tmp_BMS[5][3],tmp_BMS[5][4],tmp_BMS[5][5],tmp_BMS[5][6],tmp_BMS[5][7])]\n\n elif tmp_n_modules == 7:\n tmp_sql = \"INSERT INTO pylontech_bms (ts,device_name,soc,voltage,current,temperature,b_status,v_status,c_status,t_status) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n tmp_val = [ (tmp_time,'Battery: 1',tmp_BMS[0][0],tmp_BMS[0][1],tmp_BMS[0][2],tmp_BMS[0][3],tmp_BMS[0][4],tmp_BMS[0][5],tmp_BMS[0][6],tmp_BMS[0][7]),\\\n (tmp_time,'Battery: 2',tmp_BMS[1][0],tmp_BMS[1][1],tmp_BMS[1][2],tmp_BMS[1][3],tmp_BMS[1][4],tmp_BMS[1][5],tmp_BMS[1][6],tmp_BMS[1][7]),\\\n (tmp_time,'Battery: 3',tmp_BMS[2][0],tmp_BMS[2][1],tmp_BMS[2][2],tmp_BMS[2][3],tmp_BMS[2][4],tmp_BMS[2][5],tmp_BMS[2][6],tmp_BMS[2][7]),\\\n (tmp_time,'Battery: 4',tmp_BMS[3][0],tmp_BMS[3][1],tmp_BMS[3][2],tmp_BMS[3][3],tmp_BMS[3][4],tmp_BMS[3][5],tmp_BMS[3][6],tmp_BMS[3][7]),\\\n (tmp_time,'Battery: 5',tmp_BMS[4][0],tmp_BMS[4][1],tmp_BMS[4][2],tmp_BMS[4][3],tmp_BMS[4][4],tmp_BMS[4][5],tmp_BMS[4][6],tmp_BMS[4][7]),\\\n (tmp_time,'Battery: 6',tmp_BMS[5][0],tmp_BMS[5][1],tmp_BMS[5][2],tmp_BMS[5][3],tmp_BMS[5][4],tmp_BMS[5][5],tmp_BMS[5][6],tmp_BMS[5][7]),\\\n (tmp_time,'Battery: 7',tmp_BMS[6][0],tmp_BMS[6][1],tmp_BMS[6][2],tmp_BMS[6][3],tmp_BMS[6][4],tmp_BMS[6][5],tmp_BMS[6][6],tmp_BMS[6][7])]\n\n elif tmp_n_modules == 8:\n tmp_sql = \"INSERT INTO pylontech_bms (ts,device_name,soc,voltage,current,temperature,b_status,v_status,c_status,t_status) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n tmp_val = [ (tmp_time,'Battery: 1',tmp_BMS[0][0],tmp_BMS[0][1],tmp_BMS[0][2],tmp_BMS[0][3],tmp_BMS[0][4],tmp_BMS[0][5],tmp_BMS[0][6],tmp_BMS[0][7]),\\\n (tmp_time,'Battery: 2',tmp_BMS[1][0],tmp_BMS[1][1],tmp_BMS[1][2],tmp_BMS[1][3],tmp_BMS[1][4],tmp_BMS[1][5],tmp_BMS[1][6],tmp_BMS[1][7]),\\\n (tmp_time,'Battery: 3',tmp_BMS[2][0],tmp_BMS[2][1],tmp_BMS[2][2],tmp_BMS[2][3],tmp_BMS[2][4],tmp_BMS[2][5],tmp_BMS[2][6],tmp_BMS[2][7]),\\\n (tmp_time,'Battery: 
4',tmp_BMS[3][0],tmp_BMS[3][1],tmp_BMS[3][2],tmp_BMS[3][3],tmp_BMS[3][4],tmp_BMS[3][5],tmp_BMS[3][6],tmp_BMS[3][7]),\\\n (tmp_time,'Battery: 5',tmp_BMS[4][0],tmp_BMS[4][1],tmp_BMS[4][2],tmp_BMS[4][3],tmp_BMS[4][4],tmp_BMS[4][5],tmp_BMS[4][6],tmp_BMS[4][7]),\\\n (tmp_time,'Battery: 6',tmp_BMS[5][0],tmp_BMS[5][1],tmp_BMS[5][2],tmp_BMS[5][3],tmp_BMS[5][4],tmp_BMS[5][5],tmp_BMS[5][6],tmp_BMS[5][7]),\\\n (tmp_time,'Battery: 7',tmp_BMS[6][0],tmp_BMS[6][1],tmp_BMS[6][2],tmp_BMS[6][3],tmp_BMS[6][4],tmp_BMS[6][5],tmp_BMS[6][6],tmp_BMS[6][7]),\\\n (tmp_time,'Battery: 8',tmp_BMS[7][0],tmp_BMS[7][1],tmp_BMS[7][2],tmp_BMS[7][3],tmp_BMS[7][4],tmp_BMS[7][5],tmp_BMS[7][6],tmp_BMS[7][7])]\n\n else:\n print(\"Unsuported number of battery modules. Only 1-8 modules are supported. The module number parsed is:\" + str(tmp_n_modules))\n return False\n try:\n # Executing the SQL command\n cursor.executemany(tmp_sql, tmp_val)\n #print(cursor.rowcount, \"records inserted.\")\n # Commit your changes in the database\n self._port.commit()\n #print(\"successfully send data to database\")\n return True\n except:\n # Rolling back in case of error\n self._port.rollback()\n print(\"Failed to send data to database\")\n return False", "def addDataPoints(self):\n pass", "def UpdateBrandSeries():\r\n MilkSeries.objects.all().delete()\r\n MilkBrand.objects.all().delete()\r\n MilkTunnel.objects.all().delete()\r\n brandlist = [item.brand for item in MilkProd.objects.all()]\r\n unique_brandlist = {}.fromkeys(brandlist).keys()\r\n for item in unique_brandlist:\r\n b = MilkBrand(name=item)\r\n b.save()\r\n \r\n for brandstr in unique_brandlist:\r\n brandset = MilkProd.objects.filter(brand=brandstr)\r\n serieslist = [item.name for item in brandset]\r\n unique_series = {}.fromkeys(serieslist).keys()\r\n for ser in unique_series:\r\n s = MilkSeries(name=ser, BrandIn=brandstr)\r\n s.save()\r\n \r\n tunnellist = [item.tunnel for item in MilkProd.objects.all()]\r\n unique_tunnellist = {}.fromkeys(tunnellist).keys()\r\n for item in unique_tunnellist:\r\n t = MilkTunnel(name=item)\r\n t.save()", "def insert_into_sql(chunk):\n bulk_list = []\n for row in chunk:\n bulk_list.append(StockData(\n date=str(row[0])[0:4] + '-' + str(row[0])[4:6] + '-' + str(row[0])[6:8],\n code=row[1],\n code_name=row[2],\n d1_diff_rate=row[3],\n close=row[4],\n open=row[5],\n high=row[6],\n low=row[7],\n volume=row[8],\n clo5=row[9],\n clo10=row[10],\n clo20=row[11],\n clo40=row[12],\n clo60=row[13],\n clo80=row[14],\n clo100=row[15],\n clo120=row[16],\n clo5_diff_rate=row[17],\n clo10_diff_rate=row[18],\n clo20_diff_rate=row[19],\n clo40_diff_rate=row[20],\n clo60_diff_rate=row[21],\n clo80_diff_rate=row[22],\n clo100_diff_rate=row[23],\n clo120_diff_rate=row[24],\n yes_clo_5=row[25],\n yes_clo_10=row[26],\n yes_clo_20=row[27],\n yes_clo_40=row[28],\n yes_clo_60=row[29],\n yes_clo_80=row[30],\n yes_clo_100=row[31],\n yes_clo_120=row[32],\n vol5=row[33],\n vol10=row[34],\n vol20=row[35],\n vol40=row[36],\n vol60=row[37],\n vol80=row[38],\n vol100=row[39],\n vol120=row[40],\n ))\n StockData.objects.bulk_create(bulk_list)\n return bulk_list", "def loaddata(self):\n # Connect to the db\n self.conn, self.c = self.connect_db(self.dbname)\n # create the bdefile table to \n self.c.execute(oeeutil.sql_create_bdefile_table)\n # Delete any previous records\n self.c.execute('DELETE FROM bdefile')\n # hold the content for analysis\n for item in self.content:\n self.c.execute('INSERT INTO bdefile VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)', item)\n 
self.c.executescript(oeeutil.sql_create_bdefile_view)\n self.conn.commit()", "def _add_to_businesses(params):\n print params\n if not Business.query.filter_by(yelp_id=params['yelp_id']).first():\n business = Business()\n cat_list = []\n for key in params:\n # adds elements in category lists to category table if they don't already exist\n if key == \"categories\":\n for cat in params[key]:\n cat_list.append(cat)\n if not Category.query.filter_by(category_name=cat).first():\n category = Category(category_name=cat)\n db.session.add(category)\n # THROUGH LINE 40 REPLACED BY 30-34\n # for group in params[key]:\n # print type(group)\n # for subtype in group:\n # print type(subtype)\n # if not Category.query.filter_by(category_name=subtype).first():\n # category = Category(category_name=subtype)\n # db.session.add(category)\n # cat_list.append(subtype)\n # print cat_list\n elif key == \"yelp_id\":\n business.yelp_id = params[key]\n elif key == \"name\":\n business.name = params[key]\n elif key == \"address_line_1\":\n business.address_line_1 = params[key]\n elif key == \"address_line_2\":\n business.address_line_2 = params[key]\n elif key == \"city\":\n business.city = params[key]\n elif key == \"state\":\n business.state = params[key]\n elif key == \"zipcode\":\n business.zipcode = params[key]\n elif key == \"phone\":\n business.phone = params[key]\n elif key == \"latitude\":\n business.latitude = params[key]\n elif key == \"longitude\":\n business.longitude = params[key]\n try:\n db.session.add(business)\n db.session.commit()\n except:\n db.session.rollback()\n print business.name, \"has insufficient information, skipping.\"\n return None\n # creates rows in reference table\n for cat in cat_list:\n # creates row in reference table\n business = Business.query.filter_by(yelp_id=params['yelp_id']).first()\n catbus = BusinessCategory()\n print business.business_id\n catbus.business_id = business.business_id\n cat_object = Category.query.filter_by(category_name=cat).first()\n print cat_object.category_name\n catbus.category_id = cat_object.category_id\n\n if not BusinessCategory.query.filter_by(business_id=catbus.business_id,\n category_id=catbus.category_id).first():\n db.session.add(catbus)\n db.session.commit()\n\n print \"added \" + business.name + \" to db\"\n\n else:\n print \"Already in Dictionary\"\n return None", "def add_stock(self, symbol):\n verbose_message(\"Adding \" + symbol + \"...\")\n if symbol not in self.stocks:\n self.stocks += [symbol]\n\n data = StockData()\n\n data.name = StockDataCollection.get_stock_name(symbol)\n data.symbol = symbol\n data.market = StockDataCollection.get_market_data(symbol,\n str(self.start_date)[:USEFUL_TIMESTAMP_CHARS],\n str(self.end_date)[:USEFUL_TIMESTAMP_CHARS])\n\n # create a list of dates in the YYYY-MM-DD format\n data.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(data.market.index)]\n data.dates = data.market.index\n\n for i in data.dates:\n if i not in self.dates:\n self.dates += [i]\n self.dates.sort()\n self.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(self.dates)]\n\n for collection_function in self.features:\n collection_function(data)\n\n data.position = []\n for _ in data.dates:\n data.position += [0]\n if type(self.cash) is not pd.DataFrame:\n self.cash += [self.starting_capital]\n\n data.position = pd.DataFrame({\"Position\": data.position}).set_index(data.dates)\n if type(self.cash) is not pd.DataFrame:\n self.cash = pd.DataFrame({\"cash\": self.cash}).set_index(data.dates)\n debug_message(data)\n 
self.shuffled_data_reset()\n self.stock_data[symbol] = data", "def insert_values():\n pass", "def add_entry(source,lbs):\n\tnow = datetime.now()\n\tdate = now.strftime('%m-%d-%Y')\n\tdata = {date: {'Date': date, 'Weight': lbs}}\n\tsource.inject(data)", "def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })" ]
[ "0.6130202", "0.5961946", "0.5830775", "0.5714348", "0.57038665", "0.56634265", "0.556326", "0.55575716", "0.54669654", "0.5381389", "0.53632724", "0.5322067", "0.5307755", "0.52783656", "0.5196585", "0.5154061", "0.5117939", "0.50698835", "0.5065572", "0.5061166", "0.5053288", "0.5050006", "0.5018439", "0.50179297", "0.50149643", "0.5010153", "0.50081563", "0.4978217", "0.49698648", "0.49670687" ]
0.7271585
0
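Several of the negative snippets in the record above share the same persistence pattern: build a parameterized INSERT statement, run it with executemany over a list of row tuples, then commit on success or roll back on error. The sketch below is a minimal, self-contained illustration of that pattern only; the in-memory SQLite database, the table name, and the sample rows are assumptions chosen for illustration and are not part of this dataset record.

import sqlite3

def bulk_insert(rows):
    # Parameterized bulk insert with commit/rollback, the pattern the
    # snippets above follow; an in-memory SQLite table stands in for the
    # MySQL/PostgreSQL targets those snippets write to (illustrative only).
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE readings (ts TEXT, device TEXT, soc REAL)")
    try:
        conn.executemany("INSERT INTO readings VALUES (?, ?, ?)", rows)
        conn.commit()
        return True
    except sqlite3.Error:
        conn.rollback()
        return False
    finally:
        conn.close()

if __name__ == "__main__":
    ok = bulk_insert([("2024-01-01 00:00:00", "Battery: 1", 98.5),
                      ("2024-01-01 00:00:00", "Battery: 2", 97.0)])
    print(ok)  # True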
Static partition method. Performs partition by "plucking" (1 - train_size) * n messages out of the provided data set, and places them in a new test set.
def partition(data, device, train_size=0.8):
    # Start with a copy, will be training
    train = copy.deepcopy(data)
    test = []
    # Get distribution of message indices, keep ordering
    test_len = int((1 - train_size) * len(data))
    test_indices = sorted(random.sample(range(len(data)), test_len), reverse=True)
    # For each index, remove from train and append to test
    for i in test_indices:
        test.append(train.pop(i))
    # Need to reverse test now
    test.reverse()
    # Now label each set individually (performed in place)
    Labeler(train)
    Labeler(test)
    # Rescale data as well
    train = DataRescaler(train).scaled_data
    test = DataRescaler(test).scaled_data
    # Convert to tensors
    # Inputs
    Xtrain = torch.tensor([[s.get('time')] + list(s.get('composite').values()) for s in train], dtype=torch.double).to(device)
    Xtest = torch.tensor([[s.get('time')] + list(s.get('composite').values()) for s in test], dtype=torch.double).to(device)
    # Targets
    Ttrain = torch.tensor([[s.get('distinct')] for s in train], dtype=torch.long).to(device)
    Ttest = torch.tensor([[s.get('distinct')] for s in test], dtype=torch.long).to(device)
    return (Xtrain, Ttrain, Xtest, Ttest)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _test_train_partition(self, sent_partition_size):\n\n self._train_data_partitioned = self._partition_dataset(\n unpartitioned_dataset=self._train_data,\n sent_partition_size=sent_partition_size\n )\n\n self._test_data_partitioned = self._partition_dataset(\n unpartitioned_dataset=self._test_data,\n sent_partition_size=sent_partition_size\n )", "def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data", "def evenly_partition_dataset(data, labels, nb_teachers):\n\n # This will floor the possible number of batches\n batch_len = int(len(data) / nb_teachers)\n\n nclasses = len(labels[0])\n print(\"Start Index Selection\")\n data_sel = [data[labels[:, j] == 1] for j in range(nclasses)]\n print(\"End Index Selection\")\n i = 0\n data_sel_id = [0] * len(labels[0])\n partition_data = []\n partition_labels = []\n\n while True:\n partition_data.append(data_sel[i][data_sel_id[i]])\n partition_labels.append(np_utils.to_categorical(i, nclasses))\n\n if len(partition_data) == batch_len:\n partition_data = np.asarray(partition_data)\n partition_labels = np.asarray(partition_labels)\n yield partition_data, partition_labels\n partition_data = []\n partition_labels = []\n\n data_sel_id[i] += 1\n if data_sel_id[i] == len(data_sel[i]):\n data_sel_id[i] = 0\n i = (i + 1) % nclasses", "def partition(data, n):\n splits = []\n remaining = data.copy(deep=True)\n for i in range(n):\n split = remaining.sample(frac=1/(n-i), random_state=10)\n splits.append(split)\n remaining = remaining.drop(split.index)\n return splits", "def partition(self, data, labels):\n\t\tfor i in range(self.splits):\n\t\t\tyield self.makePartition(len(labels))", "def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)", "def partition_dataset(data, labels, nb_teachers, teacher_id):\n\n # Sanity check\n assert (int(teacher_id) < int(nb_teachers))\n\n # This will floor the possible number of batches\n batch_len = int(len(data) / nb_teachers)\n\n # Compute start, end indices of partition\n start = teacher_id * batch_len\n end = (teacher_id + 1) * batch_len\n\n # Slice partition off\n partition_data = data[start:end]\n if labels is not None:\n partition_labels = labels[start:end]\n else:\n partition_labels = None\n\n return partition_data, partition_labels", "def test_partition_users():\n ratings = lktu.ml_test.ratings\n ratings = ratings.set_index('user') ##forces non-unique index\n with pytest.raises(ValueError):\n for split in xf.partition_users(ratings, 5, xf.SampleN(5)):\n pass", "def partition(self, data, labels):\n\t\traise Exception(\"Not implmented\")", "def partition_mnist():\n (x_train, y_train), testset = tf.keras.datasets.mnist.load_data()\n partitions = []\n # We keep all partitions equal-sized in this example\n partition_size = math.floor(len(x_train) / NUM_CLIENTS)\n for cid in range(NUM_CLIENTS):\n # Split dataset into non-overlapping NUM_CLIENT partitions\n idx_from, idx_to = int(cid) * partition_size, (int(cid) + 1) * partition_size\n partitions.append((x_train[idx_from:idx_to] / 255.0, y_train[idx_from:idx_to]))\n return partitions, testset", "def test_split(self):\n array = np.arange(1000)\n df = DataFlow.from_numpy(array)\n\n # first, test throw errors on invalid arguments\n def assert_invalid_arg(**kwargs):\n with self.assertRaises(ValueError):\n df.split(**kwargs)\n 
assert_invalid_arg(partitions=[])\n assert_invalid_arg(partitions=[1000, 1])\n assert_invalid_arg(partitions=[1000, -1])\n assert_invalid_arg(partitions=[1, 2])\n assert_invalid_arg(portions=[])\n assert_invalid_arg(portions=[1.0, 0.1])\n assert_invalid_arg(portions=[1.0, -1])\n assert_invalid_arg(portions=[0.1, 0.2])\n\n # next, test split without shuffling\n df1, df2, df3 = df.split(partitions=[700, 200, 100])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n df1, df2, df3 = df.split(portions=[-1, 0.2, 0.1])\n np.testing.assert_array_equal(df1.all()[0], array[:700])\n np.testing.assert_array_equal(df2.all()[0], array[700:900])\n np.testing.assert_array_equal(df3.all()[0], array[900:1000])\n\n # finally, test split with shuffling\n df1, df2 = df.split(portions=[0.5, -1], shuffle=True)\n self.assertEquals(len(df1), 500)\n self.assertEquals(len(df2), 500)\n df_array = np.concatenate([df1.all()[0], df2.all()[0]], axis=0)\n self.assertFalse(np.all(df_array == array))\n np.testing.assert_array_equal(np.sort(df_array), array)", "def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data", "def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set", "def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)", "def data_partition(num_workers, data_set, separate=True):\n\n size = data_set.data.shape[0]\n ind = list(range(size))\n\n if separate:\n shuffle(ind)\n # worker_size is the number of samples per worker. 
The last worker however receives the additional samples\n worker_size = size // num_workers\n data = dict.fromkeys(list(range(num_workers)))\n\n for w in range(num_workers):\n if w is not num_workers - 1:\n data[w] = ind[w * worker_size: (w+1) * worker_size]\n # data[w][\"X\"] = X_train[ind[w * worker_size: (w + 1) * worker_size], :]\n # data[w][\"Y\"] = Y_train[ind[w * worker_size: (w + 1) * worker_size], :]\n else:\n data[w] = ind[w * worker_size:]\n # data[w][\"X\"] = X_train[ind[w * worker_size:], :]\n # data[w][\"Y\"] = Y_train[ind[w * worker_size:], :]\n\n else:\n data = dict.fromkeys(list(range(num_workers)))\n for w in range(num_workers):\n shuffle(ind)\n data[w] = ind\n # data[w][\"X\"] = X_train[ind, :]\n # data[w][\"Y\"] = Y_train[ind, :]\n\n return data", "def partition_data(data, folds, fold_n, fold_size):\n\n # Training Data Partition\n train_1 = data[:(folds - fold_n) * fold_size]\n train_2 = data[(folds - fold_n + 1) * fold_size:]\n train = np.concatenate((train_1, train_2), axis=0)\n\n # Validation Data Partition\n val = data[(folds - fold_n) * fold_size:(folds - fold_n + 1) * fold_size]\n\n return train, val", "def partition(data: list, parts: list, *args: float) -> list:\n random.seed(42)\n partition_names = parts\n random.shuffle(data)\n n = len(data)\n rem, a, b = n, 0, 0\n parts = []\n\n for p in args:\n b = a + int(n*p)\n parts.append(data[a:b])\n rem -= (b - a)\n a = b\n # end\n\n parts.append(data[-rem:])\n return parts", "def partition(lis: list, n: int):\n # prevent destroying the original dataset\n lis_cp = copy.deepcopy(lis)\n random.shuffle(lis_cp)\n if len(lis) > n:\n return [lis_cp[i::n] for i in range(n)]\n else:\n return [[lis_cp[i]] for i in range(len(lis))]", "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r", "def split_data(data, test_size):\r\n ntest = int(round(len(data) * (1 - test_size)))+1\r\n \r\n train, test = data[:ntest], data[ntest:]\r\n \r\n return train,test", "def data_split(self, test_size=0.2, stratify=None):\n return train_test_split(\n self, test_size=test_size, random_state=42, stratify=stratify\n )", "def test_partition(self):\n # one swap at the end\n list = [5, 6, 7, 8, 9, 2]\n partition(list, 0, 5)\n # assert list == [2, 6, 7, 8, 9, 5] # should be improved in future", "def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, dataset_copy", "def partition_data(self):\n\n _header_ = self._header_ + 'partition_data(): '\n\n if self.verbose:\n print(_header_ + 'Partitioning data ...')\n\n network = self._useful_network()\n\n if self.nidx_train:\n # The only reason that allows .nidx to not be empty would be that a training Data was copied over\n # hence, the training node indices are retained and need to be excluded\n print(_header_ + 'Excluding %d training nodes transfered from training dataset ...' 
% len(self.nidx_train))\n nidx = set(self.nidx2lidx.keys()) - set(self.nidx_train)\n self.nidx_exclude += self.nidx_train\n self.nidx_train = []\n else:\n nidx = set(self.nidx2lidx.keys())\n\n for l in nidx:\n if l in network:\n if self.node_labels[l]:\n self.nidx_train.append(l)\n else:\n self.nidx_exclude.append(l)\n\n if self.verbose:\n print(_header_ + 'Found %d nodes' % len(self.nidx2lidx))\n print(' %d nodes with labels of interest' % len(self.nidx_train))\n print(' %d nodes can be used to predict' % len(self.nidx_pred))\n print(' %d nodes cannot be mapped due to lack of mappable links' % len(self.nidx_exclude))\n\n return self", "def __init__(self, partition, test=False, local_test_data_dir=_LOCAL_TEST_DATA_DIR):\n assert sum(partition) == 100, 'The sum of the partition list must be 100: {}'.format(partition)\n self._partition = partition\n self._test = test\n # Split the files up according to the self._partition list.\n self._partitioned_filenames = []\n filenames = data_filenames(shuffle=False, test=self._test,\n local_test_data_dir=local_test_data_dir)\n part_start = 0\n for i, part_size in enumerate(self._partition):\n part_end = part_start + int(len(filenames) * 0.01 * part_size)\n assert part_end - part_start > 0, 'The number of files in partition {} is zero.'.format(i)\n self._partitioned_filenames.append(filenames[part_start:part_end])", "def partition_train_valid_test2(data, classes, others, ratio=(1,1,1), rng=np.random.RandomState(1000)):\n k=sum(ratio) # ratio must be a vector of integers\n ind=kfold_cross_validation(classes,k=k,shuffle=True,rng=rng)\n sequence=np.arange(len(classes))\n train_ind=np.array([],dtype=int)\n valid_ind=np.array([],dtype=int)\n test_ind=np.array([],dtype=int)\n count=0\n for ki in range(k):\n if count<ratio[0]:\n train_ind=np.append(train_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0] and count <ratio[0]+ratio[1]:\n valid_ind=np.append(valid_ind,sequence[ind==ki])\n count=count+1\n continue\n if count>=ratio[0]+ratio[1] and ratio[2]>0:\n test_ind=np.append(test_ind,sequence[ind==ki])\n count=count+1\n continue\n train_set_x=data[train_ind]\n train_set_y=classes[train_ind]\n if others is not None:\n train_set_others=others[train_ind]\n else:\n train_set_others=None\n valid_set_x=data[valid_ind]\n valid_set_y=classes[valid_ind]\n if others is not None:\n valid_set_others=others[valid_ind]\n else:\n valid_set_others=None\n test_set_x=data[test_ind]\n test_set_y=classes[test_ind]\n if others is not None:\n test_set_others=others[test_ind]\n else:\n test_set_others=None\n \n return train_set_x,train_set_y,train_set_others,valid_set_x,valid_set_y,valid_set_others,test_set_x,test_set_y,test_set_others", "def split_train_test(ratings):\r\n ratings = ratings.sample(frac=1).reset_index(drop=True)\r\n train_user_list = []\r\n train_item_list = []\r\n train_rating_list = []\r\n test_user_list = []\r\n test_item_list = []\r\n test_rating_list = []\r\n user_pool = set(ratings['userId'].unique())\r\n for idx in user_pool:\r\n flag = 0\r\n items = ratings[ratings['userId']==idx][['itemId','rating']]\r\n for i, row in items.iterrows():\r\n if flag == 0:\r\n test_user_list.append(int(idx))\r\n test_item_list.append(int(row['itemId']))\r\n test_rating_list.append(row['rating'])\r\n flag = 1\r\n else:\r\n train_user_list.append(int(idx))\r\n train_item_list.append(int(row['itemId']))\r\n train_rating_list.append(row['rating'])\r\n\r\n train = pd.DataFrame({'userId': train_user_list, 'itemId': train_item_list, 'rating': train_rating_list}, 
columns=['userId', 'itemId', 'rating'])\r\n test = pd.DataFrame({'userId': test_user_list, 'itemId': test_item_list, 'rating': test_rating_list}, columns=['userId', 'itemId', 'rating'])\r\n return [train, test]\r\n \r\n\r\n \r\n #train, test = train_test_split(ratings, test_size=0.1, shuffle=True)\r\n #return [train, test]\r", "def train_test_split(collection):\r\n num_docs_train = int(training_docs * BATCH_SIZE)\r\n train_corpus = collection[:num_docs_train]\r\n test_corpus = collection[num_docs_train:]\r\n return num_docs_train, train_corpus, test_corpus", "def train_test_split(self):\n random.seed(self.args.seed)\n nodes = [node for node in range(self.ncount)]\n random.shuffle(nodes)\n self.train_nodes = torch.LongTensor(nodes[0:self.args.training_size])\n self.validation_nodes = torch.LongTensor(nodes[self.args.training_size:self.args.training_size+self.args.validation_size])\n self.test_nodes = torch.LongTensor(nodes[self.args.training_size+self.args.validation_size:])", "def splitData(groupList, trainSize):\r\n from sklearn.model_selection import StratifiedShuffleSplit\r\n\r\n groupList[0]['text'] = cleanRealTexts(list(groupList[0]['text']))\r\n\r\n classLabels = np.array([])\r\n for i, group in enumerate(groupList):\r\n classLabels = np.append(classLabels, np.repeat(i, len(group)))\r\n\r\n classData = pd.concat(groupList).reset_index(drop=True)\r\n\r\n splits = list(StratifiedShuffleSplit(n_splits=i,\r\n test_size=1-trainSize,\r\n train_size=trainSize,\r\n random_state=0).split(X=classData, y=classLabels))[0]\r\n trainIdx, testIdx = splits\r\n\r\n trainData = classData.iloc[trainIdx]\r\n testData = classData.iloc[testIdx]\r\n trainLabels = classLabels[trainIdx]\r\n testLabels = classLabels[testIdx]\r\n\r\n return [[trainData, trainLabels], [testData, testLabels]]" ]
[ "0.67046744", "0.627256", "0.62224567", "0.6146135", "0.608464", "0.60470444", "0.6025138", "0.60056865", "0.5964145", "0.596257", "0.59514266", "0.59134537", "0.59086394", "0.5894597", "0.5875567", "0.5854712", "0.5852151", "0.5848135", "0.58325994", "0.58012825", "0.57805204", "0.577713", "0.57587653", "0.57305884", "0.5714537", "0.57144254", "0.5712983", "0.57010627", "0.5699726", "0.5695277" ]
0.6851961
0
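Because this record's positive document depends on project-specific helpers (Labeler, DataRescaler, torch tensor conversion) that are not defined here, a minimal, self-contained sketch of just the index-"plucking" split it performs may help; the function name, the explicit seed, and the toy integer data below are illustrative assumptions, not part of the record.

import random

def plucking_split(data, train_size=0.8, seed=0):
    # Copy so the caller's list is untouched; this copy becomes the training set.
    train = list(data)
    test = []
    rng = random.Random(seed)
    # Number of items to move into the test set.
    test_len = int((1 - train_size) * len(train))
    # Sample indices, then pop from the back so earlier indices stay valid.
    for i in sorted(rng.sample(range(len(train)), test_len), reverse=True):
        test.append(train.pop(i))
    test.reverse()  # restore original ordering of the plucked items
    return train, test

if __name__ == "__main__":
    train, test = plucking_split(list(range(10)), train_size=0.8)
    print(train, test)  # 8 training items and 2 test items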
Calculate the number of available moves for the passed-in player
def number_moves(game, player): return float(len(game.get_legal_moves(player)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __moves_available(self, board: Board):\n player_moves = self.get_num_of_moves(board, self.player_color)\n opponent_moves = self.get_num_of_moves(board, self.opponent_color)\n # print(len(player_moves), len(opponent_moves))\n\n return player_moves - opponent_moves * 3", "def get_num_moves(self, player: PlayerColor) -> int:\r\n player_squares: List[Square] = self.get_player_squares(player)\r\n count: int = 0\r\n for player_square in player_squares:\r\n adj_squares: List[Square] = \\\r\n self._get_adjacent_squares(player_square.pos)\r\n for adj_square in adj_squares:\r\n if (adj_square.state == SquareState.OPEN):\r\n count += 1\r\n elif(adj_square.state == SquareState.OCCUPIED):\r\n opposite_square: Square = \\\r\n self.squares.get(\r\n self._get_opposite_pos(player_square.pos,\r\n adj_square.pos))\r\n if (opposite_square is not None\r\n and opposite_square.state == SquareState.OPEN):\r\n count += 1\r\n\r\n return count", "def open_move_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n return float(len(game.get_legal_moves(player)))", "def rate_board(board, player):\n approx_player_moves = sum(\n len(_get_empty_neighbors((i, j), board))\n for i in range(5)\n for j in range(5)\n if board[i][j] == player\n )\n approx_opponent_moves = sum(\n len(_get_empty_neighbors((i, j), board))\n for i in range(5)\n for j in range(5)\n if board[i][j] == -player\n )\n return approx_player_moves - approx_opponent_moves", "def get_number_of_moves(self):\n return self._number_of_moves", "def _get_n_players(env):\n return len(env.action_space.spaces)", "def __count_player(self):\n # Count up the player\n self.current_player += 1\n\n # Check is the self.current_player is a ghost player\n while self.current_player in self.ghost_players:\n self.current_player += 1\n\n # If the count is over 3 then reset to player 0 and count up the round\n if self.current_player > 3:\n self.current_player = 0\n self.round += 1", "def count_players(definition):\n _, player_definition = parse_player_definition(definition)\n return (int(player_definition['left_players']) +\n int(player_definition['right_players']))", "def noOfPlayers(self):\n\t\tnumber = 0\n\t\tfor n in range(6):\n\t\t\tif self.playerList[n] != None:\n\t\t\t\tnumber = number + 1\n\t\treturn number", "def __heuristic2__(game, player):\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - 2 * opp_moves)", "def number_of_moves(self):\n return self._move_seq.length()", "def player(board):\n total = 0\n for i in range(len(board)):\n for j in range(len(board)):\n total = total + utility_map[board[i][j]]\n\n # If they cancel out then equal number so X's turn\n if total == 0:\n return X\n else:\n return O", "def number_of_players(self) -> int:\n return self.param.number_of_players", "def get_available_moves(self, board):\n pass", "def count_discs(self, player: Player) -> int:\n count = 0\n player_disc = disc.get_disc(player)\n for i in range(self.size):\n for j in range(self.size):\n if self._grid[i][j] == player_disc:\n count += 1\n return count", "def run(self):\r\n noMove = 0\r\n while(noMove < 2):\r\n options = self.get_valid_moves()\r\n if len(options) > 0:\r\n res = False\r\n while(not res):\r\n move = self.players[self.turn-1].get_move(self.board.copy(),options.copy())\r\n res = self.update_board(move,_testing=False)\r\n else:\r\n noMove += 1\r\n self.turn = (self.turn * 2 ) % 3 # 1 --> 2 2 --> 
1\r\n return self.gameCount()", "def count_chips(board, player):\n cont = 0\n for row in board:\n for col in row:\n if col == PLAYER_CHIPS[player]:\n cont += 1\n return cont", "def player(board):\n X_count = 0\n O_count = 0\n\n for row in board:\n X_count += row.count(X)\n O_count += row.count(O)\n\n if X_count <= O_count:\n return X\n else:\n return O", "def open_positions_score(game, player):\n moves = game.get_legal_moves()\n side_coef = 1 if player == game.active_player else -1\n \n if len(moves) == 0:\n result = float(\"-inf\")\n else:\n result = len(moves)\n \n \n return float(result*side_coef)", "def numberOfPlayers(self):\r\n return len(self.playerPreparers)", "def __heuristic1__(game, player):\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n if game.move_count < ((game.height * game.width)/2):\n return float(own_moves - 3 * opp_moves)\n else:\n return float(own_moves - opp_moves)", "def count_legal_moves(board, color):\n return len(legal_moves(board, color))", "def evaluate_position(num_items):\n comp_wins = 0\n player_wins = 0\n\n initial_move = random.randrange(MAX_REMOVE + 1)\n num_items -= initial_move\n next_move = random.randrange(MAX_REMOVE + 1)\n \n\n\n\n\n\n \n \n return 0", "def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp", "def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)", "def _evaluate_num_pieces(self, player):\n evaluation = 0\n if player is Player.black:\n evaluation += self.num_black_pieces * 10\n evaluation -= self.num_white_pieces * 10\n evaluation += self.num_black_kings * 10\n evaluation -= self.num_white_kings * 10\n elif player is Player.white:\n evaluation -= self.num_black_pieces * 10\n evaluation += self.num_white_pieces * 10\n evaluation -= self.num_black_kings * 10\n evaluation += self.num_white_kings * 10\n\n return evaluation", "def get_moves(self, board, player):\r\n width, height = self.board_size\r\n return self.get_moves_c(board, player, width, height)", "def player(board):\n if board == initial_state():\n return X\n\n total_x = 0\n total_o = 0\n\n for i in board:\n total_x += i.count(X)\n total_o += i.count(O)\n\n if (total_x + total_o) % 2 == 1:\n return O\n else:\n return X", "def evaluate(self):\n # if player has no move, then player lost, -inf or inf depend on who the player is\n # if player has moves, use heuristics.\n \n #checkColorMoves = self.getAvailableMoves(self.colorIndex)\n #otherColorMoves = self.getAvailableMoves(1-self.colorIndex)\n \n checkColorMoves = self.getAvailableMovesPreferLonger(self.colorIndex)\n otherColorMoves = self.getAvailableMovesPreferLonger(1-self.colorIndex)\n\n checkColorPieces = self.getPieceCount(self.colorIndex)\n otherColorPieces = self.getPieceCount(1-self.colorIndex)\n\n #checkColorEdgePieces = self.getEgdePieceCount(self.colorIndex)\n #otherColorEdgePieces = self.getEgdePieceCount(1-self.colorIndex)\n\n if self.player == 'computer':\n if checkColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n elif otherColorMoves == 0: #user doesn't have moves\n return float('inf')\n else:\n #return checkColorPieces - 
otherColorPieces\n return checkColorMoves - otherColorMoves\n else:\n if checkColorMoves == 0: #user doesn't have moves\n return float('inf')\n elif otherColorMoves == 0: #computer doesn't have moves\n return float('-inf')\n else:\n #return otherColorPieces - checkColorPieces\n return otherColorMoves - checkColorMoves", "def count_left_players(definition):\n return int(parse_player_definition(definition)[1]['left_players'])" ]
[ "0.74395734", "0.7321936", "0.6822133", "0.6733796", "0.6693827", "0.6667687", "0.6616912", "0.66106665", "0.65502703", "0.6479705", "0.6458653", "0.64512634", "0.6449264", "0.64048064", "0.6316542", "0.62993276", "0.62872946", "0.62864095", "0.62860495", "0.6279876", "0.62721306", "0.6270671", "0.6266518", "0.6227371", "0.6216852", "0.62125957", "0.6199204", "0.61939114", "0.61756265", "0.6169437" ]
0.83128154
0
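The legal-move count above is typically consumed as part of a mobility heuristic (own moves minus opponent moves), which is the pattern several of the negative snippets in this record follow. The sketch below assumes a tiny stand-in for the board API those snippets imply (get_legal_moves, get_opponent); the TinyGame class and its move data are invented purely for illustration.

class TinyGame:
    # Minimal stand-in for the board API assumed by this record's snippets;
    # not the real isolation Board class.
    def __init__(self, moves_by_player):
        self._moves = moves_by_player

    def get_legal_moves(self, player):
        return self._moves[player]

    def get_opponent(self, player):
        return "B" if player == "A" else "A"

def number_moves(game, player):
    return float(len(game.get_legal_moves(player)))

def move_difference(game, player):
    # Common mobility heuristic: my move count minus the opponent's.
    return number_moves(game, player) - number_moves(game, game.get_opponent(player))

if __name__ == "__main__":
    g = TinyGame({"A": [(0, 1), (1, 0)], "B": [(2, 2)]})
    print(move_difference(g, "A"))  # 1.0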
A measure of how complete the board is. Returns float: the fraction of the game board that is complete, between 0 and 1.
def completeness_of_game(game):
    spaces = game.width * game.height
    played_spaces = len([x for x in game._board_state[:-3] if x == 1])
    return float(played_spaces / spaces)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def percentage_complete(self) -> float:\n return self.__percentage_complete", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def percent_complete(self) -> int:\n return pulumi.get(self, \"percent_complete\")", "def mobility(self, board):\n valid_moves_computer = sum(sum(self.game.find_valid_moves(self.computer_color, board, self.board_size)))\n valid_moves_opponent = sum(sum(self.game.find_valid_moves(self.opponent_color, board, self.board_size)))\n\n if valid_moves_computer + valid_moves_opponent == 0:\n return 0\n else:\n return 100 * (valid_moves_computer - valid_moves_opponent) / (valid_moves_computer + valid_moves_opponent)", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def pct(self):\n\t\treturn self.bottle.pct()", "def get_percent_completed(self):\n completed = self.object_list.filter(status__exact=True).count()\n total = len(self.object_list)\n return int(100 * completed / total) if total > 0 else 0", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def pulse_width_percent(self) -> float:", "def percent_left(self):\n return 100 - self.percent_complete", "def get_percentComplete(self):\n val = self.resource.get_cdmi_sys_meta().get(\"cdmi_percentComplete\",\n \"100\")\n return val", "def percent_done(self) -> int:\n percent = (self.downloaded_images/self.total_images) * 100\n return int(percent)", "def progress(self) -> int:\n return int(round(100 * self.somme() / self.finances))", "def displayed_percent(self):\n return (self.displayed_words / self.total_words) * 100", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def pct_status(self):\r\n # DEPRECATED: self.info.n_answers will be removed\r\n # DEPRECATED: use self.t.n_answers instead\r\n if (self.info.get('n_answers')):\r\n self.n_answers = int(self.info['n_answers'])\r\n if self.n_answers != 0 and self.n_answers != None:\r\n return float(len(self.task_runs)) / self.n_answers\r\n else: # pragma: no cover\r\n return float(0)", "def value(self):\n black, white = 0, 0\n for sq in Othello.squares():\n piece = self.__board[sq]\n if piece == BLACK: black += 1\n elif piece == WHITE: white += 1\n if black == white:\n return 0.5\n elif black > white:\n return 1\n else:\n return 0", "def percentage_update(self):\n\n self.event_update()\n return self.percentage", "def get_percentComplete(self):\n val = self.collection.get_cdmi_sys_meta().get(\"cdmi_percentComplete\",\n \"100\")\n return val", "def percentage_progress(self):\n\n if 
self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def percentage_progress(self):\n\n if self.total_progress != 0:\n return float(self.progress) / self.total_progress\n return self.progress", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def percent_b(self) -> float:\n return self._percent_b", "def percent_busy(self):\n return self._percent_busy", "def get_estimated_percentage(self):\n now_id = now_as_id()\n message_id = self.last_message_id\n if message_id >= now_id:\n return 100.0\n \n channel_id = self.source_channel.id\n if channel_id >= message_id:\n return 0.0\n \n if self.is_polling_done():\n return 100.0\n \n return (1.0 - (now_id - message_id) / (now_id - channel_id)) * 100.0" ]
[ "0.7187204", "0.7039708", "0.6988115", "0.6894421", "0.68699104", "0.6860702", "0.6820164", "0.6814431", "0.67519665", "0.6700369", "0.669995", "0.6684258", "0.6677539", "0.66357076", "0.6621043", "0.6610755", "0.65787625", "0.65730417", "0.6553437", "0.6544321", "0.6537376", "0.6506908", "0.6501359", "0.649143", "0.648848", "0.648848", "0.64221", "0.64021885", "0.6396602", "0.6384704" ]
0.7382144
0
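A small, self-contained version of the played-cells-over-total-cells calculation can make the board-state convention easier to see. The explicit width/height parameters and the toy 3x3 state below are assumptions for illustration, including the three trailing bookkeeping slots mirrored from the record's _board_state[:-3] slice.

def completeness(board_state, width, height):
    # Fraction of cells already played; board_state holds one flag per cell
    # plus three trailing bookkeeping entries, as in the record above.
    spaces = width * height
    played = sum(1 for x in board_state[:-3] if x == 1)
    return played / spaces

if __name__ == "__main__":
    # 3x3 toy board with 4 played cells and 3 bookkeeping entries appended.
    state = [1, 0, 1, 0, 1, 0, 0, 0, 1] + [0, 0, 0]
    print(completeness(state, 3, 3))  # ~0.444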
Returns the average depth at which the depth-limited search timed out.
def average_timeout_depth(self):
    if self.timeout_depths:
        return sum(self.timeout_depths) / len(self.timeout_depths)
    else:
        return -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def averageTime(self):\n \n pass", "def getTimeDepth(d,v,dmax=200):\n\n d = np.sort(d)\n d = np.append(d,dmax)\n\n twttop = 2.*np.diff(d)/v # 2-way travel time within each layer\n twttop = np.append(0.,twttop)\n twttop = np.cumsum(twttop) # 2-way travel time from surface to top of each layer\n\n return d, twttop", "def calc_average_depth(self):\n for idx in range(self.size):\n if self._depth_buffer[idx] != []:\n self._depth[idx] = np.mean(self._depth_buffer[idx])", "def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)", "def avg_extend_time(self):\r\n if self.total_extended:\r\n return self.total_extend_time/self.total_extended\r\n else: return 0", "def level_time_average(start_levels, attack_style, attack_bonus, strength_bonus):\n ticks_per_attack = 4 # Scimitar attack speed\n max_hit, accuracy = get_max_hit_and_accuracy(\n start_levels, attack_style, attack_bonus, strength_bonus)\n \n if attack_style == Attack_Style.ATTACK:\n start_exp = osrs.experience[start_levels.attack]\n end_exp = osrs.experience[start_levels.attack+1]\n elif attack_style == Attack_Style.STRENGTH:\n start_exp = osrs.experience[start_levels.strength]\n end_exp = osrs.experience[start_levels.strength+1]\n \n experience = end_exp - start_exp\n avg_hit = accuracy * max_hit / 2\n exp_per_hit = avg_hit * osrs.BASE_EXP_PER_DAMAGE\n ticks = experience / exp_per_hit * ticks_per_attack\n return ticks", "def average_speed(self):\n return self.total_distance * 3600 / self.total_time", "def get_reached(g, source, hops, timeout):\n\n paths = nx.single_source_shortest_path(g, source, hops)\n total = len(paths)\n reached = total\n\n for v in paths.itervalues():\n for i in range(1, len(v)):\n lat = g[v[i-1]][v[i]][\"latency\"]\n if lat > timeout:\n reached -= 1\n break\n\n return float(reached)/float(total)", "def normalized_total_time(p, max_time=3600000):\n if \"cdgp.wasTimeout\" in p and p[\"cdgp.wasTimeout\"] == \"true\":\n v = 3600000\n else:\n v = int(float(p[\"result.totalTimeSystem\"]))\n return max_time if v > max_time else v", "def avgtime(self):\n return (self._total_time['value'] / 1000) / self._total_time['count'] if self._total_time['count'] else 0", "def GetTimeoutScale(self):\n return 1.0", "def GetTimeoutScale(self):\n return 30.0", "def avg_inference_time(self):\n return self._avg_inference_time", "def get_max_dmag_from_depth(depth):\n return 2.5 * np.log10(depth)", "def find_average_duration(video: dict):\n global num_videos\n global total_duration\n\n if duration := video.get('duration'):\n with data_lock:\n num_videos += 1\n total_duration += (duration/1000)\n show_progress()", "def average_speed(self):\n return self._average_speed", "def calculate_timeout(self):\n return self.total_estimated_words() / self.minimum_wpm * 60", "def get_max_time_steps (self):\n return self.degreedays.thawing.num_timesteps", "def _updateLevel(self, level, lastUpdate, time, timeout):\n\t\ttimeoutsPassed = (time - lastUpdate) / timeout\n\t\treturn max(0, level - timeoutsPassed)", "def get_measurement(self):\n return self._convert_to_depth(self._avg_sample())", "def calc_average_speed(path: Path) -> float:\n\t\n\treturn KNOTS_TO_MPS_RATIO * avg(\n\t\tcoords.speed\n\t\tfor coords in path\n\t)", "def get_avg_duration(persons, fps):\r\n if len(persons) > 0:\r\n total_nb_frames = 0\r\n for person in persons:\r\n total_nb_frames = total_nb_frames + person[5] - person[4] \r\n # return the average number of frames by person, divided by the FPS rate to get a value in seconds \r\n return (total_nb_frames / 
len(persons)) / fps \r\n else:\r\n return 0", "def avgtr(self):\n return np.diff(self.trtimes).mean()", "def GetTimeoutScale(self):\n return 30", "def pc_work_time_avg(self) -> \"float\":\n return _beamforming_swig.doaesprit_sptr_pc_work_time_avg(self)", "def get_depth_milli_metres(self):\n self.depth = (self.get_depth_metres() * 1000).astype(np.float32)\n self.full_depth = copy.copy(self.depth)\n return self.depth", "def averageDistance(nbSteps, nbWalks, func):\n totalDistance = 0\n startPoint = (0, 0)\n for _ in range(nbWalks):\n arrival = None\n while arrival is None:\n arrival = func(startPoint, nbSteps)\n totalDistance += distance(startPoint, arrival)\n return pow(totalDistance/nbWalks, 2)", "def depth_percent(self):\n return self.container['depth_percent']", "def base_depth_average_for_date(resort_name, date):\n\n resort_table = resort_table_dict[resort_name]\n\n date_month = int(date[4:6])\n date_day = int(date[6:8])\n query = \"SELECT base_depth FROM %s WHERE CAST(EXTRACT(MONTH FROM status_date) AS INTEGER) = %d AND CAST(EXTRACT(DAY FROM status_date) AS INTEGER) = %d\" %(resort_table, date_month, date_day)\n connection = get_connection()\n total = 0\n counter = 0\n for row in get_select_query_results(connection, query):\n counter += 1\n total += int(row[0])\n if (counter != 0): \n base_depth_to_return = int(total/counter)\n else:\n base_depth_to_return = 0\n return json.dumps(base_depth_to_return)", "def avg_latency(self):\n return self._avg_latency" ]
[ "0.6291707", "0.600832", "0.59655666", "0.58856165", "0.5845937", "0.58268595", "0.57281715", "0.5727008", "0.5701169", "0.5675126", "0.5657336", "0.5652876", "0.5619992", "0.55982137", "0.55904514", "0.5574663", "0.55337244", "0.55325866", "0.55304694", "0.55241233", "0.55085665", "0.5506943", "0.55045795", "0.54983485", "0.54934835", "0.5477794", "0.54772794", "0.5453063", "0.5436129", "0.54318124" ]
0.8632682
0
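The average-or-sentinel guard used above (return -1 when nothing has been recorded yet) is a common way to avoid dividing by zero. The sketch below restates it as a free function with an explicit sentinel parameter; that generalization is illustrative and not the record's own API.

def average_or_sentinel(values, sentinel=-1):
    # Average the recorded depths, or return the sentinel when the list is
    # empty (mirrors the record's empty-list guard).
    return sum(values) / len(values) if values else sentinel

if __name__ == "__main__":
    print(average_or_sentinel([]))         # -1
    print(average_or_sentinel([3, 4, 5]))  # 4.0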
Check if the depth is equal to or greater than the search_depth of the agent, or if there are no legal moves. Raise SearchTimeout if time_left is less than the TIMER_THRESHOLD.
def _terminal_test(self, game, depth):
    if self.time_left() < self.TIMER_THRESHOLD:
        self.timeout_depths.append(depth)
        raise SearchTimeout()
    beyond_search_depth = depth >= self.search_depth
    no_legal_moves = len(game.get_legal_moves()) == 0
    return beyond_search_depth or no_legal_moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # TODO: finish this function!\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n # The try/except block will automatically catch the exception# raised when the timer is about to expire.\n # Implementation of Iterative Deepening Search\n\n try:\n depth = 0 # initialisation of depth\n while True:\n # runs along as game is still active\n depth += 1 # increment depth after each search\n best_move = self.alphabeta(game, depth) # apply alpha beta to search\n\n except SearchTimeout: # cutoff - when timer runs out\n pass # Handle any actions required after timeout as needed\n # Failure\n\n # Return the best move from the last completed search iteration\n return best_move # Solution", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n self.search_depth = 0\n\n while self.time_left() > self.TIMER_THRESHOLD:\n self.search_depth += 1\n best_move = self.alphabeta(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def test_get_move(self):\n\n class DynamicTimer():\n \"\"\"Dynamic Timer allows the time limit to be changed after the\n timer is initialized so that the search timeout can be triggered\n before the timer actually expires. This allows the timer to expire\n when an event occurs, regardless of the clock time required until\n the event happens.\n \"\"\"\n def __init__(self, time_limit):\n self.time_limit = time_limit\n self.start_time = curr_time_millis()\n\n def time_left(self):\n return self.time_limit - (curr_time_millis() - self.start_time)\n\n w, h = 11, 11 # board size\n adversary_location = (0, 0)\n method = \"minimax\"\n\n # The agent under test starts at the positions indicated below, and\n # performs an iterative deepening minimax search (minimax is easier to\n # test because it always visits all nodes in the game tree at every\n # level).\n origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]\n exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]\n\n for idx in range(len(origins)):\n\n # set the initial timer high enough that the search will not\n # timeout before triggering the dynamic timer to halt by visiting\n # the expected number of nodes\n time_limit = 1e4\n timer = DynamicTimer(time_limit)\n eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)\n agentUT, board = self.initAUT(-1, eval_fn, True, method,\n origins[idx], adversary_location,\n w, h)\n legal_moves = board.get_legal_moves()\n chosen_move = agentUT.get_move(board, legal_moves, timer.time_left)\n\n diff_total = abs(board.counts[0] - exact_counts[idx][0])\n diff_unique = abs(board.counts[1] - exact_counts[idx][1])\n\n self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)\n\n self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(\n legal_moves, chosen_move))", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n # Opening Book\n # 
Check if my agent is first to move\n # If yes, use opening book\n if (game._board_state[-1] == None):\n if (not game.get_legal_moves()):\n return best_move\n else:\n best_move = (4,4)\n return best_move\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n\n # Iterative Deepning, stop when timeout\n depth = 0\n while (True):\n depth += 1\n best_move = self.alphabeta(game, depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n if not game.get_legal_moves():\n return (-1,-1)\n best_move = game.get_legal_moves()[0]\n search_depth = 1\n\t\n try:\n while(self.time_left()>self.TIMER_THRESHOLD):\n last_move = best_move\n best_move = self.alphabeta(game,search_depth)\n search_depth += 1\n\n except SearchTimeout:\n best_move = last_move\n return best_move\n\t\n # Return the best move from the last completed search iteration\n return best_move", "def make_move(self, time_limit, players_score):\n if self.search_alg is None:\n raise NotImplementedError(\"utils(make_move): self.search_alg is None!\")\n time_start = t.time()\n only_move = self.check_one_move()\n if only_move is not None:\n max_move = only_move\n else:\n depth = 1\n max_move, max_val = self.search_alg.search(self, depth, True)\n last_iteration_time = t.time() - time_start\n next_iteration_max_time = 4 * last_iteration_time\n time_until_now = t.time() - time_start\n while time_until_now + next_iteration_max_time < time_limit:\n depth += 1\n iteration_start_time = t.time()\n last_good_move = max_move\n max_move, val = self.search_alg.search(self, depth, True)\n if val == float('inf'):\n break\n if val == float('-inf'):\n max_move = last_good_move\n break\n last_iteration_time = t.time() - iteration_start_time\n next_iteration_max_time = 4 * last_iteration_time\n time_until_now = t.time() - time_start\n self.perform_move(maximizing_player=True, move=max_move)\n return max_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n \n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n\n self.time_left = time_left\n legal_moves = game.get_legal_moves()\n if not legal_moves:\n return (-1, -1)\n\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_score = float(\"-inf\")\n best_move = legal_moves[0]\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n self.search_depth = 1\n while best_score is not float(\"inf\"):\n best_move = self.alphabeta(game, self.search_depth, alpha=float(\"-inf\"), beta=float(\"inf\"))\n self.search_depth += 1\n except SearchTimeout:\n return best_move\n pass # Handle any actions required after timeout as needed\n\n # Return the best move 
from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n best_move = self.minimax(game, self.search_depth)\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n if self._iterative:\n depth = 0\n try:\n while depth < len(game.get_blank_spaces()):\n depth += 1\n best_move = self.alphabeta(game, depth)\n except SearchTimeout:\n if depth == 1:\n print(\"Warning: An iterative AlphaBetaPlayer timed out on \"\n \"the first level. Match time_limit too short?\")\n if depth >= 1:\n self._depths.append(depth)\n else:\n try:\n best_move = self.alphabeta(game, self.search_depth)\n except SearchTimeout:\n print(\"Warning: A fixed-depth AlphaBetaPlayer timed out. 
\"\n \"Match time_limit too short or search_depth too high.\")\n\n return best_move", "def minimax(self, game, depth):\n \n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n \n return self.minimax_helper(game, self.search_depth, maximizing_player = True)[1]", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n\n best_move = (-1, -1)\n\n while self.time_left() > self.TIMER_THRESHOLD:\n self.update(game)\n\n if not self.is_terminal(game):\n moves = game.get_legal_moves()\n scores = [(self._plays[game.forecast_move(m)], m) for m in moves]\n _, best_move = max(scores, key=lambda s: s[0])\n\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n # check if new game, reset if it is\n self.check_reset(game)\n best_move = (-1, -1)\n depth = 1\n\n try:\n while True:\n # each completed iteration will be more accurate than previous, so update best move with each iteration's\n # root node (max) result\n best_move = self.alphabeta(game=game, depth=depth)\n\n # return best move if no result\n if best_move == (-1, -1):\n # DEBUG_PRINT\n # print('D={} MOVING TO=> {} from TERM\\n'.format(depth, best_move))\n return best_move\n\n # global stats\n # stats['max_search_depth'] = max(depth, stats['max_search_depth'])\n depth += 1\n\n\n except SearchTimeout:\n # DEBUG_PRINT\n # print('D={} MOVING TO=> {}\\n\\n\\n'.format(stats['max_search_depth'], best_move))\n pass\n\n return best_move", "def cutoff_search(board, depth):\r\n \r\n global start_time\r\n global depth_limit\r\n global winner_white\r\n global winner_black\r\n \r\n test = False\r\n current_time = float(time.time())\r\n if depth != 0 and (current_time - start_time) >= 10.0:\r\n test = True\r\n \r\n b_castle = 0\r\n w_castle = 0\r\n \r\n for i in winner_white:\r\n if i in board.white:\r\n w_castle += 1\r\n if w_castle == 2:\r\n test = True\r\n for i in winner_black:\r\n if i in board.black:\r\n b_castle += 1\r\n if b_castle == 2:\r\n test = True\r\n if (len(board.white) == 0) or (len(board.black) == 0):\r\n test = True\r\n \r\n if depth >= depth_limit:\r\n test = True\r\n \r\n return test", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n self.best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n move = self.minimax(game, self.search_depth)\n return move\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return self.best_move", "def test_minimax(self):\n h, w = 7, 7 # board size\n starting_location = (2, 3)\n adversary_location = (0, 0) # top left corner\n iterative_search = False\n method = \"minimax\"\n\n # The agent under test starts at position (2, 3) on the board, which\n # gives eight (8) possible legal moves [(0, 2), (0, 4), (1, 1), (1, 5),\n # (3, 1), (3, 5), (4, 2), (4, 4)]. The search function will pick one of\n # those moves based on the estimated score for each branch. 
The value\n # only changes on odd depths because even depths end on when the\n # adversary has initiative.\n value_table = [[0] * w for _ in range(h)]\n value_table[1][5] = 1 # depth 1 & 2\n value_table[4][3] = 2 # depth 3 & 4\n value_table[6][6] = 3 # depth 5\n heuristic = makeEvalTable(value_table)\n\n # These moves are the branches that will lead to the cells in the value\n # table for the search depths.\n expected_moves = [set([(1, 5)]),\n set([(3, 1), (3, 5)]),\n set([(3, 5), (4, 2)])]\n\n # Expected number of node expansions during search\n counts = [(8, 8), (24, 10), (92, 27), (418, 32), (1650, 43)]\n\n # Test fixed-depth search; note that odd depths mean that the searching\n # player (student agent) has the last move, while even depths mean that\n # the adversary has the last move before calling the heuristic\n # evaluation function.\n for idx in range(5):\n test_depth = idx + 1\n agentUT, board = self.initAUT(test_depth, heuristic,\n iterative_search, method,\n loc1=starting_location,\n loc2=adversary_location)\n\n # disable search timeout by returning a constant value\n agentUT.time_left = lambda: 1e3\n _, move = agentUT.minimax(board, test_depth)\n\n num_explored_valid = board.counts[0] == counts[idx][0]\n num_unique_valid = board.counts[1] == counts[idx][1]\n\n self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(\n method, test_depth, counts[idx][0], board.counts[0]))\n\n self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(\n method, test_depth, counts[idx][1], board.counts[1]))\n\n self.assertIn(move, expected_moves[idx // 2], WRONG_MOVE.format(\n method, test_depth, expected_moves[idx // 2], move))", "def search(self, time_budget: int) -> None:\n start_time = clock()\n num_rollouts = 0\n\n # do until we exceed our time budget\n while clock() - start_time < time_budget:\n node, state = self.select_node()\n turn = state.turn()\n outcome = self.roll_out(state)\n self.backup(node, turn, outcome)\n num_rollouts += 1\n run_time = clock() - start_time\n node_count = self.tree_size()\n self.run_time = run_time\n self.node_count = node_count\n self.num_rollouts = num_rollouts", "def iterative_depth_search(self, board, player, t_max=30, min_depth=4, stop_at_depth=False):\n\n\t\tt_elapsed = 0.0\n\t\tbest_move, max_depth = None, 1\n\t\talpha, beta = -float('inf'), float('inf')\n\n\t\twhile max_depth <= min_depth or t_elapsed <= t_max:\n\t\t\tif stop_at_depth and max_depth > min_depth:\n\t\t\t\tbreak\n\n\t\t\tstart = time.time()\n\t\t\tbest_moves, best_val = self.alpha_beta_search(board, alpha, beta, player, 0, max_depth)\n\t\t\tt_elapsed += time.time() - start\n\t\t\tmax_depth += 1\n\t\t\tself.update()\n\n\t\t\t# Checkmate found.\n\t\t\tif abs(best_val) == float('inf'):\n\t\t\t\tself.moves_til_checkmate = len(best_moves)\n\t\t\t\tbreak\n\n\t\tbest_move = best_moves[0]\n\n\t\treturn best_move, best_val", "def make_move(self, time_limit, players_score):\n finish_time = time.time() + time_limit\n depth = 1\n best_move = (-np.inf, (-1, 0))\n while True:\n for direction in self.directions:\n initial_state = utils.State(self.board, direction, self.pos, self.current_turn,\n self.fruits_on_board_dict,\n finish_time)\n try:\n outcome = self.minimax_algo.search(initial_state, depth, True)\n if outcome[0] > best_move[0]:\n best_move = outcome\n except TimeoutError:\n self.board[self.pos[0]][self.pos[1]] = -1\n self.pos = (self.pos[0] + best_move[1][0], self.pos[1] + best_move[1][1])\n self.board[self.pos[0]][self.pos[1]] = 1\n\n return best_move[1]\n depth += 1\n # 
print('bigger_depth : {} '.format(depth))", "def get_move(self, game, legal_moves, time_left):\n\n self.time_left = time_left\n \n options = game.get_legal_moves()\n assert options == legal_moves, \"Mismatched moves\"\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n score, move = None, random.choice(legal_moves) if len(legal_moves) > 0 else None\n try:\n # Iterative deepening with Quiessance search:\n if self.iterative is True:\n results = deque(maxlen=3)\n for depth in range (self.search_depth, 25):\n score, move = self.dosearch(game, depth)\n results.append((score, move))\n if self.quiessant_search is True:\n if len(results) >=3 and all(x[1] == move for x in results):\n break\n elif score == float('-inf') or score == float ('inf'):\n break\n if self.time_left() < self.TIMER_THRESHOLD:\n break\n else:\n score, move = self.dosearch(game, self.search_depth)\n assert score is not None\n \n if len (options) > 0:\n assert not (move is None or move is (-1,-1)), \"Move ({}, {}) for '{}/{}' cannot be None or (-1,-1) if options ({}) exist\".format(move, score, self.method, self.score, options)\n assert move in options, \"Move ({}, {}) for '{}/{}' not from existing list of moves ({})\".format(move, score, self.method, self.score, options)\n except Timeout:\n # Handle any actions required at timeout, if necessary\n pass\n\n # Return the best move from the last completed search\n # (or iterative-deepening search iteration)\n return move", "def get_move(self, game, legal_moves, time_left):\n\n self.time_left = time_left\n move = (-1, -1) #Default\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n max_depth = 0\n try:\n # The search method call (alpha beta or minimax) should happen in\n # here in order to avoid timeout. 
The try/except block will\n # automatically catch the exception raised by the search method\n # when the timer gets close to expiring\n if self.iterative:\n #Perform iterative search\n num_of_remaining_moves = len(game.get_blank_spaces())\n for depth in range(1,num_of_remaining_moves):\n if self.time_left() <= self.TIMER_THRESHOLD:\n return move\n\n if self.method == 'alphabeta':\n iterative_best_score, iterative_best_move = self.alphabeta(game, depth)\n else:\n iterative_best_score, iterative_best_move = self.minimax(game, depth)\n\n #Stores score and move of the deepest search\n score = iterative_best_score\n move = iterative_best_move\n max_depth = depth\n else:\n #Perform fixed-depth search\n if self.method == 'alphabeta':\n score, move = self.alphabeta(game, self.search_depth)\n else:\n score, move = self.minimax(game, self.search_depth)\n\n except Timeout:\n # Handle any actions required at timeout, if necessary\n pass\n\n return move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n if not game.get_legal_moves():\n return (-1,-1)\n best_move = game.get_legal_moves()[0]\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def minimax(self, game, depth):\n\n def min_value(game, traversed_depth=1):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n if traversed_depth==depth:\n return self.score(game,self)\n traversed_depth+=1\n v = float(\"inf\")\n for m in game.get_legal_moves():\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n v = min(v, max_value(game.forecast_move(m),traversed_depth))\n return v\n\n def max_value(game, traversed_depth=1):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n if traversed_depth==depth:\n return self.score(game,self)\n traversed_depth+=1\n v = float(\"-inf\")\n for m in game.get_legal_moves():\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n v = max(v, min_value(game.forecast_move(m),traversed_depth))\n return v\n \n return max(game.get_legal_moves(), key=lambda m: min_value(game.forecast_move(m)))", "def heuristic_function(self, game_node: ConnectFourGame, search_depth: int):\n\n # Victory or defeat with a higher depth value is more desirable, because it means less moves are used to reach it\n depth_points = 2520 / (10 - search_depth)\n\n if game_node.winner_id is None:\n # Evaluates empty spaces for any player that is one disc away from a victory\n player_four_in_a_rows = self._determine_near_x_in_a_rows(game_node, game_node.victory_condition)\n ai_count = player_four_in_a_rows.get(self.ai_player_id, 0)\n other_player_count = sum([\n player_four_in_a_rows[player_id]\n for player_id in filter(lambda player_id: player_id != self.ai_player_id, player_four_in_a_rows)\n ])\n if ai_count - other_player_count != 0:\n return (ai_count - other_player_count) * 20\n\n # Evaluates grid positioning, granting bonus points for discs closer to the center of the grid\n max_deviation = floor(game_node.grid.width / 4.0)\n mid = ceil(game_node.grid.width / 2.0)\n ai_count = 0\n other_player_count = 0\n for col in range(mid - max_deviation, mid + max_deviation):\n for 
row in range(0, game_node.grid.height):\n curr_space = game_node.grid.grid_spaces[col][row]\n if curr_space.disc is None:\n break # Short-circuits when empty space in a column is reached\n elif curr_space.disc.player_id == self.ai_player_id:\n ai_count += 1\n else:\n other_player_count += 1\n if ai_count - other_player_count != 0:\n return (ai_count - other_player_count) * 2\n\n return 0\n elif game_node.winner_id == self.ai_player_id:\n return self.winner_heuristic_value + depth_points # Means AI player has won\n else:\n return -self.winner_heuristic_value - depth_points # Means AI player has lost", "def get_move(self, game, time_left):\n self.time_left = time_left\n \n max_depth = game.height * game.width\n \n \n # FIRST, check initial conditions wrt legal_moves\n legal_moves = game.get_legal_moves()\n # If there are no legal_moves return no move\n if len(legal_moves) == 0:\n return (-1,-1)\n # If there's only one legal_move return it, the only choice\n elif len(legal_moves) == 1:\n return legal_moves[0]\n # Otherwise, initialize best_choice at first legal_move\n else:\n best_move = legal_moves[0]\n try:\n for node_number in range(1, max_depth + 1):\n # This exception handing returns the best_move found\n # thus far in the event of a timeout\n \n best_move = self.alphabeta(game, node_number)\n return best_move\n except SearchTimeout:\n pass\n \n # Return the best_move found thus far (or ultimately in the event\n # of exhaustive search completion or timeout)\n return best_move", "def check_move_states(self, player, depth):\n\n if depth >= self.look_ahead:\n return True\n\n for move in gen_moves(player, self.__state.board, self.checker):\n self.__state.push(move)\n winner = self.checker.check_game_over(self.__pid, self.__opponent)\n if winner == self.__opponent:\n return False\n worker = move['xy2']\n if not self.check_build_states(player, worker, depth):\n return False\n self.__state.pop()\n return True" ]
[ "0.61455846", "0.6037407", "0.5798006", "0.5744633", "0.5734702", "0.56711614", "0.5609345", "0.560281", "0.5550985", "0.5535231", "0.5535231", "0.5535231", "0.5535231", "0.55233854", "0.5504624", "0.54934865", "0.5485863", "0.54704106", "0.5470082", "0.5467495", "0.54378605", "0.5416517", "0.5394538", "0.53736573", "0.53661734", "0.53427255", "0.5309117", "0.529258", "0.52775836", "0.5248552" ]
0.6744877
0
Check if the depth is equal to or greater than the search_depth of the agent, or if there are no legal moves. Raise SearchTimeout if time_left is less than the TIMER_THRESHOLD.
def _terminal_test(self, game, depth):
    if self.time_left() < self.TIMER_THRESHOLD:
        self.timeout_depths.append(depth)
        raise SearchTimeout()
    beyond_search_depth = depth >= self.search_depth
    no_legal_moves = len(game.get_legal_moves()) == 0
    return beyond_search_depth or no_legal_moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # TODO: finish this function!\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n # The try/except block will automatically catch the exception# raised when the timer is about to expire.\n # Implementation of Iterative Deepening Search\n\n try:\n depth = 0 # initialisation of depth\n while True:\n # runs along as game is still active\n depth += 1 # increment depth after each search\n best_move = self.alphabeta(game, depth) # apply alpha beta to search\n\n except SearchTimeout: # cutoff - when timer runs out\n pass # Handle any actions required after timeout as needed\n # Failure\n\n # Return the best move from the last completed search iteration\n return best_move # Solution", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n self.search_depth = 0\n\n while self.time_left() > self.TIMER_THRESHOLD:\n self.search_depth += 1\n best_move = self.alphabeta(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def test_get_move(self):\n\n class DynamicTimer():\n \"\"\"Dynamic Timer allows the time limit to be changed after the\n timer is initialized so that the search timeout can be triggered\n before the timer actually expires. This allows the timer to expire\n when an event occurs, regardless of the clock time required until\n the event happens.\n \"\"\"\n def __init__(self, time_limit):\n self.time_limit = time_limit\n self.start_time = curr_time_millis()\n\n def time_left(self):\n return self.time_limit - (curr_time_millis() - self.start_time)\n\n w, h = 11, 11 # board size\n adversary_location = (0, 0)\n method = \"minimax\"\n\n # The agent under test starts at the positions indicated below, and\n # performs an iterative deepening minimax search (minimax is easier to\n # test because it always visits all nodes in the game tree at every\n # level).\n origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]\n exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]\n\n for idx in range(len(origins)):\n\n # set the initial timer high enough that the search will not\n # timeout before triggering the dynamic timer to halt by visiting\n # the expected number of nodes\n time_limit = 1e4\n timer = DynamicTimer(time_limit)\n eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)\n agentUT, board = self.initAUT(-1, eval_fn, True, method,\n origins[idx], adversary_location,\n w, h)\n legal_moves = board.get_legal_moves()\n chosen_move = agentUT.get_move(board, legal_moves, timer.time_left)\n\n diff_total = abs(board.counts[0] - exact_counts[idx][0])\n diff_unique = abs(board.counts[1] - exact_counts[idx][1])\n\n self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)\n\n self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(\n legal_moves, chosen_move))", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n # Opening Book\n # 
Check if my agent is first to move\n # If yes, use opening book\n if (game._board_state[-1] == None):\n if (not game.get_legal_moves()):\n return best_move\n else:\n best_move = (4,4)\n return best_move\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n\n # Iterative Deepning, stop when timeout\n depth = 0\n while (True):\n depth += 1\n best_move = self.alphabeta(game, depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n if not game.get_legal_moves():\n return (-1,-1)\n best_move = game.get_legal_moves()[0]\n search_depth = 1\n\t\n try:\n while(self.time_left()>self.TIMER_THRESHOLD):\n last_move = best_move\n best_move = self.alphabeta(game,search_depth)\n search_depth += 1\n\n except SearchTimeout:\n best_move = last_move\n return best_move\n\t\n # Return the best move from the last completed search iteration\n return best_move", "def make_move(self, time_limit, players_score):\n if self.search_alg is None:\n raise NotImplementedError(\"utils(make_move): self.search_alg is None!\")\n time_start = t.time()\n only_move = self.check_one_move()\n if only_move is not None:\n max_move = only_move\n else:\n depth = 1\n max_move, max_val = self.search_alg.search(self, depth, True)\n last_iteration_time = t.time() - time_start\n next_iteration_max_time = 4 * last_iteration_time\n time_until_now = t.time() - time_start\n while time_until_now + next_iteration_max_time < time_limit:\n depth += 1\n iteration_start_time = t.time()\n last_good_move = max_move\n max_move, val = self.search_alg.search(self, depth, True)\n if val == float('inf'):\n break\n if val == float('-inf'):\n max_move = last_good_move\n break\n last_iteration_time = t.time() - iteration_start_time\n next_iteration_max_time = 4 * last_iteration_time\n time_until_now = t.time() - time_start\n self.perform_move(maximizing_player=True, move=max_move)\n return max_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n \n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n\n self.time_left = time_left\n legal_moves = game.get_legal_moves()\n if not legal_moves:\n return (-1, -1)\n\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_score = float(\"-inf\")\n best_move = legal_moves[0]\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n self.search_depth = 1\n while best_score is not float(\"inf\"):\n best_move = self.alphabeta(game, self.search_depth, alpha=float(\"-inf\"), beta=float(\"inf\"))\n self.search_depth += 1\n except SearchTimeout:\n return best_move\n pass # Handle any actions required after timeout as needed\n\n # Return the best move 
from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n best_move = self.minimax(game, self.search_depth)\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n best_move = (-1, -1)\n\n if self._iterative:\n depth = 0\n try:\n while depth < len(game.get_blank_spaces()):\n depth += 1\n best_move = self.alphabeta(game, depth)\n except SearchTimeout:\n if depth == 1:\n print(\"Warning: An iterative AlphaBetaPlayer timed out on \"\n \"the first level. Match time_limit too short?\")\n if depth >= 1:\n self._depths.append(depth)\n else:\n try:\n best_move = self.alphabeta(game, self.search_depth)\n except SearchTimeout:\n print(\"Warning: A fixed-depth AlphaBetaPlayer timed out. 
\"\n \"Match time_limit too short or search_depth too high.\")\n\n return best_move", "def minimax(self, game, depth):\n \n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n \n return self.minimax_helper(game, self.search_depth, maximizing_player = True)[1]", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n\n best_move = (-1, -1)\n\n while self.time_left() > self.TIMER_THRESHOLD:\n self.update(game)\n\n if not self.is_terminal(game):\n moves = game.get_legal_moves()\n scores = [(self._plays[game.forecast_move(m)], m) for m in moves]\n _, best_move = max(scores, key=lambda s: s[0])\n\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n # check if new game, reset if it is\n self.check_reset(game)\n best_move = (-1, -1)\n depth = 1\n\n try:\n while True:\n # each completed iteration will be more accurate than previous, so update best move with each iteration's\n # root node (max) result\n best_move = self.alphabeta(game=game, depth=depth)\n\n # return best move if no result\n if best_move == (-1, -1):\n # DEBUG_PRINT\n # print('D={} MOVING TO=> {} from TERM\\n'.format(depth, best_move))\n return best_move\n\n # global stats\n # stats['max_search_depth'] = max(depth, stats['max_search_depth'])\n depth += 1\n\n\n except SearchTimeout:\n # DEBUG_PRINT\n # print('D={} MOVING TO=> {}\\n\\n\\n'.format(stats['max_search_depth'], best_move))\n pass\n\n return best_move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n self.best_move = (-1, -1)\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n move = self.minimax(game, self.search_depth)\n return move\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return self.best_move", "def cutoff_search(board, depth):\r\n \r\n global start_time\r\n global depth_limit\r\n global winner_white\r\n global winner_black\r\n \r\n test = False\r\n current_time = float(time.time())\r\n if depth != 0 and (current_time - start_time) >= 10.0:\r\n test = True\r\n \r\n b_castle = 0\r\n w_castle = 0\r\n \r\n for i in winner_white:\r\n if i in board.white:\r\n w_castle += 1\r\n if w_castle == 2:\r\n test = True\r\n for i in winner_black:\r\n if i in board.black:\r\n b_castle += 1\r\n if b_castle == 2:\r\n test = True\r\n if (len(board.white) == 0) or (len(board.black) == 0):\r\n test = True\r\n \r\n if depth >= depth_limit:\r\n test = True\r\n \r\n return test", "def test_minimax(self):\n h, w = 7, 7 # board size\n starting_location = (2, 3)\n adversary_location = (0, 0) # top left corner\n iterative_search = False\n method = \"minimax\"\n\n # The agent under test starts at position (2, 3) on the board, which\n # gives eight (8) possible legal moves [(0, 2), (0, 4), (1, 1), (1, 5),\n # (3, 1), (3, 5), (4, 2), (4, 4)]. The search function will pick one of\n # those moves based on the estimated score for each branch. 
The value\n # only changes on odd depths because even depths end on when the\n # adversary has initiative.\n value_table = [[0] * w for _ in range(h)]\n value_table[1][5] = 1 # depth 1 & 2\n value_table[4][3] = 2 # depth 3 & 4\n value_table[6][6] = 3 # depth 5\n heuristic = makeEvalTable(value_table)\n\n # These moves are the branches that will lead to the cells in the value\n # table for the search depths.\n expected_moves = [set([(1, 5)]),\n set([(3, 1), (3, 5)]),\n set([(3, 5), (4, 2)])]\n\n # Expected number of node expansions during search\n counts = [(8, 8), (24, 10), (92, 27), (418, 32), (1650, 43)]\n\n # Test fixed-depth search; note that odd depths mean that the searching\n # player (student agent) has the last move, while even depths mean that\n # the adversary has the last move before calling the heuristic\n # evaluation function.\n for idx in range(5):\n test_depth = idx + 1\n agentUT, board = self.initAUT(test_depth, heuristic,\n iterative_search, method,\n loc1=starting_location,\n loc2=adversary_location)\n\n # disable search timeout by returning a constant value\n agentUT.time_left = lambda: 1e3\n _, move = agentUT.minimax(board, test_depth)\n\n num_explored_valid = board.counts[0] == counts[idx][0]\n num_unique_valid = board.counts[1] == counts[idx][1]\n\n self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(\n method, test_depth, counts[idx][0], board.counts[0]))\n\n self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(\n method, test_depth, counts[idx][1], board.counts[1]))\n\n self.assertIn(move, expected_moves[idx // 2], WRONG_MOVE.format(\n method, test_depth, expected_moves[idx // 2], move))", "def search(self, time_budget: int) -> None:\n start_time = clock()\n num_rollouts = 0\n\n # do until we exceed our time budget\n while clock() - start_time < time_budget:\n node, state = self.select_node()\n turn = state.turn()\n outcome = self.roll_out(state)\n self.backup(node, turn, outcome)\n num_rollouts += 1\n run_time = clock() - start_time\n node_count = self.tree_size()\n self.run_time = run_time\n self.node_count = node_count\n self.num_rollouts = num_rollouts", "def iterative_depth_search(self, board, player, t_max=30, min_depth=4, stop_at_depth=False):\n\n\t\tt_elapsed = 0.0\n\t\tbest_move, max_depth = None, 1\n\t\talpha, beta = -float('inf'), float('inf')\n\n\t\twhile max_depth <= min_depth or t_elapsed <= t_max:\n\t\t\tif stop_at_depth and max_depth > min_depth:\n\t\t\t\tbreak\n\n\t\t\tstart = time.time()\n\t\t\tbest_moves, best_val = self.alpha_beta_search(board, alpha, beta, player, 0, max_depth)\n\t\t\tt_elapsed += time.time() - start\n\t\t\tmax_depth += 1\n\t\t\tself.update()\n\n\t\t\t# Checkmate found.\n\t\t\tif abs(best_val) == float('inf'):\n\t\t\t\tself.moves_til_checkmate = len(best_moves)\n\t\t\t\tbreak\n\n\t\tbest_move = best_moves[0]\n\n\t\treturn best_move, best_val", "def make_move(self, time_limit, players_score):\n finish_time = time.time() + time_limit\n depth = 1\n best_move = (-np.inf, (-1, 0))\n while True:\n for direction in self.directions:\n initial_state = utils.State(self.board, direction, self.pos, self.current_turn,\n self.fruits_on_board_dict,\n finish_time)\n try:\n outcome = self.minimax_algo.search(initial_state, depth, True)\n if outcome[0] > best_move[0]:\n best_move = outcome\n except TimeoutError:\n self.board[self.pos[0]][self.pos[1]] = -1\n self.pos = (self.pos[0] + best_move[1][0], self.pos[1] + best_move[1][1])\n self.board[self.pos[0]][self.pos[1]] = 1\n\n return best_move[1]\n depth += 1\n # 
print('bigger_depth : {} '.format(depth))", "def get_move(self, game, legal_moves, time_left):\n\n self.time_left = time_left\n \n options = game.get_legal_moves()\n assert options == legal_moves, \"Mismatched moves\"\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n score, move = None, random.choice(legal_moves) if len(legal_moves) > 0 else None\n try:\n # Iterative deepening with Quiessance search:\n if self.iterative is True:\n results = deque(maxlen=3)\n for depth in range (self.search_depth, 25):\n score, move = self.dosearch(game, depth)\n results.append((score, move))\n if self.quiessant_search is True:\n if len(results) >=3 and all(x[1] == move for x in results):\n break\n elif score == float('-inf') or score == float ('inf'):\n break\n if self.time_left() < self.TIMER_THRESHOLD:\n break\n else:\n score, move = self.dosearch(game, self.search_depth)\n assert score is not None\n \n if len (options) > 0:\n assert not (move is None or move is (-1,-1)), \"Move ({}, {}) for '{}/{}' cannot be None or (-1,-1) if options ({}) exist\".format(move, score, self.method, self.score, options)\n assert move in options, \"Move ({}, {}) for '{}/{}' not from existing list of moves ({})\".format(move, score, self.method, self.score, options)\n except Timeout:\n # Handle any actions required at timeout, if necessary\n pass\n\n # Return the best move from the last completed search\n # (or iterative-deepening search iteration)\n return move", "def get_move(self, game, legal_moves, time_left):\n\n self.time_left = time_left\n move = (-1, -1) #Default\n\n # Perform any required initializations, including selecting an initial\n # move from the game board (i.e., an opening book), or returning\n # immediately if there are no legal moves\n\n max_depth = 0\n try:\n # The search method call (alpha beta or minimax) should happen in\n # here in order to avoid timeout. 
The try/except block will\n # automatically catch the exception raised by the search method\n # when the timer gets close to expiring\n if self.iterative:\n #Perform iterative search\n num_of_remaining_moves = len(game.get_blank_spaces())\n for depth in range(1,num_of_remaining_moves):\n if self.time_left() <= self.TIMER_THRESHOLD:\n return move\n\n if self.method == 'alphabeta':\n iterative_best_score, iterative_best_move = self.alphabeta(game, depth)\n else:\n iterative_best_score, iterative_best_move = self.minimax(game, depth)\n\n #Stores score and move of the deepest search\n score = iterative_best_score\n move = iterative_best_move\n max_depth = depth\n else:\n #Perform fixed-depth search\n if self.method == 'alphabeta':\n score, move = self.alphabeta(game, self.search_depth)\n else:\n score, move = self.minimax(game, self.search_depth)\n\n except Timeout:\n # Handle any actions required at timeout, if necessary\n pass\n\n return move", "def get_move(self, game, time_left):\n self.time_left = time_left\n\n # Initialize the best move so that this function returns something\n # in case the search fails due to timeout\n if not game.get_legal_moves():\n return (-1,-1)\n best_move = game.get_legal_moves()[0]\n\n try:\n # The try/except block will automatically catch the exception\n # raised when the timer is about to expire.\n return self.minimax(game, self.search_depth)\n\n except SearchTimeout:\n pass # Handle any actions required after timeout as needed\n\n # Return the best move from the last completed search iteration\n return best_move", "def minimax(self, game, depth):\n\n def min_value(game, traversed_depth=1):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n if traversed_depth==depth:\n return self.score(game,self)\n traversed_depth+=1\n v = float(\"inf\")\n for m in game.get_legal_moves():\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n v = min(v, max_value(game.forecast_move(m),traversed_depth))\n return v\n\n def max_value(game, traversed_depth=1):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n if traversed_depth==depth:\n return self.score(game,self)\n traversed_depth+=1\n v = float(\"-inf\")\n for m in game.get_legal_moves():\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n v = max(v, min_value(game.forecast_move(m),traversed_depth))\n return v\n \n return max(game.get_legal_moves(), key=lambda m: min_value(game.forecast_move(m)))", "def heuristic_function(self, game_node: ConnectFourGame, search_depth: int):\n\n # Victory or defeat with a higher depth value is more desirable, because it means less moves are used to reach it\n depth_points = 2520 / (10 - search_depth)\n\n if game_node.winner_id is None:\n # Evaluates empty spaces for any player that is one disc away from a victory\n player_four_in_a_rows = self._determine_near_x_in_a_rows(game_node, game_node.victory_condition)\n ai_count = player_four_in_a_rows.get(self.ai_player_id, 0)\n other_player_count = sum([\n player_four_in_a_rows[player_id]\n for player_id in filter(lambda player_id: player_id != self.ai_player_id, player_four_in_a_rows)\n ])\n if ai_count - other_player_count != 0:\n return (ai_count - other_player_count) * 20\n\n # Evaluates grid positioning, granting bonus points for discs closer to the center of the grid\n max_deviation = floor(game_node.grid.width / 4.0)\n mid = ceil(game_node.grid.width / 2.0)\n ai_count = 0\n other_player_count = 0\n for col in range(mid - max_deviation, mid + max_deviation):\n for 
row in range(0, game_node.grid.height):\n curr_space = game_node.grid.grid_spaces[col][row]\n if curr_space.disc is None:\n break # Short-circuits when empty space in a column is reached\n elif curr_space.disc.player_id == self.ai_player_id:\n ai_count += 1\n else:\n other_player_count += 1\n if ai_count - other_player_count != 0:\n return (ai_count - other_player_count) * 2\n\n return 0\n elif game_node.winner_id == self.ai_player_id:\n return self.winner_heuristic_value + depth_points # Means AI player has won\n else:\n return -self.winner_heuristic_value - depth_points # Means AI player has lost", "def get_move(self, game, time_left):\n self.time_left = time_left\n \n max_depth = game.height * game.width\n \n \n # FIRST, check initial conditions wrt legal_moves\n legal_moves = game.get_legal_moves()\n # If there are no legal_moves return no move\n if len(legal_moves) == 0:\n return (-1,-1)\n # If there's only one legal_move return it, the only choice\n elif len(legal_moves) == 1:\n return legal_moves[0]\n # Otherwise, initialize best_choice at first legal_move\n else:\n best_move = legal_moves[0]\n try:\n for node_number in range(1, max_depth + 1):\n # This exception handing returns the best_move found\n # thus far in the event of a timeout\n \n best_move = self.alphabeta(game, node_number)\n return best_move\n except SearchTimeout:\n pass\n \n # Return the best_move found thus far (or ultimately in the event\n # of exhaustive search completion or timeout)\n return best_move", "def check_move_states(self, player, depth):\n\n if depth >= self.look_ahead:\n return True\n\n for move in gen_moves(player, self.__state.board, self.checker):\n self.__state.push(move)\n winner = self.checker.check_game_over(self.__pid, self.__opponent)\n if winner == self.__opponent:\n return False\n worker = move['xy2']\n if not self.check_build_states(player, worker, depth):\n return False\n self.__state.pop()\n return True" ]
[ "0.6146046", "0.60382676", "0.5797216", "0.57454133", "0.57356983", "0.5671075", "0.56099486", "0.56038743", "0.5551499", "0.5535788", "0.5535788", "0.5535788", "0.5535788", "0.55238247", "0.5503368", "0.5494259", "0.5486273", "0.5470563", "0.546864", "0.54667735", "0.54375833", "0.54149055", "0.5393566", "0.5374527", "0.5367167", "0.5343437", "0.5308711", "0.5291801", "0.5277862", "0.5246445" ]
0.6742982
1
Execute a PYJQ query to extract from each model's results the dict related to the input book_page. Collect labels in span groups at the Spacy doc level and set appropriate extension values on the tokens.
def load_bner_onto_tokens_extension(question, book_page):
    doc = nlp(question)
    doc._.BOOK_PAGE = book_page
    doc.spans["bner_spans"] = []
    itree = IntervalTree()

    #we use pyjq to make a selection over a nested json with the aim to retain from each model the dict associated with the book_page
    ## se appendix 2 in readme
    expr = '[. | to_entries[] | .key as $k | .value[] += {"model" : $k}] | .[].value[] | select (.book_page == '+ str(book_page) +')'
    results = pyjq.all(expr, dict_models_results)

    #we now start consolidation of entities registered at question level
    for prefix in models_priority:
        model_result = [el for el in results if el["model"] == prefix][0]
        bner_q = model_result["bner_question"]
        for ent in bner_q:
            #print(ent)
            #print("------------------------------------------------------------------------------------------")
            prefixed_label = "{}:{}".format(prefix, ent["label"])
            #create a span by char not by token ids because tokenization may differ between models
            span = doc.char_span(ent["char_limits"][0], ent["char_limits"][1], alignment_mode = "expand", label = prefixed_label)
            #if it's medication related, add supplementary details
            if ("drugbank_id" in ent) and (ent["drugbank_id"] != ""):
                span._.IS_MEDICATION = 1
                span._.MEDICATION_DETAILS["drugbank_id"] = ent["drugbank_id"]
            elif ("rxnorm_link" in ent) and (ent["rxnorm_link"] != ""):
                span._.IS_MEDICATION = 1
                span._.MEDICATION_DETAILS["rxnorm_link"] = ent["rxnorm_link"]
            #store this span in a span group for this model name
            doc.spans["bner_spans"].append(span)
            #check if it doesn't overlap alread inserted intervals in itree
            flag_overlaps = itree.overlaps(span.start,span.end)
            if (flag_overlaps is False):
                doc.ents += (span,)
                #store also in the interval tree
                itree[span.start : span.end] = prefixed_label

    #now, for each token, query the itree to get all entities it belongs to
    for tok in doc:
        #entries in the response from the interval tree are triplets (istart,istop,data)
        list_ents_of_token = itree[tok.i]
        list_ents_of_token_onlydata = [el[2] for el in list_ents_of_token]
        tok._.bner.extend(list_ents_of_token)
        #promote some labels to be usable in spacy matchers later on
        if "bionlp13cg:ORGAN" in list_ents_of_token_onlydata:
            tok._.IS_BODY_ORGAN = 1
        elif "bc5cdr:DISEASE" in list_ents_of_token_onlydata:
            tok._.IS_DISEASE = 1
        elif "drugbank:MEDICATION_DRUGBANK" in list_ents_of_token_onlydata:
            tok._.IS_MEDICATION = 1

    return doc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def within_book_search_json(request, book_id):\n query = request.GET.get('q')\n term = query # todo: meta options?\n book = Book.objects.get(pk=book_id)\n\n if not query or len(query) < 3:\n return\n\n # todo: method on objectmanager to search by keyword\n notes = book.notes.filter(\n Q(subject__icontains=term) |\n Q(quote__icontains=term) |\n Q(comment__icontains=term)\n )\n terms = book.terms.filter(\n Q(term__text__icontains=term) |\n Q(term__definition__icontains=term) |\n Q(quote__icontains=term) |\n Q(quote__icontains=term)\n )\n sections = book.sections.filter(\n Q(title__icontains=term) |\n Q(authors__name__icontains=term) |\n Q(subtitle__icontains=term) |\n Q(summary__icontains=term)\n )\n\n results = {'notes': [], 'terms': [], 'sections': []}\n for note in notes:\n results['notes'].append({\n 'title': highlighter.highlight(note.subject, query),\n 'description': highlighter.highlight(note.quote, query, 200),\n 'price': note.get_page_display(),\n 'url': note.get_absolute_url(),\n })\n\n for term in terms:\n results['terms'].append({\n 'title': highlighter.highlight(term.term.text, query),\n 'description': highlighter.highlight(term.quote, query, 200),\n 'price': term.get_page_display(),\n 'url': term.get_absolute_url(),\n })\n\n for section in sections:\n authors = ', '.join(a.name for a in section.authors.all())\n results['sections'].append({\n 'title': highlighter.highlight(section.title, query),\n 'description': highlighter.highlight(authors, query),\n 'price': section.get_page_display(),\n 'url': section.get_absolute_url(),\n })\n\n return JsonResponse({\n 'results': {\n 'books': {\n 'name': 'Notes',\n 'results': results['notes'],\n },\n 'authors': {\n 'name': 'Terms',\n 'results': results['terms'],\n },\n 'sections': {\n 'name': 'Sections',\n 'results': results['sections'],\n },\n }\n })", "def process_nbk_html(self, limit):\n model = Model(self.graph)\n c = 0\n books_not_found = set()\n for nbk in self.book_ids:\n c += 1\n nbk_id = 'GeneReviews:'+nbk\n book_item = self.all_books.get(nbk)\n url = '/'.join((self.rawdir, book_item['file']))\n\n # figure out if the book is there; if so, process, otherwise skip\n book_dir = '/'.join((self.rawdir, 'books'))\n book_files = os.listdir(book_dir)\n if ''.join((nbk, '.html')) not in book_files:\n # logger.warning(\"No book found locally for %s; skipping\", nbk)\n books_not_found.add(nbk)\n continue\n logger.info(\"Processing %s\", nbk)\n\n page = open(url)\n soup = BeautifulSoup(page.read())\n\n # sec0 == clinical description\n clin_summary = \\\n soup.find(\n 'div', id=re.compile(\".*Summary.sec0\"))\n if clin_summary is not None:\n p = clin_summary.find('p')\n ptext = p.text\n ptext = re.sub(r'\\s+', ' ', ptext)\n\n ul = clin_summary.find('ul')\n if ul is not None:\n item_text = list()\n for li in ul.find_all('li'):\n item_text.append(re.sub(r'\\s+', ' ', li.text))\n ptext += ' '.join(item_text)\n\n # add in the copyright and citation info to description\n ptext = \\\n ' '.join(\n (ptext,\n '[GeneReviews:NBK1116, GeneReviews:NBK138602, ' +\n nbk_id+']'))\n\n model.addDefinition(nbk_id, ptext.strip())\n\n # get the pubs\n pmid_set = set()\n pub_div = soup.find('div', id=re.compile(r\".*Literature_Cited\"))\n if pub_div is not None:\n ref_list = pub_div.find_all('div', attrs={'class': \"bk_ref\"})\n for r in ref_list:\n for a in r.find_all(\n 'a', attrs={'href': re.compile(r\"pubmed\")}):\n if re.match(r'PubMed:', a.text):\n pmnum = re.sub(r'PubMed:\\s*', '', a.text)\n else:\n pmnum = \\\n re.search(\n r'\\/pubmed\\/(\\d+)$', 
a['href']).group(1)\n if pmnum is not None:\n pmid = 'PMID:'+str(pmnum)\n self.graph.addTriple(\n pmid,\n model.object_properties['is_about'],\n nbk_id)\n pmid_set.add(pmnum)\n reference = Reference(\n self.graph,\n pmid, Reference.ref_types['journal_article'])\n reference.addRefToGraph()\n\n # TODO add author history, copyright, license to dataset\n\n # TODO get PMID-NBKID equivalence (near foot of page),\n # and make it \"is about\" link\n # self.gu.addTriple(\n # self.graph, pmid,\n # self.gu.object_properties['is_about'], nbk_id)\n # for example: NBK1191 PMID:20301370\n\n # add the book to the dataset\n self.dataset.setFileAccessUrl(book_item['url'])\n\n if limit is not None and c > limit:\n break\n\n # finish looping through books\n\n l = len(books_not_found)\n if len(books_not_found) > 0:\n if l > 100:\n logger.warning(\"There were %d books not found.\", l)\n else:\n logger.warning(\n \"The following %d books were not found locally: %s\",\n l, str(books_not_found))\n logger.info(\n \"Finished processing %d books for clinical descriptions\", c-l)\n\n return", "def process_input_files(list_input_files):\n global dict_models_results\n global list_spacy_docs\n \n for input_file in list_input_files:\n prefix = prefix_from_filename(input_file)\n \n with open(input_file) as f:\n list_cases = json.load(f)\n dict_models_results[prefix] = list_cases\n \n \n #extract list of questions from all vignettes and create a mapping page -> vignette question\n dict_questions = {}\n for prefix, list_cases in dict_models_results.items():\n for vignette in list_cases:\n dict_questions[vignette[\"book_page\"]] = vignette[\"question\"]\n \n \n for book_page,question in dict_questions.items():\n doc_q = load_bner_onto_tokens_extension(question, book_page)\n list_spacy_docs.append(doc_q)\n \n return", "def processVS(self, model=\"okapi\", l = 0.5):\n self.results[model] = {}\n for query in self.queries:\n res = {}\n for doc in query.docs.values():\n sum_otf = 0.0\n for term in doc.terms:\n if model == \"okapi\":\n sum_otf += self.okapiTF(term.tf, doc.dlen, self.avgdlen)\n if model == \"tfidf\":\n sum_otf += self.tfidf(term.tf, term.df, doc.dlen, self.avgdlen, self.num_docs)\n if model == \"bm25\":\n sum_otf += self.bm25(term.tf, term.df, query.query_tf[term.term], doc.dlen, self.avgdlen, self.num_docs)\n res[doc.id] = sum_otf\n self.results[model][query.id] = res", "def parseDocumentsForW2ui(response, obj_type):\n records = []\n #create a list of dicts\n for record in response[\"data\"]:\n records.append(record.to_mongo())\n return parseDocObjectsToStrings(records, obj_type)", "def _scrape(self):", "def docs_from_records(records):\n\t# Need selenium server running:\n\t# java -jar selenium-server-standalone-2.35.0.jar -role node -hub http://localhost:4444/grid/register -browser browserName=htmlunit\n\tbrowser = webdriver.Remote(\"http://127.0.0.1:4444/wd/hub\",desired_capabilities=webdriver.DesiredCapabilities.HTMLUNITWITHJS)\n\tfor r in records:\n\t\tfor p in r['proceedings']:\n\t\t\tp['fetched_document'] = None\n\t\t\tif p['document'] is not None:\n\t\t\t\tp['fetched_document'] = fetch_pdf(['document'])\n\t\tyield r", "def book_search_results(key, title):\n\n payload = {\"key\": key, \"q\": title}\n query = requests.get(\"https://www.goodreads.com/search.xml\", params=payload)\n\n doc = untangle.parse(query.content)\n\n results = doc.GoodreadsResponse.search.results\n\n books = []\n\n if len(results) > 0:\n for work in results.work:\n book = {}\n\n book['title'] = work.best_book.title.cdata\n book['book_id'] 
= int(work.best_book.id.cdata.encode('utf8'))\n book['author_id'] = int(work.best_book.author.id.cdata.encode('utf8'))\n book['author_fname'] = work.best_book.author.name.cdata\n book['image_url'] = work.best_book.image_url.cdata.encode('utf8')\n books.append(book)\n\n return books", "def show_books_by_page(page):\n result = {'books': query.get_book_list()}\n return json.dumps(result, ensure_ascii=False)", "def library_searched():\n\n searched_result = []\n \n updated_books = duplicated_code()\n\n if request.method == 'POST':\n if request.form['type_search'] == 'book':\n book_title = request.form['search']\n for book in updated_books:\n if book['title'] == book_title:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n elif request.form['type_search'] == 'genre':\n book_genre = request.form['search']\n for book in updated_books:\n if book['genre'] == book_genre:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n elif request.form['type_search'] == 'author':\n book_author = request.form['search']\n for book in updated_books:\n if book['author_name'] == book_author:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n else:\n return render_template(\"library_searched.html\")", "def parse(self, response, **kwargs):\n title = response.xpath('//*[@id=\"wrap\"]/h1/text()').extract_first()\n if title:\n url_to_full_version = response._get_url()\n first_160 = ''.join(response.xpath('//*[@id=\"woe\"]/section/div/p/text()').extract())[:160]\n base_date = response.xpath('//*[@id=\"wrap\"]/div/div[2]/text()').extract_first()\n date_formatted = conf.exec_func_chain(base_date,\n [conf.clean_records_regex,\n lambda v: v[0:-2],\n lambda v: conf.parse_dtts(v, '%b %d, %Y')])\n\n tags = response.xpath('//*[@id=\"woe\"]/section[3]/div/div/a/text()').extract()\n authors_section = response.xpath('//*[@id=\"wrap\"]/div/div[1]/div/span/a')\n for row in authors_section:\n full_author_url = Selector(text=row.extract()).xpath('///@href') \\\n .extract_first()\n author_fullname = conf.clean_records_regex(\n Selector(text=row.extract()).xpath('///span/text()').extract_first())\n if date_formatted >= conf.crawl_date[0].get('LastExecutionDate'):\n conf.write_data_append('articles.json', json.dumps({'title': title,\n 'urlFullVersion': url_to_full_version,\n 'first160': first_160,\n 'dateFormatted': date_formatted,\n 'tags': tags,\n 'authorUrl': f\"{conf.gd_base_url}\"\n f\"{full_author_url}\",\n 'authorName': author_fullname,\n 'author_key': full_author_url.rsplit('/')[-2]\n }))", "def get_ebooks(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n ebooks = []\n for book in json_data['docs']:\n if book['ebook_count_i'] >= 1:\n ebooks.append({'title': book['title'], 'ebook_count': book['ebook_count_i']})\n return ebooks", "def scrap_book_info(book_url):\n response = requests.get(book_url)\n page = response.content\n soup = BeautifulSoup(page, \"html.parser\")\n\n return {\n \"product_page_url\": book_url,\n \"upc\": soup.select_one(\"table tr:nth-child(1) > td\").text,\n \"title\": soup.select_one(\"article div.col-sm-6.product_main > h1\").text,\n \"price_including_tax\": soup.select_one(\"table tr:nth-child(4) > td\").text,\n \"price_excluding_tax\": soup.select_one(\"table tr:nth-child(3) > td\").text,\n \"number_available\": 
number_only(soup.select_one(\"#content_inner > article > table tr:nth-child(6) > td\").text),\n \"product_description\": soup.select_one(\"article > p\").text,\n \"category\": soup.select_one(\"#default > div > div > ul > li:nth-child(3) > a\").text,\n \"review_rating\": word_to_number(soup.select_one(\".star-rating\")[\"class\"][1]),\n \"image_url\": remove_suffix(soup.select_one(\"#product_gallery img\")[\"src\"]),\n }", "def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n # parse html response\n url = \"https://www.med.navy.mil/directives/Pages/Publications.aspx\"\n base_url = 'https://www.med.navy.mil'\n parsed_docs = []\n doc_name_list = []\n if (page_url.find(\"Publications\") != -1):\n doc_type = \"NAVMED\"\n elif (page_url.find(\"BUMEDNotes\") != -1):\n doc_type = \"BUMEDNOTE\"\n elif (page_url.find(\"BUMEDInstructions\") != -1):\n doc_type = \"BUMEDINST\"\n cac_required = ['CAC', 'PKI certificate required', 'placeholder', 'FOUO']\n page = requests.get(page_url, verify=CERTIFICATE_DIR + '/cat3.pem')\n soup = bs4.BeautifulSoup(page.content, 'html.parser')\n webpart = soup.find(id=\"onetidDoclibViewTbl0\")\n items = webpart.find_all('a')\n meta = webpart.find_all(lambda tag: tag.name == 'td' and tag.get('class') == ['ms-vb2'])\n meta_list = list(meta)\n meta_list = [remove_html_tags(str(t)) for t in meta_list]\n meta_list = [str(t).encode('ascii', 'ignore').decode('ascii') for t in meta_list]\n meta_list = [x.replace(\"\\r\\n\", \" \") for x in meta_list]\n if (doc_type == \"NAVMED\"):\n n = 3\n elif (doc_type == \"BUMEDINST\" or doc_type == \"BUMEDNOTE\"):\n n = 4\n meta_ = [meta_list[i:i + n] for i in range(0, len(meta_list), n)]\n if (doc_type == \"NAVMED\"):\n subject = webpart.find_all(lambda tag: tag.name == 'td' and tag.get('class') == ['ms-vb-title'])\n name_list = list(subject)\n name_list = [remove_html_tags(str(t)) for t in name_list]\n name_list = [str(t).encode('ascii', 'ignore').decode('ascii') for t in name_list]\n subnum = [str(t[1]).split()[:2] for t in meta_]\n title_ = [str(t[1]).split()[2:] for t in meta_]\n title = [' '.join(t) for t in title_]\n title = [str(t).replace(',', '') for t in title]\n date = [t[0] for t in meta_]\n metadata = zip(subnum, title, date, name_list)\n metadata = [list(a) for a in metadata]\n elif (doc_type == \"BUMEDINST\"):\n subject = webpart.find_all(lambda tag: tag.name == 'td' and tag.get('class') == ['ms-vb-title'])\n name_list = list(subject)\n name_list = [remove_html_tags(str(t)) for t in name_list]\n name_list = [str(t).encode('ascii', 'ignore').decode('ascii') for t in name_list]\n metadata = list(zip(name_list, meta_))\n elif (doc_type == \"BUMEDNOTE\"):\n metadata = meta_\n item_list = list(items)\n pdf_links = [link['href'] for link in item_list if link['href'].endswith(('pdf', 'html'))]\n pdf_links = [\"https://www.med.navy.mil\" + a for a in pdf_links]\n pdf_links = [str(a).replace(' ', '%20') for a in pdf_links]\n if (doc_type == \"BUMEDINST\" or doc_type == \"BUMEDNOTE\"):\n metadata = [list(ele) for ele in metadata]\n for i in range(0, len(metadata)):\n metadata[i].append(pdf_links[i])\n for item in metadata:\n if (doc_type == \"NAVMED\"):\n pdf_di = DownloadableItem(doc_type='pdf', web_url=item[4])\n if (str(item[3])[0].isdigit()):\n doc_name = \"NAVMED P-\" + str(item[3]).split()[0]\n doc_num = \"P-\" + str(item[3]).split()[0]\n if (doc_name in doc_name_list):\n number_of_times = sum(1 for s in doc_name_list if doc_name in s)\n doc_name = doc_type + \" \" + doc_num + \"-\" + 
str(number_of_times)\n doc_num = doc_num + \"-\" + str(number_of_times)\n else:\n doc_name = \"NAVMED \" + str(item[0][1]) + \" \" + ' '.join(str(item[3]).split()[:3])\n doc_num == str(item[0][1]) + \" \" + ' '.join(str(item[3]).split()[:3])\n if (doc_name in doc_name_list):\n number_of_times = sum(1 for s in doc_name_list if doc_name in s)\n doc_name = doc_type + \" \" + doc_num + \"-\" + str(number_of_times)\n doc_num = doc_num + \"-\" + str(number_of_times)\n doc_title = str(item[1])\n publication_date = str(item[2])\n cac_login_required = False\n crawler_used = \"navy_med_pubs\"\n source_page_url = page_url\n downloadable_items = [pdf_di]\n version_hash_fields = {\n \"item_currency\": str(item[3]).split('/')[-1], # version metadata found on pdf links\n \"pub_date\": publication_date.strip(),\n \"document_title\": doc_title.strip(),\n \"document_number\": doc_num.strip()\n }\n\n elif (doc_type == \"BUMEDINST\"):\n pdf_di = DownloadableItem(doc_type='pdf', web_url=item[2])\n doc_num = str(item[0]).split()[0]\n doc_name = doc_type + \" \" + doc_num\n doc_title = str(item[1][1])\n publication_date = str(item[1][0])\n if (str(item[2]).endswith('html')):\n cac_login_required = True\n elif (str(item[2]).endswith('pdf')):\n cac_login_required = False\n if (doc_name in doc_name_list):\n number_of_times = sum(1 for s in doc_name_list if doc_name in s)\n doc_name = doc_type + \" \" + doc_num + \"-\" + str(number_of_times)\n doc_num = doc_num + \"-\" + str(number_of_times)\n doc_name_list.append(doc_name)\n elif (doc_type == \"BUMEDNOTE\"):\n pdf_di = DownloadableItem(doc_type='pdf', web_url=item[4])\n doc_num = str(item[0]).split()[1]\n doc_name = doc_type + \" \" + doc_num\n doc_title = str(item[2])\n publication_date = str(item[1])\n cac_login_required = False\n if (doc_name in doc_name_list):\n number_of_times = sum(1 for s in doc_name_list if doc_name in s)\n doc_name = doc_type + \" \" + doc_num + \"-\" + str(number_of_times)\n doc_num = doc_num + \"-\" + str(number_of_times)\n doc_name_list.append(doc_name)\n version_hash_fields = {\n \"doc_name\": doc_name, # version metadata found on pdf links\n \"pub_date\": publication_date.strip(),\n \"document_title\": doc_title.strip(),\n \"document_number\": doc_num.strip()\n }\n version_hash_raw_data = version_hash_fields\n doc = Document(\n doc_name=re.sub(',', '', doc_name.strip()),\n doc_title=re.sub('\\\\\"', '', doc_title),\n doc_num=re.sub(',', '', doc_num.strip()),\n doc_type=re.sub(',', '', doc_type.strip()),\n publication_date=publication_date,\n cac_login_required=cac_login_required,\n crawler_used=\"navy_med_pubs\",\n source_page_url=page_url.strip(),\n version_hash_raw_data=version_hash_fields,\n downloadable_items=[pdf_di]\n )\n parsed_docs.append(doc)\n return parsed_docs", "def extract_data():\n books = WebScraper().get_top_100_data()\n time.sleep(2)\n BookDetailsWebScrapper().save_book_details(books)\n _save_extract_state(books)", "def tag_conjunction_entities(annotated_pages):\n for page_id in annotated_pages:\n page = Page.objects(id=page_id).first()\n #page = db_conn.pages.find_one({\"_id\":page_id}) # TODO: refactor\n annotation_ids = [p.id for p in page[\"annotations_ids\"]]\n all_annotations = list(Annotation.objects(id__in=annotation_ids))\n # retrieve meta-annotations from that page\n meta_annotations = list(Annotation.objects(id__in=annotation_ids, entity_type=\"meta-annotation\"))\n #all_annotations = list(db_conn.annotations.find({\"_id\":{\"$in\":annotation_ids}})) # TODO: refactor\n #meta_annotations = 
list(db_conn.annotations.find({\"_id\":{\"$in\":annotation_ids} # TODO: refactor\n # ,\"entity_type\":\"meta-annotation\"}))\n if(len(meta_annotations)>0):\n logger.debug(\"Meta-annotations: %s\"%meta_annotations)\n for meta_annotation in meta_annotations:\n logger.info(\"Processing meta-annotation %s\"%meta_annotation[\"id\"])\n line_span = sorted(list(set([(position[\"page_id\"], position[\"line_n\"]) \n for position in meta_annotation[\"positions\"]])))\n top_entities_ids = [ann.id for ann in meta_annotation[\"top_entities\"]]\n top_entities = list(Annotation.objects(id__in=top_entities_ids))\n #top_entities = [db_conn.annotations.find_one({\"_id\":top_annotation_id}) \n # for top_annotation_id in meta_annotation[\"top_entities\"]]\n tokens = []\n for page_obj, line_n in line_span:\n page = Page.objects(id=page_obj.id).first()\n #page = db_conn.pages.find_one({\"_id\":page_id})\n for line in page[\"lines\"]:\n if line[\"line_number\"]==line_n:\n tokens.append((page_obj,line_n,line[\"tokens\"]))\n try:\n for entity in top_entities:\n assert entity is not None\n true_conjunctions = []\n meta_annotation_start = (top_entities[0][\"positions\"][0][\"page_id\"]\n ,top_entities[0][\"positions\"][0][\"line_n\"]\n ,top_entities[0][\"positions\"][0][\"start\"])\n meta_annotation_end = (top_entities[-1][\"positions\"][-1][\"page_id\"]\n ,top_entities[-1][\"positions\"][-1][\"line_n\"]\n ,top_entities[-1][\"positions\"][-1][\"end\"])\n conjunctions = [(token,page,line) for page,line,toks in tokens for token in toks\n if(token[\"offset_start\"] >= meta_annotation_start[2] and token[\"offset_end\"] <= meta_annotation_end[2])]\n true_conjunctions += [(page,line,token) for token,page,line in conjunctions \n if not is_annotated(page,line,token,all_annotations)]\n if(len(true_conjunctions)>0):\n logger.debug(\"Conjunctions found: %s\"%true_conjunctions)\n conjunction_annotations = []\n all_ann_ids = [annotation[\"ann_id\"] for annotation in all_annotations \n if '+' not in annotation[\"ann_id\"] ]\n identifier_counter = int(sorted(all_ann_ids, key=lambda x: int(x.replace('T','')))[-1].replace(\"T\",\"\"))\n logger.debug(sorted(all_ann_ids, key=lambda x: int(x.replace('T','')))[-1])\n for page_obj, line_n, token in true_conjunctions:\n identifier_counter += 1\n conjunction_annotation = Annotation(entity_type=\"conjunction\"\n , ingestion_timestamp=datetime.utcnow()\n , annotation_ingester_version=__version__\n , pageid=meta_annotation.pageid\n , filename=meta_annotation.filename\n , bid=meta_annotation.bid)\n conjunction_annotation.surface = token[\"surface\"]\n conjunction_annotation.ann_id = \"T%i\"%identifier_counter\n conjunction_annotation.positions.append(PagePosition(page_id = page_obj\n , start = token[\"offset_start\"]\n , end = token[\"offset_end\"]\n , line_n = line_n))\n conjunction_annotation.save()\n conjunction_annotations.append(conjunction_annotation)\n logger.info(\"(Page: %s) %i conjunction annotations were created and stored in MongoDB\"%(page_obj.id\n , len(conjunction_annotations)))\n logger.debug(\"N %i of top entities before adding conjunction entities\"%len(meta_annotation[\"top_entities\"]))\n meta_annotation[\"top_entities\"] += conjunction_annotations\n logger.debug(\"N %i of top entities after adding conjunction entities\"%len(meta_annotation[\"top_entities\"]))\n Annotation.objects(id=meta_annotation.id).update_one(set__top_entities = meta_annotation[\"top_entities\"])\n for conj_annotation in conjunction_annotations:\n for position in conj_annotation[\"positions\"]:\n page = 
Page.objects(id=position.page_id.id).first()\n page[\"annotations_ids\"].append(conj_annotation)\n page.save()\n except AssertionError as e:\n #raise e\n logger.warning(\"The meta-annotation %s has no top-level entities and generated the following error: %s\"%(meta_annotation[\"_id\"],e))\n except Exception as e:\n raise e", "def extract_competencies(pdf: PDFQuery) -> List[Dict]:\n\n page_count = get_page_count(pdf)\n results: List[Dict] = []\n\n for i in range(page_count - 1):\n # Limit the extraction to the current page and only extract text\n selectors = [\n ('with_parent', 'LTPage[page_index=\"%s\"]' % (i)),\n ('with_formatter', 'text'),\n ]\n\n # Try to find a \"Modulnummer\" on that page. If there is none, then it's\n # not a module-description page.\n try:\n selectors.append(\n get_selector_for_element_text(\n pdf, i, (\"Modulnummer\",), (\"Titel\",), (Point(\n 120, 0), Point(\n 490, 1)), \"id\"))\n except ValueError as err:\n eprint(\n \"No \\\"Modulnummer\\\" found on page %s, skipping...\" %\n (i + 1))\n continue\n\n # Find the module title\n try:\n selectors.append(\n get_selector_for_element_text(\n pdf, i, (\"Titel\",), (\"Leistungspunkte\", \"Credits\"), (Point(\n 120, 0), Point(\n 490, 1)), \"name\"))\n except ValueError as err:\n eprint(\"Error parsing \\\"Titel\\\": %s\" % (err))\n\n # Find the module competencies\n try:\n selectors.append(\n get_selector_for_element_text(\n pdf,\n i,\n (\"Lernziele / Kompetenzen\",\n \"Lernziele/Kompetenzen\"),\n (\"Voraussetzungen\",\n ),\n (Point(\n 120,\n 0),\n Point(\n 490,\n 1)),\n \"competencies\"))\n except ValueError as err:\n eprint(\"Error parsing \\\"Lernziele / Kompetenzen\\\": %s\" % (err))\n\n # Find the module requirements\n try:\n selectors.append(\n get_selector_for_element_text(\n pdf, i, (\"Voraussetzungen\",), (\"Niveaustufe\",), (Point(\n 120, 0), Point(\n 490, 1)), \"requirements\"))\n except ValueError as err:\n eprint(\"Error parsing \\\"Voraussetzungen\\\": %s\" % (err))\n\n # Do the extraction\n page_results: Dict = pdf.extract(selectors)\n\n # Add the pagenumber for convenience reasons\n page_results['page'] = i + 1\n\n # Trim extrated text\n page_results['id'] = page_results['id'].strip()\n page_results['name'] = page_results['name'].strip()\n\n # Split the extracted sentences (which also does a trim to each\n # sentence)\n page_results['competencies'] = split_sentences(\n page_results['competencies'])\n page_results['requirements'] = split_sentences(\n page_results['requirements'])\n\n results.append(page_results)\n\n return results", "def get_auto_anno_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n languages = ['English', 'english']\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,language__in = languages,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def parse_document(self, response):\n document = response.meta['document']\n document['title'] = ' '.join(response.css('p.s32B251D').css(\n 'span.s7D2086B4 ::text').extract())\n paragraphs = []\n for paragraph in response.css('p'):\n spans = [span for span in paragraph.css('span ::text').extract()\n if span != u'\\xa0' and span != '']\n if len(spans):\n paragraphs.append(u' 
'.join(spans))\n document['sentences'] = paragraphs\n yield document", "def fetch_self(self):\r\n self.parsed_doc['names'] = self.fetch_candidate_name() \r\n self.parsed_doc['phones'] = self.fetch_phone_numbers() \r\n self.parsed_doc['emails'] = self.fetch_emails() \r\n self.parsed_doc['github'] = self.fetch_github() \r\n self.parsed_doc['linkedin'] = self.fetch_linkedin() \r\n self.parsed_doc['degrees'] = self.fetch_degrees() \r\n self.parsed_doc['skills'] = self.fetch_skills() \r\n self.parsed_doc['education'] = self.fetch_education() \r\n self.parsed_doc['languages'] = self.fetch_languages() \r\n self.parsed_doc['addresses'] = self.fetch_address() \r\n self.parsed_doc['raw_resume'] = self.stringtext", "def get_books_data():\n entry = mongo.db.Books\n output = list()\n look_up_type = None\n if 'title' in request.args:\n look_up_type = 'title'\n if len(request.args['title']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['title'].strip('\"')\n title = entry.find({'title': {'$regex': value}})\n if title:\n for book in title:\n output.append({'title': book['title']})\n elif 'related_books' in request.args:\n look_up_type = 'similar_books'\n if len(request.args['related_books']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['related_books'].strip('\"')\n related_books = entry.find(\n {'similar_books': {'$regex': value}})\n if related_books:\n for related in related_books:\n for link in related['similar_books']:\n if value in link:\n output.append(({'similar_books': link}))\n elif 'author' in request.args:\n look_up_type = 'author'\n if len(request.args['author']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['author'].strip('\"')\n authors = entry.find({'author': {'$regex': value}})\n if authors:\n for name in authors:\n output.append({'author': name['author']})\n if len(output) == 0:\n return render_template('error.html', message=\"No Entries Found\"), 400\n return render_template('gottenBooks.html', output=output, look_up_type=look_up_type), 200", "def scrape_one_book(self, url):\n\n if url in self.url_to_explore:\n self.url_to_explore.remove(url)\n req = requests.get(url, headers = self.headers).content\n soup = BeautifulSoup(req, 'html5lib')\n soupbody = soup.body\n\n book_data = {}\n # get book url\n book_url = url\n book_data[\"url\"] = book_url\n\n # get book title\n book_title = soupbody.find('h1', attrs={'id':'bookTitle'}).text.strip()\n if book_title:\n book_data[\"title\"] = book_title\n\n # # get book id\n reg = 'https://www.goodreads.com/book/show/([0-9]+)'\n book_id = re.search(reg, url).group(1)\n book_data[\"id\"] = book_id\n\n # get book ISBN\n book_databox = soupbody.find('div', attrs={'id':'bookDataBox'})\n if book_databox:\n all_float_divs = book_databox.find_all('div',\n attrs = {'class' : 'clearFloats'})\n book_isbn = ''\n for div in all_float_divs:\n title = div.find('div',\n attrs = {'class':'infoBoxRowTitle'}).text.strip()\n if title == 'ISBN':\n book_isbn = div.find('div',\n attrs = {'class':'infoBoxRowItem'}).contents[0].strip()\n book_data[\"ISBN\"] = book_isbn\n\n # get book author url and author name\n author_name_container = soupbody.find('div',\n attrs = {'class':\"authorName__container\"})\n if author_name_container:\n all_authors = author_name_container.find_all('a',\n href = True, attrs = {'class':\"authorName\"})\n cur_author_url = []\n cur_author_name = []\n for author in 
all_authors:\n cur_author_url.append(author['href'])\n name = author.find('span', attrs = {'itemprop':'name'}).text.strip()\n cur_author_name.append(name)\n book_data[\"authorURLs\"] = cur_author_url\n book_data[\"author_names\"] = cur_author_name\n\n # get book rating and review\n book_meta = soupbody.find('div', attrs = {'id':'bookMeta'})\n if book_meta:\n rating = book_meta.find('span',\n attrs = {'itemprop':'ratingValue'}).text.strip()\n book_data[\"rating\"] = rating\n\n book_rating_count_container = book_meta.find('meta',\n attrs = {'itemprop':'ratingCount'})\n if book_rating_count_container:\n book_rating_count = book_rating_count_container['content']\n book_data[\"rating_count\"] = book_rating_count\n\n book_review_count_container = book_meta.find('meta',\n attrs = {'itemprop':'reviewCount'})\n if book_review_count_container:\n book_review_count = book_review_count_container['content']\n book_data[\"review_count\"] = book_review_count\n\n # get book image\n image_tag = soupbody.find('img', attrs = {'id':'coverImage'})\n if image_tag:\n image_src = image_tag['src']\n book_data[\"bookImage\"] = image_src\n # print(authorLink.span.text)\n\n # get related_books\n related_works_container = soupbody.find('div', id=re.compile('relatedWorks-'))\n if related_works_container:\n related_books_div = related_works_container.find('div', class_='bigBoxBody')\n if related_books_div:\n related_books_carousel = related_books_div.find('div', class_='bookCarousel')\n if related_books_carousel:\n carousel_row = related_books_carousel.find('div', class_='carouselRow')\n if carousel_row:\n related_books_list_li = carousel_row.find('ul').find_all('li')\n related_books = []\n for item in related_books_list_li:\n link = item.find('a', href = True)['href']\n self.url_to_explore.add(link)\n related_books.append(link)\n book_data[\"similar_books\"] = related_books\n\n self.data_collection.push_to_collection(book_data)\n print(\"Book successfully scraped: \" + book_title)", "def annotationlabel(request,action=None):\n\n username = request.session['username']\n mode1 = request.session['mode']\n auto_required = request.GET.get('ns_id', None)\n mode = NameSpace.objects.get(ns_id=mode1)\n\n # print('mode',mode1)\n usecase = request.session['usecase']\n # language = request.GET.get('language',request.session['language'])\n type = 'labels'\n\n if request.method == 'GET' and action.lower() == 'user_labels':\n\n \"\"\"GET request: given the report, the labels annotated by the user are returned\"\"\"\n\n language = request.GET.get('language', request.session['language'])\n user_get = request.GET.get('username',username)\n report_id = request.GET.get('report_id')\n report1 = Report.objects.get(id_report = report_id,language = language)\n # if auto_required == 'Robot':\n # mode = NameSpace.objects.get(ns_id=auto_required)\n if auto_required is not None:\n mode_1 = NameSpace.objects.get(ns_id=auto_required)\n else:\n mode_1 = mode\n json_dict = get_user_gt(user_get,mode_1,report1,language,'labels')\n return JsonResponse(json_dict,safe=False)\n\n elif request.method == 'GET' and action.lower() == 'all_labels':\n\n \"\"\" GET request: given the use case, all the labels associated to that usecase are returned. 
\"\"\"\n\n labels = AnnotationLabel.objects.filter(name=usecase).values('seq_number','label','annotation_mode')\n print(labels)\n json_dict = {}\n if len(labels) > 0:\n\n if mode1 == 'Human' or auto_required == 'Human':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Manual' in el['annotation_mode']:\n # if int(el['seq_number']) > count: # i primi 20 sono inseriti automaticamente\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n if mode1 == 'Robot' or auto_required == 'Robot':\n json_dict['labels'] = []\n for el in labels:\n json_val = {}\n if 'Automatic' in el['annotation_mode']:\n json_val['label'] = (el['label'])\n json_val['seq_number'] = (el['seq_number'])\n json_dict['labels'].append(json_val)\n\n else:\n json_dict['labels'] = []\n\n json_dict['labels'] = sorted(json_dict['labels'], key=lambda json: json['seq_number'])\n print(json_dict)\n return JsonResponse(json_dict)\n\n elif request.method == 'POST' and action.lower() == 'delete':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are removed together with the\n associated groundtruth.\"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting parameters.'}\n return json_response\n to_del = Associate.objects.filter(username=user, ns_id=mode, id_report=report1, language=language)\n if mode1 == 'Human':\n try:\n with transaction.atomic():\n\n if to_del.exists():\n json_response = delete_all_annotation(to_del, user, report1,language, type,mode)\n\n else:\n json_response = {'msg':'nothing to do'}\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred saving the ground_truth and the labels'}\n return JsonResponse(json_response)\n else:\n return JsonResponse(json_response)\n else:\n json_response = restore_robot_annotation(report1, 'labels', user)\n return JsonResponse(json_response)\n\n\n if request.method == 'POST' and action.lower() == 'insert':\n\n \"\"\"PSOT request: given the report, the labels the user annotated are added in the database and a new \n JSON groundtruth is created. \"\"\"\n\n request_body_json = json.loads(request.body)\n report_id = request_body_json['report_id']\n user = User.objects.get(username=username,ns_id=mode)\n language = request.GET.get('language', request.session['language'])\n report1 = Report.objects.get(id_report=report_id,language = language)\n\n if user is None or report1 is None:\n json_response = {'error': 'An error occurred getting the parameters.'}\n return JsonResponse(json_response)\n\n labels_to_save = request_body_json['labels']\n # In this case the user manually deletes all the labels (NOT WITH CLEAR BUTTON) and saves.\n if len(labels_to_save) == 0 and mode1 == 'Human':\n\n \"\"\"If there are not labels to save, if there is a ground truth saved in the database, this is removed,\n otherwise no action is performed. 
\"\"\"\n\n rows = Associate.objects.filter(username = user,ns_id=mode, id_report = report1, language = language)\n if rows.exists():\n try:\n with transaction.atomic():\n json_response = delete_all_annotation(rows,user,report1,language,type,mode)\n\n except Exception as error:\n print(error)\n json_response = {'error': 'An error occurred.'}\n return JsonResponse(json_response, status=500)\n else:\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'Nothing to save.'}\n return JsonResponse(json_response)\n\n if len(labels_to_save) == 0 and mode1 == 'Robot':\n\n \"\"\" If there are not labels to save and the name space is Robot no action is performed and the already \n existing ground-truth is kept \"\"\"\n to_del = Associate.objects.filter(id_report=report1, language=language, username=user, ns_id=mode)\n # print('RESTORE')\n json_response = restore_robot_annotation(report1, 'labels',user)\n return JsonResponse(json_response)\n\n update = True\n\n \"\"\" Check if the user's labels she inserted are as many as the rows already present in the db: \n if they are not: update the annotation: the old annotation is replaced with the new one\n if they are: check if the labels existing are those inserted, in this case nothing is done, otherwise \n the current groundtruth is updated. \"\"\"\n\n existing_rows = Associate.objects.filter(username = user,ns_id=mode, id_report =report1,language =language)\n if existing_rows.exists():\n if existing_rows.count() == len(labels_to_save):\n for label in labels_to_save:\n label1 = AnnotationLabel.objects.get(name=usecase, label=label['label'], seq_number=label['seq_number'])\n if not Associate.objects.filter(username=user,ns_id=mode, seq_number=label1.seq_number, label=label1,\n id_report=report1, language=language).exists():\n update = True\n break\n else:\n update = False\n if update == True:\n try:\n with transaction.atomic():\n # Remove all the existing labels inserted by the user for that report. 
The existing ground truth is kept untile the deletion is successful\n to_del = Associate.objects.filter(username=user,ns_id=mode, id_report=report1,language = language)\n delete_all_annotation(to_del,user,report1,language,type,mode)\n\n json_resp_labels = update_annotation_labels(labels_to_save,usecase,user,report1,language,mode)\n\n jsonDict = serialize_gt(type, usecase, username, report_id,language,mode)\n GroundTruthLogFile.objects.create(username=user,ns_id=mode, id_report=report1, language = language,\n gt_json=jsonDict, gt_type=type,insertion_time=Now())\n\n except (Exception) as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred saving the ground_truth '\n 'and the labels, the transaction rolledback'}\n return JsonResponse(json_response)\n\n else:\n return JsonResponse(json_resp_labels)\n else:\n if mode1 == 'Human':\n if not GroundTruthLogFile.objects.filter(gt_type='labels', username=user, ns_id=mode, id_report=report1,\n language=language).exists():\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab, seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n json_response = {'message': 'ok'}\n else:\n json_response = {'message': 'no changes detected'}\n return JsonResponse(json_response)\n\n elif mode1 == 'Robot':\n\n \"\"\" In this section the name space Robot is handled: If the user is in the AUTOMATIC MODE and the labels\n she inserts are those annotated by the algorithm, this means that she agrees with the annotation of the \n Robot user. 
The annotation does not change, only the insertion time is changed.\"\"\"\n\n try:\n with transaction.atomic():\n # in questa sezione solo se la gt รจ uguale a prima, l'utente acconsente alla gt della macchina\n user_robot = User.objects.get(username='Robot_user', ns_id=mode)\n gt_robot = GroundTruthLogFile.objects.filter(username=user_robot, ns_id=mode,\n id_report=report1, language=language,\n gt_type='labels')\n\n gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels')\n if gt_robot.count() == 1 and not gt.exists():\n # if gt_robot[0].insertion_time == gt[0].insertion_time:\n js = serialize_gt('labels', usecase, username, report1.id_report, language, mode)\n GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report1,\n language=language,\n gt_type='labels').delete()\n\n GroundTruthLogFile.objects.create(gt_json=js, insertion_time=Now(), username=user,\n ns_id=mode, id_report=report1, language=language,\n gt_type='labels')\n\n ass = Associate.objects.filter(username=user, id_report=report1, language=language,\n ns_id=mode).values('label', 'seq_number')\n for el in ass:\n lab = AnnotationLabel.objects.get(label=el['label'], seq_number=el['seq_number'])\n Associate.objects.filter(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n id_report=report1, language=language).delete()\n Associate.objects.create(username=user, ns_id=mode, label=lab,\n seq_number=lab.seq_number,\n insertion_time=Now(), id_report=report1, language=language)\n\n except Exception as error:\n print(error)\n print('rolled back')\n json_response = {'error': 'An error occurred updating labels dates'}\n return JsonResponse(json_response)\n else:\n json_response = {'message': 'dates updated'}\n return JsonResponse(json_response)", "def get_doc_prov(j, gcis_url, refList, orgList):\n doc = ProvEsDocument()\n \n org = requests.get(j['href']).json()\n \n doc_attrs = [\n (\"prov:type\", 'gcis:organization'),\n (\"prov:label\", j['name']),\n (\"prov:location\", \"%s%s\"%(gcis_url, j['uri'])),\n (\"gcis:organization_type_identifier\", j['organization_type_identifier']),\n (\"gcis:country_code\", j['country_code']),\n ]\n orgID = 'bibo:%s' % j['identifier']\n doc.agent(orgID, doc_attrs)\n\n for child in org['children']:\n cOrgURI = child['organization']\n rel = child['relationship']\n\n cOrg = next(o for o in orgList if o['uri'] == cOrgURI)\n cOrgID = 'bibo:%s'%cOrg['identifier']\n\n #cOrgAttrs = [\n # (\"prov:type\", 'gcis:organization'),\n # (\"prov:label\", cOrg['name']),\n # (\"prov:location\", cOrg['uri']),\n # (\"gcis:organization_type_identifier\", cOrg['organization_type_identifier']),\n # (\"gcis:country_code\", cOrg['country_code']),\n # ]\n #doc.entity(cOrgID, cOrgAttrs)\n #doc.hadMember(orgID, cOrgID)\n #for parent in org['parents']:\n # pOrgURI = parent['organization']\n # rel = parent['relationship']\n # pOrg = next(o for o in orgList if o['uri'] == pOrgURI)\n # pOrgID = 'bibo:%s'%pOrg['identifier']\n # doc.hadMember(pOrgID, orgID)\n\n prov_json = json.loads(doc.serialize())\n\n return prov_json", "def parse_exported_highlights(self, raw):\n self._log(\"%s:parse_exported_highlight()\" % self.app_name)\n\n # Create the annotations, books table as needed\n self.annotations_db = \"%s_imported_annotations\" % self.app_name_\n self.create_annotations_table(self.annotations_db)\n self.books_db = \"%s_imported_books\" % self.app_name_\n self.create_books_table(self.books_db)\n\n self.annotated_book_list = []\n 
self.selected_books = None\n\n # Generate the book metadata from the selected book\n row = self.opts.gui.library_view.currentIndex()\n book_id = self.opts.gui.library_view.model().id(row)\n db = self.opts.gui.current_db\n mi = db.get_metadata(book_id, index_is_id=True)\n\n # Populate author, title at a minimum\n title = \"A Book With Some Exported Annotations\"\n author = \"John Smith\"\n\n # Populate a BookStruct\n book_mi = BookStruct()\n book_mi.active = True\n book_mi.author = author\n book_mi.book_id = mi.id\n book_mi.title = title\n book_mi.uuid = None\n book_mi.last_update = time.mktime(time.localtime())\n book_mi.reader_app = self.app_name\n book_mi.cid = mi.id\n book_mi.annotations = len(self.highlights)\n\n # Add annotations to the database\n for timestamp in sorted(self.highlights.keys()):\n book_mi.last_update = timestamp\n\n # Populate an AnnotationStruct\n ann_mi = AnnotationStruct()\n\n # Required items\n ann_mi.book_id = book_mi['book_id']\n ann_mi.last_modification = timestamp\n\n # Optional items\n if 'annotation_id' in self.highlights[timestamp]:\n ann_mi.annotation_id = self.highlights[timestamp]['annotation_id']\n if 'highlight_color' in self.highlights[timestamp]:\n ann_mi.highlight_color = self.highlights[timestamp]['highlight_color']\n if 'highlight_text' in self.highlights[timestamp]:\n highlight_text = '\\n'.join(self.highlights[timestamp]['highlight_text'])\n ann_mi.highlight_text = highlight_text\n if 'note_text' in self.highlights[timestamp]:\n note_text = '\\n'.join(self.highlights[timestamp]['note_text'])\n ann_mi.note_text = note_text\n\n # Add annotation to annotations_db\n self.add_to_annotations_db(self.annotations_db, ann_mi)\n\n # Increment the progress bar\n self.opts.pb.increment()\n\n # Update last_annotation in books_db\n self.update_book_last_annotation(self.books_db, timestamp, ann_mi.book_id)\n\n # Add book to books_db\n self.add_to_books_db(self.books_db, book_mi)\n self.annotated_book_list.append(book_mi)\n\n # Update the timestamp\n self.update_timestamp(self.annotations_db)\n self.update_timestamp(self.books_db)\n self.commit()\n\n # Return True if successful\n return True", "def processSearchResult(self):", "def structure_PBDMS_annotations(documents, kb_data):\n \n doc_annotations = list()\n partial_func = partial(parse_PBDMS_doc, kb_data)\n \n with multiprocessing.Pool(processes=10) as pool:\n doc_annotations = pool.map(partial_func, documents)\n \n return doc_annotations", "def transform_book_details(self, books_dict):\n for i, book_dict in enumerate(books_dict):\n print(f'Transforming data for book {i + 1}')\n with open(f'{PATH_TO_DATA}/{i}.html', 'r') as f:\n content = f.read()\n self._add_book_details(book_dict, content)", "def parse(self, response, **kwargs):\n\n key_url = response._get_url().rsplit('/')[-2] # get the author nickname after the last slash\n name = response.xpath('//*[@id=\"woe\"]/div[2]/div/div[1]/div[2]/h3/text()').extract_first()\n job_title = response.xpath('//*[@id=\"woe\"]/div[2]/div/div[1]/div[2]/p/text()').extract_first()\n linkedin_url = response.xpath('//*[@id=\"woe\"]/div[2]/div/div[1]/div[1]/ul/li/a/@href').extract_first()\n date_title_part = response.xpath('//*[@id=\"woe\"]/div[2]/div/div[2]/div[position() > 1]')\n for row in date_title_part:\n row_extracted = row.extract()\n art_date = Selector(text=row_extracted).xpath('///span/text()').extract_first()\n date_formatted = conf.parse_dtts(art_date, '%B %d, %Y')\n article_title = Selector(text=row_extracted).xpath('///a/text()').extract_first()\n article_url 
= Selector(text=row_extracted).xpath('///a/@href').extract_first()\n\n if date_formatted >= conf.crawl_date[0].get('LastExecutionDate'):\n conf.write_data_append('authors.json', json.dumps({'keyUrl': key_url,\n 'name': name,\n 'jobTitle': job_title,\n 'linkedinUrl': linkedin_url,\n 'date': date_formatted,\n 'article_title': article_title,\n 'article_url': article_url}))", "def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n # get the data\n data = requests.get(page_url)\n\n # load data into bs4\n soup = BeautifulSoup(data.text, 'html.parser')\n # links = []\n pdf_dis = []\n dates = []\n table = []\n version_hash_fields = []\n\n for tr in soup.find_all('tr'):\n date_col = soup.find_all('td', attrs={'class': 'fd-col2'})\n hyperlink_col = soup.find_all('td', attrs={'class': 'fd-col1'})\n values = [td.text for td in tr.find_all('td')]\n table.append(values)\n for link in hyperlink_col:\n pdf_url = 'https://www.health.mil/' + link.find('a')['href']\n pdf_di = DownloadableItem(doc_type='pdf',\n web_url=pdf_url)\n pdf_dis.append(pdf_di)\n for date in date_col:\n dates.append(date.text)\n\n doc_nums = []\n doc_titles = []\n doc_names = []\n for row in table[1:]:\n doc_data = row[0].split(':')\n\n if len(doc_data) == 1: # if no colon then no doc number\n if doc_data[0] == \"(DTM)-19-004 -Military Service by Transgender Persons and Persons with Gender Dysphoria (Change 1)\":\n doc_nums.append(\"19-004\")\n doc_names.append(\"DTM\")\n doc_titles.append(doc_data[0][14:])\n version_hash_fields.append({\"doc_name\": 'DTM', \"doc_title\": doc_data[0][14:]})\n else:\n doc_nums.append(\" \")\n doc_titles.append(doc_data[0])\n doc_names.append(doc_data[0])\n version_hash_fields.append({\"doc_name\": doc_data[0], \"doc_title\": doc_data[0]})\n else:\n\n tmptitle = doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\")\n\n if \"Volume\" in tmptitle:\n doc_nums.append(doc_data[0][7:]+\" Volume \"+tmptitle.split()[-1])\n else:\n doc_nums.append(doc_data[0][7:])\n doc_titles.append(doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\"))\n doc_names.append(doc_data[0][:6])\n\n version_hash_fields.append({\"doc_name\": doc_data[0][:7], \"doc_title\": doc_data[1]})\n\n parsed_docs = []\n page_url = 'https://www.health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n num_docs = len(doc_nums)\n for i in range(num_docs):\n # put all the relevant info into dictionaries\n doc = Document(doc_type=doc_names[i].replace(\" \",\"-\"),\n doc_title=doc_titles[i],\n doc_num=doc_nums[i],\n doc_name=doc_names[i].replace(\" \",\"-\")+\" \"+doc_nums[i],\n publication_date=dates[i],\n cac_login_required=False,\n crawler_used='dha_pubs',\n source_page_url=page_url,\n downloadable_items=[pdf_dis[i]],\n version_hash_raw_data=version_hash_fields[i])\n parsed_docs.append(doc)\n\n return parsed_docs" ]
[ "0.5708067", "0.55803597", "0.52704716", "0.5067415", "0.4997363", "0.49662167", "0.4906672", "0.48808354", "0.4867383", "0.48577002", "0.48404002", "0.48006877", "0.4791166", "0.47886", "0.47818494", "0.47446042", "0.47305262", "0.47226065", "0.46784338", "0.46754616", "0.46706814", "0.4667083", "0.46646255", "0.46604046", "0.46583143", "0.46550775", "0.46422642", "0.46371165", "0.46355218", "0.4634822" ]
0.6609978
0
0MQ server startup and communication management
def start_server(self) -> None:
    with self.socket.bind(self.address):
        print("ZeroMQ Server listening at {}".format(self.address))
        while True:
            payload_rx = self.socket.recv(flags=0)
            if payload_rx:
                self.decode_payload(payload_rx)
                self.socket.send_string(self.reply(), flags=0, copy=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n self._connect()\n self._init_exchange()\n self._init_queue()\n self._bind_queue()", "def main(connection_file):\n\n ctx = zmq.Context.instance()\n\n with open(connection_file) as f:\n cfg = json.loads(f.read())\n\n reg_url = cfg['interface']\n iopub_port = cfg['iopub']\n iopub_url = f\"{reg_url}:{iopub_port}\"\n\n session = Session(key=cfg['key'].encode('ascii'))\n sub = ctx.socket(zmq.SUB)\n\n # This will subscribe to all messages:\n sub.SUBSCRIBE = b''\n # replace with b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout\n # 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes\n # to everything from engine 1, but there is no way to subscribe to\n # just stdout from everyone.\n # multiple calls to subscribe will add subscriptions, e.g. to subscribe to\n # engine 1's stderr and engine 2's stdout:\n # sub.SUBSCRIBE = b'engine.1.stderr'\n # sub.SUBSCRIBE = b'engine.2.stdout'\n sub.connect(iopub_url)\n while True:\n try:\n idents, msg = session.recv(sub, mode=0)\n except KeyboardInterrupt:\n return\n # ident always length 1 here\n topic = idents[0].decode('utf8', 'replace')\n if msg['msg_type'] == 'stream':\n # stdout/stderr\n # stream names are in msg['content']['name'], if you want to handle\n # them differently\n print(\"{}: {}\".format(topic, msg['content']['text']))\n elif msg['msg_type'] == 'error':\n # Python traceback\n c = msg['content']\n print(topic + ':')\n for line in c['traceback']:\n # indent lines\n print(' ' + line)\n elif msg['msg_type'] == 'error':\n # Python traceback\n c = msg['content']\n print(topic + ':')\n for line in c['traceback']:\n # indent lines\n print(' ' + line)", "def start(self):\n zmq_uri = (\n \"{protocol}://{address}:{port}\".format(\n protocol=self.protocol, address=self.address, port=self.port\n )\n if self.port\n else \"{protocol}://{address}\".format( # noqa\n protocol=self.protocol, address=self.address\n )\n )\n log.debug(\"ZMQ URI: %s\", zmq_uri)\n self.ctx = zmq.Context()\n if hasattr(zmq, self.type):\n skt_type = getattr(zmq, self.type)\n else:\n skt_type = zmq.PULL\n self.sub = self.ctx.socket(skt_type)\n self.sub.connect(zmq_uri)\n if self.hwm is not None:\n self.sub.setsockopt(zmq.RCVHWM, self.hwm)\n if self.recvtimeout is not None:\n log.debug(\"Setting RCVTIMEO to %d\", self.recvtimeout)\n self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)\n if self.keepalive is not None:\n log.debug(\"Setting TCP_KEEPALIVE to %d\", self.keepalive)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)\n if self.keepalive_idle is not None:\n log.debug(\"Setting TCP_KEEPALIVE_IDLE to %d\", self.keepalive_idle)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)\n if self.keepalive_interval is not None:\n log.debug(\"Setting TCP_KEEPALIVE_INTVL to %d\", self.keepalive_interval)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)", "def server_activate(self):\n\t\tself.socket.listen(self.request_queue_size)", "def initzmq(self):\n\n if \"topics\" not in self.configData:\n raise Exception(\"Topics not found in %s\" % self.configPath)\n\n for topic in self.configData['topics']:\n addr = self.gen_address(topic['protocol'], topic['address'],\n topic['port'])\n socket = self.build_socket(topic['paradigm'], topic['topic'], addr)\n self.topics[topic['name']] = socket", "def startup(self,context):\n master_socket = int(12345)\n self.task_queue = context.InputQueue\n self.result_queue = context.OutputQueue\n manager = Manager()\n self.dict_position = manager.dict()\n 
self.dict_cycle = manager.dict()\n self.dict_worker_info = manager.dict()\n\n TaskManager.register('get_job_queue',\n callable = lambda:self.task_queue)\n TaskManager.register('get_result_queue',\n callable = lambda:self.result_queue)\n TaskManager.register('get_data',\n callable = lambda:self.dict_position)\n TaskManager.register('get_cycle',\n callable = lambda:self.dict_cycle)\n TaskManager.register('set_worker_info',\n callable = lambda:self.dict_worker_info)\n self.m = TaskManager(address = ('', master_socket),\n authkey = b'secret')\n\n\n thread = Thread(target=self.runServer)\n thread.start()", "def init_client():\n init_config()\n begin_sending_packets()", "def init_connections(self):\n context = zmq.Context()\n self.sock_reply = context.socket(zmq.REQ)\n self.sock_reply.connect(self.sock_consumer_url)\n # Informs prev_stage that I am ready to work\n self.sock_reply.send_pyobj(\"READY\")\n # Create and register poller\n self.poll = zmq.Poller()\n self.poll.register(self.sock_reply, zmq.POLLIN)\n return True", "def main():\n s = start_server()\n accept_connection(s)", "def _setup_communication(self):\n state = self.ui.checkBox_comm.checkState()\n if state:\n try:\n sys.path.append(\"..\")\n from zmq_interface.gui_interface import ZmqInterface\n except ImportError as e:\n self.write_text(\"ZMQ interface failed to import. No remote control for this session.\")\n self.disable_visualizer()\n return\n try:\n ##TODO: let user specify ports\n self.com = ZmqInterface(rep_port=REPLY_PORT,\n gui_handle=self)\n except Exception as e:\n #traceback.print_exc(file=sys.stdout)\n self.write_text(\"ZMQ interface failed to start. No remote control for this session. Reason: %s\" % e)\n self.disable_visualizer()\n return\n self.start = self._start_session\n self.stop = self._stop_session\n self.load_config = self._load_state\n self.save_config = self._save_state\n self.com_timer = QtCore.QTimer()\n self.com_timer.timeout.connect(self._check_coms)\n self.com_timer.start(200)\n self.write_text(\"ZMQ interface set up. Reply port on %s\" % self.com.rep_port)\n self.enable_visualizer()\n else:\n if self.com:\n self.com.close()\n if self.com_timer:\n self.com_timer.stop()\n self.com = None\n self.com_timer = None\n self.enable_visualizer()\n self.write_text(\"ZMQ interface closed.\")", "def start(config, brokerTimeout = 60.0):\n \n manager = multiprocessing.Manager()\n serverUpEvent = manager.Event()\n broker = multiprocessing.Process(target=startSTOMPBroker, args=(config,serverUpEvent))\n broker.daemon = True\n broker.name = 'STOMP-Broker'\n broker.start()\n\n serverUpEvent.wait(brokerTimeout)\n if not serverUpEvent.is_set():\n logger.fatal(\"Broker not available after %.1f seconds. 
Giving up\", brokerTimeout)\n return -1\n #host side logic\n host = config.get('Broker', 'host') \n port = int(config.get('Broker', 'port'))\n username = config.get('Broker', 'username')\n password = config.get('Broker', 'password')\n\n hostEngine = HostStompEngine(config)\n stompProtocolFactory = StompProtocolFactory(hostEngine, username, password)\n \n HostXMLRPCService(config).makeEngineAccesible(hostEngine)\n\n\n reactor.connectTCP(host, port, stompProtocolFactory)\n reactor.run()", "def main(_):\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(CORENLP_ADDRESS)\n socket.send(\"stop\")\n message = socket.recv()\n print(\"Received reply [%s]\" % message)", "def setupTcp(self):\n \tself.tcpManager = QueuedConnectionManager()\n \tself.tcpReader = QueuedConnectionReader(self.tcpManager, 0)\n \tself.tcpWriter = ConnectionWriter(self.tcpManager, 0)", "def setup(self) -> None:\n self.running = True\n self.listen()\n self.start_workers()\n\n # Send server socket to workers.\n assert self.socket is not None\n for work_queue in self.work_queues:\n work_queue[0].send(self.family)\n send_handle(work_queue[0], self.socket.fileno(),\n self.workers[self.current_worker_id].pid)\n self.socket.close()", "def __init__(self, port=1071):\n\n context = zmq.Context()\n\n self.socket = context.socket(zmq.REP)\n self.socket.bind('tcp://*:' + str(port))\n\n self.socket.recv()", "def init_connect_mq(self):\n try:\n mq_username = Configs.mq_username\n mq_pwd = Configs.mq_pwd\n mq_ip_addr = Configs.mq_ip_addr\n mq_port_num = Configs.mq_port_num\n mq_vhost = Configs.mq_vhost\n\n mq_credentials = pika.PlainCredentials(mq_username, mq_pwd)\n mq_connection = pika.BlockingConnection(\n pika.ConnectionParameters(host=mq_ip_addr, port=mq_port_num, virtual_host=mq_vhost,\n credentials=mq_credentials))\n # connect to mq channel\n self.mq_channel = mq_connection.channel()\n self.mq_channel.exchange_declare(exchange=Configs.mq_exchange_name, exchange_type='topic', durable='true')\n # self.mq_channel.queue_declare(queue='test', durable=False, arguments={'x-message-ttl': 10000})\n self.mq_conn_flag = True\n print(\" ************** MQ Connect Success ************** \")\n except Exception as e:\n print(e)", "def __init__(self, ip='127.0.0.1', port='50020'):\n self.ip = ip \n self.port = port\n self.ctx = zmq.Context()\n self.socket = zmq.Socket(self.ctx, zmq.REQ) # this is pub socket", "def server_init(log_set, conf_set, header_set, commands_w_set):\n global log_th, conf_th, header_th, command_w_th\n log_th = log_set\n conf_th = conf_set\n header_th = header_set\n command_w_th = commands_w_set\n sock_ip = conf_set.get_item(q_key='general').get('sock_ip')\n port = int(conf_set.get_item(q_key='general').get('port'))\n return ThreadedTCPServer((sock_ip, port), ThreadedTCPRequestHandler)", "def init_server_socket(self):\r\n server_log.info('Initializing server on {}:{}'.format(self.host, self.port))\r\n self.create_socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.set_reuse_addr()\r\n self.bind((self.host, self.port))\r\n self.listen(5)\r\n server_log.info('Initialization complete!')", "def start(self):\n super().start()\n loop = IOLoop.current()\n # Collect and send all IOPub messages, for all time\n # TODO: Check errors from this loop and restart as needed (or shut down the kernel)\n loop.add_callback(self.relay_iopub_messages)", "def start_server():\n server = WebsocketServer(9001, host='0.0.0.0')\n server.set_fn_message_received(message_received)\n server.set_fn_client_left(client_left)\n 
print(\"Started\")\n server.run_forever()", "def setup(self):\n self.context = zmq.Context()\n self.sub_socket = self.context.socket(zmq.SUB)\n if self.filter:\n self.sub_socket.setsockopt(zmq.SUBSCRIBE, self.filter)\n self.sub_socket.connect('tcp://'+self.host+':'+str(self.com_port))\n return self", "def activate(self):\n self.socket.listen(self.request_queue_size)", "def setup_for_run(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.ip_address, self.port))\n self.server.listen(100)", "def server():", "def server():", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def connect(self):\n assert self.listening\n assert not self.connected\n ctx = zmq.Context.instance()\n port = NODE_INFOS[self.ID].port\n self._send_socket = ctx.socket(zmq.PUB)\n self._send_socket.bind(f\"tcp://*:{port}\")\n self.connected = True", "def do_start_messaging(self, *arg):\n print_info(\"Starting messaging\")\n\n # Send commands to POCS via this publisher\n try:\n self.cmd_publisher = PanMessaging.create_publisher(\n self.cmd_pub_port)\n print_info(\"Command publisher started on port {}\".format(\n self.cmd_pub_port))\n except Exception as e:\n print_warning(\"Can't start command publisher: {}\".format(e))\n\n try:\n self.cmd_subscriber = PanMessaging.create_subscriber(\n self.cmd_sub_port)\n print_info(\"Command subscriber started on port {}\".format(\n self.cmd_sub_port))\n except Exception as e:\n print_warning(\"Can't start command subscriber: {}\".format(e))\n\n # Receive messages from POCS via this subscriber\n try:\n self.msg_subscriber = PanMessaging.create_subscriber(\n self.msg_sub_port)\n print_info(\"Message subscriber started on port {}\".format(\n self.msg_sub_port))\n except Exception as e:\n print_warning(\"Can't start message subscriber: {}\".format(e))\n\n # Send messages to PAWS\n try:\n self.msg_publisher = PanMessaging.create_publisher(\n self.msg_pub_port)\n print_info(\"Message publisher started on port {}\".format(\n self.msg_pub_port))\n except Exception as e:\n print_warning(\"Can't start message publisher: {}\".format(e))", "def start(self):\n\n # First we have to initialise the display type before we initialise\n # the display controller! The second needs the display type.\n self._initialise_display_type()\n self._initialise_display_controller()\n\n # Now lets start ZMQ\n self._zmq_scheduler_reply_thread = threading.Thread(\n target=self._handle_incoming_zmq\n )\n t_name = 'ZMQ reply socket monitor'\n self._zmq_scheduler_reply_thread.name = t_name\n self._zmq_scheduler_reply_thread.daemon = True\n self._zmq_scheduler_reply_thread.start()" ]
[ "0.6537591", "0.6530103", "0.6514542", "0.65044683", "0.64858484", "0.6449997", "0.6438438", "0.6425711", "0.6412168", "0.63570845", "0.6355636", "0.6352996", "0.62894076", "0.6263325", "0.62401557", "0.62241405", "0.6221612", "0.6191344", "0.6186266", "0.6111829", "0.6066045", "0.60621566", "0.60447997", "0.6040094", "0.60241026", "0.60241026", "0.6010317", "0.599821", "0.59760433", "0.596906" ]
0.71882457
0
Test setting jacobian options
def test_jacobian_options(self, mocker):
    spy = mocker.spy(qml.gradients, "param_shift")

    a = jax.numpy.array([0.1, 0.2])

    dev = qml.device("default.qubit", wires=1)

    def cost(a, device):
        with qml.queuing.AnnotatedQueue() as q:
            qml.RY(a[0], wires=0)
            qml.RX(a[1], wires=0)
            qml.expval(qml.PauliZ(0))

        tape = qml.tape.QuantumScript.from_queue(q)

        return execute(
            [tape],
            device,
            gradient_fn=param_shift,
            gradient_kwargs={"shifts": [(np.pi / 4,)] * 2},
        )[0]

    jax.grad(cost)(a, device=dev)

    for args in spy.call_args_list:
        assert args[1]["shifts"] == [(np.pi / 4,)] * 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_test_jacobian(self):\n self.set_up()\n inputObject = self.vmecOptimization.vmecInputObject\n rbc = np.copy(inputObject.rbc)\n zbs = np.copy(inputObject.zbs)\n inputObject.rbc = 0*inputObject.rbc\n inputObject.zbs = 0*inputObject.zbs\n orientable = self.vmecOptimization.test_jacobian(inputObject)\n self.assertFalse(orientable)\n # Reset boundary\n inputObject.rbc = rbc\n inputObject.zbs = zbs\n self.tear_down()", "def jacobian(self, x):\n pass", "def jacobian_information(self):\n has_jacobian = False\n jacobian_free_solvers = []\n return has_jacobian, jacobian_free_solvers", "def test_param_unused(self, operable_mock_device_2_wires):\n\n def circuit(x, y):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n q = JacobianQNode(circuit, operable_mock_device_2_wires)\n q._construct([1.0, 1.0], {})\n assert q.par_to_grad_method == {0: \"F\", 1: \"0\"}", "def set_jac_usage(self,use_jac):\n if type(use_jac).__name__ == 'bool':\n self._use_jac = use_jac\n else:\n raise KINSOL_Exception(\"The variable sent to 'set_jac_usage' must be a boolean.\")", "def jacobian(self, dt):\n raise NotImplementedError", "def jacobianstructure(self):\n pass", "def test_bogus_gradient_method_set(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n # in mutable mode, the grad method would be\n # recomputed and overwritten from the\n # bogus value 'J'. Caching stops this from happening.\n node = JacobianQNode(circuit, operable_mock_device_2_wires, mutable=False)\n\n node.evaluate([0.0], {})\n node.par_to_grad_method[0] = \"J\"\n\n with pytest.raises(ValueError, match=\"Unknown gradient method\"):\n node.jacobian(0.5)", "def setup_method(self):\n self.x0 = (1.0, [1.0, 1.0])\n self.sol = (-0.195, np.array([-0.195, -0.1]))\n\n self.tol = 3 # number of decimal places\n\n self.niter = 100\n self.disp = False\n\n # fix random seed\n np.random.seed(1234)\n\n self.kwargs = {\"method\": \"L-BFGS-B\", \"jac\": True}\n self.kwargs_nograd = {\"method\": \"L-BFGS-B\"}", "def jacobian(self, c):\n\n raise NotImplementedError", "def test_f2py_opts(self):\n\n distribution = Distribution()\n instance = build_src(distribution)\n instance.initialize_options()\n\n instance.f2py_opts = '--2d-numpy --g3-numpy'\n\n instance.finalize_options()\n\n assert_(instance.f2py_opts == ['--2d-numpy', '--g3-numpy'])", "def test_system_jacobian(self, scml_system):\n el_jac = np.arange(4).reshape(2, 2)\n el_over_omega = np.arange(4, 6)\n torque_over_el = np.arange(6, 8)\n # Set the el. jacobian returns to specified values\n scml_system.electrical_motor.electrical_jac_return = (el_jac, el_over_omega, torque_over_el)\n me_jac = np.arange(8, 12).reshape(2, 2)\n me_over_torque = np.arange(12, 14)\n # Set the mech. jabobian returns to specified values\n scml_system.mechanical_load.mechanical_jac_return = me_jac, me_over_torque\n sys_jac = scml_system._system_jacobian(0, np.array([0, 1, 2, 3]), [0, -1])\n\n #\n assert np.all(sys_jac[-2:, -2:] == el_jac), 'The el. jacobian is false'\n assert np.all(sys_jac[:2, :2] == me_jac), 'The mech. 
jacobian is false'\n assert np.all(sys_jac[2:, 0] == el_over_omega), 'the derivative of the el.state over omega is false'\n assert np.all(sys_jac[2:, 1] == np.zeros(2))\n assert np.all(sys_jac[:-2, 2:] == np.array([[72, 84], [78, 91]])), 'The derivative of the mech.state ' \\\n 'over the currents is false'", "def jacobian_ik(robot, q_init: dict, q_goal: dict, params=None, use_limits=True):\n if params is None:\n tol = 1e-6\n maxiter = 5000\n dt = 1e-3\n method = \"dls_inverse\"\n else:\n tol = params[\"tol\"]\n maxiter = params[\"maxiter\"]\n dt = params[\"dt\"]\n method = params[\"method\"]\n\n n = robot.n\n ub = np.array(variable_dict_to_list(robot.ub))\n lb = np.array(variable_dict_to_list(robot.lb))\n q_bar = (ub + lb) / 2.0\n q = np.array(variable_dict_to_list(q_init))\n\n N_ee = len(robot.end_effectors)\n\n k = 0.01 # DLS jacobian inverse damping factor\n k0 = 20 # joint limit gain\n\n # gains\n K_p = np.eye(3) * 1000 # position gain\n K_o = np.eye(3) * 1000 # orientation gain\n\n K = np.eye(6)\n K[:3, :3] = K_p\n K[3:, 3:] = K_o\n K = np.kron(np.eye(N_ee), K)\n\n count = 0\n\n # Initialize system\n e = error(robot, q, q_goal)\n J, J_star = stacked_jacobians(robot, q)\n ll, llinv = stacked_L(robot, q, q_goal)\n q_dot = np.dot(J_star, np.dot(K, np.dot(llinv, e)))\n # loop unitl error is converged AND all joint angles are within bounds.\n while (\n np.linalg.norm(e) > tol or (any((q > ub) | (q < lb)) and use_limits)\n ) and count < maxiter:\n\n J, J_star = stacked_jacobians(robot, q) # get jacobians\n\n e = error(robot, q, q_goal) # Error to goal\n\n ll, llinv = stacked_L(\n robot, q, q_goal\n ) # Accounting for Euler Error (see eqn. 387 on p. 139)\n\n if use_limits:\n q_dot = (\n -k0 / n * (q - q_bar) / (ub - lb) * q_dot\n ) # Joint angle avoidance using eqn. 3.57 on p. 
126\n q_dot = np.dot(J_star, np.dot(K, np.dot(llinv, e))) + np.dot(\n (np.eye(n) - np.dot(J_star, J)), q_dot\n )\n\n q = q + q_dot * dt # update joint angles\n q = (q + np.pi) % (2 * np.pi) - np.pi # wrap angles to -pi to pi\n\n if count % 100 == 0:\n print(\"count: %s\" % count)\n print(\"error: %s\" % e)\n print(\"q_dot: %s\", q_dot)\n U, S, V = np.linalg.svd(J)\n cond = np.min(S) / np.max(S)\n print(\"Jacobian condition: %s\" % cond)\n\n print(\"q: %s\" % q)\n count += 1\n\n if count >= maxiter:\n print(\"Did not find config!\")\n print(\"iterations: %s\" % count)\n print(\"error: %s\" % e)\n ja_violations = (q > ub) | (q < lb)\n print(\"Violations: %s\" % ja_violations)\n return q, count\n else:\n\n print(\"Finished\")\n print(\"iterations: %s\" % count)\n print(\"error: %s\" % e)\n print(\"Joint Angles: %s\" % q)\n ja_violations = (q > ub) | (q < lb)\n print(\"Violations: %s\" % ja_violations)\n return q, count", "def jacobian(self, x1, x2, out=None):\n raise NotImplementedError", "def test_jacobi_analytical(env_class: mantrap.environment.base.GraphBasedEnvironment.__class__):\n env = env_class(torch.rand(2), ego_type=mantrap.agents.DoubleIntegratorDTAgent)\n env.add_ado(position=torch.rand(2) * 5, goal=torch.rand(2) * 10)\n\n ego_controls = torch.rand((5, 2)) / 10.0\n ego_controls.requires_grad = True\n ego_trajectory = env.ego.unroll_trajectory(controls=ego_controls, dt=env.dt)\n\n # Initialize HJ module and compute partial derivative dx_rel/du_robot using auto-grad.\n module = mantrap.modules.HJReachabilityModule(env=env, t_horizon=5)\n _ = module._constraint_core(ego_trajectory, ado_ids=env.ado_ids, tag=\"test\", enable_auto_grad=True)\n dx_rel_du_auto_grad = []\n for ado_id in env.ado_ids:\n x_rel = module.x_relative[f\"test/{ado_id}\"]\n grad = [torch.autograd.grad(x, ego_controls, retain_graph=True)[0] for x in x_rel]\n dx_rel_du_auto_grad.append(torch.stack(grad).reshape(4, -1))\n dx_rel_du_auto_grad = torch.stack(dx_rel_du_auto_grad)\n\n # Compute the same partial derivative analytically, by calling the `compute_jacobian_analytically()`\n # function. Since we cannot inverse a vector (dJ/dx_rel), we can check whether the jacobian\n # computed using the pre-computed dJ/dx_rel and the auto-grad (!) 
dx_rel/du results in the same\n # jacobian as the result of `compute_jacobian_analytically()`, which is only the case if\n # dx_rel/du(auto-grad) = dx_rel/du(analytic) since dJ/dx has non-zero elements.\n jacobian_analytical = module.compute_jacobian_analytically(ego_trajectory, grad_wrt=ego_controls,\n ado_ids=env.ado_ids, tag=\"test\")\n dj_dx_rel = []\n for ado_id in env.ado_ids:\n dj_dx_rel.append(module.value_gradient(x=module.x_relative[f\"test/{ado_id}\"]))\n jacobian_auto_grad = np.matmul(np.stack(dj_dx_rel), dx_rel_du_auto_grad)\n\n assert np.allclose(jacobian_analytical, jacobian_auto_grad)", "def test_not_differentiable(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.BasisState(x, wires=[1])\n return qml.expval(qml.PauliZ(0))\n\n q = JacobianQNode(circuit, operable_mock_device_2_wires)\n q._construct([np.array([1.0])], {})\n assert q.par_to_grad_method == {0: None}", "def test_jax(self, approx_order, strategy, tol):\r\n jax = pytest.importorskip(\"jax\")\r\n from jax import numpy as jnp\r\n from pennylane.interfaces.jax import JAXInterface\r\n from jax.config import config\r\n\r\n config.update(\"jax_enable_x64\", True)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.543, -0.654])\r\n\r\n def cost_fn(x):\r\n with JAXInterface.apply(qml.tape.QubitParamShiftTape()) as tape:\r\n qml.RX(x[0], wires=[0])\r\n qml.RY(x[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n tape.trainable_params = {0, 1}\r\n tapes, fn = finite_diff(tape, n=1, approx_order=approx_order, strategy=strategy)\r\n jac = fn([t.execute(dev) for t in tapes])\r\n return jac\r\n\r\n res = jax.jacobian(cost_fn)(params)\r\n x, y = params\r\n expected = np.array(\r\n [\r\n [-np.cos(x) * np.sin(y), -np.cos(y) * np.sin(x)],\r\n [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)],\r\n ]\r\n )\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def JacobianFunction(p,x,y,z):\n \n n = len(x)\n \n J = np.array([ np.ones((n)),x,x**2,y,y**2,x*y ])\n \n return J", "def test_jacobi_symbol():\r\n assert jacobi_symbol.jacobi_symbol(-1, 5) == 1\r\n assert jacobi_symbol.jacobi_symbol(-1, 13) == 1\r\n assert jacobi_symbol.jacobi_symbol(-1, 3) == -1\r\n assert jacobi_symbol.jacobi_symbol(-1, 7) == -1\r\n assert jacobi_symbol.jacobi_symbol(2, 3) == -1\r\n assert jacobi_symbol.jacobi_symbol(2, 5) == -1\r\n assert jacobi_symbol.jacobi_symbol(2, 7) == 1\r\n assert jacobi_symbol.jacobi_symbol(2, 17) == 1\r\n assert jacobi_symbol.jacobi_symbol(3, 3) == 0\r\n assert jacobi_symbol.jacobi_symbol(3, 5) == -1\r\n assert jacobi_symbol.jacobi_symbol(3, 7) == -1\r\n assert jacobi_symbol.jacobi_symbol(3,5) == jacobi_symbol.jacobi_symbol(-2,5)\r\n assert jacobi_symbol.jacobi_symbol(-1,5) == jacobi_symbol.jacobi_symbol(4,5)\r\n assert jacobi_symbol.jacobi_symbol(11,7) == jacobi_symbol.jacobi_symbol(4,7)\r\n assert jacobi_symbol.jacobi_symbol(-3,7) == jacobi_symbol.jacobi_symbol(4,7)\r\n assert jacobi_symbol.jacobi_symbol(10,7) == jacobi_symbol.jacobi_symbol(3,7)\r\n assert jacobi_symbol.jacobi_symbol(2, 45) == -1\r\n assert jacobi_symbol.jacobi_symbol(3, 45) == 0\r\n assert jacobi_symbol.jacobi_symbol(7, 45) == -1\r\n assert jacobi_symbol.jacobi_symbol(2, 15) == 1\r\n assert jacobi_symbol.jacobi_symbol(1001, 9907) == -1 #wikepedia example\r", "def test_parameter_derivatives(self):\n self.set_up()\n shape_gradient = np.zeros((self.vmecOptimization.nzeta+1,\n self.vmecOptimization.ntheta)) \n self.assertRaises(ValueError,\n 
self.vmecOptimization.vmec_shape_gradient,\n shape_gradient,self.vmecOptimization.vmecOutputObject)\n self.tear_down()", "def jacobian_func(f):\n jacobian = jacfwd(f)\n return jacobian", "def test_all_finite_difference(self, operable_mock_device_2_wires):\n\n def circuit(x, y, z):\n qml.Rot(x, y, z, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n q = JacobianQNode(circuit, operable_mock_device_2_wires)\n q._construct([1.0, 1.0, 1.0], {})\n assert q.par_to_grad_method == {0: \"F\", 1: \"F\", 2: \"F\"}", "def test_matrix_parameter(self, execute_kwargs, tol):\n a = jax.numpy.array(0.1)\n U = jax.numpy.array([[0, 1], [1, 0]])\n\n def cost(a, U, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.QubitUnitary(U, wires=0)\n qml.RY(a, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n tape.trainable_params = [0]\n return execute([tape], device, **execute_kwargs)[0]\n\n dev = qml.device(\"default.qubit\", wires=2)\n res = jax.jit(cost, static_argnums=2)(a, U, device=dev)\n assert np.allclose(res, -np.cos(a), atol=tol, rtol=0)\n\n jac_fn = jax.grad(cost, argnums=(0))\n res = jac_fn(a, U, device=dev)\n assert np.allclose(res, np.sin(a), atol=tol, rtol=0)", "def calculate_jacobian(robot_position, landmark_pos):\n\n return None", "def test_jacobian(self):\n\n gT1 = Pose2(1, 2, np.pi/2)\n gT2 = Pose2(-1, 4, np.pi)\n\n expected = Pose2(2, 2, np.pi/2)\n\n def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):\n # print(f\"{this = },\\n{v = },\\n{len(H) = }\")\n\n key0 = this.keys()[0]\n key1 = this.keys()[1]\n gT1, gT2 = v.atPose2(key0), v.atPose2(key1)\n error = Pose2(0, 0, 0).localCoordinates(gT1.between(gT2))\n \n if len(H) > 0:\n result = gT1.between(gT2)\n H[0] = -result.inverse().AdjointMap()\n H[1] = np.eye(3)\n return error\n \n noise_model = gtsam.noiseModel.Unit.Create(3)\n cf = ge.CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func)\n v = Values()\n v.insert(0, gT1)\n v.insert(1, gT2)\n \n bf = gtsam.BetweenFactorPose2(0, 1, Pose2(0, 0, 0), noise_model)\n\n gf = cf.linearize(v)\n gf_b = bf.linearize(v)\n\n J_cf, b_cf = gf.jacobian()\n J_bf, b_bf = gf_b.jacobian()\n np.testing.assert_allclose(J_cf, J_bf)\n np.testing.assert_allclose(b_cf, b_bf)", "def is_Jacobian(J):\n return isinstance(J, Jacobian_generic)", "def jacobian(self,var,g=None):\n if (g==None):g=self.g\n jac=np.zeros([self.n+1,self.n])\n for i in range(self.n):\n for j in range(self.n):\n if(i==j): jac[i][j]=2.*(var[i]+1.)-g*np.sum([self.XXZ.Z(i,k) for k in range(self.n) if k!=i])\n else: jac[i][j]=g*self.XXZ.Z(i,j)\n for i in range(self.n):\n jac[self.n][i]=1.\n return jac", "def test_scalar_jacobian(self, execute_kwargs, tol):\n a = jax.numpy.array(0.1)\n dev = qml.device(\"default.qubit\", wires=2)\n\n def cost(a):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute([tape], dev, **execute_kwargs)[0]\n\n res = jax.jit(jax.grad(cost))(a)\n assert res.shape == ()\n\n # compare to standard tape jacobian\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n tape.trainable_params = [0]\n tapes, fn = param_shift(tape)\n expected = fn(dev.batch_execute(tapes))\n\n assert expected.shape == ()\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def jacobian(theta, event, parameters_to_fit):\n for (key, val) in enumerate(parameters_to_fit):\n 
setattr(event.model.parameters, val, theta[key])\n return event.chi2_gradient(parameters_to_fit)", "def jacobian_g(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out)" ]
[ "0.6496576", "0.6447693", "0.63923275", "0.6203111", "0.60555536", "0.6051566", "0.60316074", "0.6018218", "0.5993192", "0.59864306", "0.5792883", "0.57222456", "0.5718024", "0.5713784", "0.56529015", "0.5624789", "0.5615789", "0.55724496", "0.55696857", "0.55681354", "0.5566883", "0.55328983", "0.5519549", "0.5516664", "0.5496209", "0.5494276", "0.5493546", "0.5452658", "0.5429442", "0.5407077" ]
0.71294117
0
Test that an error is raised if a gradient transform is used with grad_on_execution=True
def test_incorrect_gradients_on_execution(self):
    a = jax.numpy.array([0.1, 0.2])

    dev = qml.device("default.qubit", wires=1)

    def cost(a, device):
        with qml.queuing.AnnotatedQueue() as q:
            qml.RY(a[0], wires=0)
            qml.RX(a[1], wires=0)
            qml.expval(qml.PauliZ(0))

        tape = qml.tape.QuantumScript.from_queue(q)

        return execute(
            [tape],
            device,
            gradient_fn=param_shift,
            grad_on_execution=True,
        )[0]

    with pytest.raises(
        ValueError, match="Gradient transforms cannot be used with grad_on_execution=True"
    ):
        jax.grad(cost)(a, device=dev)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gradient_convergence(self):\n pass", "def test_grad_test_values(self):\r\n backup = theano.config.compute_test_value\r\n theano.config.compute_test_value = 'raise'\r\n try:\r\n x = tensor.scalar('x')\r\n x.tag.test_value = 1\r\n # Used to crash due to undefined test value.\r\n tensor.grad(ifelse(0, x, x), x)\r\n finally:\r\n theano.config.compute_test_value = backup", "def test_gradient_transform(self):\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev, diff_method=qml.gradients.param_shift)\n def circuit():\n return qml.probs(wires=0)\n\n with pytest.warns(UserWarning, match=\"gradient of a tape with no trainable parameters\"):\n info = qml.specs(circuit)()\n assert info[\"diff_method\"] == \"pennylane.gradients.parameter_shift.param_shift\"\n assert info[\"gradient_fn\"] == \"pennylane.gradients.parameter_shift.param_shift\"", "def test_no_gradients_on_execution(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy_execute = mocker.spy(qml.devices.DefaultQubit, \"batch_execute\")\n spy_gradients = mocker.spy(qml.devices.DefaultQubit, \"gradients\")\n\n def cost(a):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=\"device\",\n grad_on_execution=False,\n gradient_kwargs={\"method\": \"adjoint_jacobian\"},\n )[0]\n\n a = jax.numpy.array([0.1, 0.2])\n jax.jit(cost)(a)\n\n assert dev.num_executions == 1\n spy_execute.assert_called()\n spy_gradients.assert_not_called()\n\n jax.grad(cost)(a)\n spy_gradients.assert_called()", "def testNestedFunctionGradientCall(self):\n check_numerics_callback.enable_check_numerics()\n\n x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32)\n\n @def_function.function\n def asinp1(x):\n # asin()'s gradient overflows at the value close to 1.0.\n return math_ops.asin(x) + 1.0\n\n @def_function.function\n def loss(x):\n return math_ops.square(asinp1(x))\n\n with backprop.GradientTape() as tape:\n tape.watch(x)\n y = loss(x)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: self.evaluate(tape.gradient(y, x)))\n self.assertTrue(re.search(r\"gradient\", message))", "def test_gradients_check(self):\n model = PoincareModel(self.data, negative=3)\n try:\n model.train(epochs=1, batch_size=1, check_gradients_every=1)\n except Exception as e:\n self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))", "def testCustomGradientWithNaNWithTfFunction(self):\n check_numerics_callback.enable_check_numerics()\n\n @custom_gradient.custom_gradient\n def func_with_bad_grad(x):\n output = math_ops.sin(x)\n @def_function.function\n def grad(dy):\n # `dy` will come in as 1.0. 
Taking log of -1.0 leads to NaN.\n return math_ops.log(-dy)\n return output, grad\n\n x = constant_op.constant(-2.0, dtype=dtypes.float16)\n def f(x):\n return func_with_bad_grad(x)\n\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: gradient_checker_v2.compute_gradient(f, [x]))\n\n # Check the content of the error message.\n self.assertTrue(re.search(r\"graph op.*\\\"Log\\\"\", message))\n self.assertTrue(re.search(r\"dtype.*float16\", message))\n if context.executing_eagerly():\n self.assertIn(\"shape: ()\\n\", message)\n self.assertTrue(re.search(r\"Input tensor.*Tensor.*Neg:0\", message))\n self.assertIn(\"grad\", message)", "def test_wrong_gradients_raises_assertion(self):\n model = PoincareModel(self.data, negative=3)\n model._loss_grad = Mock(return_value=np.zeros((2 + model.negative, model.size)))\n with self.assertRaises(AssertionError):\n model.train(epochs=1, batch_size=1, check_gradients_every=1)", "def test_grad_on_execution(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(dev, \"execute_and_gradients\")\n\n def cost(a):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=\"device\",\n gradient_kwargs={\n \"method\": \"adjoint_jacobian\",\n \"use_device_state\": True,\n },\n )[0]\n\n a = jax.numpy.array([0.1, 0.2])\n jax.jit(cost)(a)\n\n # adjoint method only performs a single device execution, but gets both result and gradient\n assert dev.num_executions == 1\n spy.assert_called()", "def run_check_grad(hyperparameters):\n # This creates small random data with 20 examples and\n # 10 dimensions and checks the gradient on that data.\n num_examples = 20\n num_dimensions = 10\n\n weights = np.random.randn(num_dimensions + 1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = np.random.rand(num_examples, 1)\n\n diff = check_grad(logistic,\n weights,\n 0.001,\n data,\n targets,\n hyperparameters)\n\n print(\"diff =\", diff)", "def run_check_grad(hyperparameters):\n\n # This creates small random data with 7 examples and\n # 9 dimensions and checks the gradient on that data.\n num_examples = 7\n num_dimensions = 9\n\n weights = np.random.randn(num_dimensions+1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = (np.random.rand(num_examples, 1) > 0.5).astype(int)\n\n diff = check_grad(logistic, # function to check\n weights,\n 0.001, # perturbation\n data,\n targets,\n hyperparameters)\n\n print \"diff =\", diff", "def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)", "def gradcheck(func, inputs, eps=1e-6, atol=1e-5, rtol=1e-3, raise_exception=True):\n tupled_inputs = _as_tuple(inputs)\n\n # Make sure that gradients are saved for all inputs\n 
any_input_requiring_grad = False\n for inp in tupled_inputs:\n if isinstance(inp, tf.Tensor):\n if _requires_grad(inp):\n if inp.dtype != tf.float64:\n warnings.warn(\n 'At least one of the inputs that requires gradient '\n 'is not of double precision floating point. '\n 'This check will likely fail if all the inputs are '\n 'not of double precision floating point. ')\n any_input_requiring_grad = True\n # inp.retain_grad()\n if not any_input_requiring_grad:\n raise ValueError(\n 'gradcheck expects at least one input tensor to require gradient, '\n 'but none of the them have requires_grad=True.')\n\n output = _differentiable_outputs(func(*tupled_inputs))\n\n def fail_test(msg):\n if raise_exception:\n raise RuntimeError(msg)\n return False\n\n for i, o in enumerate(output):\n if not _requires_grad(o):\n continue\n\n def fn(input):\n return _as_tuple(func(*input))[i]\n\n analytical, reentrant, correct_grad_sizes = get_analytical_jacobian(tupled_inputs, o)\n numerical = get_numerical_jacobian(fn, tupled_inputs, eps=eps)\n\n if not correct_grad_sizes:\n return fail_test('Analytical gradient has incorrect size')\n\n for j, (a, n) in enumerate(zip(analytical, numerical)):\n if _numel(a) != 0 or _numel(n) != 0:\n if not allclose(a, n, rtol, atol):\n return fail_test('Jacobian mismatch for output %d with respect to input %d,\\n'\n 'numerical:%s\\nanalytical:%s\\n' % (i, j, n, a))\n\n if not reentrant:\n return fail_test('Backward is not reentrant, i.e., running backward with same '\n 'input and grad_output multiple times gives different values, '\n 'although analytical gradient matches numerical gradient')\n\n # check if the backward multiplies by grad_output\n with tf.GradientTape(persistent=True) as tape:\n output = _differentiable_outputs(func(*tupled_inputs))\n\n if any([_requires_grad(o) for o in output]):\n diff_input_list = list(iter_tensors(tupled_inputs, True))\n grads_input = tape.gradient(output, diff_input_list, [tf.zeros_like(o) for o in output])\n\n if not len(grads_input) == 0:\n raise RuntimeError(\"no Tensors requiring grad found in input\")\n\n # grads_input = torch.autograd.grad(output, diff_input_list, [torch.zeros_like(o) for o in output],\n # allow_unused=True)\n for gi, i in zip(grads_input, diff_input_list):\n if gi is None:\n continue\n if not tf.reduce_all(tf.equal(gi, 0)):\n return fail_test('backward not multiplied by grad_output')\n if gi.dtype != i.dtype:\n return fail_test(\"grad is incorrect type\")\n if gi.shape != i.shape:\n return fail_test('grad is incorrect size')\n\n return True", "def test_param_to_gradient(self):\n pass", "def test_gradient_supported(self):\n # gradient supported here\n wrapped = EfficientSU2(2) # a circuit wrapped into a big instruction\n plain = wrapped.decompose() # a plain circuit with already supported instructions\n\n # gradients not supported on the following circuits\n x = Parameter(\"x\")\n duplicated = QuantumCircuit(2)\n duplicated.rx(x, 0)\n duplicated.rx(x, 1)\n\n needs_chainrule = QuantumCircuit(2)\n needs_chainrule.rx(2 * x, 0)\n\n custom_gate = WhatAmI(x)\n unsupported = QuantumCircuit(2)\n unsupported.append(custom_gate, [0, 1])\n\n tests = [\n (wrapped, True), # tuple: (circuit, gradient support)\n (plain, True),\n (duplicated, False),\n (needs_chainrule, False),\n (unsupported, False),\n ]\n\n # used to store the info if a gradient callable is passed into the\n # optimizer of not\n info = {\"has_gradient\": None}\n optimizer = partial(gradient_supplied, info=info)\n\n sampler = Sampler()\n estimator = Estimator()\n 
fidelity_primitive = ComputeUncompute(sampler)\n\n pvqd = PVQD(\n fidelity=fidelity_primitive,\n ansatz=None,\n initial_parameters=np.array([]),\n estimator=estimator,\n optimizer=optimizer,\n )\n problem = TimeEvolutionProblem(self.hamiltonian, time=0.01)\n for circuit, expected_support in tests:\n with self.subTest(circuit=circuit, expected_support=expected_support):\n pvqd.ansatz = circuit\n pvqd.initial_parameters = np.zeros(circuit.num_parameters)\n _ = pvqd.evolve(problem)\n self.assertEqual(info[\"has_gradient\"], expected_support)", "def test_gradients(self):\n ex = self._create_example()\n decoder_input_fn = FixedDecoderInputs(\n inputs=tf.convert_to_tensor(\n ex.target, dtype=tf.float32),\n sequence_length=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n model = self.create_model()\n decoder_output = model.encode_decode(\n source=tf.convert_to_tensor(\n ex.source, dtype=tf.float32),\n source_len=tf.convert_to_tensor(\n ex.source_len, dtype=tf.int32),\n decoder_input_fn=decoder_input_fn,\n target_len=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n # Get a loss to optimize\n losses = seq2seq_losses.cross_entropy_sequence_loss(\n logits=decoder_output.logits,\n targets=tf.ones_like(decoder_output.predictions),\n sequence_length=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n mean_loss = tf.reduce_mean(losses)\n\n optimizer = tf.train.AdamOptimizer()\n grads_and_vars = optimizer.compute_gradients(mean_loss)\n train_op = optimizer.apply_gradients(grads_and_vars)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n _, grads_and_vars_ = sess.run([train_op, grads_and_vars])\n\n for grad, _ in grads_and_vars_:\n self.assertFalse(np.isnan(grad).any())", "def _test_gradient_against_estimate(self, dtype, random, use_gpu):\n z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)\n q = np.zeros((test_obs, 10)).astype(dtype)\n q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1\n\n logits = array_ops.placeholder(dtype, name='z')\n sparsemax_op = sparsemax(logits)\n loss_op = sparsemax_loss(logits, sparsemax_op, q)\n\n with self.test_session(use_gpu=use_gpu):\n err = gradient_checker.compute_gradient_error(\n logits, z.shape, loss_op, (test_obs,), x_init_value=z, delta=1e-9)\n\n self.assertLess(err, 1e-4)", "def gradient_check(op, *args, **kwargs):\n\n if( not 'id_list' in kwargs.keys() ):\n kwargs.update({\"id_list\":[0]})\n\n id_list = kwargs.get(\"id_list\", [0])\n\n for i in id_list:\n\n if(not isinstance(args[i], Variable)):\n raise Exception(\"input {:g} is not a variable\".format(i))\n\n if(isinstance(args[i], Variable) and not args[i].requires_grad):\n raise Exception(\"input {:g} doesn't require gradient\".format(i))\n\n nelems = args[i].numel()\n\n \"\"\" numerical gradient \"\"\"\n\n wrapper, p = numdiff_wrapper(op, args, kwargs, i)\n jacobian_numerical = numdiff_unified(wrapper, p)\n\n \"\"\" analytic gradient \"\"\"\n\n jacobian_analytic = []\n\n if(len(kwargs.keys()) > 1):\n \"\"\"function has dictionary inputs\"\"\"\n f = op(*args, **kwargs)\n else:\n f = op(*args)\n\n output_nelems = f.data.numel()\n\n for k in range(output_nelems):\n\n output_grad = torch.zeros(f.data.size())\n output_grad.view(output_nelems, 1)[k] = 1\n\n f.backward(output_grad, retain_variables=True)\n\n jacobian_analytic.append( np.copy( args[i].grad.data.view( nelems ).numpy() ) )\n\n for params_i in args:\n if(isinstance(params_i, torch.autograd.Variable) and params_i.requires_grad):\n 
params_i.grad.data.zero_()\n\n jacobian_analytic = np.asarray(jacobian_analytic)\n\n \"\"\"\n compare jacobian_analytic with jacobian_numerical\n \"\"\"\n\n if( np.allclose(jacobian_analytic, jacobian_numerical) ):\n\n print \"gradient is correct\"\n\n else:\n\n rel_error = np.linalg.norm( jacobian_analytic - jacobian_numerical ) / \\\n np.maximum( np.linalg.norm( jacobian_analytic ), np.linalg.norm( jacobian_numerical) )\n\n print 'analytic jacobian :'\n print jacobian_analytic\n\n print 'numerical jacobian :'\n print jacobian_numerical\n\n print 'jacobian difference :'\n print jacobian_analytic - jacobian_numerical\n\n print 'relative error:'\n print rel_error", "def test_custom_gradient_transform(self):\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.gradients.gradient_transform\n def my_transform(tape):\n return tape, None\n\n @qml.qnode(dev, diff_method=my_transform)\n def circuit():\n return qml.probs(wires=0)\n\n info = qml.specs(circuit)()\n assert info[\"diff_method\"] == \"test_specs.my_transform\"\n assert info[\"gradient_fn\"] == \"test_specs.my_transform\"", "def test_grad_image(func, motion, optimized, preserve_result, timage, tkernel,\n conv2dstrides):\n # TODO: Upgrade utils.py to allow simultaneous testing of uneven args.\n tfe_utils.test_rev_tensor(func, motion, optimized, preserve_result, (0,),\n timage, tkernel, conv2dstrides)\n tfe_utils.test_rev_tensor(func, motion, optimized, preserve_result, (1,),\n timage, tkernel, conv2dstrides)", "def gradient_supplied(fun, x0, jac, info):\n result = OptimizerResult()\n result.x = x0\n result.fun = 0\n info[\"has_gradient\"] = jac is not None\n\n return result", "def internal_grad_fn(unused_op, *result_grads): # pylint: disable=unused-variable\n return tape_grad_fn(*result_grads)", "def check_gradient(f, x, delta=1e-5, tol=1e-4):\n\n assert isinstance(x, np.ndarray)\n assert x.dtype == np.float\n \n orig_x = x.copy()\n #print('check_g, orig_x befor',orig_x)\n #print('check_g, x befor',x)\n #print('befor first pass in grad check')\n fx, analytic_grad = f(x)\n #print('after first pass in grad check')\n #print('check_g, orig_x after',orig_x)\n #print('check_g, x.shape',x.shape)\n #print('func',f(x)[0])\n #print('fx=',fx,'analityc_grad=',analytic_grad)\n \n assert np.all(np.isclose(orig_x, x, tol)), \"Functions shouldn't modify input variables\"\n\n assert analytic_grad.shape == x.shape\n #print('analitical grad.shape',analytic_grad.shape)\n analytic_grad = analytic_grad.copy()\n\n # We will go through every dimension of x and compute numeric\n # derivative for it\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n #print('it.shape=',it.shape)\n while not it.finished:\n ix = it.multi_index\n #print('ix',ix)\n #print('x[ix]',x[ix])\n analytic_grad_at_ix = analytic_grad[ix]\n #print('analitical_grad-at_ix',analytic_grad_at_ix)\n orig_x = x.copy()\n #print('orig_x',orig_x)\n #print('x.shape befor delta',x.shape)\n orig_x[ix]+=delta\n #print('x.shape after delta',x.shape)\n #print('orig_x[ix] delta +',orig_x[ix])\n fx_plus=f(orig_x)[0]\n #fx_plus=fx_plus_full[ix[0]]\n #print('fx__plus',fx_plus)\n orig_x = x.copy()\n orig_x[ix]-=delta\n #print('orig_x[ix] delta -',orig_x[ix])\n fx_minus=f(orig_x)[0]\n #print('fx_minus',fx_minus)\n \n divider=2*delta\n #print('divider',divider)\n #numeric_grad_at_ix = np.divide((fx_plus-fx_minus),divider)\n numeric_grad_at_ix = (fx_plus-fx_minus)/divider\n #print('numeric_grad_at_ix',numeric_grad_at_ix)\n #print('fx(ix)', fx[ix])\n\n # TODO compute value of numeric gradient of f to 
idx\n \n if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):\n print(\"Gradients are different at %s. Analytic: %2.5f, Numeric: %2.5f\" % (ix, analytic_grad_at_ix, numeric_grad_at_ix))\n return False\n\n it.iternext()\n\n print(\"Gradient check passed!\")\n return True", "def gradient_check(meta_model: MetaLearnerModel,\n training_sample: MetaTrainingSample,\n logger: Logger,\n epsilon: float = 10e-7) -> bool:\n if training_sample.final_output is None:\n raise ValueError(\"For gradient check, 'final_output' must not be None\")\n if training_sample.learner_training_batches is None:\n raise ValueError(\"For gradient check, 'learner_training_batches' must not be None\")\n if training_sample.learner_validation_batch is None:\n raise ValueError(\"For gradient check, 'learner_validation_batch' must not be None\")\n if training_sample.initial_learner_weights is None:\n raise ValueError(\"For gradient check, 'initial_learner_weights' must not be None\")\n\n state_tensors = meta_model.predict_model.state_tensors\n input_tensors = get_input_tensors(meta_model.train_model)\n learner = meta_model.predict_model.learner\n\n sess = K.get_session()\n\n # first step is to evaluate gradients of meta-learner parameters using our method\n # to evaluate gradients, I use 'train_model' version of meta-learner\n\n # initialize meta-learner (train) states\n assert len(state_tensors) == len(training_sample.initial_states)\n feed_dict = dict(zip(meta_model.states_placeholder, training_sample.initial_states))\n sess.run(meta_model.init_train_states_updates, feed_dict=feed_dict)\n\n # standardize input for current meta-training sample\n inputs = standardize_predict_inputs(meta_model.train_model, training_sample.inputs)\n\n # compute gradients on current meta-learner parameters and training sample\n feed_dict = dict(zip(input_tensors, inputs))\n feed_dict[meta_model.learner_grad_placeholder] = training_sample.learner_grads\n\n # our method of computation of meta-learner gradients - this is what i want to check here for being correct\n evaluation = sess.run(fetches=meta_model.chained_grads, feed_dict=feed_dict)\n evaluated_meta_grads = np.concatenate([grad.flatten() for grad in evaluation])\n\n # gradient check for each meta-learner weight\n # for gradient checking i use 'predict_model' version of meta-learner (which is used for training Learner)\n n_meta_learner_params = get_trainable_params_count(meta_model.train_model)\n approximated_meta_grads = np.zeros(shape=n_meta_learner_params)\n\n valid_x, valid_y = training_sample.learner_validation_batch\n learner_valid_ins = standardize_train_inputs(learner, valid_x, valid_y)\n\n # tensors used for updating meta-learner weights\n trainable_meta_weights = sess.run(meta_model.predict_model.trainable_weights)\n meta_weights_placeholder = [tf.placeholder(shape=w.get_shape(), dtype=tf.float32)\n for w in meta_model.predict_model.trainable_weights]\n meta_weights_updates = [tf.assign(w, new_w) for w, new_w in zip(meta_model.predict_model.trainable_weights,\n meta_weights_placeholder)]\n\n def calculate_loss(new_weights):\n # update weights of meta-learner ('predict_model')\n f_dict = dict(zip(meta_weights_placeholder, new_weights))\n sess.run(meta_weights_updates, feed_dict=f_dict)\n\n # initialize learner parameters\n learner.set_weights(training_sample.initial_learner_weights)\n\n # initialize meta-learner (predict) states\n f_dict = dict(zip(meta_model.states_placeholder, training_sample.initial_states))\n sess.run(meta_model.init_predict_states_updates, 
feed_dict=f_dict)\n\n # train learner using same batches as in the sample (meta 'predict_model' is used here)\n for x, y in training_sample.learner_training_batches:\n learner.train_on_batch(x, y)\n\n # calculate new learner loss on validation set after training\n f_dict = dict(zip(meta_model.predict_model.learner_inputs, learner_valid_ins))\n new_loss = sess.run(fetches=[learner.total_loss], feed_dict=f_dict)[0]\n\n return new_loss\n\n grad_ind = 0\n for i, w in enumerate(trainable_meta_weights):\n # set meta-learner ('predict_model') params to new, where only one weight is changed by some epsilon\n if w.ndim == 2:\n for j in range(w.shape[0]):\n for k in range(w.shape[1]):\n changed_meta_learner_weights = [w.copy() for w in trainable_meta_weights]\n changed_meta_learner_weights[i][j][k] += epsilon\n loss1 = calculate_loss(changed_meta_learner_weights)\n changed_meta_learner_weights[i][j][k] -= 2 * epsilon\n loss2 = calculate_loss(changed_meta_learner_weights)\n approximated_meta_grads[grad_ind] = (loss1 - loss2) / (2 * epsilon)\n grad_ind += 1\n elif w.ndim == 1:\n for j in range(w.shape[0]):\n changed_meta_learner_weights = [w.copy() for w in trainable_meta_weights]\n changed_meta_learner_weights[i][j] += epsilon\n loss1 = calculate_loss(changed_meta_learner_weights)\n changed_meta_learner_weights[i][j] -= 2 * epsilon\n loss2 = calculate_loss(changed_meta_learner_weights)\n approximated_meta_grads[grad_ind] = (loss1 - loss2) / (2 * epsilon)\n grad_ind += 1\n else:\n raise ValueError(\"Only weights with ndim == 1 or ndim == 2 are supported in grad check\")\n\n approximated_grad_diff = np.linalg.norm(approximated_meta_grads - evaluated_meta_grads) / \\\n (np.linalg.norm(approximated_meta_grads) + np.linalg.norm(evaluated_meta_grads))\n\n if approximated_grad_diff > epsilon:\n logger.error(\"GRAD-CHECK: (epsilon={}, dist={})!\".format(epsilon, approximated_grad_diff))\n return False\n else:\n logger.debug(\"Grad-Check passed. 
(epsilon={}, dist={})\".format(epsilon, approximated_grad_diff))\n\n return True", "def test_validation(self):\n\n gct = OCIO.GradingRGBCurveTransform(OCIO.GRADING_LOG)\n gct.validate()\n\n # 3rd control point x is lower than 2nd control point x.\n vals = OCIO.GradingRGBCurve(OCIO.GRADING_LOG)\n vals.red = OCIO.GradingBSplineCurve([0, 0, 0.5, 0.2, 0.2, 0.5, 1, 1])\n \n with self.assertRaises(OCIO.Exception):\n gct.setValue(vals);", "def is_bad_grad(grad_output):\n grad_output = grad_output.data\n return grad_output.ne(grad_output).any() or grad_output.gt(1e6).any()", "def test_gradient_exception_on_sample(self):\n dev = qml.device(\"default.qubit\", wires=2, shots=1000)\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))\n\n with pytest.raises(\n qml.QuantumFunctionError,\n match=\"Circuits that include sampling can not be differentiated.\",\n ):\n grad_fn = autograd.jacobian(circuit)\n grad_fn(1.0)", "def test_grad_with_backward_mode(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n expected_results = jax.numpy.array([-0.3875172, -0.18884787, -0.38355705])\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)[0]\n return res\n\n cost = jax.jit(cost)\n\n results = jax.grad(cost)(params, cache=None)\n for r, e in zip(results, expected_results):\n assert jax.numpy.allclose(r, e, atol=1e-7)", "def test_hadamard_grad_raises():\n tape = qml.tape.QuantumScript()\n with pytest.raises(NotImplementedError, match=\"The Hadamard test gradient\"):\n qml.gradients.hadamard_grad(tape)", "def check_grad(f, args, kw_args=None, rtol=1e-8):\n # Default to no keyword arguments.\n if kw_args is None:\n kw_args = {}\n\n # Get the associated function in LAB.\n lab_f = getattr(B, f.__name__)\n\n def create_f_i(i, args_):\n # Create a function that only varies the `i`th argument.\n def f_i(x):\n return B.mean(lab_f(*(args_[:i] + (x,) + args_[i + 1 :]), **kw_args))\n\n return f_i\n\n # Walk through the arguments.\n for i in range(len(args)):\n # Numerically compute gradient.\n f_i = create_f_i(i, args)\n numerical_grad = gradient(f_i)(args[i])\n\n # Check AutoGrad gradient.\n autograd_grad = grad(f_i)(args[i])\n approx(numerical_grad, autograd_grad, rtol=rtol)\n\n # Check TensorFlow gradient.\n tf_args = tuple([as_tf(arg) for arg in args])\n f_i = tf.function(create_f_i(i, tf_args), autograph=False)\n with tf.GradientTape() as t:\n t.watch(tf_args[i])\n tf_grad = t.gradient(f_i(tf_args[i]), tf_args[i]).numpy()\n approx(numerical_grad, tf_grad, rtol=rtol)\n\n # Check PyTorch gradient.\n torch_args = tuple([as_torch(arg, grad=False) for arg in args])\n f_i = torch.jit.trace(create_f_i(i, torch_args), torch_args[i])\n arg = torch_args[i].requires_grad_(True)\n f_i(arg).backward()\n approx(numerical_grad, arg.grad, rtol=rtol)\n\n # Check JAX gradient.\n jax_args = tuple([jnp.asarray(arg) for arg in args])\n f_i = create_f_i(i, jax_args)\n jax_grad = jax.jit(jax.grad(f_i))(jax_args[i])\n approx(numerical_grad, jax_grad, rtol=rtol)" ]
[ "0.7141852", "0.6999792", "0.6979812", "0.6958912", "0.6902775", "0.6743894", "0.6662304", "0.6653443", "0.65697926", "0.6568476", "0.6530911", "0.650202", "0.6501163", "0.6478995", "0.6458334", "0.6353595", "0.63159007", "0.62760735", "0.62685746", "0.6248912", "0.61487573", "0.61441845", "0.6138951", "0.6083854", "0.60796547", "0.6070899", "0.6066785", "0.6064523", "0.6014477", "0.59946907" ]
0.7932203
0
Test that an error is raised if the interface is unknown
def test_unknown_interface(self):
    a = jax.numpy.array([0.1, 0.2])

    dev = qml.device("default.qubit", wires=1)

    def cost(a, device):
        with qml.queuing.AnnotatedQueue() as q:
            qml.RY(a[0], wires=0)
            qml.RX(a[1], wires=0)
            qml.expval(qml.PauliZ(0))

        tape = qml.tape.QuantumScript.from_queue(q)

        return execute(
            [tape],
            device,
            gradient_fn=param_shift,
            interface="None",
        )[0]

    with pytest.raises(ValueError, match="Unknown interface"):
        cost(a, device=dev)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_device_intf():\n\n cable = Cable(device_a_name=\"deva\", interface_a_name=\"inta\", device_z_name=\"devb\", interface_z_name=\"intb\")\n assert cable.get_device_intf(\"a\") == (\"deva\", \"inta\")\n assert cable.get_device_intf(\"z\") == (\"devb\", \"intb\")\n\n with pytest.raises(ValueError):\n cable.get_device_intf(\"v\")", "def test_unknown_service(self):\n raise NotImplementedError # FIXME", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n Sample('Not_a_Sample', self.sample_template)", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n SampleTemplate(2)", "def _validate_interface_exists(params, error_callback):\n local_interface = params['local_interface']\n net_override = params['net_config_override']\n if not net_override and local_interface not in netifaces.interfaces():\n message = ('Invalid local_interface specified. %s is not available.' %\n local_interface)\n error_callback(message)", "def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisInput, 'error')", "def test_doesnt_implement_can_handle(self):\r\n self.assertRaises(NotImplementedError, Importer.can_handle, \"\")", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepSample('Not_a_Sample', self.prep_template)", "def is_interface_type(self):\n raise exceptions.NotImplementedError()", "def test_unknown_resource_under_service(self):\n raise NotImplementedError # FIXME", "def test_missing_image():\n\n model = Instafilter(\"Lo-Fi\")\n\n with pytest.raises(OSError):\n model(\"xxx\")", "async def test_unknown_error(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n user_input = MOCK_USER_INPUT.copy()\n with patch(\n \"homeassistant.components.sonarr.config_flow.Sonarr.update\",\n side_effect=Exception,\n ):\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={CONF_SOURCE: SOURCE_USER},\n data=user_input,\n )\n\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"unknown\"", "def test_missing_model():\n\n with pytest.raises(KeyError):\n Instafilter(\"xxx\")", "def test_unavailable(self):\n feature_guard = _make_requires(False, \"Error text\")\n\n @feature_guard\n def inner(): # pragma: nocover\n pytest.fail(\"Should not be called\")\n\n with pytest.raises(NotImplementedError) as e:\n inner()\n\n assert \"Error text\" in str(e.value)", "async def test_endpoint_has_access_interface_error(self):\n with self.patch_handle_dropped_connection:\n with self.assertRaises(aiohttp.web.HTTPServiceUnavailable):\n await has_access_handler(self.mock_request)\n self.handle_dropped_connection_mock.assert_called_once()", "def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisOutput, 'error')", "def test_pull_error(self):\n raise NotImplementedError", "def test_prevent_wrong_type(self):\n self.assertRaises(cinv.host.Error, self.wrong_host_type)", "def test__init__raise_exception(self):\n self.assertRaises(TypeError, MasterNodeInterface)", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate(2)", "def test_error1(self):\n try:\n api = self.load_api_description('error1.json')\n self.fail('No error thrown for undefined type')\n except APIDescriptionException:\n pass", "def test_must_provide_interface(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n m = model.factory()\n f = model.factory(cls=model.Foo)\n\n # It's OK to poke a foo.\n state_changer = 
request.state_changer\n self.assertTrue(state_changer.can_perform(f, a.POKE))\n\n # But not a model.\n self.assertFalse(state_changer.can_perform(m, a.POKE))", "def test_get_simulator_device_type_by_platform_not_found(self, _, _2):\n with self.assertRaises(test_runner.SimulatorNotFoundError) as context:\n iossim_util.get_simulator_device_type_by_platform(\n iossim_util.get_simulator_list(), 'iPhone XI')\n expected_message = ('Simulator does not exist: Not found device '\n '\"iPhone XI\" in devicetypes')\n self.assertTrue(expected_message in str(context.exception))", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n ExecutionExitCodeController(ERROR_RETURN_CODE, ERROR_MESSAGE)\\\n .check_if_error()", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n BaseErrorExitCodeController(ERROR_RETURN_CODE, ERROR_MESSAGE)\\\n .check_if_error()", "async def test_device_unknown_error(hass):\n with patch.object(axis.device, \"get_device\", side_effect=Exception):\n await setup_axis_integration(hass)\n assert hass.data[AXIS_DOMAIN] == {}", "def test_not_implemented(self):\n\n test_handler = EventHandler(self.mock_interruption_event)\n\n with self.assertRaises(NotImplementedError):\n test_handler.handle()", "def not_existing_error_test(self):\n client = TestClient()\n error = client.run(\"upload some_nonsense\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: No packages found matching pattern 'some_nonsense'\",\n client.user_io.out)", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n SshErrorExitCodeController(255, ERROR_MESSAGE)\\\n .check_if_error()", "def test_raises_when_accessing_none_implementation(self):\n\n class APIObj(\n platform.APIObject,\n collections.namedtuple(\"APIObj\", \"implementation\"),\n ):\n def __new__(cls):\n return super().__new__(cls, implementation=None)\n\n obj = APIObj()\n\n with pytest.raises(AttributeError) as exc_info:\n obj.implementation # pylint: disable=pointless-statement\n\n assert \"invalid access to 'implementation': not initialized\" in str(\n exc_info.value\n )" ]
[ "0.67788285", "0.6699145", "0.66089153", "0.65495366", "0.65344524", "0.647308", "0.6403296", "0.63941544", "0.636498", "0.6334388", "0.63309187", "0.62822324", "0.62677455", "0.623547", "0.6222963", "0.6220105", "0.6214236", "0.62119627", "0.62050354", "0.6197082", "0.6190831", "0.61269134", "0.6115843", "0.6080223", "0.6078598", "0.6062787", "0.60573876", "0.6052869", "0.6037536", "0.60311234" ]
0.68783915
0
Test that grad on execution uses the `device.execute_and_gradients` pathway
def test_grad_on_execution(self, mocker):
    dev = qml.device("default.qubit", wires=1)
    spy = mocker.spy(dev, "execute_and_gradients")

    def cost(a):
        with qml.queuing.AnnotatedQueue() as q:
            qml.RY(a[0], wires=0)
            qml.RX(a[1], wires=0)
            qml.expval(qml.PauliZ(0))

        tape = qml.tape.QuantumScript.from_queue(q)

        return execute(
            [tape],
            dev,
            gradient_fn="device",
            gradient_kwargs={
                "method": "adjoint_jacobian",
                "use_device_state": True,
            },
        )[0]

    a = jax.numpy.array([0.1, 0.2])
    jax.jit(cost)(a)

    # adjoint method only performs a single device execution, but gets both result and gradient
    assert dev.num_executions == 1
    spy.assert_called()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_gradients_on_execution(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy_execute = mocker.spy(qml.devices.DefaultQubit, \"batch_execute\")\n spy_gradients = mocker.spy(qml.devices.DefaultQubit, \"gradients\")\n\n def cost(a):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=\"device\",\n grad_on_execution=False,\n gradient_kwargs={\"method\": \"adjoint_jacobian\"},\n )[0]\n\n a = jax.numpy.array([0.1, 0.2])\n jax.jit(cost)(a)\n\n assert dev.num_executions == 1\n spy_execute.assert_called()\n spy_gradients.assert_not_called()\n\n jax.grad(cost)(a)\n spy_gradients.assert_called()", "def test_incorrect_gradients_on_execution(self):\n a = jax.numpy.array([0.1, 0.2])\n\n dev = qml.device(\"default.qubit\", wires=1)\n\n def cost(a, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n device,\n gradient_fn=param_shift,\n grad_on_execution=True,\n )[0]\n\n with pytest.raises(\n ValueError, match=\"Gradient transforms cannot be used with grad_on_execution=True\"\n ):\n jax.grad(cost)(a, device=dev)", "def test_gradient_transform(self):\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev, diff_method=qml.gradients.param_shift)\n def circuit():\n return qml.probs(wires=0)\n\n with pytest.warns(UserWarning, match=\"gradient of a tape with no trainable parameters\"):\n info = qml.specs(circuit)()\n assert info[\"diff_method\"] == \"pennylane.gradients.parameter_shift.param_shift\"\n assert info[\"gradient_fn\"] == \"pennylane.gradients.parameter_shift.param_shift\"", "def test_grad_with_backward_mode(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n expected_results = jax.numpy.array([-0.3875172, -0.18884787, -0.38355705])\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)[0]\n return res\n\n cost = jax.jit(cost)\n\n results = jax.grad(cost)(params, cache=None)\n for r, e in zip(results, expected_results):\n assert jax.numpy.allclose(r, e, atol=1e-7)", "def test_gradients(self):\n ex = self._create_example()\n decoder_input_fn = FixedDecoderInputs(\n inputs=tf.convert_to_tensor(\n ex.target, dtype=tf.float32),\n sequence_length=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n model = self.create_model()\n decoder_output = model.encode_decode(\n source=tf.convert_to_tensor(\n ex.source, dtype=tf.float32),\n source_len=tf.convert_to_tensor(\n ex.source_len, dtype=tf.int32),\n decoder_input_fn=decoder_input_fn,\n target_len=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n # Get a loss to optimize\n losses = seq2seq_losses.cross_entropy_sequence_loss(\n logits=decoder_output.logits,\n targets=tf.ones_like(decoder_output.predictions),\n sequence_length=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n mean_loss = tf.reduce_mean(losses)\n\n optimizer = tf.train.AdamOptimizer()\n grads_and_vars = optimizer.compute_gradients(mean_loss)\n train_op = optimizer.apply_gradients(grads_and_vars)\n\n with self.test_session() 
as sess:\n sess.run(tf.global_variables_initializer())\n _, grads_and_vars_ = sess.run([train_op, grads_and_vars])\n\n for grad, _ in grads_and_vars_:\n self.assertFalse(np.isnan(grad).any())", "def test_gradients(self):\n # Load from files.\n flow_ab = read_flow_file('pwcnet/warp/test_data/flow_ab.flo')\n img_b = read_image('pwcnet/warp/test_data/image_b.png', as_float=True)\n\n H = img_b.shape[0]\n W = img_b.shape[1]\n C = img_b.shape[2]\n img_shape = [None, H, W, C]\n flow_shape = [None, H, W, 2]\n input = tf.placeholder(shape=img_shape, dtype=tf.float32)\n flow_tensor = tf.placeholder(shape=flow_shape, dtype=tf.float32)\n warped_tensor = backward_warp(input, flow_tensor)\n\n grad_op = tf.gradients(warped_tensor, [input, flow_tensor])\n grads = self.sess.run(grad_op, feed_dict={input: [img_b], flow_tensor: [flow_ab]})\n for gradient in grads:\n self.assertNotAlmostEqual(np.sum(gradient), 0.0)", "def test_gradient_supported(self):\n # gradient supported here\n wrapped = EfficientSU2(2) # a circuit wrapped into a big instruction\n plain = wrapped.decompose() # a plain circuit with already supported instructions\n\n # gradients not supported on the following circuits\n x = Parameter(\"x\")\n duplicated = QuantumCircuit(2)\n duplicated.rx(x, 0)\n duplicated.rx(x, 1)\n\n needs_chainrule = QuantumCircuit(2)\n needs_chainrule.rx(2 * x, 0)\n\n custom_gate = WhatAmI(x)\n unsupported = QuantumCircuit(2)\n unsupported.append(custom_gate, [0, 1])\n\n tests = [\n (wrapped, True), # tuple: (circuit, gradient support)\n (plain, True),\n (duplicated, False),\n (needs_chainrule, False),\n (unsupported, False),\n ]\n\n # used to store the info if a gradient callable is passed into the\n # optimizer of not\n info = {\"has_gradient\": None}\n optimizer = partial(gradient_supplied, info=info)\n\n sampler = Sampler()\n estimator = Estimator()\n fidelity_primitive = ComputeUncompute(sampler)\n\n pvqd = PVQD(\n fidelity=fidelity_primitive,\n ansatz=None,\n initial_parameters=np.array([]),\n estimator=estimator,\n optimizer=optimizer,\n )\n problem = TimeEvolutionProblem(self.hamiltonian, time=0.01)\n for circuit, expected_support in tests:\n with self.subTest(circuit=circuit, expected_support=expected_support):\n pvqd.ansatz = circuit\n pvqd.initial_parameters = np.zeros(circuit.num_parameters)\n _ = pvqd.evolve(problem)\n self.assertEqual(info[\"has_gradient\"], expected_support)", "def internal_grad_fn(unused_op, *result_grads): # pylint: disable=unused-variable\n return tape_grad_fn(*result_grads)", "def testNestedFunctionGradientCall(self):\n check_numerics_callback.enable_check_numerics()\n\n x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32)\n\n @def_function.function\n def asinp1(x):\n # asin()'s gradient overflows at the value close to 1.0.\n return math_ops.asin(x) + 1.0\n\n @def_function.function\n def loss(x):\n return math_ops.square(asinp1(x))\n\n with backprop.GradientTape() as tape:\n tape.watch(x)\n y = loss(x)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: self.evaluate(tape.gradient(y, x)))\n self.assertTrue(re.search(r\"gradient\", message))", "def test_grad(self):\r\n for shp0, shp1 in [((1,), (2,)),\r\n ((3,), (1,)),\r\n ((1,), (1,)),\r\n ((3,), (2,)),\r\n ((3, 2), (1, 1)),\r\n ((3, 2), (1, 4)),\r\n ((3, 2), (4, 1)),\r\n ((3, 2), (4, 5)),\r\n ((1, 2), (4, 5)),\r\n ((3, 1), (4, 5)),\r\n ((1, 1), (4, 5)),\r\n ((1, 1), (1, 1)),\r\n ]:\r\n data0 = numpy.random.rand(*shp0).astype(floatX)\r\n data1 = numpy.random.rand(*shp1).astype(floatX)\r\n 
utt.verify_grad(tensor.outer, [data0, data1])", "def test_empty_circuit_grad(self, differentiator, op):\n differentiator.refresh()\n op = differentiator.generate_differentiable_op(analytic_op=op)\n circuit = tf.convert_to_tensor([], dtype=tf.string)\n psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n\n # Calculate tfq gradient.\n symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\n symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)\n with tf.GradientTape() as g:\n g.watch(symbol_values_tensor)\n expectations = op(circuit, symbol_names_tensor,\n symbol_values_tensor, psums)\n grads = g.gradient(expectations, symbol_values_tensor)\n self.assertShapeEqual(grads.numpy(),\n tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))", "def run_check_grad(hyperparameters):\n # This creates small random data with 20 examples and\n # 10 dimensions and checks the gradient on that data.\n num_examples = 20\n num_dimensions = 10\n\n weights = np.random.randn(num_dimensions + 1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = np.random.rand(num_examples, 1)\n\n diff = check_grad(logistic,\n weights,\n 0.001,\n data,\n targets,\n hyperparameters)\n\n print(\"diff =\", diff)", "def test_grad_test_values(self):\r\n backup = theano.config.compute_test_value\r\n theano.config.compute_test_value = 'raise'\r\n try:\r\n x = tensor.scalar('x')\r\n x.tag.test_value = 1\r\n # Used to crash due to undefined test value.\r\n tensor.grad(ifelse(0, x, x), x)\r\n finally:\r\n theano.config.compute_test_value = backup", "def test_gradients_check(self):\n model = PoincareModel(self.data, negative=3)\n try:\n model.train(epochs=1, batch_size=1, check_gradients_every=1)\n except Exception as e:\n self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))", "def test_gradients_pytree(self):\n def f(x):\n # x: dict(x=[b, 3, 4])\n # res: dict(res=[b, 3, 4])\n return dict(res=x[\"x\"] * 2.)\n\n f_tf = self.CheckShapePolymorphism(\n f,\n input_signature=[dict(x=tf.TensorSpec([None, 3, 4]))],\n in_shapes=[dict(x=(\"b, 3, 4\"))],\n expected_output_signature=None)\n\n x = dict(x=np.ones((2, 3, 4), dtype=np.float32))\n xv = tf.Variable(x[\"x\"], dtype=np.float32)\n def tf_value_and_grad(xv):\n # xv: [b, 3, 4]\n # res_value: dict(res=[b, 3, 4])\n # res_grad: dict(grad=[b, 3, 4])\n with tf.GradientTape() as tape:\n tape.watch(xv)\n res_tf = f_tf(dict(x=xv))\n res_tf_grad = tape.gradient(res_tf, xv)\n return res_tf, dict(grad=res_tf_grad)\n\n res_tf, res_tf_grad = tf_value_and_grad(xv)\n # Now use TF tracing for the gradient\n tf_grad = tf.function(tf_value_and_grad, autograph=False).get_concrete_function(\n tf.TensorSpec([None, 3, 4]))\n # The shape of the value\n self.assertEqual((None, 3, 4), tuple(tf_grad.output_shapes[0][\"res\"]))\n # The shape of the gradient should match the input\n self.assertEqual((None, 3, 4), tuple(tf_grad.output_shapes[1][\"grad\"]))", "def test_gradient_convergence(self):\n pass", "def gradient_check(op, *args, **kwargs):\n\n if( not 'id_list' in kwargs.keys() ):\n kwargs.update({\"id_list\":[0]})\n\n id_list = kwargs.get(\"id_list\", [0])\n\n for i in id_list:\n\n if(not isinstance(args[i], Variable)):\n raise Exception(\"input {:g} is not a variable\".format(i))\n\n if(isinstance(args[i], Variable) and not args[i].requires_grad):\n raise Exception(\"input {:g} doesn't require gradient\".format(i))\n\n nelems = args[i].numel()\n\n \"\"\" numerical gradient \"\"\"\n\n wrapper, p = numdiff_wrapper(op, args, kwargs, 
i)\n jacobian_numerical = numdiff_unified(wrapper, p)\n\n \"\"\" analytic gradient \"\"\"\n\n jacobian_analytic = []\n\n if(len(kwargs.keys()) > 1):\n \"\"\"function has dictionary inputs\"\"\"\n f = op(*args, **kwargs)\n else:\n f = op(*args)\n\n output_nelems = f.data.numel()\n\n for k in range(output_nelems):\n\n output_grad = torch.zeros(f.data.size())\n output_grad.view(output_nelems, 1)[k] = 1\n\n f.backward(output_grad, retain_variables=True)\n\n jacobian_analytic.append( np.copy( args[i].grad.data.view( nelems ).numpy() ) )\n\n for params_i in args:\n if(isinstance(params_i, torch.autograd.Variable) and params_i.requires_grad):\n params_i.grad.data.zero_()\n\n jacobian_analytic = np.asarray(jacobian_analytic)\n\n \"\"\"\n compare jacobian_analytic with jacobian_numerical\n \"\"\"\n\n if( np.allclose(jacobian_analytic, jacobian_numerical) ):\n\n print \"gradient is correct\"\n\n else:\n\n rel_error = np.linalg.norm( jacobian_analytic - jacobian_numerical ) / \\\n np.maximum( np.linalg.norm( jacobian_analytic ), np.linalg.norm( jacobian_numerical) )\n\n print 'analytic jacobian :'\n print jacobian_analytic\n\n print 'numerical jacobian :'\n print jacobian_numerical\n\n print 'jacobian difference :'\n print jacobian_analytic - jacobian_numerical\n\n print 'relative error:'\n print rel_error", "def run_check_grad(hyperparameters):\n\n # This creates small random data with 7 examples and\n # 9 dimensions and checks the gradient on that data.\n num_examples = 7\n num_dimensions = 9\n\n weights = np.random.randn(num_dimensions+1, 1)\n data = np.random.randn(num_examples, num_dimensions)\n targets = (np.random.rand(num_examples, 1) > 0.5).astype(int)\n\n diff = check_grad(logistic, # function to check\n weights,\n 0.001, # perturbation\n data,\n targets,\n hyperparameters)\n\n print \"diff =\", diff", "def test_grad_writeback(self):\n self.run_subtests(\n {\n \"change_first_weight_grad\": [False, True],\n \"change_data\": [False, True], # change `.data` vs. variable itself\n \"set_to_none\": [False, True],\n },\n self._test_grad_writeback,\n )", "def get_apply_gradients_ops_func():\n return [opt.apply_gradients(grads)]", "def compute_gradients(self, f, args, grad_ys=None):\n if tf.executing_eagerly():\n grad_fn = tf.contrib.eager.gradients_function(f)\n if grad_ys is not None:\n grads = grad_fn(*args, dy=grad_ys)\n else:\n grads = grad_fn(*args)\n else:\n res = f(*args)\n grads = tf.gradients(res, args, grad_ys=grad_ys)\n return self.evaluate(grads)", "def test_backprop(self, differentiator, op):\n differentiator.refresh()\n op = differentiator.generate_differentiable_op(analytic_op=op)\n\n def exact_grad(theta):\n new_theta = 2 * np.pi * theta\n return -2 * np.pi * np.sin(new_theta) * np.exp(np.cos(new_theta))\n\n bit = cirq.GridQubit(0, 0)\n circuits = util.convert_to_tensor(\n [cirq.Circuit(cirq.X(bit)**sympy.Symbol('rx')) for _ in range(2)])\n pstring = util.convert_to_tensor([[\n cirq.PauliSum.from_pauli_strings([cirq.PauliString({bit: cirq.Z})])\n ] for _ in circuits])\n base_rot_angles = tf.constant([[0.25], [0.125]])\n with tf.GradientTape() as g:\n g.watch(base_rot_angles)\n input_angles = 2 * base_rot_angles\n exp_res = tf.exp(\n op(circuits, tf.convert_to_tensor(['rx']), input_angles,\n pstring))\n\n grad = g.gradient(exp_res, base_rot_angles)\n exact = [[exact_grad(0.25)], [exact_grad(0.125)]]\n\n # will this be too tight? 
time will tell.\n self.assertAllClose(exact, grad.numpy(), rtol=0.01, atol=0.01)", "def compute_gradients(self):\n raise NotImplementedError()", "def test_gradients_update(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n # There should be no calculated gradient yet.\n for p in self.model.parameters():\n self.assertIsNone(p.grad)\n for p in self.actor_model.parameters():\n self.assertIsNone(p.grad)\n\n polybeast.learn(*self.learn_args)\n\n # Check that every parameter for the learner model has a gradient, and that\n # there is at least some non-zero gradient for each set of paramaters.\n for p in self.model.parameters():\n self.assertIsNotNone(p.grad)\n self.assertFalse(torch.equal(p.grad, torch.zeros_like(p.grad)))\n\n # Check that the actor model has no gradients associated with it.\n for p in self.actor_model.parameters():\n self.assertIsNone(p.grad)", "def test_parameter_gradients(net, X, Y, name, p, grad_p, loss, index):\n eps = 1e-7\n backup = p[index]\n p[index] += eps\n A1 = net.forward(X)\n loss1 = net.loss(Y, A1[-1])\n ratio = (loss1 - loss) / eps\n assert np.isclose(grad_p[index], ratio)\n p[index] = backup", "def _evaluate_gradient(self, **variables):\n pass", "def has_gradients(self) -> bool:\n return False", "def gradcheck(func, inputs, eps=1e-6, atol=1e-5, rtol=1e-3, raise_exception=True):\n tupled_inputs = _as_tuple(inputs)\n\n # Make sure that gradients are saved for all inputs\n any_input_requiring_grad = False\n for inp in tupled_inputs:\n if isinstance(inp, tf.Tensor):\n if _requires_grad(inp):\n if inp.dtype != tf.float64:\n warnings.warn(\n 'At least one of the inputs that requires gradient '\n 'is not of double precision floating point. '\n 'This check will likely fail if all the inputs are '\n 'not of double precision floating point. 
')\n any_input_requiring_grad = True\n # inp.retain_grad()\n if not any_input_requiring_grad:\n raise ValueError(\n 'gradcheck expects at least one input tensor to require gradient, '\n 'but none of the them have requires_grad=True.')\n\n output = _differentiable_outputs(func(*tupled_inputs))\n\n def fail_test(msg):\n if raise_exception:\n raise RuntimeError(msg)\n return False\n\n for i, o in enumerate(output):\n if not _requires_grad(o):\n continue\n\n def fn(input):\n return _as_tuple(func(*input))[i]\n\n analytical, reentrant, correct_grad_sizes = get_analytical_jacobian(tupled_inputs, o)\n numerical = get_numerical_jacobian(fn, tupled_inputs, eps=eps)\n\n if not correct_grad_sizes:\n return fail_test('Analytical gradient has incorrect size')\n\n for j, (a, n) in enumerate(zip(analytical, numerical)):\n if _numel(a) != 0 or _numel(n) != 0:\n if not allclose(a, n, rtol, atol):\n return fail_test('Jacobian mismatch for output %d with respect to input %d,\\n'\n 'numerical:%s\\nanalytical:%s\\n' % (i, j, n, a))\n\n if not reentrant:\n return fail_test('Backward is not reentrant, i.e., running backward with same '\n 'input and grad_output multiple times gives different values, '\n 'although analytical gradient matches numerical gradient')\n\n # check if the backward multiplies by grad_output\n with tf.GradientTape(persistent=True) as tape:\n output = _differentiable_outputs(func(*tupled_inputs))\n\n if any([_requires_grad(o) for o in output]):\n diff_input_list = list(iter_tensors(tupled_inputs, True))\n grads_input = tape.gradient(output, diff_input_list, [tf.zeros_like(o) for o in output])\n\n if not len(grads_input) == 0:\n raise RuntimeError(\"no Tensors requiring grad found in input\")\n\n # grads_input = torch.autograd.grad(output, diff_input_list, [torch.zeros_like(o) for o in output],\n # allow_unused=True)\n for gi, i in zip(grads_input, diff_input_list):\n if gi is None:\n continue\n if not tf.reduce_all(tf.equal(gi, 0)):\n return fail_test('backward not multiplied by grad_output')\n if gi.dtype != i.dtype:\n return fail_test(\"grad is incorrect type\")\n if gi.shape != i.shape:\n return fail_test('grad is incorrect size')\n\n return True", "def test_custom_gradient_transform(self):\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.gradients.gradient_transform\n def my_transform(tape):\n return tape, None\n\n @qml.qnode(dev, diff_method=my_transform)\n def circuit():\n return qml.probs(wires=0)\n\n info = qml.specs(circuit)()\n assert info[\"diff_method\"] == \"test_specs.my_transform\"\n assert info[\"gradient_fn\"] == \"test_specs.my_transform\"", "def test_grad_binary(func, motion, optimized, preserve_result, a, b):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, b)" ]
[ "0.7481806", "0.71142423", "0.6695256", "0.65490276", "0.64862365", "0.63315445", "0.62705255", "0.62506074", "0.6239375", "0.6215396", "0.619238", "0.61799407", "0.61622083", "0.60890555", "0.6043788", "0.6037538", "0.6016332", "0.60110986", "0.59955305", "0.5993162", "0.59500206", "0.59136873", "0.5851901", "0.58362865", "0.5809331", "0.5799229", "0.57969415", "0.57702595", "0.57637656", "0.57402956" ]
0.7644838
0
Test that no grad on execution uses the `device.batch_execute` and `device.gradients` pathway
def test_no_gradients_on_execution(self, mocker):
    dev = qml.device("default.qubit", wires=1)
    spy_execute = mocker.spy(qml.devices.DefaultQubit, "batch_execute")
    spy_gradients = mocker.spy(qml.devices.DefaultQubit, "gradients")

    def cost(a):
        with qml.queuing.AnnotatedQueue() as q:
            qml.RY(a[0], wires=0)
            qml.RX(a[1], wires=0)
            qml.expval(qml.PauliZ(0))

        tape = qml.tape.QuantumScript.from_queue(q)

        return execute(
            [tape],
            dev,
            gradient_fn="device",
            grad_on_execution=False,
            gradient_kwargs={"method": "adjoint_jacobian"},
        )[0]

    a = jax.numpy.array([0.1, 0.2])
    jax.jit(cost)(a)

    assert dev.num_executions == 1
    spy_execute.assert_called()
    spy_gradients.assert_not_called()

    jax.grad(cost)(a)
    spy_gradients.assert_called()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_incorrect_gradients_on_execution(self):\n a = jax.numpy.array([0.1, 0.2])\n\n dev = qml.device(\"default.qubit\", wires=1)\n\n def cost(a, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n device,\n gradient_fn=param_shift,\n grad_on_execution=True,\n )[0]\n\n with pytest.raises(\n ValueError, match=\"Gradient transforms cannot be used with grad_on_execution=True\"\n ):\n jax.grad(cost)(a, device=dev)", "def test_grad_on_execution(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(dev, \"execute_and_gradients\")\n\n def cost(a):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=\"device\",\n gradient_kwargs={\n \"method\": \"adjoint_jacobian\",\n \"use_device_state\": True,\n },\n )[0]\n\n a = jax.numpy.array([0.1, 0.2])\n jax.jit(cost)(a)\n\n # adjoint method only performs a single device execution, but gets both result and gradient\n assert dev.num_executions == 1\n spy.assert_called()", "def test_gradient_transform(self):\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev, diff_method=qml.gradients.param_shift)\n def circuit():\n return qml.probs(wires=0)\n\n with pytest.warns(UserWarning, match=\"gradient of a tape with no trainable parameters\"):\n info = qml.specs(circuit)()\n assert info[\"diff_method\"] == \"pennylane.gradients.parameter_shift.param_shift\"\n assert info[\"gradient_fn\"] == \"pennylane.gradients.parameter_shift.param_shift\"", "def test_empty_circuit_grad(self, differentiator, op):\n differentiator.refresh()\n op = differentiator.generate_differentiable_op(analytic_op=op)\n circuit = tf.convert_to_tensor([], dtype=tf.string)\n psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n\n # Calculate tfq gradient.\n symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\n symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)\n with tf.GradientTape() as g:\n g.watch(symbol_values_tensor)\n expectations = op(circuit, symbol_names_tensor,\n symbol_values_tensor, psums)\n grads = g.gradient(expectations, symbol_values_tensor)\n self.assertShapeEqual(grads.numpy(),\n tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))", "def test_gradients_check(self):\n model = PoincareModel(self.data, negative=3)\n try:\n model.train(epochs=1, batch_size=1, check_gradients_every=1)\n except Exception as e:\n self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))", "def test_no_trainable_parameters(self, mocker):\r\n spy = mocker.spy(qml.gradients.finite_difference, \"generate_shifted_tapes\")\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n tape.trainable_params = {}\r\n\r\n tapes, fn = finite_diff(tape)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.size == 0\r\n assert np.all(res == np.array([[]]))\r\n\r\n spy.assert_not_called()\r\n assert len(tapes) == 0", "def test_wrong_gradients_raises_assertion(self):\n model = PoincareModel(self.data, negative=3)\n model._loss_grad = Mock(return_value=np.zeros((2 + model.negative, model.size)))\n with self.assertRaises(AssertionError):\n model.train(epochs=1, 
batch_size=1, check_gradients_every=1)", "def test_empty_circuit_sampled_grad(self, differentiator, op):\n differentiator.refresh()\n op = differentiator.generate_differentiable_op(sampled_op=op)\n circuit = tf.convert_to_tensor([], dtype=tf.string)\n psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n\n # Calculate tfq gradient.\n symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\n symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)\n n_samples_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.int32)\n with tf.GradientTape() as g:\n g.watch(symbol_values_tensor)\n expectations = op(circuit, symbol_names_tensor,\n symbol_values_tensor, psums, n_samples_tensor)\n grads = g.gradient(expectations, symbol_values_tensor)\n self.assertShapeEqual(grads.numpy(),\n tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))", "def test_empty_circuit_sampled_grad(self, differentiator, op):\n differentiator.refresh()\n op = differentiator.generate_differentiable_op(sampled_op=op)\n circuit = tf.convert_to_tensor([], dtype=tf.string)\n psums = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)\n\n # Calculate tfq gradient.\n symbol_values_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)\n symbol_names_tensor = tf.convert_to_tensor([], dtype=tf.string)\n n_samples_tensor = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.int32)\n with tf.GradientTape() as g:\n g.watch(symbol_values_tensor)\n expectations = op(circuit, symbol_names_tensor,\n symbol_values_tensor, psums, n_samples_tensor)\n grads = g.gradient(expectations, symbol_values_tensor)\n self.assertShapeEqual(grads.numpy(),\n tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32))", "def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)", "def has_gradients(self) -> bool:\n return False", "def test_grad_test_values(self):\r\n backup = theano.config.compute_test_value\r\n theano.config.compute_test_value = 'raise'\r\n try:\r\n x = tensor.scalar('x')\r\n x.tag.test_value = 1\r\n # Used to crash due to undefined test value.\r\n tensor.grad(ifelse(0, x, x), x)\r\n finally:\r\n theano.config.compute_test_value = backup", "def test_find_unused_parameters_when_unused_parameters_empty(self):\n\n class FindUnusedParamModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.t0 = Task()\n self.t1 = Task()\n\n def task_parameters(self):\n return (self.t0.p, self.t1.p)\n\n def forward(self, x, rank):\n return self.t1(self.t0(x)) if rank == 0 else self.t1(x)\n\n def run_and_verify_grad(model):\n # Run forward\n output = model(8, self.rank)\n\n # The grads of all parameters should be None at this point.\n [self.assertIsNone(t_p.grad) for t_p in model.module.task_parameters()]\n\n # Run backward\n output.mean().backward()\n\n # Now locally unused parameter should have grad updated on all ranks.\n [self.assertIsNotNone(t_p.grad) for t_p in 
model.module.task_parameters()]\n\n process_group = self._get_process_group()\n\n # Test on CPU\n cpu_model = DistributedDataParallel(\n FindUnusedParamModule().cpu(),\n process_group=process_group,\n find_unused_parameters=True,\n )\n run_and_verify_grad(cpu_model)\n\n # Test on GPU\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n gpu_model = DistributedDataParallel(\n FindUnusedParamModule().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n find_unused_parameters=True,\n )\n run_and_verify_grad(gpu_model)", "def test_grad_writeback(self):\n self.run_subtests(\n {\n \"change_first_weight_grad\": [False, True],\n \"change_data\": [False, True], # change `.data` vs. variable itself\n \"set_to_none\": [False, True],\n },\n self._test_grad_writeback,\n )", "def no_gradient_fusion():\n pass", "def _assert_no_grad(tensor):\n assert not tensor.requires_grad, \\\n 'nn criterions don\\'t compute the gradient w.r.t. targets - please ' \\\n 'mark these tensors as not requiring gradients'", "def test_execute_non_gates(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n LOW = devices.LOW\n HIGH = devices.HIGH\n\n # Make different devices\n [SW1_ID, SW2_ID, SW3_ID, CL_ID, D_ID] = names.lookup([\"Sw1\", \"Sw2\", \"Sw3\",\n \"Clock1\", \"D1\"])\n devices.make_device(SW1_ID, devices.SWITCH, 1)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n devices.make_device(SW3_ID, devices.SWITCH, 0)\n devices.make_device(CL_ID, devices.CLOCK, 1)\n devices.make_device(D_ID, devices.D_TYPE)\n\n # Make connections\n network.make_connection(SW1_ID, None, D_ID, devices.DATA_ID)\n network.make_connection(CL_ID, None, D_ID, devices.CLK_ID)\n network.make_connection(SW2_ID, None, D_ID, devices.SET_ID)\n network.make_connection(SW3_ID, None, D_ID, devices.CLEAR_ID)\n\n # Get device outputs, the expression is in a string here so that it\n # can be re-evaluated again after executing devices\n sw1_output = \"network.get_output_signal(SW1_ID, None)\"\n sw2_output = \"network.get_output_signal(SW2_ID, None)\"\n sw3_output = \"network.get_output_signal(SW3_ID, None)\"\n clock_output = \"network.get_output_signal(CL_ID, None)\"\n dtype_Q = \"network.get_output_signal(D_ID, devices.Q_ID)\"\n dtype_QBAR = \"network.get_output_signal(D_ID, devices.QBAR_ID)\"\n\n # Execute devices until the clock is LOW at the start of its\n # period\n clock_device = devices.get_device(CL_ID)\n network.execute_network()\n while clock_device.clock_counter != 1 or eval(clock_output) != LOW:\n network.execute_network()\n\n # The clock is not rising yet, Q could be (randomly) HIGH or LOW\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output)] == [HIGH, LOW, LOW, LOW]\n\n assert eval(dtype_Q) in [HIGH, LOW]\n assert eval(dtype_QBAR) == network.invert_signal(eval(dtype_Q))\n\n network.execute_network() # the clock has risen\n # While sw1(DATA) is high, Q has now changed to HIGH\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n HIGH, LOW, LOW, HIGH, HIGH, LOW]\n\n devices.set_switch(SW1_ID, LOW) # Sw1 is connected to DATA\n devices.set_switch(SW2_ID, HIGH) # Sw2 is connected to SET\n network.execute_network() # the clock is not rising yet\n network.execute_network() # the clock has risen\n # Even if sw1(DATA) is LOW, and the clock is rising,\n # sw2(SET) is HIGH, so Q is HIGH\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), 
eval(dtype_QBAR)] == [\n LOW, HIGH, LOW, HIGH, HIGH, LOW]\n\n devices.set_switch(SW1_ID, HIGH) # Sw1 is connected to DATA\n devices.set_switch(SW2_ID, LOW) # Sw2 is connected to SET\n devices.set_switch(SW3_ID, HIGH) # Sw3 is connected to CLEAR\n network.execute_network() # the clock is not rising yet\n network.execute_network() # the clock has risen\n # Even if sw1(DATA) is HIGH, and the clock is rising,\n # sw3(CLEAR) is HIGH, so Q is LOW\n assert [eval(sw1_output), eval(sw2_output), eval(sw3_output),\n eval(clock_output), eval(dtype_Q), eval(dtype_QBAR)] == [\n HIGH, LOW, HIGH, HIGH, LOW, HIGH]", "def testNoCatchEagerOpExecution(self):\n check_numerics_callback.enable_check_numerics()\n x = constant_op.constant([2.0, 3.0])\n y = constant_op.constant([1.0, 0.0])\n self.assertAllClose((x + y) * (x - y), [3.0, 9.0])", "def test_error_if_not_expval_batched(self):\n qml.enable_tape()\n dev = qml.device(\"orquestra.qiskit\", wires=2)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.PauliZ(wires=[0]))\n qml.var(qml.PauliZ(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.expval(qml.PauliZ(wires=[0]))\n\n circuits = [tape1, tape2]\n with pytest.raises(NotImplementedError):\n res = dev.batch_execute(circuits)\n\n qml.disable_tape()", "def test_apply_device_rules(self):\n pass", "def test_gradients(self):\n ex = self._create_example()\n decoder_input_fn = FixedDecoderInputs(\n inputs=tf.convert_to_tensor(\n ex.target, dtype=tf.float32),\n sequence_length=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n model = self.create_model()\n decoder_output = model.encode_decode(\n source=tf.convert_to_tensor(\n ex.source, dtype=tf.float32),\n source_len=tf.convert_to_tensor(\n ex.source_len, dtype=tf.int32),\n decoder_input_fn=decoder_input_fn,\n target_len=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n\n # Get a loss to optimize\n losses = seq2seq_losses.cross_entropy_sequence_loss(\n logits=decoder_output.logits,\n targets=tf.ones_like(decoder_output.predictions),\n sequence_length=tf.convert_to_tensor(\n ex.target_len, dtype=tf.int32))\n mean_loss = tf.reduce_mean(losses)\n\n optimizer = tf.train.AdamOptimizer()\n grads_and_vars = optimizer.compute_gradients(mean_loss)\n train_op = optimizer.apply_gradients(grads_and_vars)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n _, grads_and_vars_ = sess.run([train_op, grads_and_vars])\n\n for grad, _ in grads_and_vars_:\n self.assertFalse(np.isnan(grad).any())", "def test_gradient_supported(self):\n # gradient supported here\n wrapped = EfficientSU2(2) # a circuit wrapped into a big instruction\n plain = wrapped.decompose() # a plain circuit with already supported instructions\n\n # gradients not supported on the following circuits\n x = Parameter(\"x\")\n duplicated = QuantumCircuit(2)\n duplicated.rx(x, 0)\n duplicated.rx(x, 1)\n\n needs_chainrule = QuantumCircuit(2)\n needs_chainrule.rx(2 * x, 0)\n\n custom_gate = WhatAmI(x)\n unsupported = QuantumCircuit(2)\n unsupported.append(custom_gate, [0, 1])\n\n tests = [\n (wrapped, True), # tuple: (circuit, gradient support)\n (plain, True),\n (duplicated, False),\n (needs_chainrule, False),\n (unsupported, False),\n ]\n\n # used to store the info if a gradient callable is passed into the\n # optimizer of not\n info = {\"has_gradient\": None}\n optimizer = partial(gradient_supplied, info=info)\n\n sampler = Sampler()\n estimator = Estimator()\n fidelity_primitive = ComputeUncompute(sampler)\n\n pvqd = PVQD(\n 
fidelity=fidelity_primitive,\n ansatz=None,\n initial_parameters=np.array([]),\n estimator=estimator,\n optimizer=optimizer,\n )\n problem = TimeEvolutionProblem(self.hamiltonian, time=0.01)\n for circuit, expected_support in tests:\n with self.subTest(circuit=circuit, expected_support=expected_support):\n pvqd.ansatz = circuit\n pvqd.initial_parameters = np.zeros(circuit.num_parameters)\n _ = pvqd.evolve(problem)\n self.assertEqual(info[\"has_gradient\"], expected_support)", "def skip_check_grad_ci(reason=None):\n if not isinstance(reason, str):\n raise AssertionError(\"The reason for skipping check_grad is required.\")\n\n def wrapper(cls):\n cls.no_need_check_grad = True\n return cls\n\n return wrapper", "def test_operate_cyclic_storage(self, on):\n if on is True:\n override = {} # cyclic storage is True by default\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n assert m.run_config[\"cyclic_storage\"] is True\n elif on is False:\n override = {\"run.cyclic_storage\": False}\n m = build_model(\n override, \"simple_supply_and_supply_plus,operate,investment_costs\"\n )\n assert m.run_config[\"cyclic_storage\"] is False\n with pytest.warns(exceptions.ModelWarning) as warning:\n m.run(build_only=True)\n check_warn = check_error_or_warning(\n warning, \"Storage cannot be cyclic in operate run mode\"\n )\n if on is True:\n assert check_warn\n elif on is True:\n assert not check_warn\n assert (\n AttrDict.from_yaml_string(m._model_data.attrs[\"run_config\"]).cyclic_storage\n is False\n )", "def _minimal_device_test(device: torch.device) -> bool:\n try:\n with torch.no_grad():\n model = torch.nn.Conv2d(1, 1, 1).to(device)\n x = torch.zeros(1, 1, 1, 1).to(device)\n y = model(x)\n del model, x, y\n except Exception as e:\n return False\n\n return True", "def test_gradient_convergence(self):\n pass", "def test_apply_unsupported():\n dev = _aws_device(wires=2)\n mock_op = Mock()\n mock_op.name = \"foo\"\n mock_op.parameters = []\n\n operations = [qml.Hadamard(wires=0), qml.CNOT(wires=[0, 1]), mock_op]\n dev.apply(operations)", "def test_change_color_of_the_device__false():", "def check_deterministic_ops() -> None:\n if os.getenv(TF_DETERMINISTIC_OPS, False):\n shared_io_utils.raise_warning(\n f\"You have set '{TF_DETERMINISTIC_OPS}' to 1. If you are \"\n f\"using one or more GPU(s) and use any of 'SparseFeaturizer', \"\n f\"'TEDPolicy', 'DIETClassifier', 'UnexpecTEDIntentPolicy', or \"\n f\"'ResponseSelector' training and testing will fail as there are no \"\n f\"deterministic GPU implementations of some underlying TF ops.\",\n category=UserWarning,\n )", "def test_gradient_exception_on_sample(self):\n dev = qml.device(\"default.qubit\", wires=2, shots=1000)\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.sample(qml.PauliZ(0)), qml.sample(qml.PauliX(1))\n\n with pytest.raises(\n qml.QuantumFunctionError,\n match=\"Circuits that include sampling can not be differentiated.\",\n ):\n grad_fn = autograd.jacobian(circuit)\n grad_fn(1.0)" ]
[ "0.6840362", "0.67945534", "0.62804043", "0.62784797", "0.620019", "0.6160229", "0.602515", "0.60208565", "0.60208565", "0.599637", "0.5913095", "0.5900473", "0.58735245", "0.5856681", "0.58525246", "0.5840177", "0.58140266", "0.5806199", "0.5756336", "0.5752305", "0.574577", "0.5740316", "0.5688202", "0.5648782", "0.56449676", "0.56334966", "0.5631385", "0.5598734", "0.55905086", "0.55843407" ]
0.7671382
0
Test the use of a custom cache object with multiple tapes
def test_custom_cache_multiple(self, mocker): dev = qml.device("default.qubit", wires=1) spy = mocker.spy(qml.interfaces, "cache_execute") a = jax.numpy.array(0.1) b = jax.numpy.array(0.2) def cost(a, b, cache): with qml.queuing.AnnotatedQueue() as q1: qml.RY(a, wires=0) qml.RX(b, wires=0) qml.expval(qml.PauliZ(0)) tape1 = qml.tape.QuantumScript.from_queue(q1) with qml.queuing.AnnotatedQueue() as q2: qml.RY(a, wires=0) qml.RX(b, wires=0) qml.expval(qml.PauliZ(0)) tape2 = qml.tape.QuantumScript.from_queue(q2) res = execute( [tape1, tape2], dev, gradient_fn=param_shift, cache=cache, ) return res[0] custom_cache = {} jax.grad(cost)(a, b, cache=custom_cache) cache = spy.call_args[0][1] assert cache is custom_cache
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_local_cache():", "def test_custom_cache(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n custom_cache = {}\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def test_cache():\n\n def func(arg1, arg2):\n return arg1 * arg2\n\n first = cache(func)(100, 200)\n second = cache(func)(100, 200)\n assert first is second", "def test_cached_class(self):\n self.storage.store(RECORD_TABLE, value='a')\n self.storage.store(RECORD_TABLE, value='b')\n self.storage.store(RECORD_TABLE, value='c')\n self.storage.store(RECORD_TABLE, value='d')\n self.clerk.cacheAll(Record)\n assert Record in self.clerk.cache.allCached\n assert len(self.clerk.cache.data[Record].keys()) == 4\n \n \"\"\"\n Now for the kicker:\n We wipe the underlying table...\n \"\"\"\n for x in range(1,5): self.storage.delete(RECORD_TABLE, x)\n assert self.storage.match(RECORD_TABLE) == []\n\n \"\"\"\n But we can still get the object:\n \"\"\"\n a = self.clerk.matchOne(Record, value='a')\n assert a.ID == 1\n assert a.value == 'a'", "def test_KanePage_cached(self):\n kane_page = KanePage(mocked=True)\n from_cache = kane_page.fetch_taplist(brewery=\"Kane\")\n assert not from_cache\n\n # 2nd read from cache!\n kane_page.ssml_taplist() # this puts it in the cache\n from_cache = kane_page.fetch_taplist(brewery=\"Kane\")\n assert from_cache", "def test_untimed(self):\n cache = TimedCache()\n for i in range(500):\n cache[i] = i\n for i in range(500):\n assert i in cache\n assert cache[i] == i", "def test_TEB_cached(self):\n teb = TEBpage(mocked=True)\n assert teb is not None\n from_cache = teb.fetch_taplist(brewery=\"Twin Elephant\")\n assert not from_cache\n\n # 2nd read from cache!\n teb.ssml_taplist() # this puts it in the cache\n from_cache = teb.fetch_taplist(brewery=\"Twin Elephant\")\n assert from_cache", "def test_cache_overflow_default(method):\n if method == \"init\":\n cache = CacheDict([(\"one\", 1), (\"two\", 2), (\"three\", 3)], cache_len=2)\n elif method == \"assign\":\n cache = CacheDict(cache_len=2)\n cache[\"one\"] = 1\n cache[\"two\"] = 2\n cache[\"three\"] = 3\n else:\n assert False\n\n assert \"one\" not in cache.keys()\n assert \"two\" in cache.keys()\n assert \"three\" in cache.keys()", "def test_timed(self):\n time = 0.001\n cache = TimedCache(max_age=time)\n\n cache[1] = 1\n assert 1 in cache\n sleep(time)\n assert 1 not in cache\n with pytest.raises(KeyError):\n assert cache[1]\n\n for i in range(50):\n cache[i] = i\n assert i in cache\n assert cache[i] == i\n sleep(time)\n for i in range(50):\n assert i not in cache\n with pytest.raises(KeyError):\n assert cache[i]", "def test_cache_init(case, method):\n if method == \"init\":\n cache = CacheDict(case.init, cache_len=case.cache_len)\n elif method == \"assign\":\n cache = CacheDict(cache_len=case.cache_len)\n for (key, val) in case.init:\n cache[key] = val\n else:\n assert False\n\n # length is max(#entries, cache_len)\n assert cache.__len__() == case.len\n\n # make sure the first entry is the one ejected\n if case.cache_len > 1 and case.init:\n assert \"one\" in cache.keys()\n else:\n assert \"one\" not in cache.keys()", 
"def setUp(self):\n self.expire_time = 1\n self.cache = Cacher(self.expire_time)\n self.key = 'test'\n self.value = {1:2}", "def test_cached(self):\n one, two = self.endpoint.instantiate(), self.endpoint.instantiate()\n self.assertIdentical(one, two)", "def test_simple_multi_cache(self):\n # cache params\n cache_key = 'test_simple_multi_cache'\n cache_len = 60\n\n # prepare cache data and save\n cache_data = self.get_cache_data(5000)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())", "def test_multi_cache(self):\n # cache params\n cache_key = 'test_multi_cache'\n cache_len = 60\n num_items = 20000\n\n # prepare cache data and save\n cache_data = self.get_cache_data(num_items)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())", "def test_cache(self):\n response = self.make_call().json[0]\n self.assertFalse(response['cached']) # a call has ben made to Google API\n # each step is saved\n self.assertEqual(len(r.keys(pattern=r'step*')), int(r.get('counter')))\n self.assertEqual(int(r.get('counter')), len(response['steps']))\n pairs = set((i, j) for (i, o), (j, d) in combinations_with_replacement(list(enumerate(response['steps'])), 2) if i <= j)\n self.assertEqual(len(r.keys(pattern=r'origin*')), len(pairs)) # each combination is cached\n for i, j in pairs:\n origin, destination = response['steps'][i], response['steps'][j]\n resp = self.make_call(origin=f\"{origin['start_lat']},{origin['start_lng']}\",\n destination=f\"{destination['end_lat']},{destination['end_lng']}\").json[0]\n # No new API calls are made, cached results are returned for each possible combination of origin/dest\n self.assertEqual(origin['start_lat'], resp['start_lat']) # all coordinates should match\n self.assertEqual(origin['start_lng'], resp['start_lng'])\n self.assertEqual(destination['end_lat'], resp['end_lat'])\n self.assertEqual(destination['end_lng'], resp['end_lng'])\n self.assertTrue(resp['cached'])\n # New API call is made for transit directions. 
We can't recycle driving directions for this one.\n response = self.make_call(mode='transit').json\n self.assertFalse(response[0]['cached'])\n self.assertTrue(len(response) > 1) # when asking for transit directions it should yield multiple alternatives\n # driving directions should be cached already\n response = self.make_call().json[0]\n self.assertTrue(response['cached'])\n # Walking directions should not be cached\n walking = self.make_call(mode='walking').json[0]\n self.assertFalse(walking['cached'])\n # Bicycling should be treated as walking but 3 times as fast\n bicycling = self.make_call(mode='bicycling').json[0]\n self.assertTrue(bicycling['cached'])\n self.assertEqual(walking['duration'], 3 * bicycling['duration'])", "def test_cache_results(self):\n env = pike.Environment()\n value = [1]\n with pike.Graph('g') as graph:\n n = ParrotNode(value)\n env.add(graph)\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n n.value = [1, 2]\n\n # We mutated value, but the return value should be cached\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n\n # Busting cache should return new value\n ret = env.run('g', True)\n self.assertEqual(ret, {'default': [1, 2]})", "def test__cache(self):\n # Access to a protected member _cache of a client class\n # pylint: disable=W0212\n treadmill.zkutils.get.return_value = {}\n\n zkclient = kazoo.client.KazooClient()\n self.evmgr._cache(zkclient, 'foo#001')\n\n appcache = os.path.join(self.cache, 'foo#001')\n self.assertTrue(os.path.exists(appcache))", "def test_timed_reset(self):\n time = 0.005\n cache = TimedCache(max_age=time)\n\n cache[1] = 1\n assert 1 in cache\n assert cache[1] == 1\n sleep(time / 2)\n assert 1 in cache\n assert cache[1] == 1\n cache[1] = 1\n sleep(time / 2)\n assert 1 in cache\n assert cache[1] == 1\n sleep(time / 2)\n assert 1 not in cache\n with pytest.raises(KeyError):\n assert cache[1]", "def test_statistics(self):\n max_size = 3\n cache = LRUCache(max_size)\n for i in range(max_size):\n cache.put(i, str(i))\n assert 0 == cache.hits\n assert 0 == cache.misses\n cache.get(0)\n assert 1 == cache.hits\n assert 0 == cache.misses\n with pytest.raises(KeyError):\n cache.get(42)\n assert 1 == cache.hits\n assert 1 == cache.misses", "def test_set_cache_timeout():\n my_accessor = RallyAccessor('uname', 'pword', 'base_url')\n my_accessor.set_cache_timeout('object_name', 10)\n\n assert_equal(my_accessor.cache_timeouts, {'object_name': 10})", "def test_get_cache(self):\r\n profile1 = self.profile_manager.get('testing')\r\n profile2 = self.profile_manager.get('testing')\r\n self.assertEqual(profile1, profile2)", "def test_cache_add_without_timeout(self):\n self.cache.set('garbage', 'full')\n\n self.assertTrue(self.cache.add('superman', 'clark kent'))\n self.assertTrue(self.cache.add('recipe', {'sugar': 2, 'wine': 5}))\n self.assertFalse(self.cache.add('garbage', 'empty'))\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})\n self.assertEqual(self.cache.get('garbage'), 'full')\n\n # Move time forward 10 years\n cache.datetime.now = lambda: datetime.now() + timedelta(days=10*365)\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})\n self.assertEqual(self.cache.get('garbage'), 'full')\n\n # Try adding items again\n self.assertFalse(self.cache.add('superman', 'not kent'))\n self.assertFalse(self.cache.add('recipe', {'sugar': None, 'wine': 'A bottle'}))\n 
self.assertFalse(self.cache.add('garbage', 'empty'))\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})\n self.assertEqual(self.cache.get('garbage'), 'full')", "def dynCache(*args, **kwargs)->None:\n pass", "def test_put_get(self):\n key = 1\n item = 'aaa'\n cache = LRUCache(5)\n cache.put(key, item)\n assert item == cache.get(key)\n assert 1 == cache.size", "def test_use_cache():\n # Generate cached files\n cmd_list = [NETMIKO_GREP] + ['interface', 'all']\n subprocess_handler(cmd_list)\n cmd_list = [NETMIKO_GREP] + ['--use-cache', '--display-runtime', 'interface', 'all']\n (output, std_err) = subprocess_handler(cmd_list)\n match = re.search(r\"Total time: (0:.*)\", output)\n time = match.group(1)\n _, _, seconds = time.split(\":\")\n seconds = float(seconds)\n assert seconds <= 1\n assert 'pynet_rtr1.txt:interface FastEthernet0' in output", "def test_cache_set_without_timeout(self):\n self.cache.set('superman', 'clark kent')\n self.cache.set('recipe', {'sugar': 2, 'wine': 5})\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})\n\n # Move time forward 10 years\n cache.datetime.now = lambda: datetime.now() + timedelta(days=10*365)\n\n self.assertEqual(self.cache.get('superman'), 'clark kent')\n self.assertEqual(self.cache.get('recipe'), {'sugar': 2, 'wine': 5})", "def test_unsized(self):\n cache = LRUCache()\n for i in range(500):\n cache[i] = i\n for i in range(500):\n assert i in cache\n assert cache[i] == i", "def test_cache_retrieved(self):\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n data = read.data.decode()\n self.assertIn(\n '<span class=\"expan\">etย </span>', data,\n \"Text content should be transformed\"\n )\n self.assertIn(\n 'Facsimilaire', data,\n \"Other content should be added\"\n )\n\n cached = self.cache.get(\"urn:cts:froLit:jns915.jns1856.ciham-fro1:1\").decode()\n self.assertIn('<aside class=\"text-left\">', cached, \"Assert cache is made\")\n\n with mock.patch(\"nemo_xslttwo_plugin.shell\") as shell:\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n cached_response = read.data.decode()\n self.assertEqual(\n cached_response, data,\n \"Text content should the same in cache\"\n )\n self.assertEqual(\n shell.call_count, 0,\n \"Shell should not be called because we use cache\"\n )", "def test_abstract_methods(self):\n settings = {\"pypi.storage\": \"tests.DummyStorage\"}\n kwargs = ICache.configure(settings)\n cache = ICache(**kwargs)\n with self.assertRaises(NotImplementedError):\n cache.distinct()\n with self.assertRaises(NotImplementedError):\n cache.fetch(\"pkg-1.1.tar.gz\")\n with self.assertRaises(NotImplementedError):\n cache.all(\"pkg\")\n with self.assertRaises(NotImplementedError):\n cache.clear(make_package())\n with self.assertRaises(NotImplementedError):\n cache.clear_all()\n with self.assertRaises(NotImplementedError):\n cache.save(make_package())", "def test_cache_lru_overflow(mode, add_third):\n\n cache = CacheDict([(\"one\", 1), (\"two\", 2)], cache_len=2)\n\n if mode == \"get\":\n dummy = cache[\"one\"]\n elif mode == \"set\":\n cache[\"one\"] = 1\n else:\n assert False\n\n if add_third:\n cache[\"three\"] = 3\n\n assert \"one\" in cache.keys()\n assert \"two\" not in cache.keys()\n assert \"three\" in cache.keys()\n else:\n assert \"one\" in cache.keys()\n assert \"two\" in cache.keys()\n assert \"three\" not in cache.keys()" ]
[ "0.7523432", "0.7488053", "0.7069932", "0.7023241", "0.6847037", "0.67787766", "0.6732996", "0.6732327", "0.6684719", "0.66628677", "0.6645275", "0.65749645", "0.6552753", "0.6544951", "0.65383387", "0.6496633", "0.6484428", "0.6461473", "0.643884", "0.6417242", "0.6400255", "0.636031", "0.63392496", "0.63368845", "0.6314965", "0.6285286", "0.6282795", "0.6278207", "0.6258131", "0.62449807" ]
0.7550254
0
Test that, when using parameter-shift transform, caching produces the optimum number of evaluations.
def test_caching_param_shift(self, tol): dev = qml.device("default.qubit", wires=1) def cost(a, cache): with qml.queuing.AnnotatedQueue() as q: qml.RY(a[0], wires=0) qml.RX(a[1], wires=0) qml.expval(qml.PauliZ(0)) tape = qml.tape.QuantumScript.from_queue(q) return execute( [tape], dev, gradient_fn=param_shift, cache=cache, )[0] # Without caching, 5 evaluations are required to compute # the Jacobian: 1 (forward pass) + 2 (backward pass) * (2 shifts * 2 params) params = jax.numpy.array([0.1, 0.2]) jax.grad(cost)(params, cache=None) assert dev.num_executions == 5 # With caching, 5 evaluations are required to compute # the Jacobian: 1 (forward pass) + (2 shifts * 2 params) dev._num_executions = 0 jac_fn = jax.grad(cost) grad1 = jac_fn(params, cache=True) assert dev.num_executions == 5 # Check that calling the cost function again # continues to evaluate the device (that is, the cache # is emptied between calls) grad2 = jac_fn(params, cache=True) assert dev.num_executions == 10 assert np.allclose(grad1, grad2, atol=tol, rtol=0) # Check that calling the cost function again # with different parameters produces a different Jacobian grad2 = jac_fn(2 * params, cache=True) assert dev.num_executions == 15 assert not np.allclose(grad1, grad2, atol=tol, rtol=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cache_maxsize(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cachesize):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cachesize=cachesize,\n )[0]\n\n params = jax.numpy.array([0.1, 0.2])\n jax.jit(jax.grad(cost), static_argnums=1)(params, cachesize=2)\n cache = spy.call_args[0][1]\n\n assert cache.maxsize == 2\n assert cache.currsize == 2\n assert len(cache) == 2", "def test_argument_change(tmpdir):\n memory = Memory(location=tmpdir.strpath, verbose=0)\n func = memory.cache(count_and_append)\n # call the function for the first time, is should cache it with\n # argument x=[]\n assert func() == 0\n # the second time the argument is x=[None], which is not cached\n # yet, so the functions should be called a second time\n assert func() == 1", "def test_argument_change():\r\n mem = Memory(cachedir=env['dir'], verbose=0)\r\n func = mem.cache(count_and_append)\r\n # call the function for the first time, is should cache it with\r\n # argument x=[]\r\n assert func() == 0\r\n # the second time the argument is x=[None], which is not cached\r\n # yet, so the functions should be called a second time\r\n assert func() == 1", "def test_custom_cache(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n custom_cache = {}\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def test_cache_results(self):\n env = pike.Environment()\n value = [1]\n with pike.Graph('g') as graph:\n n = ParrotNode(value)\n env.add(graph)\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n n.value = [1, 2]\n\n # We mutated value, but the return value should be cached\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n\n # Busting cache should return new value\n ret = env.run('g', True)\n self.assertEqual(ret, {'default': [1, 2]})", "def test_intermediate(self):\n self.sm.fit(self.x, intermediate__save=True)\n self.assertTrue(hasattr(self.sm.intermediate, \"cache_\"))\n\n for k in (\n \"stft\",\n \"peaks\",\n ):\n with self.subTest(key=k):\n self.assertTrue(k in self.sm.intermediate.cache_, \"Key not found\")\n self.assertGreater(len(self.sm.intermediate[k]), 0, \"List is empty\")", "def test_inter_process_cache():\r\n\r\n x, y = theano.tensor.dvectors('xy')\r\n f = theano.function([x, y], [MyOp()(x), MyOp()(y)])\r\n f(numpy.arange(60), numpy.arange(60))\r\n if theano.config.mode == 'FAST_COMPILE' or theano.config.cxx == \"\":\r\n assert MyOp.nb_called == 0\r\n else:\r\n assert MyOp.nb_called == 1\r\n\r\n # What if we compile a new function with new variables?\r\n x, y = theano.tensor.dvectors('xy')\r\n f = theano.function([x, y], [MyOp()(x), MyOp()(y)])\r\n f(numpy.arange(60), numpy.arange(60))\r\n if theano.config.mode == 'FAST_COMPILE' or theano.config.cxx == \"\":\r\n assert MyOp.nb_called == 0\r\n else:\r\n assert MyOp.nb_called == 1", "def test_custom_cache_multiple(self, mocker):\n dev = 
qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n def cost(a, b, cache):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n res = execute(\n [tape1, tape2],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )\n return res[0]\n\n custom_cache = {}\n jax.grad(cost)(a, b, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def test_multilingual_pipeline_small_cache():\n run_multilingual_pipeline(max_cache_size=1)", "def test_set_source_cache_changes_delegate_cache(simple_param):\n offset = 4\n scale = 5\n d = DelegateParameter('d', simple_param, offset=offset, scale=scale)\n new_source_value = 3\n simple_param.cache.set(new_source_value)\n\n assert d.cache.get() == (new_source_value - offset) / scale", "def test_number_of_steps(self):\n class Mock(object):\n def __init__(self):\n self.count = 0\n\n def evolve(self, t, dt):\n self.count += 1\n\n G = Mock()\n sim = simulation.Simulation(G, dt=0.1)\n\n sim.run(100.0)\n self.assertEqual(G.count, 1000)\n\n G = Mock()\n sim = simulation.Simulation(G, dt=0.2)\n sim.run(100.2)\n self.assertEqual(G.count, 501)", "def test_cache():\n\n def func(arg1, arg2):\n return arg1 * arg2\n\n first = cache(func)(100, 200)\n second = cache(func)(100, 200)\n assert first is second", "def test_eval_2(self):\n maxcycles = collatz_eval(100, 200)\n self.assertEqual(maxcycles, 125)", "def test_set_source_cache_changes_delegate_get(simple_param):\n offset = 4\n scale = 5\n d = DelegateParameter('d', simple_param, offset=offset, scale=scale)\n new_source_value = 3\n\n simple_param.cache.set(new_source_value)\n\n assert d.get() == (new_source_value - offset) / scale", "def test_cache_to_coherency():\r\n ij = [(0, 1), (1, 0)]\r\n ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))\r\n freqs, cache = tsa.cache_fft(ts, ij)\r\n Cxy = tsa.cache_to_coherency(cache, ij)\r\n f, c = tsa.coherency(ts)\r\n npt.assert_almost_equal(Cxy[0][1], c[0, 1])\r\n\r\n # Check that it doesn't matter if you prefer_speed_over_memory:\r\n freqs, cache2 = tsa.cache_fft(ts, ij, prefer_speed_over_memory=True)\r\n Cxy2 = tsa.cache_to_coherency(cache2, ij)\r\n\r\n npt.assert_equal(Cxy2, Cxy)\r\n\r\n # XXX Calculating the angle of the averaged psd and calculating the average\r\n # of the angles calculated over different windows does not yield exactly\r\n # the same number, because the angle is not a linear functions (arctan),\r\n # so it is unclear how to test this, but we make sure that it runs,\r\n # whether or not you prefer_speed_over_memory:\r\n freqs, cache = tsa.cache_fft(ts, ij)\r\n tsa.cache_to_relative_phase(cache, ij)\r\n\r\n freqs, cache = tsa.cache_fft(ts, ij, prefer_speed_over_memory=True)\r\n tsa.cache_to_relative_phase(cache, ij)\r\n\r\n # Check that things run alright, even if there is just one window for the\r\n # entire ts:\r\n freqs, cache = tsa.cache_fft(ts, ij, method=dict(this_method='welch',\r\n NFFT=ts.shape[-1],\r\n n_overlap=0))\r\n\r\n cxy_one_window = tsa.cache_to_coherency(cache, ij)\r\n ph_one_window = tsa.cache_to_relative_phase(cache, ij)\r\n\r\n # And whether or not you prefer_speed_over_memory\r\n freqs, cache = tsa.cache_fft(ts, ij, 
method=dict(this_method='welch',\r\n NFFT=ts.shape[-1],\r\n n_overlap=0),\r\n prefer_speed_over_memory=True)\r\n\r\n cxy_one_window = tsa.cache_to_coherency(cache, ij)\r\n ph_one_window = tsa.cache_to_relative_phase(cache, ij)", "def test_eval_9(self):\n maxcycles = collatz_eval(1, 5000000)\n self.assertEqual(maxcycles, 597)", "def test_eval_3(self):\n maxcycles = collatz_eval(201, 210)\n self.assertEqual(maxcycles, 89)", "def test_eval_7(self):\n maxcycles = collatz_eval(1, 1)\n self.assertEqual(maxcycles, 1)", "def test_memoization(self):\n non_memoized_func = lambda: random.randint(0, 1000000)\n yes_memoized_func = util.memoize(non_memoized_func)\n self.assertNotEqual(non_memoized_func(), non_memoized_func())\n self.assertEqual(yes_memoized_func(), yes_memoized_func())", "def test_eval_1(self):\n maxcycles = collatz_eval(1, 10)\n self.assertEqual(maxcycles, 20)", "def precalculate():\n pass", "def precalculate():\n pass", "def test_eval_6(self):\n maxcycles = collatz_eval(1000, 2001)\n self.assertEqual(maxcycles, 182)", "def warmup_step(ckpt_step: int) -> float:\n return ckpt_step * 10", "def test_memoise(free_alg, tmpdir):\n\n dr = free_alg\n n_calls = [0]\n filename = 'tmp.pickle'\n log = io.StringIO()\n\n def get_zero():\n n_calls[0] += 1\n return 0\n\n # Test the reporting facility.\n with tmpdir.as_cwd():\n assert dr.memoize(get_zero, filename, log=log) == 0\n assert dr.memoize(get_zero, filename, log=log) == 0\n assert dr.memoize(get_zero, filename) == 0\n assert n_calls[0] == 1\n assert len(log.getvalue().splitlines()) == 2", "def test_cache_overflow_default(method):\n if method == \"init\":\n cache = CacheDict([(\"one\", 1), (\"two\", 2), (\"three\", 3)], cache_len=2)\n elif method == \"assign\":\n cache = CacheDict(cache_len=2)\n cache[\"one\"] = 1\n cache[\"two\"] = 2\n cache[\"three\"] = 3\n else:\n assert False\n\n assert \"one\" not in cache.keys()\n assert \"two\" in cache.keys()\n assert \"three\" in cache.keys()", "def testResults(self):\n problem = problems.simple()\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options={\n \"layers\": (),\n \"initializer\": \"zeros\"\n }))\n minimize_ops = optimizer.meta_minimize(problem, 5)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n cost, final_x = train(sess, minimize_ops, 1, 2)\n\n # Torch results\n torch_cost = 0.7325327\n torch_final_x = 0.8559\n\n self.assertAlmostEqual(cost, torch_cost, places=4)\n self.assertAlmostEqual(final_x[0], torch_final_x, places=4)", "def test_scan(self):\r\n orig_compute_test_value = theano.config.compute_test_value\r\n try:\r\n theano.config.compute_test_value = 'raise'\r\n #theano.config.compute_test_value = 'warn'\r\n k = T.iscalar(\"k\")\r\n A = T.vector(\"A\")\r\n k.tag.test_value = 3\r\n A.tag.test_value = numpy.random.rand(5).astype(config.floatX)\r\n\r\n def fx(prior_result, A):\r\n return prior_result * A\r\n # Symbolic description of the result\r\n result, updates = theano.scan(fn=fx,\r\n outputs_info=T.ones_like(A),\r\n non_sequences=A,\r\n n_steps=k)\r\n\r\n # We only care about A**k, but scan has provided us with A**1 through A**k.\r\n # Discard the values that we don't care about. 
Scan is smart enough to\r\n # notice this and not waste memory saving them.\r\n final_result = result[-1]\r\n assert hasattr(final_result.tag, 'test_value')\r\n finally:\r\n theano.config.compute_test_value = orig_compute_test_value", "def test_compute_persistence(perfectModelEnsemble_initialized_control):\n perfectModelEnsemble_initialized_control._compute_persistence(metric=\"acc\")", "def log2_cached_creator():\n\n log2_dict = {}\n log2_dict[0] = -float(\"inf\")\n max_shift_amount = 0\n\n def closure(x):\n \"\"\"\n Given an x, This will find and cache all solutions of numbers less\n than x if they haven't already been cached. Only works for x that\n is a power of 2 (single 1 bit).\n \"\"\"\n\n nonlocal max_shift_amount\n\n if (x in log2_dict):\n return log2_dict[x]\n\n while (True):\n max_num = 1 << max_shift_amount\n log2_dict[max_num] = max_shift_amount\n max_shift_amount += 1\n if (x == max_num):\n return log2_dict[x]\n return closure" ]
[ "0.6651072", "0.6048294", "0.6017", "0.59834677", "0.58053", "0.5749205", "0.5714358", "0.5695462", "0.56551707", "0.5652003", "0.56464815", "0.55483776", "0.54793906", "0.5419169", "0.54163677", "0.5401587", "0.53614277", "0.5349554", "0.534416", "0.5324941", "0.5314627", "0.5314627", "0.5274387", "0.5271688", "0.52642244", "0.5263749", "0.5239883", "0.522112", "0.5208242", "0.5179831" ]
0.69252145
0
Test that caching produces the optimum number of adjoint evaluations when mode=backward
def test_caching_adjoint_backward(self): dev = qml.device("default.qubit", wires=2) params = jax.numpy.array([0.1, 0.2, 0.3]) def cost(a, cache): with qml.queuing.AnnotatedQueue() as q: qml.RY(a[0], wires=0) qml.RX(a[1], wires=0) qml.RY(a[2], wires=0) qml.expval(qml.PauliZ(0)) tape = qml.tape.QuantumScript.from_queue(q) return execute( [tape], dev, gradient_fn="device", cache=cache, grad_on_execution=False, gradient_kwargs={"method": "adjoint_jacobian"}, )[0] # Without caching, 2 evaluations are required. # 1 for the forward pass, and one per output dimension # on the backward pass. jax.grad(cost)(params, cache=None) assert dev.num_executions == 2 # With caching, also 2 evaluations are required. One # for the forward pass, and one for the backward pass. dev._num_executions = 0 jac_fn = jax.grad(cost) jac_fn(params, cache=True) assert dev.num_executions == 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cache_maxsize(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cachesize):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cachesize=cachesize,\n )[0]\n\n params = jax.numpy.array([0.1, 0.2])\n jax.jit(jax.grad(cost), static_argnums=1)(params, cachesize=2)\n cache = spy.call_args[0][1]\n\n assert cache.maxsize == 2\n assert cache.currsize == 2\n assert len(cache) == 2", "def L_model_backward(AL, Y, caches):\n pass", "def L_model_backward_test_case():\n np.random.seed(3)\n AL = np.random.randn(1, 2)\n Y = np.array([[1, 0]])\n\n A1 = np.random.randn(4,2)\n W1 = np.random.randn(3,4)\n b1 = np.random.randn(3,1)\n Z1 = np.random.randn(3,2)\n linear_cache_activation_1 = ((A1, W1, b1), Z1)\n\n A2 = np.random.randn(3,2)\n W2 = np.random.randn(1,3)\n b2 = np.random.randn(1,1)\n Z2 = np.random.randn(1,2)\n linear_cache_activation_2 = ((A2, W2, b2), Z2)\n\n caches = (linear_cache_activation_1, linear_cache_activation_2)\n\n return AL, Y, caches", "def L_model_backward(AL, Y, caches):\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n\n dAL = -(np.divide(Y,AL)-np.divide(1-Y,1-AL))\n \"\"\"\n cache = caches[-1]\n grads[\"dA\"+str(L)],grads[\"dW\"+str(L)],grads[\"db\"+str(L)] = linear_activation_backward(dAL,cache,activation = 'sigmoid')\n\n for i in reversed(range(L-1)):\n grads[\"dA\"+str(i+1)],grads[\"dW\"+str(i+1)],grads[\"db\"+str(i+1)] = linear_activation_backward(grads[\"dA\"+str(i+2)],caches[i],activation = 'relu')\n \"\"\"\n\n current_cache = caches[-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_backward(sigmoid_backward(dAL, current_cache[1]),current_cache[0])\n\n for l in reversed(range(L - 1)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 2)], caches\". Outputs: \"grads[\"dA\" + str(l + 1)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)]\n ### START CODE HERE ### (approx. 
5 lines)\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_backward(sigmoid_backward(dAL, current_cache[1]), current_cache[0])\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n ### END CODE HERE ###\n\n return grads", "def _poputil_recompute_backward(op, grads):\n return grads", "def test_grad_with_backward_mode(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n expected_results = jax.numpy.array([-0.3875172, -0.18884787, -0.38355705])\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)[0]\n return res\n\n cost = jax.jit(cost)\n\n results = jax.grad(cost)(params, cache=None)\n for r, e in zip(results, expected_results):\n assert jax.numpy.allclose(r, e, atol=1e-7)", "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "def part1b_3():\n xs = exampleInput\n backward = [\n Counter({'-SIZE-': 0.564, '-FEAT-': 0.435}),\n Counter({'-SIZE-': 0.567, '-FEAT-': 0.432}),\n Counter({'-FEAT-': 0.5, '-SIZE-': 0.5})]\n backward_ = submission.computeBackward(simpleCRF, xs)\n for vec, vec_ in zip( backward, backward_):\n grader.requireIsTrue( Counters.approximateEquals( vec, vec_ ) )", "def test_caching_param_shift(self, tol):\n dev = qml.device(\"default.qubit\", wires=1)\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n # Without caching, 5 evaluations are required to compute\n # the Jacobian: 1 (forward pass) + 2 (backward pass) * (2 shifts * 2 params)\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=None)\n assert dev.num_executions == 5\n\n # With caching, 5 evaluations are required to compute\n # the Jacobian: 1 (forward pass) + (2 shifts * 2 params)\n dev._num_executions = 0\n jac_fn = jax.grad(cost)\n grad1 = jac_fn(params, cache=True)\n assert dev.num_executions == 5\n\n # Check that calling the cost function again\n # continues to evaluate the device (that is, the cache\n # is emptied between calls)\n grad2 = jac_fn(params, cache=True)\n assert dev.num_executions == 10\n assert np.allclose(grad1, grad2, atol=tol, rtol=0)\n\n # Check that calling the cost function again\n # with different parameters produces a different Jacobian\n grad2 = jac_fn(2 * params, cache=True)\n assert dev.num_executions == 15\n assert not np.allclose(grad1, grad2, atol=tol, rtol=0)", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use second equation in slide 71 of MDP\n Adjest weight of active features depend on tranistion \n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n feat = self.featExtractor.getFeatures(state, action)\n\n # if weight is empty, 
then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n maxQns = self.getValue(nextState)\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action)\n difference = ( reward + self.discountRate * maxQns ) - Qsa\n \n for key in self.weight.keys():\n self.weight[key] += (self.alpha * difference * feat[key])\n \n \n \"\"\" END CODE \"\"\"", "def learn(model: KW_Model,\n trainloader: DataLoader,\n testloader: DataLoader,\n optimizer: optim.Optimizer,\n nb_epoch: int,\n device: torch.device,\n eval_fn: Callable[[List[bool], List[Qid]], Dict[Qid, float]],\n mean_window: int = 50,\n entropy_lambda: float = 0.025,\n smt_lambda: float = 1.0,\n reinforce_lambda: float = 1.0,\n ) -> Tuple[nn.Module, Dict[str, List[torch.tensor]], Dict[str, List[torch.tensor]]]:\n print(\"Memory usage: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n past_rewards = {str(q_id.long().item()): deque(maxlen=mean_window)\n for _, _, q_ids, _ in chain(trainloader, testloader)\n for q_id in q_ids}\n \n logs = [\"reward\",\n \"scaled_entropy\",\n \"scaled_reinforce\",\n \"scaled_smt\",\n \"total_loss\",\n \"accuracy\"]\n train_logs = {log: list() for log in logs}\n test_logs = {log: list() for log in logs}\n del logs\n \n for epoch in range(nb_epoch):\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n print(f\"\\nEpoch {epoch}\")\n \n print(\"Begin epoch: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n model.train()\n for x, y, q_id, masks in trainloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n\n # entropy_lambda = min(1.01*entropy_lambda, 0.025)\n # reinforce_lambda = min(1.01*reinforce_lambda, 1.0)\n # smt_lambda = max(0.99*smt_lambda, 0.05)\n loss, reinforce_loss, entropy, smt_loss = losses\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().cpu().sum().tolist()\n nb_total += masks.cpu().sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {mean(running_loss): .3f} Rewa {mean(running_reward): .5f}\", end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n\n # Logs\n train_logs[\"reward\"].append(mean(running_reward))\n train_logs[\"scaled_entropy\"].append(mean(entropies))\n train_logs[\"scaled_reinforce\"].append(mean(reinforces))\n train_logs[\"scaled_smt\"].append(mean(smts))\n train_logs[\"total_loss\"].append(mean(running_loss))\n train_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n \n train_loss, train_reward = mean(running_loss), mean(running_reward)\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n model.eval()\n for x, y, q_id, masks in testloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n 
sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n loss, reinforce_loss, entropy, smt_loss = losses\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().sum().tolist()\n nb_total += masks.sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {train_loss: .3f} Rewa {train_reward: .3f}\",\n f\"Te Loss{mean(running_loss): .3f} Rewa {mean(running_reward): .3f}\",\n end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n \n \n # Logs\n test_logs[\"reward\"].append(mean(running_reward))\n test_logs[\"scaled_entropy\"].append(mean(entropies))\n test_logs[\"scaled_reinforce\"].append(mean(reinforces))\n test_logs[\"scaled_smt\"].append(mean(smts))\n test_logs[\"total_loss\"].append(mean(running_loss))\n test_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n\n return model, train_logs, test_logs", "def backward_propagation(AL, Y, caches):\n grads = {}\n L = len(caches) \n m = AL.shape[1]\n Y = Y.reshape(AL.shape)\n\n dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n \n current_cache = caches[L-1]\n grads[\"dA\" + str(L-1)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dAL, current_cache, 'sigmoid')\n \n for l in reversed(range(L-1)):\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 1)], current_cache, 'relu')\n grads[\"dA\" + str(l)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n return grads", "def test_determinism_2():\n\n def run_sgd(mode):\n # Must be seeded the same both times run_sgd is called\n disturb_mem.disturb_mem()\n rng = np.random.RandomState([2012, 11, 27])\n\n batch_size = 5\n train_batches = 3\n valid_batches = 4\n num_features = 2\n\n # Synthesize dataset with a linear decision boundary\n w = rng.randn(num_features)\n\n def make_dataset(num_batches):\n disturb_mem.disturb_mem()\n m = num_batches*batch_size\n X = rng.randn(m, num_features)\n y = np.zeros((m, 1))\n y[:, 0] = np.dot(X, w) > 0.\n\n rval = DenseDesignMatrix(X=X, y=y)\n\n rval.yaml_src = \"\" # suppress no yaml_src warning\n\n X = rval.get_batch_design(batch_size)\n assert X.shape == (batch_size, num_features)\n\n return rval\n\n train = make_dataset(train_batches)\n valid = make_dataset(valid_batches)\n\n num_chunks = 10\n chunk_width = 2\n\n class ManyParamsModel(Model):\n \"\"\"\n Make a model with lots of parameters, so that there are many\n opportunities for their updates to get accidentally re-ordered\n non-deterministically. 
This makes non-determinism bugs manifest\n more frequently.\n \"\"\"\n\n def __init__(self):\n super(ManyParamsModel, self).__init__()\n self.W1 = [sharedX(rng.randn(num_features, chunk_width)) for i\n in xrange(num_chunks)]\n disturb_mem.disturb_mem()\n self.W2 = [sharedX(rng.randn(chunk_width))\n for i in xrange(num_chunks)]\n self._params = safe_union(self.W1, self.W2)\n self.input_space = VectorSpace(num_features)\n self.output_space = VectorSpace(1)\n\n disturb_mem.disturb_mem()\n model = ManyParamsModel()\n disturb_mem.disturb_mem()\n\n class LotsOfSummingCost(Cost):\n \"\"\"\n Make a cost whose gradient on the parameters involves summing many\n terms together, so that T.grad is more likely to sum things in a\n random order.\n \"\"\"\n\n supervised = True\n\n def expr(self, model, data, **kwargs):\n self.get_data_specs(model)[0].validate(data)\n X, Y = data\n disturb_mem.disturb_mem()\n\n def mlp_pred(non_linearity):\n Z = [T.dot(X, W) for W in model.W1]\n H = [non_linearity(z) for z in Z]\n Z = [T.dot(h, W) for h, W in safe_izip(H, model.W2)]\n pred = sum(Z)\n return pred\n\n nonlinearity_predictions = map(mlp_pred,\n [T.nnet.sigmoid,\n T.nnet.softplus,\n T.sqr,\n T.sin])\n pred = sum(nonlinearity_predictions)\n disturb_mem.disturb_mem()\n\n return abs(pred-Y[:, 0]).sum()\n\n def get_data_specs(self, model):\n data = CompositeSpace((model.get_input_space(),\n model.get_output_space()))\n source = (model.get_input_source(), model.get_target_source())\n return (data, source)\n\n cost = LotsOfSummingCost()\n\n disturb_mem.disturb_mem()\n\n algorithm = SGD(cost=cost,\n batch_size=batch_size,\n learning_rule=Momentum(.5),\n learning_rate=1e-3,\n monitoring_dataset={'train': train, 'valid': valid},\n update_callbacks=[ExponentialDecay(decay_factor=2.,\n min_lr=.0001)],\n termination_criterion=EpochCounter(max_epochs=5))\n\n disturb_mem.disturb_mem()\n\n train_object = Train(dataset=train,\n model=model,\n algorithm=algorithm,\n extensions=[PolyakAveraging(start=0),\n MomentumAdjustor(final_momentum=.9,\n start=1,\n saturate=5), ],\n save_freq=0)\n\n disturb_mem.disturb_mem()\n\n train_object.main_loop()\n\n output = cStringIO()\n record = Record(file_object=output, replay=False)\n record_mode = RecordMode(record)\n\n run_sgd(record_mode)\n\n output = cStringIO(output.getvalue())\n playback = Record(file_object=output, replay=True)\n playback_mode = RecordMode(playback)\n\n run_sgd(playback_mode)", "def relu_backward(dA, cache):\n\n Z = cache\n dZ = np.array(dA, copy=True)\n dZ[Z <= 0] = 0\n return dZ", "def relu_backward(dA, cache):\n Z = cache\n dZ = np.array(dA, copy=True)\n dZ[Z <= 0] = 0\n return dZ", "def _poputil_block_recompute_backward(op, grads):\n return grads", "def L_model_backward(AL, Y, caches):\n\n grads = {}\n L = len(caches)\n m = AL.shape[1]\n Y = Y.reshape(AL.shape)\n\n # Initializing the Back Propogation\n dAL = dAL = - (np.divide(Y, AL) - np.divide(1-Y, 1-AL))\n\n current_cache = caches[-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_backward(sigmoid_backward(dAL,\n current_cache[1]),\n current_cache[0])\n\n for l in reversed(range(L-1)):\n\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_backward(relu_backward(grads[\"dA\" + str(l+2)],\n current_cache[1]),\n current_cache[0])\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n\n return grads", "def test_global_efficiency_complete_graph(self):\n for n in range(2, 10):\n G = 
nx.complete_graph(n)\n assert_equal(nx.global_efficiency(G), 1)", "def linear_backward(dZ, cache):\n pass", "def test010_similarity(self, b_size=8, dim=1024,\n alpha_fwd=0.999, alpha_bkw=0.99, eps=1e-05, itrs=8):\n # instantiate inputs\n input = torch.randn(b_size, dim)\n input_0 = input.clone().detach().requires_grad_(True)\n input_1 = input.clone().detach().requires_grad_(True)\n # instantiate gradient at the output\n grad_out = torch.randn(b_size, dim)\n\n # instantiate Linearized Online Norm class\n onlin = OnlineNorm1D(dim, alpha_fwd=alpha_fwd, alpha_bkw=alpha_bkw,\n eps=eps, b_size=b_size)\n\n # instantiate Looping Online Norm class\n onloop = OnlineNorm1D(dim, eps=eps,\n ctrl_norm=ControlNorm1DLoop(dim,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n eps=eps))\n\n for _ in range(itrs):\n # fprop through Linearized Online Norm class\n y_0 = onlin(input_0)\n # bprop through Linearized Online Norm class\n y_0.backward(grad_out)\n # fprop through Looping Online Norm class\n y_1 = onloop(input_1)\n # bprop through Looping Online Norm class\n y_1.backward(grad_out)\n\n # numerically compare output\n np.testing.assert_allclose(y_0.detach().numpy(),\n y_1.detach().numpy(),\n rtol=1e-4, atol=1e-5)\n # numerically grad_in\n np.testing.assert_allclose(input_0.grad.detach().numpy(),\n input_1.grad.detach().numpy(),\n rtol=1e-4, atol=1e-5)\n\n self.logger.info('Algorithm implemented using linearization of ops '\n 'numerically matches algorithm implemented with '\n 'loops')", "def backward_propagation(parameters, cache, X, Y):\n m = X.shape[1]\n \n # First, retrieve W1 and W2 from the dictionary \"parameters\".\n ### START CODE HERE ### (โ‰ˆ 2 lines of code)\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n ### END CODE HERE ###\n \n # Retrieve also A1 and A2 from dictionary \"cache\".\n ### START CODE HERE ### (โ‰ˆ 2 lines of code)\n A1 = cache[\"A1\"]\n A2 = cache[\"A2\"]\n ### END CODE HERE ###\n \n # Backward propagation: calculate dW1, db1, dW2, db2. 
\n ### START CODE HERE ### (โ‰ˆ 6 lines of code, corresponding to 6 equations on slide above)\n dZ2 = A2 - Y\n dW2 = (np.dot(dZ2, A1.T))/m\n db2 = (np.sum(dZ2, axis = 1, keepdims = True))/m\n dZ1 = np.dot(W2.T, dZ2)*(1 - np.power(A1, 2))\n dW1 = (np.dot(dZ1, X.T))/m\n db1 = (np.sum(dZ1, axis = 1, keepdims = True))/m\n ### END CODE HERE ###\n \n grads = {\"dW1\": dW1,\n \"db1\": db1,\n \"dW2\": dW2,\n \"db2\": db2}\n \n return grads", "def ncore(self):", "def test_estimate_mode() -> None:\n # Create a set of random parameters\n parameters = np.sort(1 + 5 * np.random.rand(3))\n parameters[[2, 1]] = parameters[[1, 2]]\n\n # Create an instance\n my_univariate_input = UnivDist(\n distribution=DISTRIBUTION_NAME, parameters=parameters\n )\n\n # Generate a sample\n sample_size = 1000000 # Should give 1e-0 accuracy\n xx = my_univariate_input.get_sample(sample_size)\n\n # Estimated result\n y, edges = np.histogram(xx, bins=\"auto\")\n mode = edges[np.argmax(y)]\n\n # Analytical result\n mode_ref = parameters[2]\n\n # Assertion\n assert np.isclose(mode, mode_ref, rtol=1, atol=1)", "def test_memoise(free_alg, tmpdir):\n\n dr = free_alg\n n_calls = [0]\n filename = 'tmp.pickle'\n log = io.StringIO()\n\n def get_zero():\n n_calls[0] += 1\n return 0\n\n # Test the reporting facility.\n with tmpdir.as_cwd():\n assert dr.memoize(get_zero, filename, log=log) == 0\n assert dr.memoize(get_zero, filename, log=log) == 0\n assert dr.memoize(get_zero, filename) == 0\n assert n_calls[0] == 1\n assert len(log.getvalue().splitlines()) == 2", "def test_optimize_basic(name, builder):\n\n model = Model(name)\n dirty = True\n printing = True\n counter = 1\n stats = list()\n\n with model.build():\n builder()\n\n if printing:\n print_graphs(f'opt_{name}_init', model)\n\n while dirty:\n\n print()\n\n dirty, new_model = model.run_algebra(\n OptimizeAlg(name=name,\n counter=counter,\n stats=stats,\n num_steps=1))\n \n if printing: \n print_graphs(f'opt_{name}_post({counter})', new_model)\n\n model = new_model\n counter += 1\n\n if printing:\n print_stats(f'opt_{name}', stats)", "def optimize_parameters(self):\n # forward\n for i in range(min(self.big_iter+1,len(self.orders_rev))):\n if(self.orders_rev):\n # compute fake images and reconstruction images.\n self.forward(i,False)\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders_rev[i]].zero_grad()\n # calculate gradients for G_A and G_B\n self.backward_G(i,False)\n # update G_A and G_B's weights\n self.optimizers_G[self.orders_rev[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() \n self.backward_D(i,False) \n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DY() # calculate gradients for D_A\n self.optimizer_D.step()\n for i in range(min(self.big_iter+1, len(self.orders))):\n if(self.orders):\n if(i>0):\n self.real_A = self.fake_B.detach()\n self.forward(i,True) # compute fake images and reconstruction images.\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders[i]].zero_grad()\n self.backward_G(i,True) # calculate gradients for G_A and G_B\n # update G_A and G_B's weights\n self.optimizers_G[self.orders[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() # set 
D_A and D_B's gradients to zero\n self.backward_D(i,True) # calculate gradients for D_A\n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DX() # calculate gradients for D_A\n self.optimizer_D.step() \n self.current_label=self.labels[0]\n self.current_order=self.orders\n self.current_pred = np.concatenate((self.pred_real.detach().cpu().numpy().mean(\n axis=2).mean(axis=2), self.pred_fake.detach().cpu().numpy().mean(axis=2).mean(axis=2)))", "def learn(self):\n batch = self.agent.replay_buffer.sample(self.batch_size)\n states = torch.tensor([x.state for x in batch], dtype=torch.float32).to(self.agent.device) # shape == (batch_size, 3, 6, 7)\n actions = [x.action for x in batch]\n rewards = torch.tensor([x.reward for x in batch], dtype=torch.float32).to(self.agent.device)\n next_states = torch.tensor([x.next_state for x in batch], dtype=torch.float32).to(self.agent.device)\n dones = [x.done for x in batch]\n\n self.optimizer.zero_grad()\n\n\n q_vals = self.agent.policy_net(states)[range(len(actions)), actions] # Q vals for actions taken\n q_next_vals = self.agent.target_net(next_states).detach() # we don't care about grad wrt target net\n q_next_vals[dones] = 0.0 # terminal states have no future expected value\n q_targets = rewards + self.gamma * torch.max(q_next_vals, dim=1)[0]\n\n # all_q_vals = self.agent.policy_net(states)\n # print()\n # print('actions')\n # print(actions)\n # print()\n # print('original all q vals')\n # print(self.agent.policy_net(states)) \n # print(self.agent.policy_net(states).shape)\n # print()\n # print('QVALS:', q_vals)\n # print(q_vals.shape)\n # print('\\n\\n')\n # print('QTARGETS:', q_targets)\n # print(q_targets.shape)\n\n # breakpoint()\n\n loss = self.loss_fn(q_targets, q_vals).to(self.agent.device)\n loss.backward()\n \n # for layer in self.agent.policy_net.named_parameters():\n \n # # print(f'layer: {layer[0]}')\n # # print(f'grad:', layer[1].grad)\n\n # # print('loss', loss)\n # # print('q_vals grad:', q_vals.grad)\n # # print('states:', )\n\n self.optimizer.step()\n\n self.agent.learning_iters += 1\n if self.agent.learning_iters % self.target_update_freq == 0:\n self.agent.update_target_net()\n # logger.info('Updated target net')", "def relu_backward(dout, cache):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n x=cache\n dout[x<=0]=0\n dx=dout\n return dx", "def test(test_loader, model, criterion, epoch):\n global args, writer\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n #if not args.multi_gpu:\n # if model.beta_ema > 0:\n # old_params = model.get_params()\n # model.load_ema_params()\n #else:\n # if model.module.beta_ema > 0:\n # old_params = model.module.get_params()\n # model.module.load_ema_params()\n\n end = time.time()\n acc_part = []\n with torch.no_grad():\n for i, (input_, target) in enumerate(test_loader):\n if torch.cuda.is_available():\n target = target.cuda(async=True)\n input_ = input_.cuda()\n # compute output\n output = model(input_)\n preds = output.max(dim=1)[1]\n\n # measure accuracy and record loss\n # prec1 = accuracy(output.item(), target, topk=(1,))[0]\n prec1 = (preds == target).sum().item() / preds.size(0)\n top1.update(100 - prec1*100, input_.size(0))\n acc_part.append(prec1)\n # measure elapsed time\n 
batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0 and args.verbose:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Err@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n i, len(test_loader), batch_time=batch_time, loss=losses,\n top1=top1))\n\n if args.verbose:\n print(' * Err@1 {top1.avg:.3f}'.format(top1=top1))\n #if not args.multi_gpu:\n # if model.beta_ema > 0:\n # model.load_params(old_params)\n #else:\n # if model.module.beta_ema > 0:\n # model.module.load_params(old_params)\n\n # log to TensorBoard\n if writer is not None:\n writer.add_scalar('test/loss', losses.avg, epoch)\n writer.add_scalar('test/err', top1.avg, epoch)\n layers = model.layers if not args.multi_gpu else model.module.layers\n for k, layer in enumerate(layers):\n if hasattr(layer, 'qz_loga'):\n mode_z = layer.sample_z(1, sample=0).view(-1)\n writer.add_histogram('mode_z/layer{}'.format(k), mode_z.cpu().data.numpy(), epoch)\n return np.mean(acc_part)", "def affine_backward(dout, cache):\n x, w, b = cache\n dx, dw, db = None, None, None\n \n dx = dout.dot(w.T)\n dx = dx.reshape(x.shape)\n \n input_shape = x.shape\n prod = 1\n for i in range(1,len(input_shape)):\n prod *= input_shape[i]\n\n x_reshaped = x.reshape(x.shape[0], prod)\n dw = (x_reshaped.T).dot(dout)\n\n db = np.sum(dout,axis=0)\n \n return dx, dw, db" ]
[ "0.61493266", "0.6035271", "0.601645", "0.59946704", "0.58465856", "0.5792817", "0.5757036", "0.5731369", "0.56620586", "0.566111", "0.5609847", "0.56050646", "0.5604916", "0.5592317", "0.5580091", "0.554066", "0.5504138", "0.5499855", "0.5489731", "0.54679126", "0.5462262", "0.5457685", "0.5447759", "0.5442064", "0.54318154", "0.54308057", "0.54272383", "0.5425526", "0.54248464", "0.5423626" ]
0.66844857
0
Test scalar jacobian calculation
def test_scalar_jacobian(self, execute_kwargs, tol):
    a = jax.numpy.array(0.1)

    dev = qml.device("default.qubit", wires=2)

    def cost(a):
        with qml.queuing.AnnotatedQueue() as q:
            qml.RY(a, wires=0)
            qml.expval(qml.PauliZ(0))

        tape = qml.tape.QuantumScript.from_queue(q)
        return execute([tape], dev, **execute_kwargs)[0]

    res = jax.jit(jax.grad(cost))(a)
    assert res.shape == ()

    # compare to standard tape jacobian
    with qml.queuing.AnnotatedQueue() as q:
        qml.RY(a, wires=0)
        qml.expval(qml.PauliZ(0))

    tape = qml.tape.QuantumScript.from_queue(q)
    tape.trainable_params = [0]
    tapes, fn = param_shift(tape)
    expected = fn(dev.batch_execute(tapes))

    assert expected.shape == ()
    assert np.allclose(res, expected, atol=tol, rtol=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jacobian(self, x):\n pass", "def JacobianFunction(p,x,y,z):\n \n n = len(x)\n \n J = np.array([ np.ones((n)),x,x**2,y,y**2,x*y ])\n \n return J", "def jacobian(f, x, epsilon = 1e-10):\n f_ = f(x)\n value = np.zeros((len(f_), len(x)))\n \n for i in range(len(x)):\n f_ = partial_derivative(f, x, i, epsilon)\n value[:,i] = f_\n\n return value", "def jacobian(self, dt):\n raise NotImplementedError", "def jacobian(self, c):\n\n raise NotImplementedError", "def jacobian_func(f):\n jacobian = jacfwd(f)\n return jacobian", "def test_test_jacobian(self):\n self.set_up()\n inputObject = self.vmecOptimization.vmecInputObject\n rbc = np.copy(inputObject.rbc)\n zbs = np.copy(inputObject.zbs)\n inputObject.rbc = 0*inputObject.rbc\n inputObject.zbs = 0*inputObject.zbs\n orientable = self.vmecOptimization.test_jacobian(inputObject)\n self.assertFalse(orientable)\n # Reset boundary\n inputObject.rbc = rbc\n inputObject.zbs = zbs\n self.tear_down()", "def numerical_jacobian (fhandle, x, **args):\n \n y = fhandle (x, **args)\n numRows, numCols = (len (y), len (x))\n J = np.zeros ((numRows, numCols))\n\n for col in range (0, numCols):\n xPrime = x.copy ()\n deltaX = max (1e-4*x[col], 1e-6)\n xPrime[col] += deltaX\n yPrime = fhandle (xPrime, **args)\n J[:, col] = (yPrime - y) / deltaX\n\n return J", "def test_jacobian(self):\n\n gT1 = Pose2(1, 2, np.pi/2)\n gT2 = Pose2(-1, 4, np.pi)\n\n expected = Pose2(2, 2, np.pi/2)\n\n def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):\n # print(f\"{this = },\\n{v = },\\n{len(H) = }\")\n\n key0 = this.keys()[0]\n key1 = this.keys()[1]\n gT1, gT2 = v.atPose2(key0), v.atPose2(key1)\n error = Pose2(0, 0, 0).localCoordinates(gT1.between(gT2))\n \n if len(H) > 0:\n result = gT1.between(gT2)\n H[0] = -result.inverse().AdjointMap()\n H[1] = np.eye(3)\n return error\n \n noise_model = gtsam.noiseModel.Unit.Create(3)\n cf = ge.CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func)\n v = Values()\n v.insert(0, gT1)\n v.insert(1, gT2)\n \n bf = gtsam.BetweenFactorPose2(0, 1, Pose2(0, 0, 0), noise_model)\n\n gf = cf.linearize(v)\n gf_b = bf.linearize(v)\n\n J_cf, b_cf = gf.jacobian()\n J_bf, b_bf = gf_b.jacobian()\n np.testing.assert_allclose(J_cf, J_bf)\n np.testing.assert_allclose(b_cf, b_bf)", "def jacobian(self, xs):\n rx_list = []\n for nx,x in enumerate(xs):\n \n numpy.testing.assert_array_almost_equal(self.independentVariableShapeList[nx], numpy.shape(x), err_msg = '\\ntaped xs[%d].shape != forward xs[%d]\\n'%(nx,nx))\n rx = numpy.ravel(x)\n rx_list.append(rx)\n self.x = numpy.concatenate(rx_list)\n return wrapped_functions.jacobian(self.tape_tag, self.x)", "def jacobian(self, v):\n from scipy.special import erf, erfcx\n def integrand(u_arr):\n \"\"\"Integrand of self-consistency equation\"\"\"\n integrand_all = erfcx(-u_arr)\n #integrand_all = np.zeros(u_arr.shape)\n #u_mask = u_arr < -4.0\n #u = u_arr[u_mask]\n #integrand_all[u_mask] = -1. / np.sqrt(np.pi) * (1.0 / u - 1.0 / (2.0 * u**3) + \n #3.0 / (4.0 * u**5) - \n #15.0 / (8.0 * u**7))\n #integrand_all[~u_mask] = np.exp(u_arr[~u_mask]**2) * (1. + erf(u_arr[~u_mask]))\n return integrand_all\n\n\n mu_v = self.mu(v)\n sd_v = self.sd(v)\n low = (self.V_r - mu_v) / sd_v # reduced resting potential\n up = (self.theta - mu_v) / sd_v # reduced threshold\n f_low = integrand(low)\n f_up = integrand(up)\n jac_mat_1 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_mu\n jac_mat_2 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_var / (2. * sd_v**2)\n\n jac_T = np.diag(1. 
/ v**2) - \\\n jac_mat_1.T * (f_up - f_low) + \\\n jac_mat_2.T * (f_up * up - f_low * low)\n return jac_T.T", "def jacobian(f, x, dx):\n x = np.atleast_1d(x)\n dx = np.atleast_1d(dx)\n nx = len(x)\n ny = 0\n jacobi = None\n e = np.zeros(nx)\n for ix in xrange(nx):\n e *= 0\n e[ix] = 1\n deriv = np.atleast_1d((f(x + e * dx) - f(x - e * dx)) / (2 * dx[ix]))\n if ix == 0:\n ny = len(deriv)\n jacobi = np.empty((ny, nx))\n jacobi[:, ix] = deriv\n return jacobi", "def jacobian(x, u):\n yaw = x[2, 0]\n v = u[0, 0]\n jac = np.array([\n [1.0, 0.0, -dt * v * math.sin(yaw), dt * math.cos(yaw)],\n [0.0, 1.0, dt * v * math.cos(yaw), dt * math.sin(yaw)],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n\n return jac", "def jacobian_i(self, x):\n return np.matrix([-x**3, -x**2, -x, -1])", "def jacobian(f, x):\n\n B, N = x.shape\n x.requires_grad = True\n in_ = torch.zeros(B, 1)\n \n y = f(in_, x)\n jacobian = list()\n \n for i in range(N):\n v = torch.zeros_like(y)\n v[:, i] = 1.\n dy_i_dx = torch.autograd.grad(y,\n x,\n grad_outputs=v,\n retain_graph=True,\n create_graph=True,\n allow_unused=True)[0] # shape [B, N]\n jacobian.append(dy_i_dx)\n\n jacobian = torch.stack(jacobian, dim=2).requires_grad_()\n\n return jacobian", "def jacobian_c(self, x, out=None, **kwargs):\n return empty_matrix(0, self.nx)", "def calculate_jacobian(robot_position, landmark_pos):\n\n return None", "def jacobian(self,x,p,fun):\n n = self.n\n y = fun(x,p)\n h = 1e-4\n nout = np.size(y)\n dfdx = np.zeros((nout,n))\n for j in range(n):\n dx1 = np.zeros(n)\n dx2 = np.zeros(n)\n dx1[j] = -h\n dx2[j] = h\n dfdx[:,j] = (fun(x+dx2,p)-fun(x+dx1,p))/(2*h)\n return dfdx", "def calc_jacobian(\n model: nn.Module,\n latents: torch.Tensor,\n normalize: bool = False,\n eps: float = 1e-8,\n vectorize=False,\n reverse_ad=True,\n norm_range=True,\n norm_diagonal=False,\n) -> torch.Tensor:\n # set to eval mode but remember original state\n in_training: bool = model.training\n model.eval() # otherwise we will get 0 gradients\n with torch.set_grad_enabled(True):\n jacob = []\n input_vars = latents.clone().requires_grad_(True)\n\n output_vars = model(input_vars)\n if not vectorize:\n for i in range(output_vars.shape[1]):\n jacob.append(\n torch.autograd.grad(\n output_vars[:, i : i + 1],\n input_vars,\n create_graph=True,\n grad_outputs=torch.ones(output_vars[:, i : i + 1].shape).to(\n output_vars.device\n ),\n )[0].detach()\n )\n\n jacobian = torch.stack(jacob, 1)\n else:\n from functorch import vmap, jacrev, jacfwd\n\n if reverse_ad is True:\n jac_fn = jacrev\n else:\n jac_fn = jacfwd\n\n sample_jacobian = jac_fn(model.forward, argnums=0)\n jacobian = vmap(\n lambda x: sample_jacobian(torch.unsqueeze(x, 0)), in_dims=0\n )(input_vars).squeeze()\n\n if normalize is True:\n # normalize the Jacobian by making it volume preserving\n # jacobian /= jacobian.det().abs().pow(1 / jacobian.shape[-1]).reshape(-1, 1, 1)\n\n # normalize to make variance to 1\n # norm_factor = (output_vars.std(dim=0) + 1e-8)\n # jacobian /= norm_factor.reshape(1, 1, -1)\n if norm_range is True:\n # normalize range to [0;1]\n dim_range = (\n (output_vars.max(dim=0)[0] - output_vars.min(dim=0)[0])\n .abs()\n .reshape(-1, 1)\n )\n\n jacobian /= dim_range + eps\n elif norm_diagonal is True:\n assert (dim := jacobian.shape[1]) == jacobian.shape[2]\n jacobian /= jacobian[:, (r := torch.arange(dim)), r].unsqueeze(-1) + eps\n\n # set back to original mode\n if in_training is True:\n model.train()\n\n return jacobian", "def calc_jacobian(*args, **kwargs):\n try:\n tag = 
kwargs[\"tag\"]\n except:\n tag = 0\n\n try:\n sparse = kwargs[\"sparse\"]\n except:\n sparse = True\n\n if sparse:\n try:\n shape = kwargs[\"shape\"]\n except:\n raise ValueError(\"'shape' should be passed to calculate sparse jacobian!\")\n\n \n options = np.array([0,0,0,0],dtype=int)\n result = ad.colpack.sparse_jac_no_repeat(tag, *args, options=options)\n nnz = result[0]\n ridx = result[1]\n cidx = result[2]\n values = result[3]\n assert nnz > 0\n jac = sp.csr_matrix((values, (ridx, cidx)), shape=shape)\n jac = jac.toarray()\n else:\n jac = ad.jacobian(tag, *args)\n return jac", "def test_system_jacobian(self, scml_system):\n el_jac = np.arange(4).reshape(2, 2)\n el_over_omega = np.arange(4, 6)\n torque_over_el = np.arange(6, 8)\n # Set the el. jacobian returns to specified values\n scml_system.electrical_motor.electrical_jac_return = (el_jac, el_over_omega, torque_over_el)\n me_jac = np.arange(8, 12).reshape(2, 2)\n me_over_torque = np.arange(12, 14)\n # Set the mech. jabobian returns to specified values\n scml_system.mechanical_load.mechanical_jac_return = me_jac, me_over_torque\n sys_jac = scml_system._system_jacobian(0, np.array([0, 1, 2, 3]), [0, -1])\n\n #\n assert np.all(sys_jac[-2:, -2:] == el_jac), 'The el. jacobian is false'\n assert np.all(sys_jac[:2, :2] == me_jac), 'The mech. jacobian is false'\n assert np.all(sys_jac[2:, 0] == el_over_omega), 'the derivative of the el.state over omega is false'\n assert np.all(sys_jac[2:, 1] == np.zeros(2))\n assert np.all(sys_jac[:-2, 2:] == np.array([[72, 84], [78, 91]])), 'The derivative of the mech.state ' \\\n 'over the currents is false'", "def jacobian(Lfrac, Lstar_10, qlf):\n D = np.tile(qlf.c_B*Lstar_10**qlf.k_B, [len(Lfrac),1])\n Lfrac_2D = np.tile(Lfrac, [len(qlf.c_B),1]).T\n return np.sum(-D*Lfrac_2D**qlf.k_B,axis=1) / np.sum(D*(qlf.k_B -1)*Lfrac_2D**qlf.k_B,axis=1)\n #return np.sum(D*(1.+qlf.k_B)*Lfrac_2D**qlf.k_B, axis=1)/np.sum(D*Lfrac_2D**qlf.k_B, axis=1)", "def jacobian(self,var,g=None):\n if (g==None):g=self.g\n jac=np.zeros([self.n+1,self.n])\n for i in range(self.n):\n for j in range(self.n):\n if(i==j): jac[i][j]=2.*(var[i]+1.)-g*np.sum([self.XXZ.Z(i,k) for k in range(self.n) if k!=i])\n else: jac[i][j]=g*self.XXZ.Z(i,j)\n for i in range(self.n):\n jac[self.n][i]=1.\n return jac", "def jacobian(self,x,y,l,a):\n J = np.zeros([*x.shape,2,2])\n\n J = _jacobian(x,y,l,a,J)\n\n return J", "def test_jacobian_disconnected_inputs():\r\n v1 = tensor.vector()\r\n v2 = tensor.vector()\r\n jacobian_v = theano.gradient.jacobian(1 + v1, v2,\r\n disconnected_inputs='ignore')\r\n func_v = theano.function([v1, v2], jacobian_v)\r\n val = numpy.arange(4.0).astype(theano.config.floatX)\r\n assert numpy.allclose(func_v(val, val), numpy.zeros((4, 4)))\r\n\r\n s1 = tensor.scalar()\r\n s2 = tensor.scalar()\r\n jacobian_s = theano.gradient.jacobian(1 + s1, s2,\r\n disconnected_inputs='ignore')\r\n func_s = theano.function([s2], jacobian_s)\r\n val = numpy.array(1.0).astype(theano.config.floatX)\r\n assert numpy.allclose(func_s(val), numpy.zeros(1))", "def compute_jacobian(self):\n \n d = len(self.theta)\n n,p = self.b.shape\n \n if not self.quiet:\n print \"Running jacobian computation.\"\n print \"D will be a {}x{}x{} array\".format(p,n,d)\n \n if self.x is None:\n raise ValueError('Can not compute Jacobian. 
self.x is None.')\n \n #print \"n={},n={}\".format(n,d);\n \n D = numpy.zeros((p,n,d))\n \n \n for k in range(d):\n A_k, b_k = self.get_diff_A_b(k)\n \n for i in range(p):\n D[i,:,k] = - self.solver.backsolve(A_k.dot(self.x[:,i]) - b_k[:,i])\n \n return D", "def jacobval(state, time, press):\n a = len(state)\n jacobian = np.zeros(a**2)\n pyjacob.py_eval_jacobian(time, press, state, jacobian)\n jacobian = np.reshape(jacobian, (a,a))\n return jacobian", "def jacobian(self, b):\n \n # Substitute parameters in partial derivatives\n subs = [pd.subs(zip(self._b, b)) for pd in self._pderivs]\n # Evaluate substituted partial derivatives for all x-values\n vals = [sp.lambdify(self._x, sub, \"numpy\")(self.xvals) for sub in subs]\n # Arrange values in column-major order\n return np.column_stack(vals)", "def test_j0():\n import time\n t1 = time.time()\n\n x_list = [ 0, 1.01, 0.2, 3.3, 5.9, 77. ]\n vals1 = [ galsim.bessel.j0(x) for x in x_list ]\n print 'x = ',x_list\n print 'vals1 = ',vals1\n\n try:\n import scipy.special\n vals2 = [ scipy.special.j0(x) for x in x_list ]\n print 'vals2 = ',vals2\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.j0 disagrees with scipy.special.j0\")\n except ImportError:\n print 'Unable to import scipy. Skipping scipy tests of j0.'\n\n # These values are what scipy returns. Check against these, so not require scipy.\n vals2 = [ 1.0, \n 0.76078097763218844,\n 0.99002497223957631,\n -0.34429626039888467,\n 0.12203335459282282,\n 0.062379777089647245\n ]\n np.testing.assert_almost_equal(\n vals1, vals2, 8, \"bessel.j0 disagrees with reference values\")\n\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)", "def jacobian(self, theta, force=False):\n \n # Update the internal solution\n self.solution_update(theta, force)\n \n # Run the internal jacobian calculation\n return self.compute_jacobian()" ]
[ "0.7339697", "0.69234794", "0.6917951", "0.69130737", "0.6863862", "0.6843426", "0.67897356", "0.6771901", "0.6765239", "0.6762716", "0.67603743", "0.6646111", "0.6586984", "0.6561696", "0.6551046", "0.65283936", "0.6493513", "0.6487881", "0.6482703", "0.646528", "0.6461305", "0.64609385", "0.6460165", "0.6442906", "0.643842", "0.6429296", "0.64247924", "0.64160967", "0.641224", "0.6392585" ]
0.7391808
0
Test reusing a quantum tape by passing new parameters
def test_reusing_quantum_tape(self, execute_kwargs, tol):
    a = jax.numpy.array(0.1)
    b = jax.numpy.array(0.2)

    dev = qml.device("default.qubit", wires=2)

    with qml.queuing.AnnotatedQueue() as q:
        qml.RY(a, wires=0)
        qml.RX(b, wires=1)
        qml.CNOT(wires=[0, 1])
        qml.expval(qml.PauliZ(0))

    tape = qml.tape.QuantumScript.from_queue(q)
    assert tape.trainable_params == [0, 1]

    def cost(a, b):
        # An explicit call to _update() is required here to update the
        # trainable parameters in between tape executions.
        # This is different from how the autograd interface works.
        # Unless the update is issued, the validation check related to the
        # number of provided parameters fails in the tape: (len(params) !=
        # required_length) and the tape produces incorrect results.
        tape._update()
        new_tape = tape.bind_new_parameters([a, b], [0, 1])
        return execute([new_tape], dev, **execute_kwargs)[0]

    jac_fn = jax.jit(jax.grad(cost))
    jac = jac_fn(a, b)

    a = jax.numpy.array(0.54)
    b = jax.numpy.array(0.8)

    # check that the cost function continues to depend on the
    # values of the parameters for subsequent calls
    res2 = cost(2 * a, b)
    expected = [np.cos(2 * a)]
    assert np.allclose(res2, expected, atol=tol, rtol=0)

    jac_fn = jax.jit(jax.grad(lambda a, b: cost(2 * a, b)))
    jac = jac_fn(a, b)
    expected = -2 * np.sin(2 * a)
    assert np.allclose(jac, expected, atol=tol, rtol=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_integration(self):\n\n m = 5 # number of wires in A\n M = 2**m\n\n xmax = np.pi # bound to region [-pi, pi]\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.sin(xs[i]) ** 2\n r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])\n\n A_wires = [0, \"a\", -1.1, -10, \"bbb\"]\n target_wire = \"Ancilla\"\n wires = A_wires + [target_wire]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\"]\n\n def fn():\n qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=A_wires)\n r_unitary(qml.RY, r_rotations, control_wires=A_wires[::-1], target_wire=target_wire)\n\n qmc_circuit = qml.quantum_monte_carlo(\n fn, wires=wires, target_wire=target_wire, estimation_wires=estimation_wires\n )\n\n with qml.queuing.AnnotatedQueue() as q:\n qmc_circuit()\n qml.probs(estimation_wires)\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape = tape.expand(depth=2)\n\n assert all(\n not isinstance(op, (qml.MultiControlledX, qml.templates.QFT, qml.tape.QuantumScript))\n for op in tape.operations\n )\n\n dev = qml.device(\"default.qubit\", wires=wires + estimation_wires)\n res = dev.execute(tape)\n\n @qml.qnode(dev)\n def circuit():\n qml.templates.QuantumMonteCarlo(\n probs, func, target_wires=wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n res_expected = circuit()\n assert np.allclose(res, res_expected)", "def test_behaviour(self):\r\n\r\n with qml.tape.QuantumTape() as tape:\r\n qml.PauliZ(0)\r\n qml.RX(1.0, wires=0)\r\n qml.CNOT(wires=[0, 2])\r\n qml.Rot(2.0, 3.0, 4.0, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n tape.trainable_params = {0, 2}\r\n shifts = [0.1, -0.2, 1.6]\r\n res = generate_shifted_tapes(tape, 1, shifts=shifts)\r\n\r\n assert len(res) == len(shifts)\r\n assert res[0].get_parameters(trainable_only=False) == [1.0, 2.0, 3.1, 4.0]\r\n assert res[1].get_parameters(trainable_only=False) == [1.0, 2.0, 2.8, 4.0]\r\n assert res[2].get_parameters(trainable_only=False) == [1.0, 2.0, 4.6, 4.0]", "def test_qnode_sample(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2, shots=10)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n\n grad_meth = (\n execute_kwargs[\"gradient_kwargs\"][\"method\"]\n if \"gradient_kwargs\" in execute_kwargs\n else \"\"\n )\n if \"adjoint\" in grad_meth or \"backprop\" in grad_meth:\n pytest.skip(\"Adjoint does not support probs\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.sample(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)[0]\n return res\n\n res = jax.jit(cost, static_argnums=1)(params, cache=None)\n assert res.shape == (dev.shots,)", "def __init__(self,\n sess,\n num_actions,\n observation_shape=atari_lib.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=atari_lib.NATURE_DQN_DTYPE,\n stack_size=atari_lib.NATURE_DQN_STACK_SIZE,\n network=atari_lib.ImplicitQuantileNetwork,\n kappa=1.0,\n alpha=0.9,\n tau=0.03,\n clip_value_min=-1,\n interact='stochastic',\n replay_scheme='uniform',\n num_tau_samples=32,\n num_tau_prime_samples=32,\n num_quantile_samples=32,\n quantile_embedding_dim=64,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n 
epsilon_decay_period=250000,\n tf_device='/cpu:*',\n eval_mode=False,\n use_staging=True,\n max_tf_checkpoints_to_keep=4,\n optimizer=tf.train.AdamOptimizer(),\n summary_writer=None,\n summary_writing_frequency=500,\n allow_partial_reload=False):\n self.kappa = kappa\n # num_tau_samples = N below equation (3) in the paper.\n self.num_tau_samples = num_tau_samples\n # num_tau_prime_samples = N' below equation (3) in the paper.\n self.num_tau_prime_samples = num_tau_prime_samples\n # num_quantile_samples = k below equation (3) in the paper.\n self.num_quantile_samples = num_quantile_samples\n # quantile_embedding_dim = n above equation (4) in the paper.\n self.quantile_embedding_dim = quantile_embedding_dim\n # option to perform double dqn.\n self.alpha = alpha\n self.tau = tau\n self.clip_value_min = clip_value_min\n self._interact = interact\n\n self._replay_scheme = replay_scheme\n self.num_actions = num_actions\n self.observation_shape = tuple(observation_shape)\n self.observation_dtype = observation_dtype\n self.stack_size = stack_size\n self.network = network\n self.gamma = gamma\n self.update_horizon = update_horizon\n self.cumulative_gamma = math.pow(gamma, update_horizon)\n self.min_replay_history = min_replay_history\n self.target_update_period = target_update_period\n self.epsilon_fn = epsilon_fn\n self.epsilon_train = epsilon_train\n self.epsilon_eval = epsilon_eval\n self.epsilon_decay_period = epsilon_decay_period\n self.update_period = update_period\n self.eval_mode = eval_mode\n self.training_steps = 0\n self.optimizer = optimizer\n self.summary_writer = summary_writer\n self.summary_writing_frequency = summary_writing_frequency\n self.allow_partial_reload = allow_partial_reload\n\n with tf.device(tf_device):\n # Create a placeholder for the state input to the DQN network.\n # The last axis indicates the number of consecutive frames stacked.\n state_shape = (1,) + self.observation_shape + (stack_size,)\n self.state = np.zeros(state_shape)\n self.state_ph = tf.placeholder(\n self.observation_dtype, state_shape, name='state_ph')\n self._replay = self._build_replay_buffer(use_staging)\n\n self._build_networks()\n\n self._train_op = self._build_train_op()\n self._sync_qt_ops = self._build_sync_op()\n\n if self.summary_writer is not None:\n # All tf.summaries should have been defined prior to running this.\n self._merged_summaries = tf.summary.merge_all()\n self._sess = sess\n\n var_map = atari_lib.maybe_transform_variable_names(\n tf.global_variables())\n self._saver = tf.train.Saver(\n var_list=var_map, max_to_keep=max_tf_checkpoints_to_keep)\n\n # Variables to be initialized by the agent once it interacts with the\n # environment.\n self._observation = None\n self._last_observation = None", "def __init__(self,\n num_actions,\n\n tau,\n alpha=1,\n clip_value_min=-10,\n\n net_conf = None,\n env = \"CartPole\", \n normalize_obs = True,\n hidden_layer=2, \n neurons=512,\n replay_scheme='prioritized',\n noisy = False,\n dueling = False,\n initzer = 'xavier_uniform',\n target_opt=0,\n mse_inf=False,\n network=networks.NatureDQNNetwork,\n optimizer='adam',\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n seed=None):\n # We need this because some tools convert round floats into ints.\n seed = int(time.time() * 1e6) if seed is None else seed\n self._net_conf = net_conf\n self._env = env \n self._normalize_obs = normalize_obs\n self._hidden_layer = hidden_layer\n self._neurons=neurons \n self._noisy = noisy\n self._dueling = dueling\n self._initzer = initzer\n self._target_opt = 
target_opt\n self._mse_inf = mse_inf\n self._tau = tau\n self._alpha = alpha\n self._clip_value_min = clip_value_min\n self._rng = jax.random.PRNGKey(seed)\n\n super(JaxDQNAgentNew, self).__init__(\n num_actions= num_actions,\n network= functools.partial(network, \n num_actions=num_actions,\n net_conf=self._net_conf,\n env=self._env,\n normalize_obs=self._normalize_obs,\n hidden_layer=self._hidden_layer, \n neurons=self._neurons,\n noisy=self._noisy,\n dueling=self._dueling,\n initzer=self._initzer),\n optimizer=optimizer,\n epsilon_fn=dqn_agent.identity_epsilon if self._noisy == True else epsilon_fn)\n\n \n self._replay_scheme = replay_scheme", "def test_prepare_on_run(self):\n class Mock(object):\n def __init__(self):\n self.t_max = None\n self.dt = None\n\n def evolve(self, t, dt):\n pass\n\n def prepare(self, t_max, dt):\n self.t_max = t_max\n self.dt = dt\n\n t_max = 10.0\n dt = 0.2\n \n G = Mock()\n sim = simulation.Simulation(G, dt=dt)\n self.assertIsNone(G.t_max)\n self.assertIsNone(G.dt)\n\n sim.run(t_max)\n self.assertEqual(G.t_max, t_max)\n self.assertEqual(G.dt, dt)", "def test_classical_processing_multiple_tapes(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.3, 0.2])\n\n def cost_fn(x):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.Hadamard(0)\n qml.RY(x[0], wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.Hadamard(0)\n qml.CRX(2 * x[0] * x[1], wires=[0, 1])\n qml.RX(2 * x[1], wires=[1])\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n result = execute(tapes=[tape1, tape2], device=dev, **execute_kwargs)\n return result[0] + result[1] - 7 * result[1]\n\n res = jax.jit(jax.grad(cost_fn))(params)\n assert res.shape == (2,)", "def test_classical_processing_single_tape(self, execute_kwargs):\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n c = jax.numpy.array(0.3)\n\n def cost(a, b, c, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a * c, wires=0)\n qml.RZ(b, wires=0)\n qml.RX(c + c**2 + jax.numpy.sin(a), wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute([tape], device, **execute_kwargs)[0]\n\n dev = qml.device(\"default.qubit\", wires=2)\n res = jax.jit(jax.grad(cost, argnums=(0, 1, 2)), static_argnums=3)(a, b, c, device=dev)\n assert len(res) == 3", "def __init__(self,\n num_actions,\n\n tau,\n alpha=1,\n clip_value_min=-10,\n target_opt=0,\n\n net_conf = None,\n env = \"CartPole\",\n hidden_layer=2, \n neurons=512,\n noisy = False,\n dueling = False,\n initzer = 'variance_scaling',\n\n observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n observation_dtype=dqn_agent.NATURE_DQN_DTYPE,\n stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,\n network=networks.ImplicitQuantileNetwork,\n kappa=1.0,\n num_tau_samples=32,\n num_tau_prime_samples=32,\n num_quantile_samples=32,\n quantile_embedding_dim=64,\n double_dqn=False,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=dqn_agent.linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n replay_scheme='prioritized',\n optimizer='adam',\n summary_writer=None,\n summary_writing_frequency=500,\n seed=None):\n \n seed = int(time.time() * 1e6) if seed is None else seed\n self._net_conf = net_conf\n self._env = env\n self._hidden_layer = hidden_layer\n 
self._neurons=neurons \n self._noisy = noisy\n self._dueling = dueling\n self._initzer = initzer\n\n self._tau = tau\n self._alpha = alpha\n self._clip_value_min = clip_value_min\n self._target_opt = target_opt\n self._rng = jax.random.PRNGKey(seed)\n\n self.kappa = kappa\n self._replay_scheme = replay_scheme\n\n # num_tau_samples = N below equation (3) in the paper.\n self.num_tau_samples = num_tau_samples\n # num_tau_prime_samples = N' below equation (3) in the paper.\n self.num_tau_prime_samples = num_tau_prime_samples\n # num_quantile_samples = k below equation (3) in the paper.\n self.num_quantile_samples = num_quantile_samples\n # quantile_embedding_dim = n above equation (4) in the paper.\n self.quantile_embedding_dim = quantile_embedding_dim\n # option to perform double dqn.\n self.double_dqn = double_dqn\n\n\n super(JaxImplicitQuantileAgentNew, self).__init__(\n num_actions=num_actions,\n observation_shape=observation_shape,\n observation_dtype=observation_dtype,\n stack_size=stack_size,\n network=functools.partial(network,\n \t num_actions=num_actions,\n net_conf=self._net_conf,\n env=self._env,\n hidden_layer=self._hidden_layer, \n neurons=self._neurons,\n noisy=self._noisy,\n dueling=self._dueling,\n initzer=self._initzer,\n quantile_embedding_dim=quantile_embedding_dim),\n gamma=gamma,\n update_horizon=update_horizon,\n min_replay_history=min_replay_history,\n update_period=update_period,\n target_update_period=target_update_period,\n epsilon_fn=epsilon_fn,\n epsilon_train=epsilon_train,\n epsilon_eval=epsilon_eval,\n epsilon_decay_period=epsilon_decay_period,\n optimizer=optimizer,\n summary_writer=summary_writer,\n summary_writing_frequency=summary_writing_frequency)\n\n self._num_actions=num_actions\n self._replay = self._build_replay_buffer()", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des donnรฉes gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des sรฉquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des donnรฉes gรฉnรฉrรฉes par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres donnรฉes quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test5():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la 
topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n exp1.pingAllTest() # **************** Parece que es necesario que se de un arranque al controlador\n # **************** para que aprenda las reglas antes del ataque.\n\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n #exp1.trafico.pingMeasure(filename='ping_ataque_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_parego(facade, make_scenario, configspace):\n N_TRIALS = 64\n RETRAIN_AFTER = 8\n\n scenario: Scenario = make_scenario(configspace, use_multi_objective=True, n_trials=N_TRIALS)\n multi_objective_algorithm = WrapStrategy(ParEGO, scenario=scenario)\n intensifier = Intensifier(scenario, max_config_calls=1, max_incumbents=10)\n config_selector = ConfigSelector(scenario, retrain_after=RETRAIN_AFTER)\n initial_design = RandomInitialDesign(scenario, n_configs=1)\n\n smac = facade(\n scenario=scenario,\n target_function=tae,\n multi_objective_algorithm=multi_objective_algorithm,\n intensifier=intensifier,\n config_selector=config_selector,\n initial_design=initial_design,\n overwrite=True,\n )\n incumbents = smac.optimize()\n\n sorted_incumbents = []\n for incumbent in incumbents:\n x, y = func(incumbent[\"x\"])\n sorted_incumbents.append((x, y))\n\n sorted_incumbents = sorted(sorted_incumbents, key=lambda x: x[0])\n previous_y = np.inf\n for x, y in sorted_incumbents:\n assert y <= previous_y\n previous_y = y\n\n # We expect N_TRIALS/RETRAIN_AFTER updates\n assert multi_objective_algorithm._n_calls_update_on_iteration_start == int(N_TRIALS / RETRAIN_AFTER)", "def test_new():\n from qiskit import BasicAer\n from qiskit.aqua.algorithms import Grover\n from qiskit.aqua.components.oracles import LogicalExpressionOracle\n\n expr = \"your logical expression goes here\"\n algorithm = Grover(LogicalExpressionOracle(expr))\n backend = BasicAer.get_backend('qasm_simulator')\n result = algorithm.run(backend, seed=101110)\n print(result)", "def test_construct_subcircuit(self):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n def circuit(a, b, c):\r\n qml.RX(a, wires=0)\r\n qml.RY(b, wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.PhaseShift(c, wires=1)\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n tapes = circuit.metric_tensor(1.0, 1.0, 1.0, only_construct=True)\r\n assert len(tapes) == 3\r\n\r\n # first parameter subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second parameter subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # third parameter subcircuit\r\n assert len(tapes[2].operations) == 4\r\n assert 
isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n # Phase shift generator\r\n assert isinstance(tapes[2].operations[3], qml.QubitUnitary)", "def test_svm_quantique():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = 10598\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n pres = \"Test pour des donnรฉes gรฉnรฉrรฉes par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n classical_kernel_estimation(samp_train, samp_test, labels)\n classical_kernel_estimation(samp_train_me, samp_test_me, labels_me)\n\n # Generate the feature map\n feature_map = FirstOrderExpansion(feature_dimension=2, depth=2)\n\n # Run the Quantum Kernel Estimator and classify the test data\n qsvm = QSVM(feature_map=feature_map, training_dataset=samp_train,\n test_dataset=samp_test)\n qsvm_me = QSVM(feature_map=feature_map, training_dataset=samp_train_me,\n test_dataset=samp_test_me)\n\n result = qsvm.run(quantum_instance)\n result_me = qsvm_me.run(quantum_instance)\n print(\"Success of the FirstOrder feature map kernel:\")\n print(result['testing_accuracy'])\n print(result_me['testing_accuracy'])\n\n # Generate the feature map\n feature_map = SecondOrderExpansion(feature_dimension=2, depth=2)\n\n # Run the Quantum Kernel Estimator and classify the test data\n qsvm = QSVM(feature_map=feature_map, training_dataset=samp_train,\n test_dataset=samp_test)\n qsvm_me = QSVM(feature_map=feature_map, training_dataset=samp_train_me,\n test_dataset=samp_test_me)\n\n result = qsvm.run(quantum_instance)\n result_me = qsvm_me.run(quantum_instance)\n print(\"Success of the SecondOrder feature map kernel:\")\n print(result['testing_accuracy'])\n print(result_me['testing_accuracy'])\n\n # Last implementation using the custom circuit generator\n print(\"Success for my implementation (second order):\")\n my_impl(samp_train, samp_test, labels)\n my_impl(samp_train_me, samp_test_me, labels_me)\n\n feature_map = CustomExpansion(num_qubits=2, constructor_function=custom_constr, feature_param=[1])\n\n qsvm = QSVM(feature_map=feature_map, training_dataset=samp_train,\n test_dataset=samp_test)\n qsvm_me = QSVM(feature_map=feature_map, training_dataset=samp_train_me,\n test_dataset=samp_test_me)\n\n result = qsvm.run(quantum_instance)\n result_me = qsvm_me.run(quantum_instance)\n print(\"Success of the Custom feature map kernel:\")\n print(result['testing_accuracy'])\n print(result_me['testing_accuracy'])", "def simulate_quantities_of_interest_superoperator(tlist, c_ops, noise_parameters_CZ, fluxlutman,\n fluxbias_q1, amp,\n sim_step,\n verbose: bool=True):\n\n H_0=calc_hamiltonian(0,fluxlutman,noise_parameters_CZ) # computed at 0 amplitude\n # NOTE: parameters of H_0 could be not exactly e.g. 
the bare frequencies\n\n # We change the basis from the standard basis to the basis of eigenvectors of H_0\n # The columns of S are the eigenvectors of H_0, appropriately ordered\n if noise_parameters_CZ.dressed_compsub():\n S = qtp.Qobj(matrix_change_of_variables(H_0),dims=[[3, 3], [3, 3]])\n else:\n S = qtp.tensor(qtp.qeye(3),qtp.qeye(3)) # line here to quickly switch off the use of S\n H_0_diag = S.dag()*H_0*S\n\n #w_q0 = fluxlutman.q_freq_01()\n w_q0 = (H_0_diag[1,1]-H_0_diag[0,0]) / (2*np.pi)\n #w_q1 = fluxlutman.q_freq_10()\n w_q1 = (H_0_diag[3,3]-H_0_diag[0,0]) / (2*np.pi)\n\n # H_rotateaway = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1, \n # alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)\n\n w_q1_sweetspot = noise_parameters_CZ.w_q1_sweetspot()\n # Correction up to second order of the frequency due to flux noise, computed from w_q0(phi) = w_q0^sweetspot * sqrt(cos(pi * phi/phi_0))\n w_q1_biased = w_q1 - np.pi/2 * (w_q1_sweetspot**2/w_q1) * np.sqrt(1 - (w_q1**4/w_q1_sweetspot**4)) * fluxbias_q1 - \\\n - np.pi**2/2 * w_q1_sweetspot * (1+(w_q1**4/w_q1_sweetspot**4)) / (w_q1/w_q1_sweetspot)**3 * fluxbias_q1**2\n # with sigma up to circa 1e-3 \\mu\\Phi_0 the second order is irrelevant\n correction_to_H = coupled_transmons_hamiltonian_new(w_q0=0, w_q1=np.real(w_q1_biased-w_q1), alpha_q0=0, alpha_q1=0, J=0)\n\n\n t0 = time.time()\n\n exp_L_total=1\n for i in range(len(amp)):\n H=calc_hamiltonian(amp[i],fluxlutman,noise_parameters_CZ) + correction_to_H\n H=S.dag()*H*S\n if c_ops != []:\n c_ops_temp=[]\n for c in range(len(c_ops)):\n if isinstance(c_ops[c],list):\n c_ops_temp.append(c_ops[c][0]*c_ops[c][1][i]) # c_ops are already in the H_0 basis\n else:\n c_ops_temp.append(c_ops[c])\n liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step).expm()\n else:\n liouville_exp_t=(-1j*H*sim_step).expm()\n exp_L_total=liouville_exp_t*exp_L_total\n\n t1 = time.time()\n #print('\\n alternative propagator',t1-t0)\n\n\n U_final = exp_L_total\n #U_final=rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_0_diag)\n\n phases = phases_from_superoperator(U_final) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond\n phi_cond = phases[-1]\n L1 = leakage_from_superoperator(U_final)\n population_02_state = calc_population_02_state(U_final)\n L2 = seepage_from_superoperator(U_final)\n avgatefid = pro_avfid_superoperator_phasecorrected(U_final,phases)\n avgatefid_compsubspace = pro_avfid_superoperator_compsubspace_phasecorrected(U_final,L1,phases) # leakage has to be taken into account, see Woods & Gambetta\n #print('avgatefid_compsubspace',avgatefid_compsubspace)\n\n \n \n #H_twoqubits = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1, \n # alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)\n #U_final_new = rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_twoqubits) ### old method rotating away also the phase of the |2> state\n\n t = tlist[-1]+sim_step\n U_final_new = correct_reference(U=U_final,w_q1=w_q1,w_q0=w_q0,t=t)\n\n ### Script to check that we are correctly removing the single qubit phases in the rotating frame\n # cz_length = fluxlutman.cz_length()\n # U_check = (1j*H_twoqubits*cz_length).expm() * (-1j*H_0_diag*cz_length).expm()\n # phases_check = phases_from_superoperator(U_check)\n # print(phases_check)\n\n \n avgatefid_compsubspace_notphasecorrected = pro_avfid_superoperator_compsubspace(U_final_new,L1)\n # NOTE: a single qubit phase off by 30 degrees costs 5.5% fidelity\n\n ### Script to check that leakage and phi_cond are not affected by the 
phase correction, as it should be\n # L1_bis = leakage_from_superoperator(U_final_new)\n # phi_cond_bis = phases_from_superoperator(U_final_new)[-1]\n # print('leakage',L1-L1_bis)\n # print('phi_cond',phi_cond-phi_cond_bis)\n\n phases = phases_from_superoperator(U_final_new) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond\n phase_q0 = (phases[1]-phases[0]) % 360\n phase_q1 = (phases[2]-phases[0]) % 360\n\n\n # We now correct only for the phase of qubit left (q1), in the rotating frame\n avgatefid_compsubspace_pc_onlystaticqubit = pro_avfid_superoperator_compsubspace_phasecorrected_onlystaticqubit(U_final_new,L1,phases)\n \n\n return {'phi_cond': phi_cond, 'L1': L1, 'L2': L2, 'avgatefid_pc': avgatefid,\n 'avgatefid_compsubspace_pc': avgatefid_compsubspace, 'phase_q0': phase_q0, 'phase_q1': phase_q1,\n 'avgatefid_compsubspace': avgatefid_compsubspace_notphasecorrected,\n 'avgatefid_compsubspace_pc_onlystaticqubit': avgatefid_compsubspace_pc_onlystaticqubit, 'population_02_state': population_02_state,\n 'U_final_new': U_final_new}", "def test_qc_quantize_recurrent_param_op(self):\n graph = tf.Graph()\n config = tf.compat.v1.ConfigProto(log_device_placement=False)\n sess = tf.compat.v1.Session(graph=graph, config=config)\n bitwidth = 8\n use_symm_encoding = True\n\n with graph.as_default():\n # place holder for the input\n with tf.device(\"/device:CPU:0\"):\n inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')\n tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF,\n libpymo.RoundingMode.ROUND_NEAREST)\n tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)\n tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)\n\n time_step_tensor = tf.constant(1, dtype=tf.int32)\n\n encoding_min = tf.Variable(initial_value=-0.5, trainable=True, dtype=tf.double)\n encoding_max = tf.Variable(initial_value=0.5, trainable=True, dtype=tf.double)\n bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)\n use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False,\n dtype=tf.bool)\n\n mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),\n trainable=False, dtype=tf.int32)\n\n sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer,\n encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer])\n\n pass_through_op_output = zero_out_module.qc_quantize_recurrent_param(name='quant_op', in_tensor=inp,\n op_mode=mode_var,\n tensor_quantizer_reference=tensor_quant_ref,\n encoding_min=encoding_min,\n encoding_max=encoding_max,\n bit_width=bit_width,\n use_symmetric_encoding=use_symmetric_encoding,\n time_steps=time_step_tensor)\n\n inp_tensor = sess.graph.get_tensor_by_name('input:0')\n # inp_data = np.random.rand(10).astype(np.float32)\n np.random.seed(18)\n inp_data = np.random.randint(low=-1, high=2, size=10).astype(np.float32)\n\n # get the output\n print(inp_data)\n out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})\n print(out_data)\n\n # compare qc_quantize op's output with input\n # encodings being set to -0.5 and 0.5 should not have a bearing on this quantized output\n # we should not observe truncation if op's encoding min/max input values are used instead of cached values\n assert np.allclose(out_data, inp_data, atol=1e-6)\n sess.close()", "def test6():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad 
experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_ataque_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_no_trainable_parameters(self, mocker):\r\n spy = mocker.spy(qml.gradients.finite_difference, \"generate_shifted_tapes\")\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n tape.trainable_params = {}\r\n\r\n tapes, fn = finite_diff(tape)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.size == 0\r\n assert np.all(res == np.array([[]]))\r\n\r\n spy.assert_not_called()\r\n assert len(tapes) == 0", "def test_tf(self, diff_method, tol):\r\n tf = pytest.importorskip(\"tensorflow\", minversion=\"2.0\")\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n @qml.qnode(dev, interface=\"tf\", diff_method=diff_method)\r\n def circuit(weights):\r\n qml.RX(weights[0], wires=0)\r\n qml.RY(weights[1], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.PhaseShift(weights[2], wires=1)\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1))\r\n\r\n weights = np.array([0.432, 0.12, -0.432])\r\n weights_t = tf.Variable(weights)\r\n a, b, c = weights\r\n\r\n with tf.GradientTape() as tape:\r\n loss = qml.metric_tensor(circuit)(weights_t)[2, 2]\r\n\r\n grad = tape.gradient(loss, weights_t)\r\n expected = np.array(\r\n [np.cos(a) * np.cos(b) ** 2 * np.sin(a) / 2, np.cos(a) ** 2 * np.sin(2 * b) / 4, 0]\r\n )\r\n assert np.allclose(grad, expected, atol=tol, rtol=0)", "def test_independent_parameters(self):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.RX(1, wires=[0])\r\n qml.RX(1, wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RX(1, wires=[0])\r\n qml.RX(1, wires=[1])\r\n qml.expval(qml.PauliZ(1))\r\n\r\n tapes, fn = finite_diff(tape1, approx_order=1)\r\n j1 = fn(dev.batch_execute(tapes))\r\n\r\n # We should only be executing the device to differentiate 1 parameter (2 executions)\r\n assert dev.num_executions == 2\r\n\r\n tapes, fn = finite_diff(tape2, approx_order=1)\r\n j2 = fn(dev.batch_execute(tapes))\r\n\r\n exp = -np.sin(1)\r\n\r\n assert np.allclose(j1, [exp, 0])\r\n assert np.allclose(j2, [0, exp])", "def test_specs(self, diff_method, len_info):\n dev = qml.device(\"default.qubit\", wires=4)\n\n @qml.qnode(dev, diff_method=diff_method)\n def circuit(x, y, add_RY=True):\n qml.RX(x[0], wires=0)\n qml.Toffoli(wires=(0, 1, 2))\n qml.CRY(x[1], wires=(0, 1))\n qml.Rot(x[2], x[3], y, wires=2)\n if add_RY:\n qml.RY(x[4], wires=1)\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliX(1))\n\n x = np.array([0.05, 0.1, 0.2, 0.3, 0.5], requires_grad=True)\n y = np.array(0.1, 
requires_grad=False)\n\n info_func = qml.specs(circuit)\n\n info = info_func(x, y, add_RY=False)\n\n circuit(x, y, add_RY=False)\n\n assert len(info) == len_info\n\n gate_sizes = defaultdict(int, {1: 2, 3: 1, 2: 1})\n gate_types = defaultdict(int, {\"RX\": 1, \"Toffoli\": 1, \"CRY\": 1, \"Rot\": 1})\n expected_resources = qml.resource.Resources(\n num_wires=3, num_gates=4, gate_types=gate_types, gate_sizes=gate_sizes, depth=3\n )\n assert info[\"resources\"] == expected_resources\n\n assert info[\"num_observables\"] == 2\n assert info[\"num_diagonalizing_gates\"] == 1\n assert info[\"num_device_wires\"] == 4\n assert info[\"diff_method\"] == diff_method\n assert info[\"num_trainable_params\"] == 4\n\n if diff_method == \"parameter-shift\":\n assert info[\"num_gradient_executions\"] == 6\n\n if diff_method != \"backprop\":\n assert info[\"device_name\"] == \"default.qubit\"\n else:\n assert info[\"device_name\"] == \"default.qubit.autograd\"", "def test_independent_parameter(self, mocker):\r\n spy = mocker.spy(qml.gradients.finite_difference, \"generate_shifted_tapes\")\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n tapes, fn = finite_diff(tape)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (1, 2)\r\n\r\n assert len(spy.call_args_list) == 1\r\n\r\n # only called for parameter 0\r\n assert spy.call_args[0][0:2] == (tape, 0)", "def test_differentiable_expand(self, execute_kwargs, tol):\n\n class U3(qml.U3):\n def expand(self):\n theta, phi, lam = self.data\n wires = self.wires\n return [\n qml.Rot(lam, theta, -lam, wires=wires),\n qml.PhaseShift(phi + lam, wires=wires),\n ]\n\n def cost_fn(a, p, device):\n qscript = qml.tape.QuantumScript(\n [qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))]\n )\n qscript = qscript.expand(stop_at=lambda obj: device.supports_operation(obj.name))\n return execute([qscript], device, **execute_kwargs)[0]\n\n a = jax.numpy.array(0.1)\n p = jax.numpy.array([0.1, 0.2, 0.3])\n\n dev = qml.device(\"default.qubit\", wires=1)\n res = jax.jit(cost_fn, static_argnums=2)(a, p, device=dev)\n expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (\n np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])\n )\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n jac_fn = jax.jit(jax.grad(cost_fn, argnums=(1)), static_argnums=2)\n res = jac_fn(a, p, device=dev)\n expected = jax.numpy.array(\n [\n np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),\n np.cos(p[1]) * np.cos(p[2]) * np.sin(a)\n - np.sin(p[1])\n * (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),\n np.sin(a)\n * (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),\n ]\n )\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_simulate_sampled_expectation_inputs(self):\n n_qubits = 5\n batch_size = 5\n symbol_names = ['alpha']\n qubits = cirq.GridQubit.rect(1, n_qubits)\n circuit_batch, resolver_batch = \\\n util.random_symbol_circuit_resolver_batch(\n qubits, symbol_names, batch_size)\n\n symbol_values_array = np.array(\n [[resolver[symbol]\n for symbol in symbol_names]\n for resolver in resolver_batch])\n\n pauli_sums = util.random_pauli_sums(qubits, 3, batch_size)\n num_samples = [[10]] * batch_size\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'programs must be rank 1'):\n # Circuit tensor has too 
many dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor([circuit_batch]), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_names must be rank 1.'):\n # symbol_names tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), np.array([symbol_names]),\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_values must be rank 2.'):\n # symbol_values_array tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n np.array([symbol_values_array]),\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_values must be rank 2.'):\n # symbol_values_array tensor has too few dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array[0],\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'pauli_sums must be rank 2.'):\n # pauli_sums tensor has too few dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch),\n symbol_names, symbol_values_array,\n util.convert_to_tensor(list(pauli_sums)), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'pauli_sums must be rank 2.'):\n # pauli_sums tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n [util.convert_to_tensor([[x] for x in pauli_sums])],\n num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'num_samples must be rank 2'):\n # num_samples tensor has the wrong shape.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]),\n [num_samples])\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'num_samples must be rank 2'):\n # num_samples tensor has the wrong shape.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]),\n num_samples[0])\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Unparseable proto'):\n # circuit tensor has the right type but invalid values.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n ['junk'] * batch_size, symbol_names, symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Could not find symbol in parameter map'):\n # symbol_names tensor has the right type but invalid values.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), ['junk'],\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'qubits not found in circuit'):\n # pauli_sums tensor has the right type but invalid values.\n new_qubits = [cirq.GridQubit(5, 5), 
cirq.GridQubit(9, 9)]\n new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size)\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in new_pauli_sums]),\n num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Unparseable proto'):\n # pauli_sums tensor has the right type but invalid values 2.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, [['junk']] * batch_size, num_samples)\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # circuits tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n [1.0] * batch_size, symbol_names, symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # symbol_names tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), [0.1234],\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):\n # symbol_values tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n [['junk']] * batch_size,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # pauli_sums tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, [[1.0]] * batch_size, num_samples)\n\n with self.assertRaisesRegex(TypeError, 'missing'):\n # we are missing an argument.\n # pylint: disable=no-value-for-parameter\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, num_samples)\n # pylint: enable=no-value-for-parameter\n\n with self.assertRaisesRegex(TypeError, 'positional arguments'):\n # pylint: disable=too-many-function-args\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), [],\n num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='do not match'):\n # wrong op size.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor([cirq.Circuit()]), symbol_names,\n symbol_values_array.astype(np.float64),\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'greater than 0'):\n # pylint: disable=too-many-function-args\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]),\n [[-1]] * batch_size)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='do not match'):\n # wrong symbol_values size.\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array[:int(batch_size * 0.5)],\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='cirq.Channel'):\n # attempting to use noisy 
circuit.\n noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))\n tfq_simulate_ops.tfq_simulate_sampled_expectation(\n util.convert_to_tensor([noisy_circuit for _ in pauli_sums]),\n symbol_names, symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)", "def test3():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n exp1.trafico.pingMeasure(filename='ensayo_ping.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_add_circuit_noname(self):\n q_program = QuantumProgram()\n qr = q_program.create_quantum_register(size=2)\n cr = q_program.create_classical_register(size=2)\n qc1 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n qc2 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n qc1.h(qr[0])\n qc1.measure(qr[0], cr[0])\n qc2.measure(qr[1], cr[1])\n new_circuit = qc1 + qc2\n q_program.add_circuit(quantum_circuit=new_circuit)\n backend = 'local_qasm_simulator_py' # cpp simulator rejects non string IDs (FIXME)\n shots = 1024\n result = q_program.execute(backend=backend, shots=shots, seed=78)\n counts = result.get_counts(new_circuit.name)\n target = {'00': shots / 2, '01': shots / 2}\n threshold = 0.04 * shots\n self.assertDictAlmostEqual(counts, target, threshold)\n self.assertRaises(QISKitError, result.get_counts)", "def test_MasterEquation_simulateDynamicSloution(self,\n plot_figures=False,\n verbose=False,\n ):\n\n do_test_1 = True\n do_test_2 = True\n do_test_3 = True\n do_test_4 = True\n do_test_5 = True\n do_test_6 = True\n\n if do_test_1:\n print(\"Test 1 running\")\n ## test 1:\n\n # instanciate a MasterEquationSolver\n mes = HaPPPy.MasterEquation.MasterEquationSolver()\n # parameters of this tets:\n ฮต = mes.getฮต()\n ฮด = 1E-2\n a = 1 # > 0\n p0 = 0.1 # > 0\n p1 = 0.7 # > 0\n p2 = 0.2 # > 0, p0 + p1 + p2 = 1\n ฮ”t = 1\n t_max = 100 # >= plot_stop, plot_start\n plot_start = 0\n plot_stop = 20\n plot_step = 1\n # set-up a reasonable ฮ“-matrix\n ฮ“_L = np.array([[0, a, 0], [0, 0, 0], [0, 0, 0]])\n ฮ“_R = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n ns = [1, 2]\n # choose a legitimate start value for P_0 (P_0 = P(t=0))\n P_0 = np.array([p0, p1, p2])\n # simulate\n sim_tdp, sim_cur = mes.simulateDynamicSloution(\n ฮ”t, t_max, P_0, ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n stat_ps, stat_curs = mes.calculateStationarySolutions(\n ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n # plot result\n if plot_figures:\n sim_tdp.quickPlot(x_symbol=\"t\", y_symbol=\"P\",\n start=plot_start,\n stop=plot_stop,\n step=plot_step,\n )\n sim_cur.quickPlot(x_symbol=\"t\", y_symbol=\"I\",\n legend=[\"$I^L$\",\"$I^R$\"],\n start=plot_start,\n stop=plot_stop,\n step=plot_step,\n )\n # check validity\n self.assertTrue(sim_tdp.valid())\n self.assertTrue(sim_cur.valid())\n # check development of 
porpabilities\n Ps = sim_tdp.getValues()\n # a. check conservation of total probability\n for P in Ps:\n self.assertTrue(1 - ฮต <= sum(P) <= 1 + ฮต)\n # b. check values after a 'long' time period\n # expected: propability of state 1 'shifts' to state 2\n # state 3 is constant\n self.assertTrue(abs(Ps[t_max][0] - (p0 + p1)) <= ฮด)\n self.assertTrue(abs(Ps[t_max][1]) <= ฮด)\n self.assertTrue(abs(Ps[t_max][2] - p2) <= ฮต)\n print(\"Test 1 OK\")\n\n if do_test_2:\n print(\"Test 2 running\")\n ## test 2:\n\n # instanciate a MasterEquationSolver\n mes = HaPPPy.MasterEquation.MasterEquationSolver()\n # parameters of this tets:\n ฮต = mes.getฮต()\n ฮด = 1E-2\n f = 2.0 # fraction of P_1 to P_0 in equilibrium\n a = 0.5 # > 0\n p0 = 0.3 # > 0\n p1 = 0.7 # > 0, p0 + p1 = 1\n ฮ”t = 1\n t_max = 100\n # set-up a reasonable ฮ“-matrix\n ฮ“_L = np.array([[0, a], [0, 0]])\n ฮ“_R = f * ฮ“_L.transpose()\n ns = [1, 1]\n # choose a legitimate start value for P_0 (P_0 = P(t=0))\n P_0 = np.array([p0, p1])\n # simulate\n sim_tdp, sim_cur = mes.simulateDynamicSloution(\n ฮ”t, t_max, P_0, ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n stat_ps, stat_curs = mes.calculateStationarySolutions(\n ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n # plot result\n if plot_figures:\n sim_tdp.quickPlot(x_symbol=\"t\", y_symbol=\"P\",\n title=(\"P_stat = \" + str(stat_ps)),\n )\n sim_cur.quickPlot(x_symbol=\"t\", y_symbol=\"I\",\n title=(\"I_stat = \" + str(stat_curs)),\n legend=[\"$I^L$\",\"$I^R$\"],\n )\n # check validity\n self.assertTrue(sim_tdp.valid())\n self.assertTrue(sim_cur.valid())\n # check development of porpabilities\n Ps = sim_tdp.getValues()\n # a. check conservation of total probability\n for P in Ps:\n self.assertTrue(1 - ฮต <= sum(P) <= 1 + ฮต)\n # b. check values after a 'long' time period\n # expected: propability of state 1 'shifts' to state 2\n # state 3 is constant\n self.assertTrue(abs(f * Ps[t_max][0] - Ps[t_max][1]) <= ฮด)\n print(\"Test 2 OK\")\n\n if do_test_3:\n print(\"Test 3 running\")\n ## test 3:\n # symmetric ฮ“ = ฮ“_L - ฮ“_R --> uniform distibution of propability\n\n # instanciate a MasterEquationSolver\n mes = HaPPPy.MasterEquation.MasterEquationSolver()\n # parameters of this tets:\n ฮต = mes.getฮต()\n ฮ”t = 1\n t_max = 100\n n = 50\n # set-up a reasonable ฮ“-matrix\n ฮ“_L = np.array([[(i + j)/n**2 for j in range(n)] for i in range(n)])\n ฮ“_R = 2 * ฮ“_L\n ns = [1] * n\n # choose a legitimate start value for P_0 (P_0 = P(t=0))\n P_0 = np.array([i for i in range(n)])\n P_0 = P_0 / sum(P_0)\n # simulate\n sim_tdp, sim_cur = mes.simulateDynamicSloution(\n ฮ”t, t_max, P_0, ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n stat_ps, stat_curs = mes.calculateStationarySolutions(\n ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n # plot result\n if plot_figures:\n sim_tdp.quickPlot(x_symbol=\"t\", y_symbol=\"P\")\n sim_cur.quickPlot(x_symbol=\"t\", y_symbol=\"I\",\n legend=[\"$I^L$\",\"$I^R$\"],\n )\n # check validity\n self.assertTrue(sim_tdp.valid())\n self.assertTrue(sim_cur.valid())\n # check development of porpabilities\n Ps = sim_tdp.getValues()\n # a. check conservation of total probability\n for P in Ps:\n self.assertTrue(1 - ฮต <= sum(P) <= 1 + ฮต)\n # b. 
check values after a 'long' time period\n # expected: uniform distribution\n for P_i in Ps[t_max]:\n self.assertTrue(1/n - ฮต <= P_i <= 1/n + ฮต)\n print(\"Test 3 OK\")\n\n if do_test_4:\n print(\"Test 4 running\")\n ## test 4:\n\n # instanciate a MasterEquationSolver\n mes = HaPPPy.MasterEquation.MasterEquationSolver()\n # parameters of this tets:\n ฮต = mes.getฮต()\n ฮ”t = 1\n t_max = 10\n n = 3\n # set-up a reasonable ฮ“-matrix\n ฮ“_L = np.zeros((n, n))\n ฮ“_R = np.zeros((n, n))\n ns = [1] * n\n # choose a legitimate start value for P_0 (P_0 = P(t=0))\n P_0 = np.array([i for i in range(n)])\n P_0 = P_0 / sum(P_0)\n # simulate\n sim_tdp, sim_cur = mes.simulateDynamicSloution(\n ฮ”t, t_max, P_0, ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n stat_ps, stat_curs = mes.calculateStationarySolutions(\n ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n # plot result\n if plot_figures:\n sim_tdp.quickPlot(x_symbol=\"t\", y_symbol=\"P\")\n sim_cur.quickPlot(x_symbol=\"t\", y_symbol=\"I\",\n legend=[\"$I^L$\",\"$I^R$\"],\n )\n # check validity\n self.assertTrue(sim_tdp.valid())\n self.assertTrue(sim_cur.valid())\n # check development of porpabilities\n Ps = sim_tdp.getValues()\n # a. check conservation of individual probabilities\n for P in Ps:\n self.assertTrue(-ฮต <= sum(abs(P - P_0)) <= ฮต)\n # check current\n Is = sim_cur.getValues()\n # a. check if current is constntly 0\n for I in Is:\n self.assertTrue((abs(I) <= ฮต).all())\n print(\"Test 4 OK\")\n\n if do_test_5:\n print(\"Test 5 running\")\n ## test 5:\n # test tolerance behaviour\n\n # parameters of this tets:\n ฮต = 1E-100 # ridiculously precise\n ฮ”t = 1\n t_max = 100\n n = 10\n # set-up a reasonable ฮ“-matrix\n ฮ“_L = np.array([[(i + j)/n**2 for j in range(n)] for i in range(n)])\n ฮ“_R = 2 * ฮ“_L\n ns = [1] * n\n # choose a legitimate start value for P_0 (P_0 = P(t=0))\n P_0 = np.array([i for i in range(n)])\n P_0 = P_0 / sum(P_0)\n # instanciate a MasterEquationSolver\n mes = HaPPPy.MasterEquation.MasterEquationSolver(ฮต=ฮต)\n # simulate\n sim_tdp, sim_cur = mes.simulateDynamicSloution(\n ฮ”t, t_max, P_0, ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n stat_ps, stat_curs = mes.calculateStationarySolutions(\n ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n # plot result\n if plot_figures:\n sim_tdp.quickPlot(x_symbol=\"t\", y_symbol=\"P\")\n sim_cur.quickPlot(x_symbol=\"t\", y_symbol=\"I\",\n legend=[\"$I^L$\",\"$I^R$\"],\n )\n # check validity\n self.assertTrue(not sim_tdp.valid())\n self.assertTrue(not sim_cur.valid())\n print(\"Test 5 OK\")\n\n if do_test_6:\n print(\"Test 6 running\")\n ## test 6:\n # test if algorith can handle large inputs\n # (like test 2 but with large n to test accuracy)\n\n # instanciate a MasterEquationSolver\n mes = HaPPPy.MasterEquation.MasterEquationSolver()\n # parameters of this tets:\n ฮต = mes.getฮต()\n ฮ”t = 1\n t_max = 100\n n = 100\n # set-up a reasonable ฮ“-matrix\n ฮ“_L = np.array([[(i + j)/n**2 for j in range(n)] for i in range(n)])\n ฮ“_R = 2 * ฮ“_L\n ns = [1] * n\n # choose a legitimate start value for P_0 (P_0 = P(t=0))\n P_0 = np.array([i for i in range(n)])\n P_0 = P_0 / sum(P_0)\n # simulate\n sim_tdp, sim_cur = mes.simulateDynamicSloution(\n ฮ”t, t_max, P_0, ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n stat_ps, stat_curs = mes.calculateStationarySolutions(\n ฮ“_L, ฮ“_R, ns, verbose=verbose\n )\n # check validity\n self.assertTrue(sim_tdp.valid())\n self.assertTrue(sim_cur.valid())\n # check development of porpabilities\n Ps = sim_tdp.getValues()\n # a. 
check conservation of total probability\n for P in Ps:\n self.assertTrue(1 - ฮต <= sum(P) <= 1 + ฮต)\n # b. check values after a 'long' time period\n # expected: uniform distribution\n for P_i in Ps[t_max]:\n self.assertTrue(1/n - ฮต <= P_i <= 1/n + ฮต)\n print(\"Test 6 OK\")", "def run_trial():\n env = gym.make('CartPole-v0')\n obs_dim = env.observation_space.shape[0]\n n_actions = env.action_space.n\n\n qnet = QNet(obs_dim, n_actions)\n agent = Sarsa(qnet, n_actions, 0.99, 1.0, 0.05, 1e4)\n optim = torch.optim.RMSprop(qnet.parameters(), lr=0.01)\n memory = Memory()\n\n return_hist = []\n timestep = 1\n\n while timestep < 1e5:\n state = env.reset()\n done = False\n while not done:\n # Pick action and run a single environment step\n action = agent.act(state, timestep).item()\n next_state, reward, done, _ = env.step(action)\n # Add experience to memory for training\n memory.add_experience(state, action, reward, next_state, done)\n\n state = next_state\n\n # Run a single training step every 32 timesteps\n if timestep % 32 == 0:\n batch = memory.sample()\n agent.train(batch, optim)\n\n # Evaluate the current agent every 1000 agents\n if timestep % 1000 == 0:\n eval_return = evaluate(agent)\n return_hist.append(eval_return)\n\n timestep += 1\n\n return np.array(return_hist)", "def test4():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_normal_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador" ]
[ "0.63494396", "0.611109", "0.59224325", "0.589698", "0.5878735", "0.58523005", "0.5766617", "0.57567036", "0.5725462", "0.56870687", "0.5686969", "0.56491363", "0.5639581", "0.56304204", "0.5624216", "0.56179994", "0.5607596", "0.56032", "0.55903065", "0.5581218", "0.55777025", "0.5561267", "0.5558376", "0.5555391", "0.5543252", "0.5543007", "0.55397093", "0.55366343", "0.55195147", "0.55187297" ]
0.7527882
0
Test classical processing within the quantum tape for a single tape
def test_classical_processing_single_tape(self, execute_kwargs):
    a = jax.numpy.array(0.1)
    b = jax.numpy.array(0.2)
    c = jax.numpy.array(0.3)

    def cost(a, b, c, device):
        with qml.queuing.AnnotatedQueue() as q:
            qml.RY(a * c, wires=0)
            qml.RZ(b, wires=0)
            qml.RX(c + c**2 + jax.numpy.sin(a), wires=0)
            qml.expval(qml.PauliZ(0))

        tape = qml.tape.QuantumScript.from_queue(q)

        return execute([tape], device, **execute_kwargs)[0]

    dev = qml.device("default.qubit", wires=2)
    res = jax.jit(jax.grad(cost, argnums=(0, 1, 2)), static_argnums=3)(a, b, c, device=dev)
    assert len(res) == 3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_active_inference_SPM_1b(self):", "def test_classical_processing_multiple_tapes(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.3, 0.2])\n\n def cost_fn(x):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.Hadamard(0)\n qml.RY(x[0], wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.Hadamard(0)\n qml.CRX(2 * x[0] * x[1], wires=[0, 1])\n qml.RX(2 * x[1], wires=[1])\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n result = execute(tapes=[tape1, tape2], device=dev, **execute_kwargs)\n return result[0] + result[1] - 7 * result[1]\n\n res = jax.jit(jax.grad(cost_fn))(params)\n assert res.shape == (2,)", "def test_reusing_quantum_tape(self, execute_kwargs, tol):\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.RX(b, wires=1)\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n assert tape.trainable_params == [0, 1]\n\n def cost(a, b):\n # An explicit call to _update() is required here to update the\n # trainable parameters in between tape executions.\n # This is different from how the autograd interface works.\n # Unless the update is issued, the validation check related to the\n # number of provided parameters fails in the tape: (len(params) !=\n # required_length) and the tape produces incorrect results.\n tape._update()\n new_tape = tape.bind_new_parameters([a, b], [0, 1])\n return execute([new_tape], dev, **execute_kwargs)[0]\n\n jac_fn = jax.jit(jax.grad(cost))\n jac = jac_fn(a, b)\n\n a = jax.numpy.array(0.54)\n b = jax.numpy.array(0.8)\n\n # check that the cost function continues to depend on the\n # values of the parameters for subsequent calls\n res2 = cost(2 * a, b)\n expected = [np.cos(2 * a)]\n assert np.allclose(res2, expected, atol=tol, rtol=0)\n\n jac_fn = jax.jit(jax.grad(lambda a, b: cost(2 * a, b)))\n jac = jac_fn(a, b)\n expected = -2 * np.sin(2 * a)\n assert np.allclose(jac, expected, atol=tol, rtol=0)", "def test_machine_get_tape(self):\n self.machine.add_state('0 ,R, ,R, ,R, a,N,!')\n self.machine.init_tape(' aba caba_caba caba ')\n assert self.machine.get_tape() == 'aba caba caba caba'", "def test_integration(self):\n\n m = 5 # number of wires in A\n M = 2**m\n\n xmax = np.pi # bound to region [-pi, pi]\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.sin(xs[i]) ** 2\n r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])\n\n A_wires = [0, \"a\", -1.1, -10, \"bbb\"]\n target_wire = \"Ancilla\"\n wires = A_wires + [target_wire]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\"]\n\n def fn():\n qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=A_wires)\n r_unitary(qml.RY, r_rotations, control_wires=A_wires[::-1], target_wire=target_wire)\n\n qmc_circuit = qml.quantum_monte_carlo(\n fn, wires=wires, target_wire=target_wire, estimation_wires=estimation_wires\n )\n\n with qml.queuing.AnnotatedQueue() as q:\n qmc_circuit()\n qml.probs(estimation_wires)\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape = tape.expand(depth=2)\n\n assert all(\n not isinstance(op, (qml.MultiControlledX, qml.templates.QFT, qml.tape.QuantumScript))\n for op in tape.operations\n )\n\n dev = 
qml.device(\"default.qubit\", wires=wires + estimation_wires)\n res = dev.execute(tape)\n\n @qml.qnode(dev)\n def circuit():\n qml.templates.QuantumMonteCarlo(\n probs, func, target_wires=wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n res_expected = circuit()\n assert np.allclose(res, res_expected)", "def terminal_test(self):\n\n for self.cur_ep in tqdm.tqdm(range(1, self.episodes + 1), ascii=True, unit='episodes'):\n\n # Nombre de passages dans la boucle principale\n step = 1\n\n cur_state = self.env.reset()\n\n done = False\n\n while not done:\n\n # Choix au hasard entre :\n if np.random.random() > self.epsilon:\n # Action ร  partir de la q-table\n action = np.argmax(self.agent.get_q_values(np.array(cur_state)))\n\n else:\n # Action random\n action = np.random.randint(0, self.env.ACTION_SPACE_SIZE)\n\n # On effectue une action avec le serpent\n new_state, reward, done = self.env.step(action)\n\n # Ajout d'un exemple dans la mรฉmoire\n self.agent.update_training_set((cur_state, action, reward, new_state, done))\n\n # Entrainement รฉventuel\n self.agent.train()\n\n cur_state = new_state\n step += 1\n\n if self.epsilon > self.MIN_EPSILON:\n self.epsilon *= self.EPSILON_DECAY\n self.epsilon = max(self.MIN_EPSILON, self.epsilon)\n\n if self.save_model:\n self.agent.save_model(self.model_file_name)", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def test_serialize_circuit_rotations_tape(self, monkeypatch, tmpdir, test_batch_result):\n qml.enable_tape()\n dev = QeQiskitDevice(wires=1, shots=1000, backend=\"qasm_simulator\", analytic=False)\n\n circuit_history = []\n\n with qml.tape.QuantumTape() as tape1:\n qml.Hadamard(wires=[0])\n qml.expval(qml.Hadamard(0))\n\n with monkeypatch.context() as m:\n 
m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"gen_expval_workflow\",\n lambda component, backend_specs, circuits, operators, **kwargs: circuit_history.extend(\n circuits\n ),\n )\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: test_batch_result, # The exact results are not considered in the test\n )\n\n dev.execute(tape1)\n\n expected = 'OPENQASM 2.0;\\ninclude \"qelib1.inc\";\\nqreg q[1];\\ncreg c[1];\\nh q[0];\\nry(-0.7853981633974483) q[0];\\n'\n assert circuit_history[0] == expected\n qml.disable_tape()", "def test(args):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # dim of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # dim of the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n agent = MADDPG(state_size, action_size, actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3)\n\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n test_scores = []\n for i_episode in tqdm(range(1, 1+args.test_n_run)):\n # initialize the scores\n scores = np.zeros(num_agents)\n env_info = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_info.vector_observations # get the current states\n dones = [False]*num_agents\n while not np.any(dones):\n actions = agent.act(states) # select actions\n # send the actions to the environment\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get the next states\n rewards = env_info.rewards # get the rewards\n dones = env_info.local_done # see if episode has finished\n scores += rewards # update the scores\n # roll over the states to next time step\n states = next_states\n\n test_scores.append(np.max(scores))\n\n avg_score = sum(test_scores)/len(test_scores)\n print(\"Test Score: {}\".format(avg_score))\n\n return avg_score", "def test_predictor():", "def run_loop_measurement(t=0.5, name='test', loops=4, pump_t=180, total_t=600, jump_x=10):\n\n incident_angles = [0.1, 0.4]\n waxs_arc = [20, 0]\n user = \"TP\"\n\n condition = (\n ( -1 < waxs.arc.position )\n and ( waxs.arc.position < 1 )\n and (waxs_arc[0] == 20)\n )\n\n if condition:\n waxs_arc = waxs_arc[::-1]\n \n ranges = { 0.1 : [-16, 16, 33],\n 0.4 : [-25, 25, 51],\n }\n\n try:\n ai0 = RE.md['ai_0']\n except:\n yield from bp.count([])\n ai0 = db[-1].start['ai_0']\n print('Failed to acces RE.md')\n print(f'\\n\\nSample flat at theta = {ai0}')\n \n proposal_id('2023_2', '311564_Pettersson')\n #det_exposure_time(t, t)\n \n t_initial = time.time()\n\n for i in range(loops):\n t_start = time.time()\n print('Cycle number',i+1,'started at', (t_start - t_initial)/60)\n\n # Wait initial time for pumping to finish\n print(f'Start pumping now, going to wait for {pump_t} s\\n')\n while (time.time() - t_start) < pump_t:\n print(f'Pumping time: {(time.time() - 
t_start):.1f} s')\n yield from bps.sleep(10)\n\n # Go over SAXS and WAXS\n t_measurement = ( time.time() - t_initial ) / 60\n for wa in waxs_arc:\n yield from bps.mv(waxs, wa)\n dets = [pil900KW] if waxs.arc.position < 15 else [pil1M, pil900KW]\n\n for ai in incident_angles:\n yield from bps.mv(piezo.th, ai0 + ai)\n yield from bps.mvr(piezo.x, - jump_x)\n\n t2 = 2 * t if ai == 0.4 else t\n det_exposure_time(t2, t2)\n\n try:\n y_range = ranges[ai]\n except:\n y_range = [-10, 10, 11]\n \n sample_name = f'{name}{get_scan_md()}_time{t_measurement:.1f}_ai{ai}'\n sample_id(user_name=user, sample_name=sample_name)\n print(f\"\\n\\n\\n\\t=== Sample: {sample_name} ===\")\n yield from bp.rel_scan(dets, piezo.y, *y_range, md=dict(ai=ai))\n \n yield from bps.mv(waxs, waxs_arc[0],\n piezo.th, ai0)\n\n # Wait until the total loop time passes\n if i + 1 < loops:\n print(f'Waiting for the loop to last {total_t} s in total\\n')\n sleep_count = 0\n while (time.time() - t_start) < total_t:\n sleep_count += 1\n if (sleep_count % 10 == 0):\n print(f'Total time: {(time.time() - t_start):.1f} s')\n yield from bps.sleep(1)\n\n sample_id(user_name=\"test\", sample_name=\"test\")\n det_exposure_time(0.5, 0.5)", "def test_chunk_QFT(self, method, device):\n opts_no_chunk = {\n \"fusion_enable\": False,\n \"fusion_threshold\": 10,\n }\n opts_chunk = copy.copy(opts_no_chunk)\n opts_chunk[\"blocking_enable\"] = True\n opts_chunk[\"blocking_qubits\"] = 2\n\n backend = self.backend(method=method, device=device, **opts_chunk)\n backend_no_chunk = self.backend(method=method, device=device, **opts_no_chunk)\n\n shots = 100\n num_qubits = 3\n circuit = transpile(QFT(num_qubits), backend=backend, optimization_level=0)\n circuit.measure_all()\n\n result = backend.run(circuit, shots=shots, memory=True).result()\n counts = result.get_counts(circuit)\n result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()\n counts_no_chunk = result_no_chunk.get_counts(circuit)\n\n self.assertEqual(counts_no_chunk, counts)", "def test_error_if_not_expval_batched(self):\n qml.enable_tape()\n dev = qml.device(\"orquestra.qiskit\", wires=2)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.PauliZ(wires=[0]))\n qml.var(qml.PauliZ(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.expval(qml.PauliZ(wires=[0]))\n\n circuits = [tape1, tape2]\n with pytest.raises(NotImplementedError):\n res = dev.batch_execute(circuits)\n\n qml.disable_tape()", "def test_chunk_QFTWithFusion(self, method, device):\n opts_no_chunk = {\n \"fusion_enable\": True,\n \"fusion_threshold\": 5,\n }\n opts_chunk = copy.copy(opts_no_chunk)\n opts_chunk[\"blocking_enable\"] = True\n opts_chunk[\"blocking_qubits\"] = 4\n\n backend = self.backend(method=method, device=device, **opts_chunk)\n backend_no_chunk = self.backend(method=method, device=device, **opts_no_chunk)\n\n shots = 100\n num_qubits = 8\n circuit = transpile(QFT(num_qubits), backend=backend, optimization_level=0)\n circuit.measure_all()\n\n result = backend.run(circuit, shots=shots, memory=True).result()\n counts = result.get_counts(circuit)\n result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()\n counts_no_chunk = result_no_chunk.get_counts(circuit)\n\n self.assertEqual(counts_no_chunk, counts)", "def test_tape_iteration(self) -> None:\n tape = TMTape(\n tape=\"abcdef\",\n blank_symbol=\".\",\n current_position=2,\n )\n self.assertEqual(tuple(tape), (\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"))", "def test6():\n setLogLevel(\"info\")\n 
info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_ataque_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)", "def test5():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n exp1.pingAllTest() # **************** Parece que es necesario que se de un arranque al controlador\n # **************** para que aprenda las reglas antes del ataque.\n\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n #exp1.trafico.pingMeasure(filename='ping_ataque_test.log')\n \"\"\" 6. 
Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_serialize_circuit_no_rotations_tape(self, monkeypatch, tmpdir, test_batch_result):\n qml.enable_tape()\n dev = QeQiskitDevice(wires=1, shots=1000, backend=\"statevector_simulator\", analytic=True)\n\n circuit_history = []\n\n with qml.tape.QuantumTape() as tape1:\n qml.Hadamard(wires=[0])\n qml.expval(qml.Hadamard(0))\n\n with monkeypatch.context() as m:\n m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"gen_expval_workflow\",\n lambda component, backend_specs, circuits, operators, **kwargs: circuit_history.extend(\n circuits\n ),\n )\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: test_batch_result, # The exact results are not considered in the test\n )\n\n dev.execute(tape1)\n\n expected = 'OPENQASM 2.0;\\ninclude \"qelib1.inc\";\\nqreg q[1];\\ncreg c[1];\\nh q[0];\\n'\n assert circuit_history[0] == expected\n qml.disable_tape()", "def test_synth_tr():\n test_path = tempfile.mkdtemp()\n x_train, metadata = synth_tr(test_path)\n try:\n assert x_train.shape == (250, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_T0():", "def test_behaviour(self):\r\n\r\n with qml.tape.QuantumTape() as tape:\r\n qml.PauliZ(0)\r\n qml.RX(1.0, wires=0)\r\n qml.CNOT(wires=[0, 2])\r\n qml.Rot(2.0, 3.0, 4.0, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n tape.trainable_params = {0, 2}\r\n shifts = [0.1, -0.2, 1.6]\r\n res = generate_shifted_tapes(tape, 1, shifts=shifts)\r\n\r\n assert len(res) == len(shifts)\r\n assert res[0].get_parameters(trainable_only=False) == [1.0, 2.0, 3.1, 4.0]\r\n assert res[1].get_parameters(trainable_only=False) == [1.0, 2.0, 2.8, 4.0]\r\n assert res[2].get_parameters(trainable_only=False) == [1.0, 2.0, 4.6, 4.0]", "def test4():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_normal_test.log')\n \"\"\" 6. 
Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_identity_multiple_tape(self, dev, tmpdir, monkeypatch):\n qml.enable_tape()\n\n dev = qml.device(dev, wires=2, keep_files=False)\n\n with qml.tape.QuantumTape() as tape1:\n qml.RX(0.133, wires=0)\n qml.expval(qml.Identity(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.RX(0.432, wires=0)\n qml.expval(qml.Identity(wires=[0]))\n qml.expval(qml.Identity(wires=[1]))\n\n circuits = [tape1, tape2]\n\n test_uuid = \"1234\"\n with monkeypatch.context() as m:\n m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: None,\n )\n\n # Disable random uuid generation\n m.setattr(uuid, \"uuid4\", lambda *args: test_uuid)\n\n res = dev.batch_execute(circuits)\n\n # No workflow files were created because we only computed with\n # identities\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}.yaml\"))\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}.yaml\"))\n\n expected = [\n np.ones(1),\n np.ones(2),\n ]\n\n for r, e in zip(res, expected):\n assert np.allclose(r, e)\n\n qml.disable_tape()", "def step(self, action_index):\n\n x_t1, r_t, terminal, info = self.env.step(self.gym_actions[action_index])\n s_t1 = self.get_preprocessed_RAM(x_t1)\n\n \n return s_t1, r_t, terminal, info", "def main():\n # run_test_go_straight_inches()\n # run_test_turn_degrees()\n # run_test_spin_degrees()\n beep_if_blob_is_bigger_than(3000)", "def step(self):\n if self.op_state != Turing_Machine.RUNNING:\n return\n if self.end_time and time.time() >= self.end_time:\n self.op_state = Turing_Machine.TIME_OUT\n return\n\n if self.compute_steps:\n self.old_step_num = self.step_num\n # Note: We increment the number of loops early to take care of all the\n # places step() could early-return.\n self.num_loops += 1\n if self.prover:\n # Log the configuration and see if we can apply a rule.\n cond, new_tape, num_steps, replace_vars = self.prover.log(self.tape, self.state, self.step_num, self.num_loops-1)\n\n # If the prover has been printing, give us a newline and remind us\n # what the current configuration is.\n # TODO(sligocki): Figure out how to get this to happen only when prover\n # actually printed something (it doesn't if it just logged).\n #if self.prover.verbose:\n # print\n # self.num_loops -= 1 # Kludgey :/\n # self.verbose_print()\n # self.num_loops += 1\n\n # Proof system says that machine will repeat forever\n if cond == Turing_Machine.INF_REPEAT:\n self.op_state = Turing_Machine.INF_REPEAT\n self.inf_reason = PROOF_SYSTEM\n self.verbose_print()\n return\n # Proof system says that we can apply a rule\n elif cond == Turing_Machine.RUNNING:\n # TODO(shawn): This seems out of place here and is the only place in\n # the Simulator where we distinguish Algebraic_Expressions.\n # We should clean it up in some way.\n if replace_vars:\n assert self.options.allow_collatz\n # We don't want the update below to overwrite things.\n assert not frozenset(self.replace_vars.keys()).intersection(\n frozenset(replace_vars.keys()))\n self.replace_vars.update(replace_vars)\n # Update all instances of old variable (should 
just be in steps).\n assert isinstance(self.step_num, Algebraic_Expression)\n self.step_num = self.step_num.substitute(replace_vars)\n assert isinstance(self.old_step_num, Algebraic_Expression)\n self.old_step_num = self.old_step_num.substitute(replace_vars)\n assert not isinstance(self.num_loops, Algebraic_Expression)\n self.tape = new_tape\n self.num_rule_moves += 1\n if self.compute_steps:\n self.step_num += num_steps\n self.steps_from_rule += num_steps\n self.verbose_print()\n return\n # Get current symbol\n cur_symbol = self.tape.get_top_symbol()\n # Lookup TM transition rule\n cond, (symbol2write, next_state, next_dir), num_steps = \\\n self.machine.get_transition(cur_symbol, self.state, self.dir)\n # Test condition\n self.op_state = cond[0]\n self.op_details = cond[1:]\n # Apply transition\n # Chain move\n if next_state == self.state and next_dir == self.dir and \\\n self.op_state == Turing_Machine.RUNNING:\n num_reps = self.tape.apply_chain_move(symbol2write)\n if num_reps == Tape.INF:\n self.op_state = Turing_Machine.INF_REPEAT\n self.inf_reason = CHAIN_MOVE\n self.verbose_print()\n return\n # Don't need to change state or direction\n self.num_chain_moves += 1\n if self.compute_steps:\n self.step_num += num_steps*num_reps\n self.steps_from_chain += num_steps*num_reps\n # Simple move\n else:\n self.tape.apply_single_move(symbol2write, next_dir)\n self.state = next_state\n self.dir = next_dir\n self.num_macro_moves += 1\n if self.compute_steps:\n self.step_num += num_steps\n self.steps_from_macro += num_steps\n if self.op_state == Turing_Machine.INF_REPEAT:\n self.inf_reason = REPEAT_IN_PLACE\n if self.op_state != Turing_Machine.UNDEFINED:\n self.verbose_print()", "def test_T3():", "def test_T3():", "def test_active_inference_SPM_1a(self):\n array_path = os.path.join(os.getcwd(), DATA_PATH + \"vbx_test_1a.mat\")\n mat_contents = loadmat(file_name=array_path)\n\n A = mat_contents[\"A\"][0]\n B = mat_contents[\"B\"][0]\n C = to_arr_of_arr(mat_contents[\"C\"][0][0][:,0])\n obs_matlab = mat_contents[\"obs\"].astype(\"int64\")\n policy = mat_contents[\"policies\"].astype(\"int64\") - 1\n t_horizon = mat_contents[\"t_horizon\"][0, 0].astype(\"int64\")\n actions_matlab = mat_contents[\"actions\"].astype(\"int64\") - 1\n qs_matlab = mat_contents[\"qs\"][0]\n xn_matlab = mat_contents[\"xn\"][0]\n vn_matlab = mat_contents[\"vn\"][0]\n\n likelihoods_matlab = mat_contents[\"likelihoods\"][0]\n\n num_obs, num_states, _, num_factors = get_model_dimensions(A, B)\n obs = convert_observation_array(obs_matlab, num_obs)\n T = len(obs)\n\n agent = Agent(A=A, B=B, C=C, inference_algo=\"MMP\", policy_len=1, \n inference_horizon=t_horizon, use_BMA = False, \n policy_sep_prior = True)\n \n actions_python = np.zeros(T)\n\n for t in range(T):\n o_t = (np.where(obs[t])[0][0],)\n qx, xn_t, vn_t = agent.infer_states_test(o_t)\n q_pi, efe= agent.infer_policies()\n action = agent.sample_action()\n\n actions_python[t] = action\n\n xn_python = build_xn_vn_array(xn_t)\n vn_python = build_xn_vn_array(vn_t)\n\n if t == T-1:\n xn_python = xn_python[:,:,:-1,:]\n vn_python = vn_python[:,:,:-1,:]\n\n start_tstep = max(0, agent.curr_timestep - agent.inference_horizon)\n end_tstep = min(agent.curr_timestep + agent.policy_len, T)\n\n xn_validation = xn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n vn_validation = vn_matlab[0][:,:,start_tstep:end_tstep,t,:]\n\n self.assertTrue(np.isclose(xn_python, xn_validation).all())\n self.assertTrue(np.isclose(vn_python, vn_validation).all())\n \n 
self.assertTrue(np.isclose(actions_matlab[0,:],actions_python[:-1]).all())" ]
[ "0.62464356", "0.61126363", "0.59649825", "0.59497005", "0.5917769", "0.58554536", "0.57219815", "0.5702824", "0.56830955", "0.565531", "0.56474787", "0.5647309", "0.5630913", "0.5625515", "0.56166446", "0.5601413", "0.55888873", "0.5582979", "0.5569949", "0.55557936", "0.5534369", "0.5531022", "0.5512179", "0.55108273", "0.5510019", "0.550964", "0.5506962", "0.54863364", "0.54863364", "0.54650533" ]
0.6337568
0
Test classical processing within the quantum tape for multiple tapes
def test_classical_processing_multiple_tapes(self, execute_kwargs):
    dev = qml.device("default.qubit", wires=2)
    params = jax.numpy.array([0.3, 0.2])

    def cost_fn(x):
        with qml.queuing.AnnotatedQueue() as q1:
            qml.Hadamard(0)
            qml.RY(x[0], wires=[0])
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))

        tape1 = qml.tape.QuantumScript.from_queue(q1)

        with qml.queuing.AnnotatedQueue() as q2:
            qml.Hadamard(0)
            qml.CRX(2 * x[0] * x[1], wires=[0, 1])
            qml.RX(2 * x[1], wires=[1])
            qml.expval(qml.PauliZ(0))

        tape2 = qml.tape.QuantumScript.from_queue(q2)

        result = execute(tapes=[tape1, tape2], device=dev, **execute_kwargs)
        return result[0] + result[1] - 7 * result[1]

    res = jax.jit(jax.grad(cost_fn))(params)
    assert res.shape == (2,)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_classical_processing_single_tape(self, execute_kwargs):\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n c = jax.numpy.array(0.3)\n\n def cost(a, b, c, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a * c, wires=0)\n qml.RZ(b, wires=0)\n qml.RX(c + c**2 + jax.numpy.sin(a), wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute([tape], device, **execute_kwargs)[0]\n\n dev = qml.device(\"default.qubit\", wires=2)\n res = jax.jit(jax.grad(cost, argnums=(0, 1, 2)), static_argnums=3)(a, b, c, device=dev)\n assert len(res) == 3", "def test_active_inference_SPM_1b(self):", "def test_identity_multiple_tape(self, dev, tmpdir, monkeypatch):\n qml.enable_tape()\n\n dev = qml.device(dev, wires=2, keep_files=False)\n\n with qml.tape.QuantumTape() as tape1:\n qml.RX(0.133, wires=0)\n qml.expval(qml.Identity(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.RX(0.432, wires=0)\n qml.expval(qml.Identity(wires=[0]))\n qml.expval(qml.Identity(wires=[1]))\n\n circuits = [tape1, tape2]\n\n test_uuid = \"1234\"\n with monkeypatch.context() as m:\n m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: None,\n )\n\n # Disable random uuid generation\n m.setattr(uuid, \"uuid4\", lambda *args: test_uuid)\n\n res = dev.batch_execute(circuits)\n\n # No workflow files were created because we only computed with\n # identities\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}.yaml\"))\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}.yaml\"))\n\n expected = [\n np.ones(1),\n np.ones(2),\n ]\n\n for r, e in zip(res, expected):\n assert np.allclose(r, e)\n\n qml.disable_tape()", "def test_multiple_tapes_output(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.3, 0.2])\n\n def cost_fn(x):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.Hadamard(0)\n qml.RY(x[0], wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.Hadamard(0)\n qml.CRX(2 * x[0] * x[1], wires=[0, 1])\n qml.RX(2 * x[1], wires=[1])\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n return execute(tapes=[tape1, tape2], device=dev, **execute_kwargs)\n\n res = jax.jit(cost_fn)(params)\n assert isinstance(res, list)\n assert all(isinstance(r, jax.numpy.ndarray) for r in res)\n assert all(r.shape == () for r in res)", "def terminal_test(self):\n\n for self.cur_ep in tqdm.tqdm(range(1, self.episodes + 1), ascii=True, unit='episodes'):\n\n # Nombre de passages dans la boucle principale\n step = 1\n\n cur_state = self.env.reset()\n\n done = False\n\n while not done:\n\n # Choix au hasard entre :\n if np.random.random() > self.epsilon:\n # Action ร  partir de la q-table\n action = np.argmax(self.agent.get_q_values(np.array(cur_state)))\n\n else:\n # Action random\n action = np.random.randint(0, self.env.ACTION_SPACE_SIZE)\n\n # On effectue une action avec le serpent\n new_state, reward, done = self.env.step(action)\n\n # Ajout d'un exemple dans la mรฉmoire\n self.agent.update_training_set((cur_state, action, reward, new_state, done))\n\n # Entrainement รฉventuel\n self.agent.train()\n\n 
cur_state = new_state\n step += 1\n\n if self.epsilon > self.MIN_EPSILON:\n self.epsilon *= self.EPSILON_DECAY\n self.epsilon = max(self.MIN_EPSILON, self.epsilon)\n\n if self.save_model:\n self.agent.save_model(self.model_file_name)", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def test_integration(self):\n\n m = 5 # number of wires in A\n M = 2**m\n\n xmax = np.pi # bound to region [-pi, pi]\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.sin(xs[i]) ** 2\n r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])\n\n A_wires = [0, \"a\", -1.1, -10, \"bbb\"]\n target_wire = \"Ancilla\"\n wires = A_wires + [target_wire]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\"]\n\n def fn():\n qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=A_wires)\n r_unitary(qml.RY, r_rotations, control_wires=A_wires[::-1], target_wire=target_wire)\n\n qmc_circuit = qml.quantum_monte_carlo(\n fn, wires=wires, target_wire=target_wire, estimation_wires=estimation_wires\n )\n\n with qml.queuing.AnnotatedQueue() as q:\n qmc_circuit()\n qml.probs(estimation_wires)\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape = tape.expand(depth=2)\n\n assert all(\n not isinstance(op, (qml.MultiControlledX, qml.templates.QFT, qml.tape.QuantumScript))\n for op in tape.operations\n )\n\n dev = qml.device(\"default.qubit\", wires=wires + estimation_wires)\n res = dev.execute(tape)\n\n @qml.qnode(dev)\n def circuit():\n qml.templates.QuantumMonteCarlo(\n probs, func, target_wires=wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n res_expected = circuit()\n assert np.allclose(res, res_expected)", "def 
run_loop_measurement(t=0.5, name='test', loops=4, pump_t=180, total_t=600, jump_x=10):\n\n incident_angles = [0.1, 0.4]\n waxs_arc = [20, 0]\n user = \"TP\"\n\n condition = (\n ( -1 < waxs.arc.position )\n and ( waxs.arc.position < 1 )\n and (waxs_arc[0] == 20)\n )\n\n if condition:\n waxs_arc = waxs_arc[::-1]\n \n ranges = { 0.1 : [-16, 16, 33],\n 0.4 : [-25, 25, 51],\n }\n\n try:\n ai0 = RE.md['ai_0']\n except:\n yield from bp.count([])\n ai0 = db[-1].start['ai_0']\n print('Failed to acces RE.md')\n print(f'\\n\\nSample flat at theta = {ai0}')\n \n proposal_id('2023_2', '311564_Pettersson')\n #det_exposure_time(t, t)\n \n t_initial = time.time()\n\n for i in range(loops):\n t_start = time.time()\n print('Cycle number',i+1,'started at', (t_start - t_initial)/60)\n\n # Wait initial time for pumping to finish\n print(f'Start pumping now, going to wait for {pump_t} s\\n')\n while (time.time() - t_start) < pump_t:\n print(f'Pumping time: {(time.time() - t_start):.1f} s')\n yield from bps.sleep(10)\n\n # Go over SAXS and WAXS\n t_measurement = ( time.time() - t_initial ) / 60\n for wa in waxs_arc:\n yield from bps.mv(waxs, wa)\n dets = [pil900KW] if waxs.arc.position < 15 else [pil1M, pil900KW]\n\n for ai in incident_angles:\n yield from bps.mv(piezo.th, ai0 + ai)\n yield from bps.mvr(piezo.x, - jump_x)\n\n t2 = 2 * t if ai == 0.4 else t\n det_exposure_time(t2, t2)\n\n try:\n y_range = ranges[ai]\n except:\n y_range = [-10, 10, 11]\n \n sample_name = f'{name}{get_scan_md()}_time{t_measurement:.1f}_ai{ai}'\n sample_id(user_name=user, sample_name=sample_name)\n print(f\"\\n\\n\\n\\t=== Sample: {sample_name} ===\")\n yield from bp.rel_scan(dets, piezo.y, *y_range, md=dict(ai=ai))\n \n yield from bps.mv(waxs, waxs_arc[0],\n piezo.th, ai0)\n\n # Wait until the total loop time passes\n if i + 1 < loops:\n print(f'Waiting for the loop to last {total_t} s in total\\n')\n sleep_count = 0\n while (time.time() - t_start) < total_t:\n sleep_count += 1\n if (sleep_count % 10 == 0):\n print(f'Total time: {(time.time() - t_start):.1f} s')\n yield from bps.sleep(1)\n\n sample_id(user_name=\"test\", sample_name=\"test\")\n det_exposure_time(0.5, 0.5)", "def test_reusing_quantum_tape(self, execute_kwargs, tol):\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.RX(b, wires=1)\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n assert tape.trainable_params == [0, 1]\n\n def cost(a, b):\n # An explicit call to _update() is required here to update the\n # trainable parameters in between tape executions.\n # This is different from how the autograd interface works.\n # Unless the update is issued, the validation check related to the\n # number of provided parameters fails in the tape: (len(params) !=\n # required_length) and the tape produces incorrect results.\n tape._update()\n new_tape = tape.bind_new_parameters([a, b], [0, 1])\n return execute([new_tape], dev, **execute_kwargs)[0]\n\n jac_fn = jax.jit(jax.grad(cost))\n jac = jac_fn(a, b)\n\n a = jax.numpy.array(0.54)\n b = jax.numpy.array(0.8)\n\n # check that the cost function continues to depend on the\n # values of the parameters for subsequent calls\n res2 = cost(2 * a, b)\n expected = [np.cos(2 * a)]\n assert np.allclose(res2, expected, atol=tol, rtol=0)\n\n jac_fn = jax.jit(jax.grad(lambda a, b: cost(2 * a, b)))\n jac = jac_fn(a, b)\n expected = -2 * np.sin(2 * a)\n assert 
np.allclose(jac, expected, atol=tol, rtol=0)", "def test(args):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # dim of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # dim of the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n agent = MADDPG(state_size, action_size, actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3)\n\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n test_scores = []\n for i_episode in tqdm(range(1, 1+args.test_n_run)):\n # initialize the scores\n scores = np.zeros(num_agents)\n env_info = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_info.vector_observations # get the current states\n dones = [False]*num_agents\n while not np.any(dones):\n actions = agent.act(states) # select actions\n # send the actions to the environment\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get the next states\n rewards = env_info.rewards # get the rewards\n dones = env_info.local_done # see if episode has finished\n scores += rewards # update the scores\n # roll over the states to next time step\n states = next_states\n\n test_scores.append(np.max(scores))\n\n avg_score = sum(test_scores)/len(test_scores)\n print(\"Test Score: {}\".format(avg_score))\n\n return avg_score", "def test_track_particles_multi_beamline(self):\n BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()\n bl1 = HUST_SC_GANTRY().create_beamline()\n bl2 = HUST_SC_GANTRY(qs3_gradient=7).create_beamline()\n bl3 = HUST_SC_GANTRY(qs3_gradient=0).create_beamline()\n\n p1 = ParticleFactory.create_proton_along(\n bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 215\n )\n\n p2 = ParticleFactory.create_proton_along(\n bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 220\n )\n\n p3 = ParticleFactory.create_proton_along(\n bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 210\n )\n\n ps_cpu1 = [p1.copy(), p2.copy(), p3.copy()]\n ps_cpu2 = [p1.copy(), p2.copy(), p3.copy()]\n ps_cpu3 = [p1.copy(), p2.copy(), p3.copy()]\n ps_gpu32 = [p1.copy(), p2.copy(), p3.copy()]\n ps_gpu64 = [p1.copy(), p2.copy(), p3.copy()]\n\n print(\"CPU\")\n s = time.time()\n ParticleRunner.run_only(ps_cpu1, bl1, 10, 20*MM, 6)\n ParticleRunner.run_only(ps_cpu2, bl2, 10, 20*MM, 6)\n ParticleRunner.run_only(ps_cpu3, bl3, 10, 20*MM, 6)\n print(f\"CPU time = {time.time()-s}\")\n for p in ps_cpu1+ps_cpu2 + ps_cpu3:\n print(p)\n\n print(\"GPU64\")\n s = time.time()\n ps_end = ga64_b512.track_multi_particle_beamlime_for_magnet_with_single_qs(\n [bl1, bl2, bl3], ps_gpu64, 10, 20*MM\n )\n print(f\"GPU64 time = {time.time()-s}\")\n\n for ps in ps_end:\n for p in ps:\n print(p)\n\n for gid in range(3):\n for pid in range(3):\n print(f\"diff={ps_end[gid][pid]-(ps_cpu1+ps_cpu2 + ps_cpu3)[gid*3+pid]}\")", "def test_error_if_not_expval_batched(self):\n qml.enable_tape()\n dev = qml.device(\"orquestra.qiskit\", wires=2)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.PauliZ(wires=[0]))\n qml.var(qml.PauliZ(wires=[0]))\n\n 
with qml.tape.QuantumTape() as tape2:\n qml.expval(qml.PauliZ(wires=[0]))\n\n circuits = [tape1, tape2]\n with pytest.raises(NotImplementedError):\n res = dev.batch_execute(circuits)\n\n qml.disable_tape()", "def test6():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_ataque_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_chunk_QFT(self, method, device):\n opts_no_chunk = {\n \"fusion_enable\": False,\n \"fusion_threshold\": 10,\n }\n opts_chunk = copy.copy(opts_no_chunk)\n opts_chunk[\"blocking_enable\"] = True\n opts_chunk[\"blocking_qubits\"] = 2\n\n backend = self.backend(method=method, device=device, **opts_chunk)\n backend_no_chunk = self.backend(method=method, device=device, **opts_no_chunk)\n\n shots = 100\n num_qubits = 3\n circuit = transpile(QFT(num_qubits), backend=backend, optimization_level=0)\n circuit.measure_all()\n\n result = backend.run(circuit, shots=shots, memory=True).result()\n counts = result.get_counts(circuit)\n result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()\n counts_no_chunk = result_no_chunk.get_counts(circuit)\n\n self.assertEqual(counts_no_chunk, counts)", "def test5():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('ataque')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n exp1.pingAllTest() # **************** Parece que es necesario que se de un arranque al controlador\n # **************** para que aprenda las reglas antes del ataque.\n\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n #exp1.trafico.pingMeasure(filename='ping_ataque_test.log')\n \"\"\" 6. 
Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_serialize_circuit_rotations_tape(self, monkeypatch, tmpdir, test_batch_result):\n qml.enable_tape()\n dev = QeQiskitDevice(wires=1, shots=1000, backend=\"qasm_simulator\", analytic=False)\n\n circuit_history = []\n\n with qml.tape.QuantumTape() as tape1:\n qml.Hadamard(wires=[0])\n qml.expval(qml.Hadamard(0))\n\n with monkeypatch.context() as m:\n m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"gen_expval_workflow\",\n lambda component, backend_specs, circuits, operators, **kwargs: circuit_history.extend(\n circuits\n ),\n )\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: test_batch_result, # The exact results are not considered in the test\n )\n\n dev.execute(tape1)\n\n expected = 'OPENQASM 2.0;\\ninclude \"qelib1.inc\";\\nqreg q[1];\\ncreg c[1];\\nh q[0];\\nry(-0.7853981633974483) q[0];\\n'\n assert circuit_history[0] == expected\n qml.disable_tape()", "def main():\n # run_test_go_straight_inches()\n # run_test_turn_degrees()\n # run_test_spin_degrees()\n beep_if_blob_is_bigger_than(3000)", "def test_chunk_QFTWithFusion(self, method, device):\n opts_no_chunk = {\n \"fusion_enable\": True,\n \"fusion_threshold\": 5,\n }\n opts_chunk = copy.copy(opts_no_chunk)\n opts_chunk[\"blocking_enable\"] = True\n opts_chunk[\"blocking_qubits\"] = 4\n\n backend = self.backend(method=method, device=device, **opts_chunk)\n backend_no_chunk = self.backend(method=method, device=device, **opts_no_chunk)\n\n shots = 100\n num_qubits = 8\n circuit = transpile(QFT(num_qubits), backend=backend, optimization_level=0)\n circuit.measure_all()\n\n result = backend.run(circuit, shots=shots, memory=True).result()\n counts = result.get_counts(circuit)\n result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()\n counts_no_chunk = result_no_chunk.get_counts(circuit)\n\n self.assertEqual(counts_no_chunk, counts)", "def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]", "def exe_tests(self):\n self.rank = mpicom.rank()\n self.size = mpicom.size()\n if mpicom.parallel():\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpicom.so\")\n else:\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpistub.pyc\")\n self.test_broadcast()\n self.test_reduce()\n self.test_p2p()\n self.test_gather()\n self.test_scatter()\n #self.test_alltoall()", "def test4():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. 
Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.iperfMeasure()\n exp1.trafico.iperfMeasure(filename='iperf_normal_test.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def test_tape_iteration(self) -> None:\n tape = TMTape(\n tape=\"abcdef\",\n blank_symbol=\".\",\n current_position=2,\n )\n self.assertEqual(tuple(tape), (\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"))", "def test_identity_multiple_batched(self, dev):\n qml.enable_tape()\n dev = qml.device(dev, wires=2)\n\n with qml.tape.QuantumTape() as tape1:\n qml.expval(qml.Identity(wires=[0]))\n qml.expval(qml.Identity(wires=[1]))\n\n res = dev.batch_execute([tape1])\n assert len(res) == 1\n assert np.allclose(res[0], np.array([1, 1]))\n qml.disable_tape()", "def test_behaviour(self):\r\n\r\n with qml.tape.QuantumTape() as tape:\r\n qml.PauliZ(0)\r\n qml.RX(1.0, wires=0)\r\n qml.CNOT(wires=[0, 2])\r\n qml.Rot(2.0, 3.0, 4.0, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n tape.trainable_params = {0, 2}\r\n shifts = [0.1, -0.2, 1.6]\r\n res = generate_shifted_tapes(tape, 1, shifts=shifts)\r\n\r\n assert len(res) == len(shifts)\r\n assert res[0].get_parameters(trainable_only=False) == [1.0, 2.0, 3.1, 4.0]\r\n assert res[1].get_parameters(trainable_only=False) == [1.0, 2.0, 2.8, 4.0]\r\n assert res[2].get_parameters(trainable_only=False) == [1.0, 2.0, 4.6, 4.0]", "async def test_routine(self):\n print('Running test routine...')\n print('Waiting for axes to initialize...')\n await self.robot.wait_until_initialized()\n print('Synchronizing robot state with peripheral...')\n await self.robot.synchronize_values()\n print('Loading calibration data...')\n await self.robot.load_calibrations()\n await self.robot.go_to_alignment_hole()\n\n print('Starting 96-well plate test...')\n await self.robot.go_to_96_well_plate(1, 'a')\n await self.robot.dispense('96-well plate', 'far above')\n for height in ['bottom', 'low', 'mid', 'high', 'top', 'above', 'far above']:\n print('Testing with height {}...'.format(height))\n for (row, volume) in [('a', 20), ('b', 30), ('c', 40), ('d', 50), ('e', 100)]:\n print(\n ' Testing precise with row {} and volume {} mL...'\n .format(row, volume)\n )\n await self.test_individual_precise(row, height, volume / 1000)\n await self.robot.dispense('96-well plate', height)\n for (row, volume) in [\n ('f', 100), ('g', 150), ('h', 200), ('a', 300), ('b', 400),\n ('c', 500), ('d', 600), ('e', 700), ('g', 800), ('h', 900)\n ]:\n print(\n ' Testing rough with row {} and volume {} mL...'\n .format(row, volume / 1000)\n )\n await self.test_individual_rough(row, height, volume / 1000)\n await self.robot.z.go_to_high_end_position()\n await self.robot.y.go_to_low_end_position()\n\n print(batch.OUTPUT_FOOTER)\n 
print('Quitting...')", "def test_all_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_all_qubit_quantum_error(error, 'id')\n # Execute\n target = {\n '0x0': 9 * shots / 16,\n '0x1': 3 * shots / 16,\n '0x2': 3 * shots / 16,\n '0x3': shots / 16\n }\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)", "def test_specific_qubit_pauli_error_gate_25percent(self):\n qr = QuantumRegister(2, 'qr')\n cr = ClassicalRegister(2, 'cr')\n circuit = QuantumCircuit(qr, cr)\n circuit.iden(qr)\n circuit.barrier(qr)\n circuit.measure(qr, cr)\n backend = QasmSimulator()\n shots = 2000\n # test noise model\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model = NoiseModel()\n noise_model.add_quantum_error(error, 'id', [0])\n # Execute\n target = {'0x0': 3 * shots / 4, '0x1': shots / 4}\n circuit = transpile(circuit, basis_gates=noise_model.basis_gates)\n qobj = assemble([circuit], backend, shots=shots)\n result = backend.run(qobj, noise_model=noise_model).result()\n self.is_completed(result)\n self.compare_counts(result, [circuit], [target], delta=0.05 * shots)", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des donnรฉes gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des sรฉquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des donnรฉes gรฉnรฉrรฉes par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres donnรฉes quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test_chunk_pauli(self, method, device):\n opts_no_chunk = {\"fusion_enable\": False}\n opts_chunk = copy.copy(opts_no_chunk)\n opts_chunk[\"blocking_enable\"] = True\n opts_chunk[\"blocking_qubits\"] = 3\n\n backend = self.backend(method=method, device=device, **opts_chunk)\n 
backend_no_chunk = self.backend(method=method, device=device, **opts_no_chunk)\n\n shots = 100\n\n qr = QuantumRegister(5)\n cr = ClassicalRegister(5)\n regs = (qr, cr)\n circuit = QuantumCircuit(*regs)\n circuit.h(qr[0])\n circuit.h(qr[1])\n circuit.h(qr[2])\n circuit.h(qr[3])\n circuit.h(qr[4])\n circuit.pauli(\"YXZYX\", qr)\n circuit.measure_all()\n\n result = backend.run(circuit, shots=shots, memory=True).result()\n counts = result.get_counts(circuit)\n result_no_chunk = backend_no_chunk.run(circuit, shots=shots, memory=True).result()\n counts_no_chunk = result_no_chunk.get_counts(circuit)\n\n self.assertEqual(counts_no_chunk, counts)" ]
[ "0.6260577", "0.6099374", "0.60667086", "0.6040244", "0.60340387", "0.59733605", "0.59639525", "0.5918906", "0.59123546", "0.58580023", "0.584144", "0.5829903", "0.58174", "0.58160144", "0.58011144", "0.57997733", "0.579832", "0.5781381", "0.5778989", "0.5777852", "0.57400465", "0.57378244", "0.57333666", "0.56908983", "0.568101", "0.5679097", "0.56728077", "0.5667846", "0.5666771", "0.56238157" ]
0.6635692
0
Tests the shape of vector-valued QNode results.
def test_vector_valued_qnode(self, execute_kwargs, ret, out_dim, expected_type):
    dev = qml.device("default.qubit", wires=2)
    params = jax.numpy.array([0.1, 0.2, 0.3])

    grad_meth = (
        execute_kwargs["gradient_kwargs"]["method"]
        if "gradient_kwargs" in execute_kwargs
        else ""
    )
    if "adjoint" in grad_meth and any(
        r.return_type
        in (qml.measurements.Probability, qml.measurements.State, qml.measurements.Variance)
        for r in ret
    ):
        pytest.skip("Adjoint does not support probs")

    def cost(a, cache):
        with qml.queuing.AnnotatedQueue() as q:
            qml.RY(a[0], wires=0)
            qml.RX(a[1], wires=0)
            qml.RY(a[2], wires=0)

            for r in ret:
                qml.apply(r)

        tape = qml.tape.QuantumScript.from_queue(q)

        res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)[0]
        return res

    res = jax.jit(cost, static_argnums=1)(params, cache=None)

    assert isinstance(res, expected_type)
    if expected_type is tuple:
        for r in res:
            assert r.shape == out_dim
    else:
        assert res.shape == out_dim
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_vec(self):\n x = get_vec(5)\n assert x.shape == (5,)\n assert np.all(x == np.zeros(5))", "def isvec(qob):\n shp = qob.shape\n return len(shp) == 1 or (len(shp) == 2 and (shp[0] == 1 or shp[1] == 1))", "def test_vector_shape(self):\n model = PoincareModel(self.data, size=20)\n self.assertEqual(model.kv.syn0.shape, (7, 20))", "def test_contains_shape(self):\n dim = Dimension(None, \"uniform\", -3, 4, shape=(4, 4))\n\n with pytest.raises(NotImplementedError):\n assert dists.uniform.rvs(-3, 4, size=(4, 4)) in dim", "def is_vector(x):\r\n return len(x.shape) == 1", "def test_vector_dimensions(self):\r\n # crear una lista 1-D (Horizontal, Entradas). \r\n Z = [1, 2, 3, 4, 5]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Notemos que las dimensiones de Z y W son diferentes.\r\n try:\r\n neuron = rhonn(W, Z)\r\n except ValueError as e:\r\n # Comprobamos que efectivamente hay un error en las dimensiones.\r\n self.assertEqual(type(e), ValueError)\r\n else:\r\n self.fail('El error no fue lanzado.')", "def test_normalize_empty_vector(self):\n\n v = Vector({ })\n v.normalize()\n self.assertEqual({ }, v.dimensions)", "def testSize(self):\n v1 = Vector(1, 2, 3, size=6)\n assert v1 == [1, 2, 3, 0, 0, 0]\n failed = False\n try:\n Vector(1, 2, 3, size=2)\n except IndexError:\n failed = True\n assert failed\n\n v3 = Vector(size=7)\n assert v3 == Vector(0, 0, 0, 0, 0, 0, 0)\n assert v3 == (0, 0, 0, 0, 0, 0, 0)", "def test_dimensions_vector_space(self):\n\n v = Vector()\n self.assertEqual(VectorSpace, type(v.dimensions))", "def is_vector(self):\n return True if self.width > 1 else False", "def test_vector(self, a, b, rtol, atol, expected):\n assert np.all(self.func(a, b, rtol=rtol, atol=atol) == expected)", "def test_random_sphere_vector():\n\ttest_vector = o_gen_instance.generate_random_sphere_vector()\n\tassert isinstance(test_vector, np.ndarray)\n\tassert test_vector.shape == (3,)\n\tfor component in test_vector:\n\t\tassert component != 0.\n\tassert np.isclose(np.linalg.norm(test_vector), 1.0)", "def test_vector(self):\n a = Vector(1, 2)\n assert a.x == 1\n assert a.y == 2", "def test_vector(self, a, b, rtol, atol, expected):\n assert self.func(a, b, rtol=rtol, atol=atol) == expected", "def test_empty_constructor(self):\n\n v = Vector()\n self.assertEqual({ }, v.dimensions)", "def test_qsvm_binary_directly_statevector(self):\n ref_kernel_testing = np. 
array([[0.1443953, 0.18170069, 0.47479649, 0.14691763],\n [0.33041779, 0.37663733, 0.02115561, 0.16106199]])\n\n ref_support_vectors = np.array([[2.95309709, 2.51327412], [3.14159265, 4.08407045],\n [4.08407045, 2.26194671], [4.46106157, 2.38761042]])\n\n backend = BasicAer.get_backend('statevector_simulator')\n num_qubits = 2\n feature_map = SecondOrderExpansion(feature_dimension=num_qubits,\n depth=2,\n entangler_map=[[0, 1]])\n svm = QSVM(feature_map, self.training_data, self.testing_data, None)\n\n quantum_instance = QuantumInstance(backend, seed_transpiler=self.random_seed,\n seed_simulator=self.random_seed)\n file_path = self.get_resource_path('qsvm_test.npz')\n try:\n result = svm.run(quantum_instance)\n\n ori_alphas = result['svm']['alphas']\n\n np.testing.assert_array_almost_equal(\n result['kernel_matrix_testing'], ref_kernel_testing, decimal=4)\n\n self.assertEqual(len(result['svm']['support_vectors']), 4)\n np.testing.assert_array_almost_equal(\n result['svm']['support_vectors'], ref_support_vectors, decimal=4)\n\n self.assertEqual(result['testing_accuracy'], 0.5)\n\n svm.save_model(file_path)\n\n self.assertTrue(os.path.exists(file_path))\n\n loaded_svm = QSVM(feature_map)\n loaded_svm.load_model(file_path)\n\n np.testing.assert_array_almost_equal(\n loaded_svm.ret['svm']['support_vectors'], ref_support_vectors, decimal=4)\n\n np.testing.assert_array_almost_equal(\n loaded_svm.ret['svm']['alphas'], ori_alphas, decimal=4)\n\n loaded_test_acc = loaded_svm.test(svm.test_dataset[0],\n svm.test_dataset[1],\n quantum_instance)\n self.assertEqual(result['testing_accuracy'], loaded_test_acc)\n\n np.testing.assert_array_almost_equal(\n loaded_svm.ret['kernel_matrix_testing'], ref_kernel_testing, decimal=4)\n except NameError as ex:\n self.skipTest(str(ex))\n finally:\n if os.path.exists(file_path):\n try:\n os.remove(file_path)\n except Exception: # pylint: disable=broad-except\n pass", "def test_scalar_index(self):\n dset = self.f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, ())", "def test_normalize_zero_length_vector(self):\n\n v = Vector({ 'x': 0 })\n v.normalize()\n self.assertEqual({ 'x': 0 }, v.dimensions)", "def has_vector_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.vector3", "def test_normalize_vector_space(self):\n\n v = Vector({ 'x': 10 })\n self.assertEqual(VectorSpace, type(v.dimensions))\n v.normalize()\n self.assertEqual(VectorSpace, type(v.dimensions))", "def __size_restriction_correct_vector_vector(self):\n\n strTestName = 'Vector size lower or equal to the size of a vector (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('vRefParameter1', 'Vector ref. 
parameter')\n RxCSObject.paramType('vRefParameter1', np.ndarray)\n\n # Now, let me define a Numpy vector\n RxCSObject.paramAddMan('parameter1', 'Numpy array 1D parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramSizLE('parameter1', 'vRefParameter1', mul=3)\n\n RxCSObject.vRefParameter1 = np.array([0, 1, 0, 4])\n RxCSObject.parameter1 = np.random.randn(9)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def evaluate_as_vector(self, chain_state): \n def vector_representation(n, ordering, it):\n return self.mapping.subspace(zip(ordering,it))\n return self._evaluate(vector_representation, chain_state)", "def test_constructor(self):\n\n v = Vector({\"x\": 2, \"y\": 1})\n self.assertEqual({\"x\": 2, \"y\": 1}, v.dimensions)", "def vector_q(q_1: Q) -> Q:\n\n end_q_type = f\"vector_q({q_1.q_type})\"\n\n v = Q(\n [0, q_1.x, q_1.y, q_1.z],\n q_type=end_q_type,\n representation=q_1.representation,\n )\n return v", "def test_equal_with_different_type():\n assert_false(Vector(1.0) == 1.0)", "def is_vector(self):\n return len(self.coeffs.shape[self.sdim:]) == 1", "def test_iter(self):\n b = Vector(5, 6)\n assert list(b) == [5, 6]", "def _is_scalar_from_shape(shape):\n return _logical_equal(_ndims_from_shape(shape), 0)", "def test_stack_scalar_make_vector(self):\r\n a = tensor.scalar('a', dtype=self.floatX)\r\n b = tensor.scalar('b', dtype=self.floatX)\r\n s = stack(a, b, a, b)\r\n f = function([a, b], s, mode=self.mode)\r\n val = f(1, 2)\r\n #print val\r\n self.assertTrue(numpy.all(val == [1, 2, 1, 2]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len([n for n in topo if isinstance(n.op, opt.MakeVector)]) > 0\r\n assert len([n for n in topo if isinstance(n, self.join_op)]) == 0\r\n assert f.maker.fgraph.outputs[0].dtype == self.floatX", "def is_vec(x):\n return x.ndim == 1 or (x.ndim == 2 and \n (x.shape[0] == 1 or x.shape[1] == 1))" ]
[ "0.64669347", "0.64521986", "0.63848865", "0.6384336", "0.63729805", "0.63232344", "0.6228884", "0.6149276", "0.61155164", "0.6107356", "0.6096001", "0.6076757", "0.6044631", "0.59956014", "0.59195197", "0.5917795", "0.58967865", "0.58740616", "0.58721894", "0.5870175", "0.5859444", "0.5855074", "0.58381045", "0.583519", "0.5818305", "0.5810344", "0.5795523", "0.5784517", "0.57812613", "0.57711864" ]
0.7302401
0
Test the Jacobian computation with multiple tapes containing probability and expectation value computations.
def test_multi_tape_jacobian_probs_expvals(self, execute_kwargs):
    adjoint = execute_kwargs.get("gradient_kwargs", {}).get("method", "") == "adjoint_jacobian"
    if adjoint:
        pytest.skip("The adjoint diff method doesn't support probabilities.")

    def cost(x, y, device, interface, ek):
        with qml.queuing.AnnotatedQueue() as q1:
            qml.RX(x, wires=[0])
            qml.RY(y, wires=[1])
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.PauliZ(0))
            qml.expval(qml.PauliZ(1))

        tape1 = qml.tape.QuantumScript.from_queue(q1)

        with qml.queuing.AnnotatedQueue() as q2:
            qml.RX(x, wires=[0])
            qml.RY(y, wires=[1])
            qml.CNOT(wires=[0, 1])
            qml.probs(wires=[0])
            qml.probs(wires=[1])

        tape2 = qml.tape.QuantumScript.from_queue(q2)

        return qml.execute([tape1, tape2], device, **ek, interface=interface)[0]

    dev = qml.device("default.qubit", wires=2)
    x = jax.numpy.array(0.543)
    y = jax.numpy.array(-0.654)

    x_ = np.array(0.543)
    y_ = np.array(-0.654)

    res = cost(x, y, dev, interface="jax-jit", ek=execute_kwargs)
    exp = cost(x_, y_, dev, interface="autograd", ek=execute_kwargs)

    for r, e in zip(res, exp):
        assert jax.numpy.allclose(r, e, atol=1e-7)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_multiple_expectation_values(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliX(1))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (2, 2)\r\n\r\n expected = np.array([[-np.sin(x), 0], [0, np.cos(y)]])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_tf(self, approx_order, strategy, tol):\r\n tf = pytest.importorskip(\"tensorflow\")\r\n\r\n dev = qml.device(\"default.qubit.tf\", wires=2)\r\n params = tf.Variable([0.543, -0.654], dtype=tf.float64)\r\n\r\n with tf.GradientTape() as t:\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(params[0], wires=[0])\r\n qml.RY(params[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n tape.trainable_params = {0, 1}\r\n tapes, fn = finite_diff(tape, n=1, approx_order=approx_order, strategy=strategy)\r\n jac = fn(dev.batch_execute(tapes))\r\n\r\n x, y = 1.0 * params\r\n\r\n expected = np.array([-np.sin(x) * np.sin(y), np.cos(x) * np.cos(y)])\r\n assert np.allclose(jac, expected, atol=tol, rtol=0)\r\n\r\n res = t.jacobian(jac, params)\r\n expected = np.array(\r\n [\r\n [-np.cos(x) * np.sin(y), -np.cos(y) * np.sin(x)],\r\n [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)],\r\n ]\r\n )\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_classical_processing_multiple_tapes(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.3, 0.2])\n\n def cost_fn(x):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.Hadamard(0)\n qml.RY(x[0], wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.Hadamard(0)\n qml.CRX(2 * x[0] * x[1], wires=[0, 1])\n qml.RX(2 * x[1], wires=[1])\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n result = execute(tapes=[tape1, tape2], device=dev, **execute_kwargs)\n return result[0] + result[1] - 7 * result[1]\n\n res = jax.jit(jax.grad(cost_fn))(params)\n assert res.shape == (2,)", "def test_y0(self, mocker):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[0])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=1)\r\n\r\n # one tape per parameter, plus one global call\r\n assert len(tapes) == tape.num_params + 1", "def test_classical_processing_single_tape(self, execute_kwargs):\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n c = jax.numpy.array(0.3)\n\n def cost(a, b, c, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a * c, wires=0)\n qml.RZ(b, wires=0)\n qml.RX(c + c**2 + jax.numpy.sin(a), wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute([tape], device, **execute_kwargs)[0]\n\n dev = qml.device(\"default.qubit\", wires=2)\n res = jax.jit(jax.grad(cost, argnums=(0, 1, 2)), static_argnums=3)(a, b, c, device=dev)\n assert len(res) == 3", "def test_jacobi_analytical(env_class: mantrap.environment.base.GraphBasedEnvironment.__class__):\n env = env_class(torch.rand(2), ego_type=mantrap.agents.DoubleIntegratorDTAgent)\n 
env.add_ado(position=torch.rand(2) * 5, goal=torch.rand(2) * 10)\n\n ego_controls = torch.rand((5, 2)) / 10.0\n ego_controls.requires_grad = True\n ego_trajectory = env.ego.unroll_trajectory(controls=ego_controls, dt=env.dt)\n\n # Initialize HJ module and compute partial derivative dx_rel/du_robot using auto-grad.\n module = mantrap.modules.HJReachabilityModule(env=env, t_horizon=5)\n _ = module._constraint_core(ego_trajectory, ado_ids=env.ado_ids, tag=\"test\", enable_auto_grad=True)\n dx_rel_du_auto_grad = []\n for ado_id in env.ado_ids:\n x_rel = module.x_relative[f\"test/{ado_id}\"]\n grad = [torch.autograd.grad(x, ego_controls, retain_graph=True)[0] for x in x_rel]\n dx_rel_du_auto_grad.append(torch.stack(grad).reshape(4, -1))\n dx_rel_du_auto_grad = torch.stack(dx_rel_du_auto_grad)\n\n # Compute the same partial derivative analytically, by calling the `compute_jacobian_analytically()`\n # function. Since we cannot inverse a vector (dJ/dx_rel), we can check whether the jacobian\n # computed using the pre-computed dJ/dx_rel and the auto-grad (!) dx_rel/du results in the same\n # jacobian as the result of `compute_jacobian_analytically()`, which is only the case if\n # dx_rel/du(auto-grad) = dx_rel/du(analytic) since dJ/dx has non-zero elements.\n jacobian_analytical = module.compute_jacobian_analytically(ego_trajectory, grad_wrt=ego_controls,\n ado_ids=env.ado_ids, tag=\"test\")\n dj_dx_rel = []\n for ado_id in env.ado_ids:\n dj_dx_rel.append(module.value_gradient(x=module.x_relative[f\"test/{ado_id}\"]))\n jacobian_auto_grad = np.matmul(np.stack(dj_dx_rel), dx_rel_du_auto_grad)\n\n assert np.allclose(jacobian_analytical, jacobian_auto_grad)", "def test_scalar_jacobian(self, execute_kwargs, tol):\n a = jax.numpy.array(0.1)\n dev = qml.device(\"default.qubit\", wires=2)\n\n def cost(a):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute([tape], dev, **execute_kwargs)[0]\n\n res = jax.jit(jax.grad(cost))(a)\n assert res.shape == ()\n\n # compare to standard tape jacobian\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n tape.trainable_params = [0]\n tapes, fn = param_shift(tape)\n expected = fn(dev.batch_execute(tapes))\n\n assert expected.shape == ()\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_multiple_tapes_output(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.3, 0.2])\n\n def cost_fn(x):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.Hadamard(0)\n qml.RY(x[0], wires=[0])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.Hadamard(0)\n qml.CRX(2 * x[0] * x[1], wires=[0, 1])\n qml.RX(2 * x[1], wires=[1])\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n return execute(tapes=[tape1, tape2], device=dev, **execute_kwargs)\n\n res = jax.jit(cost_fn)(params)\n assert isinstance(res, list)\n assert all(isinstance(r, jax.numpy.ndarray) for r in res)\n assert all(r.shape == () for r in res)", "def test_prob_expectation_values(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n 
qml.expval(qml.PauliZ(0))\r\n qml.probs(wires=[0, 1])\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (5, 2)\r\n\r\n expected = (\r\n np.array(\r\n [\r\n [-2 * np.sin(x), 0],\r\n [\r\n -(np.cos(y / 2) ** 2 * np.sin(x)),\r\n -(np.cos(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n [\r\n -(np.sin(x) * np.sin(y / 2) ** 2),\r\n (np.cos(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n [\r\n (np.sin(x) * np.sin(y / 2) ** 2),\r\n (np.sin(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n [\r\n (np.cos(y / 2) ** 2 * np.sin(x)),\r\n -(np.sin(x / 2) ** 2 * np.sin(y)),\r\n ],\r\n ]\r\n )\r\n / 2\r\n )\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def test_jacobian_options(self, mocker):\n spy = mocker.spy(qml.gradients, \"param_shift\")\n\n a = jax.numpy.array([0.1, 0.2])\n\n dev = qml.device(\"default.qubit\", wires=1)\n\n def cost(a, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n device,\n gradient_fn=param_shift,\n gradient_kwargs={\"shifts\": [(np.pi / 4,)] * 2},\n )[0]\n\n jax.grad(cost)(a, device=dev)\n\n for args in spy.call_args_list:\n assert args[1][\"shifts\"] == [(np.pi / 4,)] * 2", "def test_y0_provided(self):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[0])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n f0 = dev.execute(tape)\r\n tapes, fn = finite_diff(tape, approx_order=1, f0=f0)\r\n\r\n # one tape per parameter, plus one global call\r\n assert len(tapes) == tape.num_params", "def test_single_expectation_value(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (1, 2)\r\n\r\n expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_independent_parameters(self):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.RX(1, wires=[0])\r\n qml.RX(1, wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RX(1, wires=[0])\r\n qml.RX(1, wires=[1])\r\n qml.expval(qml.PauliZ(1))\r\n\r\n tapes, 
fn = finite_diff(tape1, approx_order=1)\r\n j1 = fn(dev.batch_execute(tapes))\r\n\r\n # We should only be executing the device to differentiate 1 parameter (2 executions)\r\n assert dev.num_executions == 2\r\n\r\n tapes, fn = finite_diff(tape2, approx_order=1)\r\n j2 = fn(dev.batch_execute(tapes))\r\n\r\n exp = -np.sin(1)\r\n\r\n assert np.allclose(j1, [exp, 0])\r\n assert np.allclose(j2, [0, exp])", "def test_reusing_quantum_tape(self, execute_kwargs, tol):\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a, wires=0)\n qml.RX(b, wires=1)\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n assert tape.trainable_params == [0, 1]\n\n def cost(a, b):\n # An explicit call to _update() is required here to update the\n # trainable parameters in between tape executions.\n # This is different from how the autograd interface works.\n # Unless the update is issued, the validation check related to the\n # number of provided parameters fails in the tape: (len(params) !=\n # required_length) and the tape produces incorrect results.\n tape._update()\n new_tape = tape.bind_new_parameters([a, b], [0, 1])\n return execute([new_tape], dev, **execute_kwargs)[0]\n\n jac_fn = jax.jit(jax.grad(cost))\n jac = jac_fn(a, b)\n\n a = jax.numpy.array(0.54)\n b = jax.numpy.array(0.8)\n\n # check that the cost function continues to depend on the\n # values of the parameters for subsequent calls\n res2 = cost(2 * a, b)\n expected = [np.cos(2 * a)]\n assert np.allclose(res2, expected, atol=tol, rtol=0)\n\n jac_fn = jax.jit(jax.grad(lambda a, b: cost(2 * a, b)))\n jac = jac_fn(a, b)\n expected = -2 * np.sin(2 * a)\n assert np.allclose(jac, expected, atol=tol, rtol=0)", "def test_jacobian(self):\n\n gT1 = Pose2(1, 2, np.pi/2)\n gT2 = Pose2(-1, 4, np.pi)\n\n expected = Pose2(2, 2, np.pi/2)\n\n def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):\n # print(f\"{this = },\\n{v = },\\n{len(H) = }\")\n\n key0 = this.keys()[0]\n key1 = this.keys()[1]\n gT1, gT2 = v.atPose2(key0), v.atPose2(key1)\n error = Pose2(0, 0, 0).localCoordinates(gT1.between(gT2))\n \n if len(H) > 0:\n result = gT1.between(gT2)\n H[0] = -result.inverse().AdjointMap()\n H[1] = np.eye(3)\n return error\n \n noise_model = gtsam.noiseModel.Unit.Create(3)\n cf = ge.CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func)\n v = Values()\n v.insert(0, gT1)\n v.insert(1, gT2)\n \n bf = gtsam.BetweenFactorPose2(0, 1, Pose2(0, 0, 0), noise_model)\n\n gf = cf.linearize(v)\n gf_b = bf.linearize(v)\n\n J_cf, b_cf = gf.jacobian()\n J_bf, b_bf = gf_b.jacobian()\n np.testing.assert_allclose(J_cf, J_bf)\n np.testing.assert_allclose(b_cf, b_bf)", "def test_single_expectation_value_with_argnum_all(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n # we choose both trainable parameters\r\n tapes, fn = finite_diff(tape, argnum=[0, 1], approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (1, 2)\r\n\r\n expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_var_expectation_values(self, approx_order, strategy, tol):\r\n dev 
= qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.var(qml.PauliX(1))\r\n\r\n tapes, fn = finite_diff(tape, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (2, 2)\r\n\r\n expected = np.array([[-np.sin(x), 0], [0, -2 * np.cos(y) * np.sin(y)]])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_gradable_funcs(self):\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = None", "def test_jax(self, approx_order, strategy, tol):\r\n jax = pytest.importorskip(\"jax\")\r\n from jax import numpy as jnp\r\n from pennylane.interfaces.jax import JAXInterface\r\n from jax.config import config\r\n\r\n config.update(\"jax_enable_x64\", True)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.543, -0.654])\r\n\r\n def cost_fn(x):\r\n with JAXInterface.apply(qml.tape.QubitParamShiftTape()) as tape:\r\n qml.RX(x[0], wires=[0])\r\n qml.RY(x[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n tape.trainable_params = {0, 1}\r\n tapes, fn = finite_diff(tape, n=1, approx_order=approx_order, strategy=strategy)\r\n jac = fn([t.execute(dev) for t in tapes])\r\n return jac\r\n\r\n res = jax.jacobian(cost_fn)(params)\r\n x, y = params\r\n expected = np.array(\r\n [\r\n [-np.cos(x) * np.sin(y), -np.cos(y) * np.sin(x)],\r\n [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)],\r\n ]\r\n )\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_autograd(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit.autograd\", wires=2)\r\n params = np.array([0.543, -0.654], requires_grad=True)\r\n\r\n def cost_fn(x):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x[0], wires=[0])\r\n qml.RY(x[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n tape.trainable_params = {0, 1}\r\n tapes, fn = finite_diff(tape, n=1, approx_order=approx_order, strategy=strategy)\r\n jac = fn(dev.batch_execute(tapes))\r\n return jac\r\n\r\n res = qml.jacobian(cost_fn)(params)\r\n x, y = params\r\n expected = np.array(\r\n [\r\n [-np.cos(x) * np.sin(y), -np.cos(y) * np.sin(x)],\r\n [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)],\r\n ]\r\n )\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def jac_pnp(beta, **kw):\n temp = kw.get('temp', 85)\n rate_source_ = kw.get('rate_source', 1E-4)\n DSIN_ = kw.get('DSIN', 3.92E-16)\n stress_voltage = kw.get('stress_voltage', 3.75)\n L1_ = kw.get('L1', 0.075)\n N1_ = int(kw.get('N1', 100))\n tsteps_ = int(kw.get('tsteps', 
720))\n time_s = kw.get('time_s', np.array([0]))\n rsh_norm_ = kw.get('rsh_norm', np.array([0]))\n print('Called jac_pnp')\n\n S0_ = 10 ** beta[0]\n h_ = 10 ** beta[1]\n DSF_ = 10 ** beta[2]\n # y0 = ml_pid.simulate_rsh(\n # S0=S0_, h=h_, DSF=DSF_, simulation_time=np.amax(time_s) * 1.1,\n # temperature=temp, rate_source=rate_source_, DSIN=DSIN_,\n # stress_voltage=stress_voltage, L1=L1_, m=1, time_steps=tsteps_,\n # N1=N1_\n # )\n\n EPS_ = np.finfo(np.float).eps\n delta = EPS_ ** (1 / 3)\n delta = 1E-1\n\n # forward\n # derivparams_forward = []\n derivparams = []\n for i in range(len(beta)):\n copy = np.array(beta)\n copy[i] += delta\n derivparams.append(copy)\n # backward\n # derivparams_backward = []\n for i in range(len(beta)):\n copy = np.array(beta)\n copy[i] -= delta\n derivparams.append(copy)\n\n # results_forward = pool.map(partial(func, **kw), derivparams_forward)\n # results_backward = pool.map(partial(func, **kw), derivparams_backward)\n results = np.array(pool.map(partial(func, **kw), derivparams))\n [m, n] = results.shape\n idx = int(m / 2)\n results_forward = results[0:idx, :]\n results_backward = results[idx::,:]\n derivs = [(rf - rb) / (2.0 * delta) for rf, rb in zip(results_forward, results_backward)]\n return np.array(derivs).T", "def test_grad_on_execution(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(dev, \"execute_and_gradients\")\n\n def cost(a):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=\"device\",\n gradient_kwargs={\n \"method\": \"adjoint_jacobian\",\n \"use_device_state\": True,\n },\n )[0]\n\n a = jax.numpy.array([0.1, 0.2])\n jax.jit(cost)(a)\n\n # adjoint method only performs a single device execution, but gets both result and gradient\n assert dev.num_executions == 1\n spy.assert_called()", "def test_multiple_expvals_grad(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.expval(qml.PauliZ(0))\n qml.expval(qml.PauliZ(1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)[0]\n return res[0] + res[1]\n\n res = jax.jit(jax.grad(cost), static_argnums=1)(params, cache=None)\n assert res.shape == (3,)", "def test_single_expectation_value_with_argnum_one(self, approx_order, strategy, tol):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = 0.543\r\n y = -0.654\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n # we choose only 1 trainable parameter\r\n tapes, fn = finite_diff(tape, argnum=1, approx_order=approx_order, strategy=strategy)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (1, 2)\r\n\r\n expected = np.array([[0, np.cos(y) * np.cos(x)]])\r\n res = res.flatten()\r\n expected = expected.flatten()\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_differentiable_expand(self, execute_kwargs, tol):\n\n class U3(qml.U3):\n def expand(self):\n theta, phi, lam = self.data\n wires = self.wires\n return [\n qml.Rot(lam, theta, -lam, wires=wires),\n qml.PhaseShift(phi + lam, wires=wires),\n ]\n\n def cost_fn(a, p, 
device):\n qscript = qml.tape.QuantumScript(\n [qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))]\n )\n qscript = qscript.expand(stop_at=lambda obj: device.supports_operation(obj.name))\n return execute([qscript], device, **execute_kwargs)[0]\n\n a = jax.numpy.array(0.1)\n p = jax.numpy.array([0.1, 0.2, 0.3])\n\n dev = qml.device(\"default.qubit\", wires=1)\n res = jax.jit(cost_fn, static_argnums=2)(a, p, device=dev)\n expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (\n np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])\n )\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n jac_fn = jax.jit(jax.grad(cost_fn, argnums=(1)), static_argnums=2)\n res = jac_fn(a, p, device=dev)\n expected = jax.numpy.array(\n [\n np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),\n np.cos(p[1]) * np.cos(p[2]) * np.sin(a)\n - np.sin(p[1])\n * (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),\n np.sin(a)\n * (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),\n ]\n )\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_functions():\n\n isGood = True\n\n # Test the trapezoidal integration function\n trapz = integrate_trapz(f, 0, 1, N=1000)[0]\n if abs(trapz - 1/3) > 0.00001:\n print(\"WARNING: integrate_trapz() failed the test.\")\n isGood = False\n\n # Test the simpson integration function\n simps = integrate_simpson(f, 0, 1, N=1000)[0]\n if abs(simps - 1 / 3) > 1e-12:\n print(\"WARNING: integrate_simpson() failed the test.\")\n isGood = False\n\n # Test the adaptive trapezoidal integration function\n trapz = integrate_trapz_adaptive(f, 0, 1, 1e-12)[0]\n if abs(trapz - 1 / 3) > 1e-12:\n print(\"WARNING: integrate_trapz_adaptive() failed the test.\")\n isGood = False\n\n # Test the adaptive simpson integration function\n simps = integrate_simpson_adaptive(f, 0, 1, 1e-12)[0]\n if abs(simps - 1 / 3) > 1e-12:\n print(\"WARNING: integrate_simpson_adaptive() failed the test.\")\n isGood = False\n\n # Test the 1D Monte Carlo integration function\n monte = integrate_monte_carlo(f, 0, 1, 1000000)\n if abs(monte - 1 / 3) > 0.01:\n print(\"WARNING: integrate_monte_carlo() failed the test.\")\n print(\"NOTE: There is a small probability of random failure with this method.\")\n print(\"Run the test a few more times.\")\n isGood = False\n\n # Test the multi-dimensional Monte Carlo integration function\n monte = integrate_monte_carlo_nd(g, 3, [[-1, 1], [-1, 1], [-1, 1]], N=100000)\n if abs(monte - (4/3)*np.pi) > 0.1:\n print(\"WARNING: integrate_monte_carlo_nd() failed the test.\")\n print(\"NOTE: There is a small probability of random failure with this method.\")\n print(\"Run the test a few more times.\")\n isGood = False\n\n if isGood is True:\n print(\"Module is good.\")", "def test_non_differentiable_error(self):\r\n psi = np.array([1, 0, 1, 0]) / np.sqrt(2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.QubitStateVector(psi, wires=[0, 1])\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.probs(wires=[0, 1])\r\n\r\n # by default all parameters are assumed to be trainable\r\n with pytest.raises(\r\n ValueError, match=r\"Cannot differentiate with respect to parameter\\(s\\) {0}\"\r\n ):\r\n finite_diff(tape)\r\n\r\n # setting trainable parameters avoids this\r\n tape.trainable_params = {1, 2}\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n tapes, fn = finite_diff(tape)\r\n\r\n # For now, we must squeeze the results of 
the device execution, since\r\n # qml.probs results in a nested result. Later, we will revisit device\r\n # execution to avoid this issue.\r\n res = fn(qml.math.squeeze(dev.batch_execute(tapes)))\r\n assert res.shape == (4, 2)", "def jacobian_numba(coordinates, points, jac, greens_function):\n east, north, upward = coordinates[:]\n point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n jac[i, j] = greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )", "def jacobian(self, xs):\n rx_list = []\n for nx,x in enumerate(xs):\n \n numpy.testing.assert_array_almost_equal(self.independentVariableShapeList[nx], numpy.shape(x), err_msg = '\\ntaped xs[%d].shape != forward xs[%d]\\n'%(nx,nx))\n rx = numpy.ravel(x)\n rx_list.append(rx)\n self.x = numpy.concatenate(rx_list)\n return wrapped_functions.jacobian(self.tape_tag, self.x)" ]
[ "0.6522309", "0.65060055", "0.6396451", "0.63793343", "0.61633843", "0.61629945", "0.6161094", "0.6139958", "0.61293775", "0.61203456", "0.6013492", "0.6010821", "0.6009504", "0.5966791", "0.59597903", "0.59468144", "0.59339505", "0.5917524", "0.5891985", "0.5855533", "0.57962584", "0.57883525", "0.57380986", "0.57310176", "0.5727344", "0.57172483", "0.5681488", "0.56666076", "0.56464195", "0.56455094" ]
0.7660705
0
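The record that closes above contains several entries built around finite-difference derivative rules (e.g. PennyLane's `finite_diff` transform) and classic numerical-integration checks. Purely as a point of reference for readers scanning these rows, here is a minimal NumPy sketch of a central-difference gradient; the helper name and the example objective are illustrative and are not taken from the dataset:

```python
import numpy as np

def central_difference_grad(f, x, h=1e-6):
    """Approximate the gradient of f at x with symmetric (central) differences."""
    x = np.asarray(x, dtype=float)
    grad = np.zeros_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = h
        # (f(x + h*e_i) - f(x - h*e_i)) / (2h) approximates df/dx_i
        grad[i] = (f(x + step) - f(x - step)) / (2 * h)
    return grad

# Example: gradient of f(x, y) = x**2 + 3*y at (1.0, 2.0) is approximately (2.0, 3.0).
# print(central_difference_grad(lambda v: v[0]**2 + 3*v[1], [1.0, 2.0]))
```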
Searches Spotify for a song, giving you the link you can use to listen in. Give the query to search for and it will search by title/artist for the best match
async def spotify(self, ctx, *, query): # Setup the headers with the token that should be here headers = {"Authorization": "Bearer {}".format(self._token)} opts = {"q": query, "type": "track"} url = "https://api.spotify.com/v1/search" response = await utils.request(url, headers=headers, payload=opts) try: await ctx.send( response.get("tracks") .get("items")[0] .get("external_urls") .get("spotify") ) except (KeyError, AttributeError, IndexError): await ctx.send("Couldn't find a song for:\n{}".format(query))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_song(title, artist):\n\ttitle = quote(title, safe='')\n\tartist = quote(artist, safe='')\n\tbase_url = SPOTIFY_API_HOST + 'search/' + '?q=track:{0}+artist:{1}&type=track&limit=1'\n\turl = base_url.format(title, artist)\n\tresults = requests.get(url).json()\n\n\ttry:\n\t\tif results['tracks']['total'] == 0:\n\t\t\tlogging.debug('Found no results for song {0}'.format(title))\n\t\t\treturn ''\n\texcept KeyError as e:\n\t\tlogging.warning('Invalid result from spotify on key {0}:\\n{1}'.format(e, results))\n\turi_string = results['tracks']['items'][0]['uri']\n\tlogging.debug('Found uri {0} for song {1}'.format(\n\t\turi_string[uri_string.rfind(':')+1:], title))\n\treturn uri_string[uri_string.rfind(':')+1:] # Strip off the 'spotify:track:' tag.", "def searchSong(query, lim=40):\n headers = {\n \"User-Agent\": \"ytmdl\"\n }\n payload = {\n \"api_key\": API_KEY,\n \"method\": \"track.search\",\n \"track\": query,\n \"format\": \"json\"\n }\n\n data = []\n\n response = get(API_BASE, headers=headers, params=payload)\n\n if response.status_code != 200:\n print(response.status_code)\n return data\n\n for song in response.json()[\"results\"][\"trackmatches\"][\"track\"]:\n data.append(LastFMSongs(song))\n\n return data", "def find_song(spotify, query, matchRatio=0.75):\n results = spotify.search(\"track:\\\"\" + query + \"\\\"\", limit=50, type='track')\n candidates = list(map(lambda track: {'name': track['name'], 'uri': track['uri']}, \n results['tracks']['items']))\n for candidate in candidates:\n matcher = difflib.SequenceMatcher(None, candidate['name'].lower(), query.lower())\n if matcher.ratio() >= matchRatio:\n print(\"Adding song \" + candidate[\"name\"] + \" for \" + query)\n return candidate['uri']\n print(\"Found no matches for \" + query)\n return None", "def spotify_track_search(query: str, access_token: str) -> dict:\n response = requests.get(\n \"https://api.spotify.com/v1/search?q={}&type=track\".format(query),\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n if (\n response.status_code == 200\n and \"tracks\" in response.text\n and \"items\" in response.text\n ):\n return json.loads(response.text)[\"tracks\"][\"items\"]\n return {\"error\": response.reason, \"status\": response.status_code}", "def search_song(self, name):\n self.logger.debug('Searched for Song: {}'.format(name))\n results = self.sp.search(q='track:' + name, type='track')\n songs = [song for song in results['tracks']['items']]\n i = 1\n songs_ls = []\n table_ls = []\n for song in songs:\n table_ls.append([i,\n song['name'][0:20].strip(),\n song['album']['name'][0:20].strip(),\n \"%0.2f\" % (song['duration_ms'] / 60000),\n song['popularity']])\n songs_ls.append(song['uri'])\n i = i + 1\n return songs_ls, table_ls", "def song_search_matching(chart_song, query):\n song_searches = song_search(query, NUM_SONG_SEARCH_RESULTS)\n if 'error' in song_searches:\n print('>>> error:', song_searches['error'])\n return\n\n songs = []\n # print(song_searches)\n for s in song_searches['songs']:\n # print('test song:', s)\n performers = ' '.join(x['name'] for x in s['performers']).lower()\n\n print('checking performers:', performers, 'vs.', chart_song.artist.lower())\n print('checking titles:', '\"' + s['title']['name'] + '\"', 'vs.', '\"' + chart_song.title + '\"')\n diff1 = fuzz.token_set_ratio(chart_song.artist.lower(), performers)\n diff2 = difflib.SequenceMatcher(\n None,\n a=s['title']['name'].lower(),\n b=chart_song.title.lower()\n ).ratio()\n print('performer score:', diff1, 'and title 
score:', diff2)\n if diff1 >= 65 and diff2 > 0.75:\n songs.append(s)\n print('song passed with diff performers of', diff1, 'and diff title of', diff2)\n if diff1 <= 75 or diff2 < 0.85:\n print('NOTE impartial match?', s, 'for', chart_song)\n\n return songs", "async def search(self, ctx: commands.Context, *, query: t.Optional[str]) -> None:\n if query is None:\n # Maybe the user didn't know to pass in a query?\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.description = (\n \"No query passed in. Try passing in something: `$search arabic music`\"\n )\n embed.set_footer(\n text=\"See $help voice for more commands.\", icon_url=Icons.info\n )\n return await ctx.send(embed=embed)\n\n if (results := await self.get_tracks(query, True, False)) is not None:\n # Ensure that we're connected before playing.\n await ctx.invoke(self.connect, channel=None)\n player = self.get_player(ctx.guild)\n if not player.is_connected:\n return\n\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.set_footer(\n text=f\"Showing 5/{len(results)} results.\",\n icon_url=ctx.author.avatar_url,\n )\n embed.description = \"\"\n results = results[:5]\n\n for index, track in enumerate(results, 1):\n m, s = self.get_formatted_length(track.length, True)\n embed.description += (\n f\"**{index}**. [{track.title}]({track.uri}) ({m}:{s})\\n\"\n )\n\n # Get a integer selection using Choice.prompt().\n if (\n choice := await Choices.prompt(\n ctx=ctx, embed=embed, n=5, author_only=True\n )\n ) is None:\n if player.queue.empty:\n await ctx.invoke(self.disconnect)\n return\n\n embed = discord.Embed(\n title=\"Now queued.\" if player.is_playing else \"Now playing.\",\n description=f\"[{results[choice].title}]({results[choice].uri})\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n\n m, s = self.get_formatted_length(results[choice].length, False)\n embed.set_footer(\n text=f\"Track goes for {m} minutes and {s} seconds.\",\n icon_url=ctx.author.avatar_url,\n )\n if results[choice].thumb is not None:\n embed.set_thumbnail(url=results[choice].thumb)\n await ctx.send(embed=embed)\n\n player.queue.add_tracks(results[choice])\n if not player.is_playing:\n await player.playback()\n else:\n fail = Embeds.status(success=False, desc=\"Failed to find any results.\")\n await ctx.send(embed=fail)", "def search(\n self, query, callback=None,\n track_offset=0, track_count=20,\n album_offset=0, album_count=20,\n artist_offset=0, artist_count=20,\n playlist_offset=0, playlist_count=20,\n search_type=None):\n return spotify.Search(\n query=query, callback=callback,\n track_offset=track_offset, track_count=track_count,\n album_offset=album_offset, album_count=album_count,\n artist_offset=artist_offset, artist_count=artist_count,\n playlist_offset=playlist_offset, playlist_count=playlist_count,\n search_type=search_type)", "def search_spotify(self, artist_name):\n if VERBOSE:\n print (\"\\nSearching for artist on Spotify: \" + artist_name)\n try:\n result = self.sp.search(q='artist:' + artist_name, type='artist')\n except spotipy.client.SpotifyException:\n print(\"ERROR: Couldnt not find artist: %s. Trying again.\" % artist_name)\n try:\n result = self.sp.search(q='artist:' + artist_name, type='artist')\n except spotipy.client.SpotifyException as error:\n print(\"ERROR: Failed to search twice. 
Error below:\")\n print(error)\n result = None\n except ValueError as error:\n print(\"ERROR: Failure while searching Spotify for artist: %s\" % artist_name)\n print(error)\n result = None\n return result", "def search(\n\tlog,\n\tverbose,\n\tquiet,\n\tusername,\n\tdevice_id,\n\tyes,\n\tfilters\n):\n\n\tconfigure_logging(verbose - quiet, username, log_to_file=log)\n\n\tlogger.info(\"Logging in to Google Music\")\n\tmc = google_music.mobileclient(username, device_id=device_id)\n\n\tif not mc.is_authenticated:\n\t\tsys.exit(\"Failed to authenticate client.\")\n\n\tsearch_results = filter_songs(mc.songs(), filters)\n\tsearch_results.sort(\n\t\tkey=lambda song: (\n\t\t\tsong.get('artist', ''),\n\t\t\tsong.get('album', ''),\n\t\t\tsong.get('trackNumber', 0)\n\t\t)\n\t)\n\n\tif search_results:\n\t\tresult_num = 0\n\t\ttotal = len(search_results)\n\t\tpad = len(str(total))\n\n\t\tconfirm = (\n\t\t\tyes\n\t\t\tor input(f\"\\nDisplay {len(search_results)} results? (y/n) \") in (\"y\", \"Y\")\n\t\t)\n\n\t\tif confirm:\n\t\t\tfor result in search_results:\n\t\t\t\tresult_num += 1\n\n\t\t\t\ttitle = result.get('title', \"<empty>\")\n\t\t\t\tartist = result.get('artist', \"<empty>\")\n\t\t\t\talbum = result.get('album', \"<empty>\")\n\t\t\t\tsong_id = result['id']\n\n\t\t\t\tlogger.info(\n\t\t\t\t\tf\"{result_num:>{pad}}/{total} {title} -- {artist} -- {album} ({song_id})\"\n\t\t\t\t)\n\telse:\n\t\tlogger.info(\"No songs found matching query\")\n\n\tmc.logout()\n\tlogger.info(\"All done!\")", "async def playlist(self, ctx, *, query):\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"playlist\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"playlists\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))", "def search():\n\tif not request.vars.search_term:\n\t\tredirect(URL('index'))\n\tterm = request.vars.search_term\n\torigterm = term\n\tterm = term.replace(' ','|')\n\tartists = db.executesql(\"select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsvector(m1.art_name),to_tsquery('\"+term+\"')) rank from art_info m1 where to_tsvector('english',m1.art_name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\talbums = db.executesql(\"select distinct(m1.id),m2.name,m1.art_id,m1.art_name,m1.rel_type,m1.count,ts_rank(to_tsvector(m2.name),to_tsquery('\"+term+\"')) rank from rel_art m1, release_name m2, release_group m3 where m3.name = m2.id and m3.id = m1.id and to_tsvector('english',m2.name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\tsongs = db.executesql(\"select m2.id, m1.name, m3.art_id, m3.art_name, m3.rel_id, m3.rel_name from track_name m1, recording m2, rec_rel_art m3 where m1.id = m2.name and m2.id = m3.rec_id and lower(m1.name) LIKE lower('%%\"+origterm+\"%%') limit 20;\")\n\treturn dict(songs=songs, albums=albums, artists=artists)", "async def search(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = \"ytsearch:{}\".format(query)\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await 
ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice channel :no_entry:\")\n else:\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n msg = \"\"\n for i, x in enumerate(results[\"tracks\"][:10], start=1):\n msg += \"{}. **[{}]({})**\\n\".format(i, x[\"info\"][\"title\"], x[\"info\"][\"uri\"])\n message = await ctx.send(embed=discord.Embed(description=msg).set_footer(text=\"Choose a number to the queue the song | cancel\"))\n def check(m):\n return m.channel == ctx.channel and m.author == ctx.author and (m.content.isdigit() or m.content.lower() == \"cancel\")\n try:\n response = await self.bot.wait_for(\"message\", check=check, timeout=60)\n if response.content.lower() == \"cancel\":\n await response.delete()\n return await message.delete()\n else:\n track = results[\"tracks\"][int(response.content) + 1]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s=discord.Embed()\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await response.delete()\n await message.delete()\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()\n except asyncio.TimeoutError:\n return await ctx.send(\"Timed out :stopwatch:\")", "def search_song(self, name, album=None, artist=None):\n\n endpoint = \"/search\"\n query = f\"track:{self._strip_punctuation(name)}\"\n if artist:\n query += f\" artist:{self._strip_punctuation(artist)}\"\n if album:\n query += f\" album:{self._strip_punctuation(album)}\"\n response = self._send(endpoint, \"GET\", params={\"q\": query, \"type\": \"track\"})\n tracks = response.json()[\"tracks\"]\n if tracks[\"total\"] == 0:\n raise SongNotFoundError(\n f\"song name={name} artist={artist} album={album} could not be found\"\n )\n return tracks[\"items\"]", "def spotifySearch(request,genre):\n\tif genre in genre_artist.keys():\n\t\ttracks = top_tracks(genre)\n\t\tif tracks:\n\t\t\treturn HttpResponse(json.dumps(tracks))\n\t\telse:\n\t\t\tresponse ={\"message\":\"Artist/track is not found.\", \"error\":True}\n\t\t\treturn HttpResponse(json.dumps(response))\n\telse:\n\t\tresponse = {\"message\": \"Please give an existed genre as a parameter. 
Genres are: rock, alternative rock, pop, blues, country, electronic, jazz, r&b, rap, reggae.\", \"error\":True}\n\t\treturn HttpResponse(json.dumps(response))", "def search_multiple_tracks(search_query, sp):\r\n \r\n # List to store the track ids\r\n track_ids = []\r\n # List to store the track names and artists\r\n tracks = []\r\n\r\n #Search for 10 results in the Spotify API given a search querry\r\n results = sp.search(q = search_query ,limit=10)\r\n results = results['tracks']['items']\r\n\r\n # Extract the track id's, names and artists for all the search results\r\n for i in range(len(results)):\r\n\r\n # Get track id, artist and name\r\n track_id = results[i]['id']\r\n artist = results[i]['artists'][0]['name']\r\n track_name = results[i]['name']\r\n\r\n # Get a string with the artist and track name\r\n track = artist + ' - ' + track_name\r\n\r\n # Append the track id's and track name/artist to the list\r\n track_ids.append(track_id)\r\n tracks.append(track)\r\n\r\n # Make a dictionary of the track id and track name/artist list.\r\n return dict(zip(tracks,track_ids))", "async def search_song(self, search: str, filter_services: str=False):\n requestURL = f\"{self.base_url}/search/song\"\n songParams = { \"query\": search }\n # Check if services are filtered\n if filter_services != False:\n songParams[\"filterServices\"] = filter_services\n\n # Get the contents of the request\n results = await self.send_request(requestURL, songParams)\n # Check to see if results were returned\n if len(results[\"meta\"][\"servicesReturned\"]) == 0:\n return []\n\n # Send the contents of request, if any\n items = []\n for item in results[\"meta\"][\"servicesReturned\"]:\n items.append(Song(results[\"services\"][item], item))\n # Return a list of fetched results\n return items", "def song_found(self, searched_song, result, exact):\n message = {\n \"timestamp\": self._get_time(),\n \"level\": \"INFO\",\n \"type\": \"SONG_FOUND\",\n \"searched_song\": json.dumps(searched_song.to_dict()),\n \"result\": json.dumps(result),\n \"exact\": exact,\n }\n\n self._log_queue.put(json.dumps(message))", "def extract_track_url(search):\n\n if 'tracks' in search:\n tracks = search['tracks']\n if 'items' in tracks:\n items = tracks['items']\n # take the first url we can find\n for item in items:\n if 'external_urls' in item:\n external_urls = item['external_urls']\n if 'spotify' in external_urls:\n url = external_urls['spotify']\n return url", "def search(search_term):\r\n querystring = apiurl_musixmatch + \"track.search?q_track=\" + urllib2.quote(\r\n search_term) + \"&apikey=\" + apikey_musixmatch + \"&format=plain\"\r\n try:\r\n request = urllib2.Request(querystring)\r\n # timeout set 4 to seconds; automatically retries\r\n response = urllib2.urlopen(request, timeout=4)\r\n # raw = response.read()\r\n print colored.green(\"Starting\", bold=12)\r\n all_data = ''\r\n while True:\r\n do_task()\r\n print '\\b.',\r\n sys.stdout.flush()\r\n data = response.read(2048)\r\n if not data:\r\n break\r\n all_data += data\r\n time.sleep(0.4)\r\n print \"\\n\"\r\n json_obj = json.loads(all_data.decode(\"utf-8\"))\r\n body = json_obj['message']['body']['track_list']\r\n list_of_all_songs = []\r\n track_table = PrettyTable(['Song ID', 'Song Name', 'Artist Name'])\r\n for result in body:\r\n song_details = []\r\n result_id = result['track']['track_id']\r\n title = result['track']['track_name']\r\n artist_name = result['track']['artist_name']\r\n song_details.insert(0, result_id)\r\n song_details.insert(1, title)\r\n song_details.insert(2, 
artist_name)\r\n list_of_all_songs.append(song_details)\r\n track_table.add_row(\r\n [result_id, title, artist_name])\r\n print colored.yellow(track_table, bold=12)\r\n except socket.timeout:\r\n print 'Connection timed out, try again'", "def process_search_async(self, call):\n if \"query\" not in call.data:\n _LOGGER.error(\"No text to search\")\n return\n global G_SPOTIFY_FOUND\n G_SPOTIFY_FOUND = []\n search_text = call.data[\"query\"]\n\n self.refresh_spotify_instance()\n\n # Don't true search when token is expired\n if self._oauth.is_token_expired(self._token_info):\n _LOGGER.warning(\"Spotify failed to update, token expired.\")\n return\n\n titles = [ais_global.G_EMPTY_OPTION]\n # artist\n results = self._spotify.search(q='artist:' + search_text, type='artist')\n titles.extend(self.get_list_from_results(results, 'artist'))\n # album\n results = self._spotify.search(q='album:' + search_text, type='album')\n titles.extend(self.get_list_from_results(results, 'album'))\n # playlist\n results = self._spotify.search(q='playlist:' + search_text, type='playlist')\n titles.extend(self.get_list_from_results(results, 'playlist'))\n\n # Update input_select values:\n yield from self.hass.services.async_call(\n 'input_select',\n 'set_options', {\n \"entity_id\": \"input_select.ais_music_track_name\",\n \"options\": titles})\n\n if len(G_SPOTIFY_FOUND) > 0:\n text = \"Znaleziono: %s, wล‚ฤ…czam pierwszy: %s\" % (\n str(len(G_SPOTIFY_FOUND)), G_SPOTIFY_FOUND[0][\"title\"])\n else:\n text = \"Brak wynikรณw na Spotify dla zapytania %s\" % search_text\n yield from self.hass.services.async_call(\n 'ais_ai_service', 'say_it', {\n \"text\": text\n })\n yield from self.hass.services.async_call(\n 'input_select',\n 'select_option', {\n \"entity_id\": \"input_select.ais_music_track_name\",\n \"option\": G_SPOTIFY_FOUND[0][\"title\"]})", "def search_with_song(song_name: str, mode: int) -> str:\n SONG_NAME = 1\n db = get_db_name_by_mode(mode)\n song_list = get_singers_and_songs_by_mode(mode)[1]\n res = []\n songs_data = []\n\n db_connection = sqlite3.connect(db)\n if get_acceleration_flag(mode, True):\n for letter in song_name:\n db_cursor = db_connection.cursor()\n db_cursor.execute('SELECT * FROM TEST WHERE SONG LIKE \"%' + letter + '%\"')\n songs_data.extend([song for song in db_cursor.fetchall()])\n pass\n songs_data = list(dict.fromkeys(songs_data))\n similar_songs = [song[SONG_NAME] for song in songs_data]\n similar_songs = compare.compare(similar_songs, song_name, ac=True)\n for song_with_similar_score in similar_songs: # pick the song in similar_songs from in songs_data\n for song_info in songs_data:\n if song_with_similar_score[SONG_NAME] == song_info[SONG_NAME]:\n res.append(song_info)\n break\n pass\n else:\n similar_songs = compare.compare(song_list, song_name)\n for song_with_similar_score in similar_songs:\n db_cursor = db_connection.cursor()\n db_cursor.execute('SELECT * FROM TEST WHERE SONG = \"' + song_with_similar_score[SONG_NAME] + '\"')\n res.extend(db_cursor.fetchall())\n pass\n pass\n db_connection.close()\n\n if len(res) == 0:\n return response.pack(response.EMPTY, res)\n else:\n return response.pack(response.SUCCESS, res)\n pass", "def search_artist_by_name(sp, artist_name_input):\n\n results = sp.search(q=artist_name_input, type='artist', limit=20, offset=0, market=\"DE\")\n\n result_artist_name = results[\"artists\"][\"items\"][0][\"name\"]\n result_artist_uri = results[\"artists\"][\"items\"][0][\"uri\"]\n\n return result_artist_name, result_artist_uri", "def 
spotify_search(request):\n\n logger.debug(\"Spotify Search Called\")\n response_data = {}\n\n spotify_pre_auth = spotify_auth(request)\n if type(spotify_pre_auth) is JsonResponse:\n return spotify_pre_auth\n\n response = spotify_pre_auth.get('result','')\n query = spotify_pre_auth.get('query','')\n search_type = spotify_pre_auth.get('search_type','')\n response_code=spotify_pre_auth.get('status_code','')\n\n if response_code == 200:\n auth_data = json.loads(response)\n access_token = auth_data['access_token']\n headers = {\"Authorization\": \"Bearer %s\" % access_token}\n res = requests.get(\"https://api.spotify.com/v1/search?q=%s&type=%s\" % (query, search_type), headers=headers)\n if res.status_code == 200:\n result = json.loads(res.text)\n data_out = []\n for track in result['tracks']['items']:\n album = {\"name\": track['album']['name'], \"author\":track['artists'][0]['name'], \"url\":track['album']['href'], \"tracks_num\": track['album']['total_tracks'], 'year': track['album']['release_date'][:4]}\n if album not in data_out:\n data_out.append(album)\n return {\"result\":\"success\", \"payload\":data_out, \"status_code\":res.status_code}\n else:\n response = res.text\n response_code = res.status_code\n response_body = {\"result\": \"failure\", \"message\": \"Spotify album failed. Check the url or the connections\", \"status_code\": response_code}\n return response_body", "def test_get_songs_by_query(self, track_elms, service_config, request):\n service_config.search.search_tracks.return_value = track_elms\n request.args['query'] = 'Dummy'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'", "async def search_song(album_name):\n async with aiohttp.ClientSession() as session:\n async with session.get('https://bandcamp.com/api/fuzzysearch/1/autocomplete?q=' + album_name) as resp:\n response = await resp.json()\n\n results = response.get('auto', {}).get('results', [])\n results = [res for res in results if res.get('type') == 't']\n if not results:\n raise NotFound\n result = results[0]\n async with session.get(result.get('url', 'https://bandcamp.com/')) as resp:\n response = await resp.text()\n try:\n result['release_date'] = response.split('album_release_date: \"')[-1].split('\",')[0].split(':')[0]\n except:\n result['release_date'] = '01 Jan 1970 00'\n result['TrackAlbum'] = bs4.BeautifulSoup(response, 'html.parser').find('span', itemprop='inAlbum').text.strip()\n\n return BandcampSong(result)", "def search_for_artist(name):\n\ttoken = get_token()\n\tif token:\n\t\theaders = {\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + token}\n\t\toptions = {\n\t\t\t'q': name, 'type': 'artist', 'limit': '1'\n\t\t}\n\n\t\tresponse = requests.get(\n\t\t\t'https://api.spotify.com/v1/search',\n\t\t\theaders=headers,\n\t\t\tparams=options\n\t\t)\n\t\tif response.status_code == 200:\n\t\t\tcontent = json.loads(response.content.decode('utf-8'))\n\t\t\tif content:\n\t\t\t\treturn content['artists']['items'][0]['id']\n\t\t\telse: return None\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\treturn None", "def query_by_name(url, params, name):\n params[\"query\"] = \"artist:\" + name\n return query_site(url, params)", "def search_album_art(artist, title, select_index=0, return_all=False):\r\n # TODO: add soundcloud search as well if spotify comes up with no results.\r\n # Soundcloud has it disabled\r\n artist, title 
= parse.quote(artist), parse.quote(title)\r\n header = {'Authorization': 'Bearer ' + get_spotify_access_token()}\r\n # TODO: search through playlists too\r\n links = []\r\n links_set = set()\r\n for code in COUNTRY_CODES:\r\n url = f'https://api.spotify.com/v1/search?q={title}+artist:{artist}&type=track&market={code}'\r\n r = requests.get(url, headers=header).json()\r\n if 'tracks' in r:\r\n links_from_country = [item['album']['images'][0]['url'] for item in r['tracks']['items']]\r\n for link in links_from_country:\r\n if link not in links_set:\r\n links.append(link)\r\n links_set.add(link)\r\n if return_all: return links\r\n return links[0]", "def __generate_search_query(self) -> None:\n if self.query_accuracy < 100:\n if self.title is not None and self.title != '' and self.artist is not None and self.artist != '':\n # Use the title and the artist name to find more information about the song.\n query: str = self.title + ' ' + self.artist\n query = re.sub(self.__get_filter_regex(), '', query)\n self.query = query\n # Remove unnecessary information in order to get a simpler query version.\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 100\n return\n if self.query_accuracy < 50:\n # No title nor artist name available, use the filename as search query.\n filename: str = os.path.basename(self.original_path)\n filename = os.path.splitext(filename)[0]\n query: str = filename.lower()\n query = re.sub(self.__get_filter_regex(), '', query)\n query = query.replace('_', ' ')\n query = query.strip()\n self.query = query\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 50" ]
[ "0.80161357", "0.737705", "0.7374892", "0.71660817", "0.7111647", "0.70489657", "0.7044572", "0.6919186", "0.69184464", "0.68979985", "0.68788624", "0.6842842", "0.6830334", "0.67822534", "0.6743658", "0.67357975", "0.6718091", "0.66173923", "0.661147", "0.66001695", "0.65450424", "0.64963096", "0.6468558", "0.643577", "0.6398325", "0.6359566", "0.6357427", "0.6355501", "0.63384134", "0.63254154" ]
0.7385611
1
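Both the positive document in the record above and most of its negatives bottom out in the same HTTP call: Spotify's Web API `GET https://api.spotify.com/v1/search` with `type=track` and an OAuth bearer token. A minimal standalone sketch of that request with `requests` follows; it assumes a valid access token has already been obtained elsewhere, and the helper name, placeholder token, and example query are not part of the record:

```python
import requests

def search_spotify_track(query: str, access_token: str):
    """Return the open.spotify.com URL of the best-matching track, or None."""
    response = requests.get(
        "https://api.spotify.com/v1/search",
        headers={"Authorization": f"Bearer {access_token}"},
        params={"q": query, "type": "track", "limit": 1},
        timeout=10,
    )
    response.raise_for_status()
    items = response.json().get("tracks", {}).get("items", [])
    if not items:
        return None  # Spotify returned no matches for this query
    return items[0]["external_urls"]["spotify"]

# Usage (the token is a placeholder, not a real credential):
# print(search_spotify_track("never gonna give you up rick astley", "<ACCESS_TOKEN>"))
```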
Searches Spotify for a playlist, giving you the link you can use to listen in. Give the query to search for and it will search for the best match
async def playlist(self, ctx, *, query): # Setup the headers with the token that should be here headers = {"Authorization": "Bearer {}".format(self._token)} opts = {"q": query, "type": "playlist"} url = "https://api.spotify.com/v1/search" response = await utils.request(url, headers=headers, payload=opts) try: await ctx.send( response.get("playlists") .get("items")[0] .get("external_urls") .get("spotify") ) except (KeyError, AttributeError, IndexError): await ctx.send("Couldn't find a song for:\n{}".format(query))
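The playlist command above, like the track command in the previous record, builds its Authorization header from a token (`self._token`) obtained elsewhere. For completeness, here is a sketch of fetching such a token via Spotify's client-credentials flow; the client ID and secret are placeholders, and this is an illustration rather than the flow the dataset's own code necessarily uses:

```python
import requests

def get_spotify_app_token(client_id: str, client_secret: str) -> str:
    """Fetch an application access token via the client-credentials flow."""
    response = requests.post(
        "https://accounts.spotify.com/api/token",
        data={"grant_type": "client_credentials"},
        auth=(client_id, client_secret),  # HTTP Basic auth with the app credentials
        timeout=10,
    )
    response.raise_for_status()
    return response.json()["access_token"]

# token = get_spotify_app_token("<CLIENT_ID>", "<CLIENT_SECRET>")
# The token is then sent as "Authorization: Bearer <token>" to /v1/search,
# e.g. with params={"q": "lofi beats", "type": "playlist", "limit": 1}.
```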
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def playlist_search(search_term, results=5):\r\n if search_term:\r\n url = PLAYLIST_SEARCH_URL.format(API_KEY, util.web.quote(search_term.encode('ascii', 'ignore')))\r\n response = util.web.http_get(url=url, json=True, referer='https://tinychat.com')\r\n\r\n if response['json'] is not None:\r\n play_lists = []\r\n try:\r\n if 'items' in response['json']:\r\n for i, item in enumerate(response['json']['items']):\r\n if i == results:\r\n return play_lists\r\n playlist_id = item['id']['playlistId']\r\n playlist_title = item['snippet']['title'].encode('ascii', 'ignore')\r\n play_list_info = {\r\n 'playlist_title': playlist_title,\r\n 'playlist_id': playlist_id\r\n }\r\n play_lists.append(play_list_info)\r\n except KeyError as ke:\r\n log.error(ke, exc_info=True)\r\n return None", "def searchSong(query, lim=40):\n headers = {\n \"User-Agent\": \"ytmdl\"\n }\n payload = {\n \"api_key\": API_KEY,\n \"method\": \"track.search\",\n \"track\": query,\n \"format\": \"json\"\n }\n\n data = []\n\n response = get(API_BASE, headers=headers, params=payload)\n\n if response.status_code != 200:\n print(response.status_code)\n return data\n\n for song in response.json()[\"results\"][\"trackmatches\"][\"track\"]:\n data.append(LastFMSongs(song))\n\n return data", "async def search(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = \"ytsearch:{}\".format(query)\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice channel :no_entry:\")\n else:\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n msg = \"\"\n for i, x in enumerate(results[\"tracks\"][:10], start=1):\n msg += \"{}. 
**[{}]({})**\\n\".format(i, x[\"info\"][\"title\"], x[\"info\"][\"uri\"])\n message = await ctx.send(embed=discord.Embed(description=msg).set_footer(text=\"Choose a number to the queue the song | cancel\"))\n def check(m):\n return m.channel == ctx.channel and m.author == ctx.author and (m.content.isdigit() or m.content.lower() == \"cancel\")\n try:\n response = await self.bot.wait_for(\"message\", check=check, timeout=60)\n if response.content.lower() == \"cancel\":\n await response.delete()\n return await message.delete()\n else:\n track = results[\"tracks\"][int(response.content) + 1]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s=discord.Embed()\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await response.delete()\n await message.delete()\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()\n except asyncio.TimeoutError:\n return await ctx.send(\"Timed out :stopwatch:\")", "def search(\n self, query, callback=None,\n track_offset=0, track_count=20,\n album_offset=0, album_count=20,\n artist_offset=0, artist_count=20,\n playlist_offset=0, playlist_count=20,\n search_type=None):\n return spotify.Search(\n query=query, callback=callback,\n track_offset=track_offset, track_count=track_count,\n album_offset=album_offset, album_count=album_count,\n artist_offset=artist_offset, artist_count=artist_count,\n playlist_offset=playlist_offset, playlist_count=playlist_count,\n search_type=search_type)", "async def spotify(self, ctx, *, query):\n\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"track\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"tracks\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))", "def search_song(title, artist):\n\ttitle = quote(title, safe='')\n\tartist = quote(artist, safe='')\n\tbase_url = SPOTIFY_API_HOST + 'search/' + '?q=track:{0}+artist:{1}&type=track&limit=1'\n\turl = base_url.format(title, artist)\n\tresults = requests.get(url).json()\n\n\ttry:\n\t\tif results['tracks']['total'] == 0:\n\t\t\tlogging.debug('Found no results for song {0}'.format(title))\n\t\t\treturn ''\n\texcept KeyError as e:\n\t\tlogging.warning('Invalid result from spotify on key {0}:\\n{1}'.format(e, results))\n\turi_string = results['tracks']['items'][0]['uri']\n\tlogging.debug('Found uri {0} for song 
{1}'.format(\n\t\turi_string[uri_string.rfind(':')+1:], title))\n\treturn uri_string[uri_string.rfind(':')+1:] # Strip off the 'spotify:track:' tag.", "def find_song(spotify, query, matchRatio=0.75):\n results = spotify.search(\"track:\\\"\" + query + \"\\\"\", limit=50, type='track')\n candidates = list(map(lambda track: {'name': track['name'], 'uri': track['uri']}, \n results['tracks']['items']))\n for candidate in candidates:\n matcher = difflib.SequenceMatcher(None, candidate['name'].lower(), query.lower())\n if matcher.ratio() >= matchRatio:\n print(\"Adding song \" + candidate[\"name\"] + \" for \" + query)\n return candidate['uri']\n print(\"Found no matches for \" + query)\n return None", "async def play(self, ctx, *, query: str):\n # Get the player for this guild from cache.\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n # If player is paused - unpause, return\n if player.paused:\n return await player.set_pause(False)\n\n # Remove leading and trailing <>. <> may be used to suppress embedding links in Discord.\n query = query.strip('<>')\n\n # Check if the user input might be a URL. If it isn't, we can Lavalink do a YouTube search for it instead.\n # SoundCloud searching is possible by prefixing \"scsearch:\" instead.\n if not url_rx.match(query):\n query = f'ytsearch:{query}'\n\n # Get the results for the query from Lavalink.\n results = await player.node.get_tracks(query)\n\n # Results could be None if Lavalink returns an invalid response (non-JSON/non-200 (OK)).\n # ALternatively, resullts['tracks'] could be an empty array if the query yielded no tracks.\n if not results or not results['tracks']:\n return await ctx.send(embed=self.error_embed(f\"No results found for `{query}`\"))\n\n embed = discord.Embed(color=discord.Color.blurple())\n\n # Valid loadTypes are:\n # TRACK_LOADED - single video/direct URL)\n # PLAYLIST_LOADED - direct URL to playlist)\n # SEARCH_RESULT - query prefixed with either ytsearch: or scsearch:.\n # NO_MATCHES - query yielded no results\n # LOAD_FAILED - most likely, the video encountered an exception during loading.\n if results['loadType'] == 'PLAYLIST_LOADED':\n tracks = results['tracks']\n\n for track in tracks:\n # Add all of the tracks from the playlist to the queue.\n length = track[\"info\"][\"length\"]\n track = lavalink.models.AudioTrack(\n track, requester=ctx.author.id, recommended=True, length=length)\n player.add(requester=ctx.author.id, track=track)\n\n embed.title = ''\n embed.description = f'Queued **{results[\"playlistInfo\"][\"name\"]}** - {len(tracks)} tracks'\n else:\n track = results['tracks'][0]\n embed.title = \"\"\n embed.description = f'Queued [{track[\"info\"][\"title\"]}]({track[\"info\"][\"uri\"]}) [{ctx.message.author.mention}]'\n length = track[\"info\"][\"length\"]\n\n # You can attach additional information to audiotracks through kwargs, however this involves\n # constructing the AudioTrack class yourself.\n track = lavalink.models.AudioTrack(\n track, requester=ctx.author.id, recommended=True, length=length)\n player.add(requester=ctx.author.id, track=track)\n\n # Save text channel in which bot command was sent\n # for further reply\n self.preferred_channels[str(ctx.guild.id)] = ctx.message.channel.id\n\n await ctx.send(embed=embed)\n\n # We don't want to call .play() if the player is playing as that will effectively skip\n # the current track.\n if not player.is_playing:\n await player.play()", "async def get_playlist(self, part=\"snippet\", max_results=7, playlist_id=\"\", playlist_url=\"\"):\n\n url 
= self.url_api.get_playlist_url(playlist_id, part, max_results, playlist_url)\n\n response = await self.session.get(url)\n search_results = await response.json()\n return search_results", "def spotify_track_search(query: str, access_token: str) -> dict:\n response = requests.get(\n \"https://api.spotify.com/v1/search?q={}&type=track\".format(query),\n headers={\"Authorization\": \"Bearer {}\".format(access_token)},\n )\n if (\n response.status_code == 200\n and \"tracks\" in response.text\n and \"items\" in response.text\n ):\n return json.loads(response.text)[\"tracks\"][\"items\"]\n return {\"error\": response.reason, \"status\": response.status_code}", "def search(self, query):\n URL = \"https://www.youtube.com/results\"\n r = requests.get(URL, params={'search_query': query})\n results = re.findall(r'href=\\\"\\/watch\\?v=(.{11})', r.content)\n self.results = results[::2]\n return self.results", "def pl_search(term, page=0, splash=True, is_user=False):\n if not term or len(term) < 2:\n g.message = c.r + \"Not enough input\" + c.w\n g.content = generate_songlist_display()\n return\n\n if isinstance(term, dict):\n is_user = term[\"is_user\"]\n term = term[\"term\"]\n\n if splash:\n g.content = logo(c.g)\n prog = \"user: \" + term if is_user else term\n g.message = \"Searching playlists for %s\" % c.y + prog + c.w\n screen_update()\n\n if is_user:\n ret = channelfromname(term)\n if not ret: # Error\n return\n user, channel_id = ret\n\n else:\n # playlist search is done with the above url and param type=playlist\n logging.info(\"playlist search for %s\", prog)\n max_results = min(getxy().max_results, 50) # Limit for playlists command\n qs = generate_search_qs(term, page, result_count=max_results)\n qs['type'] = 'playlist'\n if 'videoCategoryId' in qs:\n del qs['videoCategoryId'] # Incompatable with type=playlist\n\n pldata = call_gdata('search', qs)\n id_list = [i.get('id', {}).get('playlistId')\n for i in pldata.get('items', ())]\n # page info\n get_page_info_from_json(pldata, len(id_list))\n\n qs = {'part': 'contentDetails,snippet',\n 'maxResults': 50}\n\n if is_user:\n if page:\n qs['pageToken'] = token(page)\n qs['channelId'] = channel_id\n else:\n qs['id'] = ','.join(id_list)\n\n pldata = call_gdata('playlists', qs)\n playlists = get_pl_from_json(pldata)\n\n if playlists:\n g.last_search_query = {\"playlists\": {\"term\": term, \"is_user\": is_user}}\n g.browse_mode = \"ytpl\"\n g.current_page = page\n g.ytpls = playlists\n g.message = \"Playlist results for %s\" % c.y + prog + c.w\n g.content = generate_playlist_display()\n\n else:\n g.message = \"No playlists found for: %s\" % c.y + prog + c.w\n g.current_page = 0\n g.content = generate_songlist_display(zeromsg=g.message)", "async def search(self, ctx: commands.Context, *, query: t.Optional[str]) -> None:\n if query is None:\n # Maybe the user didn't know to pass in a query?\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.description = (\n \"No query passed in. 
Try passing in something: `$search arabic music`\"\n )\n embed.set_footer(\n text=\"See $help voice for more commands.\", icon_url=Icons.info\n )\n return await ctx.send(embed=embed)\n\n if (results := await self.get_tracks(query, True, False)) is not None:\n # Ensure that we're connected before playing.\n await ctx.invoke(self.connect, channel=None)\n player = self.get_player(ctx.guild)\n if not player.is_connected:\n return\n\n embed = discord.Embed(colour=Colours.regular, timestamp=Embeds.now())\n embed.set_footer(\n text=f\"Showing 5/{len(results)} results.\",\n icon_url=ctx.author.avatar_url,\n )\n embed.description = \"\"\n results = results[:5]\n\n for index, track in enumerate(results, 1):\n m, s = self.get_formatted_length(track.length, True)\n embed.description += (\n f\"**{index}**. [{track.title}]({track.uri}) ({m}:{s})\\n\"\n )\n\n # Get a integer selection using Choice.prompt().\n if (\n choice := await Choices.prompt(\n ctx=ctx, embed=embed, n=5, author_only=True\n )\n ) is None:\n if player.queue.empty:\n await ctx.invoke(self.disconnect)\n return\n\n embed = discord.Embed(\n title=\"Now queued.\" if player.is_playing else \"Now playing.\",\n description=f\"[{results[choice].title}]({results[choice].uri})\",\n colour=Colours.regular,\n timestamp=Embeds.now(),\n )\n\n m, s = self.get_formatted_length(results[choice].length, False)\n embed.set_footer(\n text=f\"Track goes for {m} minutes and {s} seconds.\",\n icon_url=ctx.author.avatar_url,\n )\n if results[choice].thumb is not None:\n embed.set_thumbnail(url=results[choice].thumb)\n await ctx.send(embed=embed)\n\n player.queue.add_tracks(results[choice])\n if not player.is_playing:\n await player.playback()\n else:\n fail = Embeds.status(success=False, desc=\"Failed to find any results.\")\n await ctx.send(embed=fail)", "def test_ms_playlist_search(helpers):\n item_from_xml, item_from_dict = common_tests(\n MSAlbumList,\n MS_PLAYLIST_SEARCH_XML,\n MS_PLAYLIST_SEARCH_DICT,\n \"00020064playlistsearch:Dans &\",\n helpers,\n )\n getter_attributes_test(\n \"uri\", item_from_xml, item_from_dict, MS_PLAYLIST_SEARCH_DICT[\"uri\"]\n )", "def song_search_matching(chart_song, query):\n song_searches = song_search(query, NUM_SONG_SEARCH_RESULTS)\n if 'error' in song_searches:\n print('>>> error:', song_searches['error'])\n return\n\n songs = []\n # print(song_searches)\n for s in song_searches['songs']:\n # print('test song:', s)\n performers = ' '.join(x['name'] for x in s['performers']).lower()\n\n print('checking performers:', performers, 'vs.', chart_song.artist.lower())\n print('checking titles:', '\"' + s['title']['name'] + '\"', 'vs.', '\"' + chart_song.title + '\"')\n diff1 = fuzz.token_set_ratio(chart_song.artist.lower(), performers)\n diff2 = difflib.SequenceMatcher(\n None,\n a=s['title']['name'].lower(),\n b=chart_song.title.lower()\n ).ratio()\n print('performer score:', diff1, 'and title score:', diff2)\n if diff1 >= 65 and diff2 > 0.75:\n songs.append(s)\n print('song passed with diff performers of', diff1, 'and diff title of', diff2)\n if diff1 <= 75 or diff2 < 0.85:\n print('NOTE impartial match?', s, 'for', chart_song)\n\n return songs", "def search_multiple_tracks(search_query, sp):\r\n \r\n # List to store the track ids\r\n track_ids = []\r\n # List to store the track names and artists\r\n tracks = []\r\n\r\n #Search for 10 results in the Spotify API given a search querry\r\n results = sp.search(q = search_query ,limit=10)\r\n results = results['tracks']['items']\r\n\r\n # Extract the track id's, names and artists 
for all the search results\r\n for i in range(len(results)):\r\n\r\n # Get track id, artist and name\r\n track_id = results[i]['id']\r\n artist = results[i]['artists'][0]['name']\r\n track_name = results[i]['name']\r\n\r\n # Get a string with the artist and track name\r\n track = artist + ' - ' + track_name\r\n\r\n # Append the track id's and track name/artist to the list\r\n track_ids.append(track_id)\r\n tracks.append(track)\r\n\r\n # Make a dictionary of the track id and track name/artist list.\r\n return dict(zip(tracks,track_ids))", "async def play(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = query.strip('<>')\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice channel :no_entry:\")\n else:\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n if not url_re.match(query):\n query = \"ytsearch:{}\".format(query)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n s=discord.Embed()\n if results[\"loadType\"] == \"PLAYLIST_LOADED\":\n tracks = results[\"tracks\"]\n for track in tracks:\n player.add(requester=ctx.author.id, track=track)\n s.description = \"Enqueued {} with **{}** tracks <:done:403285928233402378>\".format(results['playlistInfo']['name'], len(tracks))\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n else:\n track = results[\"tracks\"][0]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()", "def search_song(self, name):\n self.logger.debug('Searched for Song: {}'.format(name))\n results = self.sp.search(q='track:' + name, type='track')\n songs = [song for song in results['tracks']['items']]\n i = 1\n songs_ls = []\n table_ls = []\n for song in songs:\n table_ls.append([i,\n song['name'][0:20].strip(),\n song['album']['name'][0:20].strip(),\n \"%0.2f\" % (song['duration_ms'] / 60000),\n song['popularity']])\n songs_ls.append(song['uri'])\n i = i + 1\n return songs_ls, table_ls", "def process_search_async(self, call):\n if \"query\" not in call.data:\n _LOGGER.error(\"No text to search\")\n return\n global 
G_SPOTIFY_FOUND\n G_SPOTIFY_FOUND = []\n search_text = call.data[\"query\"]\n\n self.refresh_spotify_instance()\n\n # Don't true search when token is expired\n if self._oauth.is_token_expired(self._token_info):\n _LOGGER.warning(\"Spotify failed to update, token expired.\")\n return\n\n titles = [ais_global.G_EMPTY_OPTION]\n # artist\n results = self._spotify.search(q='artist:' + search_text, type='artist')\n titles.extend(self.get_list_from_results(results, 'artist'))\n # album\n results = self._spotify.search(q='album:' + search_text, type='album')\n titles.extend(self.get_list_from_results(results, 'album'))\n # playlist\n results = self._spotify.search(q='playlist:' + search_text, type='playlist')\n titles.extend(self.get_list_from_results(results, 'playlist'))\n\n # Update input_select values:\n yield from self.hass.services.async_call(\n 'input_select',\n 'set_options', {\n \"entity_id\": \"input_select.ais_music_track_name\",\n \"options\": titles})\n\n if len(G_SPOTIFY_FOUND) > 0:\n text = \"Znaleziono: %s, wล‚ฤ…czam pierwszy: %s\" % (\n str(len(G_SPOTIFY_FOUND)), G_SPOTIFY_FOUND[0][\"title\"])\n else:\n text = \"Brak wynikรณw na Spotify dla zapytania %s\" % search_text\n yield from self.hass.services.async_call(\n 'ais_ai_service', 'say_it', {\n \"text\": text\n })\n yield from self.hass.services.async_call(\n 'input_select',\n 'select_option', {\n \"entity_id\": \"input_select.ais_music_track_name\",\n \"option\": G_SPOTIFY_FOUND[0][\"title\"]})", "def search_youtube_music_video(self, artist, name, duration_ms):\n\t\t# return val : false until proven wrong\n\t\tsuccess = False # could not find matching youtube video\n\t\terror_des = \"none\"\n\n\t\tself.authorize()\n\n\n\n\t\t# build search params aka q\n\t\t#finders = ['vevo','lyrics']\t\t\t# words that we want to see in our search\n\t\ttry:\n\t\t\tsearch_response_j = self.youtube.search().list(\n\t\t\t\tvideoCategoryId = 10,\n\t\t\t\ttype = 'video',\n\t\t\t\torder = 'relevance',\n\t\t\t\t#q = '{} {} lyrics'.format(artist, name),\n\t\t\t\tq = artist + ' ' + name + ' lyrics',\n\t\t\t\tpart = \"snippet\",\n\t\t\t\tmaxResults = 5\n\t\t\t).execute()\n\n\n\t\t\tyoutube_videos_j = search_response_j['items']\n\n\t\t\tif len(youtube_videos_j) == 0:\t# NO results\n\t\t\t\terror_des = 'Sorry! 
Could not find track to download...'\n\t\t\t\tsuccess = False\n\t\t\telse:\t\t\t\t\n\n\t\t\t\t######################################################################################################\n\t\t\t\t# set default best video to the first relevant video\n\t\t\t\t# will be overwritten in next block of code if a better option is found\n\t\t\t\t# if self.check_video_pruning(artist, name, youtube_videos_j[0]['snippet']['title']) is False:\n\t\t\t\t######################################################################################################\n\t\t\t\t# youtube_video_best = {\n\t\t\t\t# \t'video_id': youtube_videos_j[0]['id']['videoId'],\n\t\t\t\t# \t'title': youtube_videos_j[0]['snippet']['title']\n\t\t\t\t# }\n\n\n\t\t\t\t# Let's see if we can find a better video then the default: 0\n\t\t\t\tfor index,video in enumerate(youtube_videos_j):\n\t\t\t\t\tsnippet = video['snippet']\n\t\t\t\t\tchannel_title = snippet['channelTitle']\n\t\t\t\t\ttitle = snippet['title']\n\n\t\t\t\t\t# weed out covers,vevo, live videos\n\t\t\t\t\t# not yet implemented : weed out videos that are not of the same duration as the spotify track\n\t\t\t\t\tif self.check_video_pruning(artist, name, title):\t# ensure that the artist or track name does not actually include the weeders Ex. live hous\n\t\t\t\t\t\tprint '==========\\nTESTING!!!!\\n=========='\n\t\t\t\t\t\tprint 'weeding out video: '\n\t\t\t\t\t\tprint 'name: ', name\n\t\t\t\t\t\tprint 'artist: ', artist\n\t\t\t\t\t\tprint 'title: ', title\n\n\t\t\t\t\t\tcontinue\t# skip video because it contains a weed word\n\n\t\t\t\t\t# select first video that is not pruned\n\t\t\t\t\telse:\n\n\t\t\t\t\t\tyoutube_video_best = {\n\t\t\t\t\t\t \t'video_id': youtube_videos_j[index]['id']['videoId'],\n\t\t\t\t\t\t \t'title': youtube_videos_j[index]['snippet']['title']\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tbreak\n\n\n\t\t\t\t\t#####################################################################################\n\t\t\t\t\t# check if vevo channel\n\t\t\t\t\t# check if channel title owned by artist\n\t\t\t\t\t#####################################################################################\n\t\t\t\t\t# if 'vevo' in channel_title.lower() or artist in channel_title.lower():\n\t\t\t\t\t# \tprint '==========\\nVEVO Found!!!!\\n=========='\n\n\t\t\t\t\t# \tyoutube_video_best['video_id'] = video['id']['videoId']\n\t\t\t\t\t# \tyoutube_video_best['title'] = title\n\n\t\t\t\t\t# \tbreak\t# stop looking\n\n\n\t\t\t\tsuccess = True\n\n\t\texcept HttpError, e:\n\t\t\terror_des = \"An HTTP error %d occurred:\\n%s\" % (e.resp.status, e.content)\n\t\t\tsuccess = False\n\t\t\tprint error_des\n\n\n\t\treturn {\n\t\t\t\t\t'success' : success,\n\t\t\t\t\t'error_des' : error_des,\n\t\t\t\t\t'youtube_video' : youtube_video_best\n\t\t\t\t}", "def scrape(search_title, search_artist, get_top_result=False):\n search_artist = search_artist.replace(\" \", \"+\").replace(\"&\", \"and\")\n search_title = search_title.replace(\" \", \"+\").replace(\"&\", \"and\")\n\n search_query = search_title + \"+\" + search_artist + \"+\\\"auto-generated+by+youtube\\\"\"\n # youtube_url = \"https://www.youtube.com/results?sp=EgIQAQ%253D%253D&search_query=\" + search_query\n youtube_url = \"https://www.youtube.com/results?search_query=\" + search_query\n header = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}\n\n try:\n response = requests.get(youtube_url, headers=header)\n except requests.exceptions.ConnectionError:\n return 
None, None\n\n content = response.content\n soup = BeautifulSoup(content, \"html.parser\")\n title = []\n uploader = []\n ref = []\n all_title_tags = soup.find_all(\"h3\", attrs={\"class\": \"yt-lockup-title\"})\n all_uploader_tags = soup.find_all(\"div\", attrs={\"class\": \"yt-lockup-byline\"})\n\n for h3 in all_title_tags:\n try:\n title.append(h3.find('a').text)\n ref.append(h3.find('a')['href'])\n except TypeError:\n return None, None\n\n for div in all_uploader_tags:\n try:\n uploader.append(div.text)\n except TypeError:\n pass\n\n if get_top_result:\n # Return best matching link and its duration\n best_title = rank_results(title, search_title, search_artist, uploader)#, search_artist)\n # print(\"Best result is: '\"+str(title[best_title])+\"' at index \"+str(best_title))\n final_url = 'https://www.youtube.com'+ref[best_title]\n\n # video_length = get_video_time(final_url)\n # print(\"Video length is \"+str(video_length)+' ms long')\n return str(title[best_title]), ref[best_title]\n\n # if get_top_result:\n # return [title[0], ref[0]]\n return title, ref", "def search_spotify(self, artist_name):\n if VERBOSE:\n print (\"\\nSearching for artist on Spotify: \" + artist_name)\n try:\n result = self.sp.search(q='artist:' + artist_name, type='artist')\n except spotipy.client.SpotifyException:\n print(\"ERROR: Couldnt not find artist: %s. Trying again.\" % artist_name)\n try:\n result = self.sp.search(q='artist:' + artist_name, type='artist')\n except spotipy.client.SpotifyException as error:\n print(\"ERROR: Failed to search twice. Error below:\")\n print(error)\n result = None\n except ValueError as error:\n print(\"ERROR: Failure while searching Spotify for artist: %s\" % artist_name)\n print(error)\n result = None\n return result", "def SearchPlaylist(self, playlistName):\n self.__playlistName = playlistName\n self.__itemsFoundList = []\n for item in self.__playlists:\n if self.__playlistName in item:\n self.__itemsFoundList.append(item)\n return sorted(self.__itemsFoundList)", "def singleQuery(self, query):\n try:\n results = self.sp.search(query, limit=1)\n firstResult = self.getFirstResult(results)\n song = firstResult['uri']\n returnMessage = self.reportSongAddedToPlaylist(firstResult)\n self.addSongsToPlaylist(song)\n return firstResult\n except spotipy.SpotifyException as se:\n self.authenticate()\n return self.singleQuery(query)", "def search(search_term):\r\n querystring = apiurl_musixmatch + \"track.search?q_track=\" + urllib2.quote(\r\n search_term) + \"&apikey=\" + apikey_musixmatch + \"&format=plain\"\r\n try:\r\n request = urllib2.Request(querystring)\r\n # timeout set 4 to seconds; automatically retries\r\n response = urllib2.urlopen(request, timeout=4)\r\n # raw = response.read()\r\n print colored.green(\"Starting\", bold=12)\r\n all_data = ''\r\n while True:\r\n do_task()\r\n print '\\b.',\r\n sys.stdout.flush()\r\n data = response.read(2048)\r\n if not data:\r\n break\r\n all_data += data\r\n time.sleep(0.4)\r\n print \"\\n\"\r\n json_obj = json.loads(all_data.decode(\"utf-8\"))\r\n body = json_obj['message']['body']['track_list']\r\n list_of_all_songs = []\r\n track_table = PrettyTable(['Song ID', 'Song Name', 'Artist Name'])\r\n for result in body:\r\n song_details = []\r\n result_id = result['track']['track_id']\r\n title = result['track']['track_name']\r\n artist_name = result['track']['artist_name']\r\n song_details.insert(0, result_id)\r\n song_details.insert(1, title)\r\n song_details.insert(2, artist_name)\r\n list_of_all_songs.append(song_details)\r\n 
track_table.add_row(\r\n [result_id, title, artist_name])\r\n print colored.yellow(track_table, bold=12)\r\n except socket.timeout:\r\n print 'Connection timed out, try again'", "def get_playlist(speaker, name):\n playlists = speaker.get_sonos_playlists(complete_result=True)\n # Strict match\n for playlist in playlists:\n if name == playlist.title:\n logging.info(\n \"Found playlist '{}' using strict match\".format(playlist.title)\n )\n return playlist\n # Fuzzy match\n name = name.lower()\n for playlist in playlists:\n if name in playlist.title.lower():\n logging.info(\"Found playlist '{}' using fuzzy match\".format(playlist.title))\n return playlist\n return None", "def search(\n\tlog,\n\tverbose,\n\tquiet,\n\tusername,\n\tdevice_id,\n\tyes,\n\tfilters\n):\n\n\tconfigure_logging(verbose - quiet, username, log_to_file=log)\n\n\tlogger.info(\"Logging in to Google Music\")\n\tmc = google_music.mobileclient(username, device_id=device_id)\n\n\tif not mc.is_authenticated:\n\t\tsys.exit(\"Failed to authenticate client.\")\n\n\tsearch_results = filter_songs(mc.songs(), filters)\n\tsearch_results.sort(\n\t\tkey=lambda song: (\n\t\t\tsong.get('artist', ''),\n\t\t\tsong.get('album', ''),\n\t\t\tsong.get('trackNumber', 0)\n\t\t)\n\t)\n\n\tif search_results:\n\t\tresult_num = 0\n\t\ttotal = len(search_results)\n\t\tpad = len(str(total))\n\n\t\tconfirm = (\n\t\t\tyes\n\t\t\tor input(f\"\\nDisplay {len(search_results)} results? (y/n) \") in (\"y\", \"Y\")\n\t\t)\n\n\t\tif confirm:\n\t\t\tfor result in search_results:\n\t\t\t\tresult_num += 1\n\n\t\t\t\ttitle = result.get('title', \"<empty>\")\n\t\t\t\tartist = result.get('artist', \"<empty>\")\n\t\t\t\talbum = result.get('album', \"<empty>\")\n\t\t\t\tsong_id = result['id']\n\n\t\t\t\tlogger.info(\n\t\t\t\t\tf\"{result_num:>{pad}}/{total} {title} -- {artist} -- {album} ({song_id})\"\n\t\t\t\t)\n\telse:\n\t\tlogger.info(\"No songs found matching query\")\n\n\tmc.logout()\n\tlogger.info(\"All done!\")", "def extract_track_url(search):\n\n if 'tracks' in search:\n tracks = search['tracks']\n if 'items' in tracks:\n items = tracks['items']\n # take the first url we can find\n for item in items:\n if 'external_urls' in item:\n external_urls = item['external_urls']\n if 'spotify' in external_urls:\n url = external_urls['spotify']\n return url", "def main():\n\tdescription = \"Utility to search for spotify by song, artist or song ID and to create playlists based off of song ID's\"\n\tusage = \"search.py [-h] [-s SONG | -a ARTIST | -i ID] [-p PLAYLIST & -u USERNAME & -i ID & -d DESCRIPTION]\"\n\tparser = argparse.ArgumentParser(description=description, usage=usage)\n\tgroup = parser.add_mutually_exclusive_group()\n\tgroup.add_argument(\"-s\", \"--song\", nargs=1, required='--argument' in sys.argv, help=\"Search for a song by name\")\n\tgroup.add_argument(\"-a\", \"--artist\", nargs=1, required='--argument' in sys.argv,\n\t\t\t\t\t help=\"Search for songs from an Artist\\n\")\n\tgroup.add_argument(\"-i\", \"--id\", nargs=1, required='--argument' in sys.argv,\n\t\t\t\t\t help=\"Search for song based on ID or create playlist based off of song ID\")\n\tparser.add_argument(\"-p\", \"--playlist\", nargs=1, required='--id' in sys.argv,\n\t\t\t\t\t\thelp=\"Name of the playlist to be created. MUST be used with -i/--id\")\n\tparser.add_argument(\"-d\", \"--description\", nargs=1, required='--argument' in sys.argv,\n\t\t\t\t\t\thelp=\"Playlist Description. 
Must be used with -p,-i and -u\")\n\tparser.add_argument(\"-u\", \"--username\", nargs=1, required='--argumnet' in sys.argv,\n\t\t\t\t\t\thelp=\"Spotify Username. Must be used with -p, -i and -d\")\n\targs = parser.parse_args()\n\t# print(args)\n\n\tsolr = Solr_Query()\n\n\tresponse = None\n\n\tif args.song:\n\t\tprint(\"Searching for song:\", args.song[0].strip())\n\t\tsong_name = args.song[0].strip()\n\t\tsolr.set_search_type(\"songs\")\n\t\tquery = solr.set_query(song_name)\n\t\tresponse = solr.exec_query(query)\n\t\tsolr.print_search_results(response)\n\n\tif args.artist:\n\t\tprint(\"Searching for songs by artist: \", args.artist[0].strip())\n\t\tartist = args.artist[0].strip()\n\t\tsolr.set_search_type(\"artists\")\n\t\tquery = solr.set_query(artist)\n\t\tresponse = solr.exec_query(query)\n\t\tsolr.print_search_results(response)\n\n\t# Still trying to figure this one out. The getmorelike this funcionality is harder than we thought\n\tif args.playlist and args.id and args.description and args.username:\n\t\tprint(\"Creating a playlist based off of song ID:\", args.id[0].strip())\n\t\tid = args.id[0].strip()\n\t\tdescription = args.description[0].strip()\n\t\tplaylist = args.playlist[0].strip()\n\t\tusername = args.username[0].strip()\n\n\t\tsolr.set_search_type(\"id\")\n\t\tquery = solr.set_query(id)\n\t\tresponse = solr.exec_query(query)\n\n\t\t# Create a playlist create object to find similar songs and create the playlist\n\t\tcreator = Playlist_Create(username, playlist, description)\n\t\tcreator.authenticate() # authenticate using the username passed in\n\t\tresponse = creator.get_similar_songs(response)\n\t\tsongs = creator.get_song_ids(response)\n\t\tplaylist_id = creator.create_playlist()\n\t\tcreator.add_songs(playlist_id, songs)\n\n\n\n\telif args.playlist and not args.id:\n\t\tparser.error(\"Must input a song ID to create a playlist with!\")\n\telif args.playlist and not args.description:\n\t\tparser.error(\"Must input a playlist description\")\n\telif args.playlist and not args.username:\n\t\tparser.error(\"Need your username to create the playlist\")\n\n\tif args.id:\n\t\tprint(\"Searching for song with ID:\", args.id[0].strip())\n\t\tid = args.id[0].strip()\n\t\tsolr.set_search_type(\"id\")\n\t\tquery = solr.set_query(id)\n\t\tresponse = solr.exec_query(query)\n\t\tsolr.print_search_results(response)\n\n\tprint(\"\\nDone!\")", "def search():\n\tif not request.vars.search_term:\n\t\tredirect(URL('index'))\n\tterm = request.vars.search_term\n\torigterm = term\n\tterm = term.replace(' ','|')\n\tartists = db.executesql(\"select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsvector(m1.art_name),to_tsquery('\"+term+\"')) rank from art_info m1 where to_tsvector('english',m1.art_name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\talbums = db.executesql(\"select distinct(m1.id),m2.name,m1.art_id,m1.art_name,m1.rel_type,m1.count,ts_rank(to_tsvector(m2.name),to_tsquery('\"+term+\"')) rank from rel_art m1, release_name m2, release_group m3 where m3.name = m2.id and m3.id = m1.id and to_tsvector('english',m2.name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\tsongs = db.executesql(\"select m2.id, m1.name, m3.art_id, m3.art_name, m3.rel_id, m3.rel_name from track_name m1, recording m2, rec_rel_art m3 where m1.id = m2.name and m2.id = m3.rec_id and lower(m1.name) LIKE lower('%%\"+origterm+\"%%') limit 20;\")\n\treturn dict(songs=songs, albums=albums, artists=artists)" ]
[ "0.7246209", "0.7094535", "0.6902199", "0.6900072", "0.6880712", "0.68578", "0.68566424", "0.6802174", "0.6751635", "0.66845226", "0.6641539", "0.66020864", "0.65797824", "0.656957", "0.65241736", "0.63375807", "0.6305814", "0.62658346", "0.6250762", "0.6225917", "0.6218487", "0.61956936", "0.61862093", "0.61778945", "0.6136296", "0.61314446", "0.61313164", "0.6114399", "0.6057559", "0.6055477" ]
0.7926087
0
Test POST /fn/ for task creation.
async def test_fn_create(app: Quart) -> None: test_client = app.test_client() response = await test_client.post( "/fn", json=VALID_TASK_BASIC ) assert response.status_code == 200 response_json = await response.get_json() assert response_json == VALID_TASK_BASIC
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_anonymous_01_newtask(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n print res.data\r\n data = json.loads(res.data)\r\n assert data['info'], data", "def test_create_task(self):\n response =self.client.post(reverse('todos'),self.data,format=\"json\")\n self.assertEqual(201,response.status_code)", "def test_user_01_newtask(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n assert data['info'], data\r\n self.signout()", "def test_post_task(self):\n resp = self.app.post('/api/2/inf/esrs',\n headers={'X-Auth': self.token},\n json={'name': \"myESRS\", 'image': \"3.28\", 'network': \"someNetwork\"})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def test_post(self):\n self.client.force_login(self.john)\n\n with self.subTest(\"Test start task success\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_201_CREATED,\n \"Gamer cant create the task via API!\"\n )\n\n with self.subTest(\"Start the same task again fail\"):\n resp = self.client.post(self.URL, data={'taskType': 1})\n\n self.assertEqual(\n resp.status_code,\n status.HTTP_409_CONFLICT\n )", "def test_newtask(self):\r\n app = AppFactory.create()\r\n TaskFactory.create_batch(2, app=app)\r\n user = UserFactory.create()\r\n\r\n # anonymous\r\n # test getting a new task\r\n res = self.app.get('/api/app/%s/newtask' % app.id)\r\n assert res, res\r\n task = json.loads(res.data)\r\n assert_equal(task['app_id'], app.id)\r\n\r\n # The output should have a mime-type: application/json\r\n assert res.mimetype == 'application/json', res\r\n\r\n # as a real user\r\n url = '/api/app/%s/newtask?api_key=%s' % (app.id, user.api_key)\r\n res = self.app.get(url)\r\n assert res, res\r\n task = json.loads(res.data)\r\n assert_equal(task['app_id'], app.id)\r\n\r\n # Get NotFound for an non-existing app\r\n url = '/api/app/5000/newtask'\r\n res = self.app.get(url)\r\n err = json.loads(res.data)\r\n err_msg = \"The app does not exist\"\r\n assert err['status'] == 'failed', err_msg\r\n assert err['status_code'] == 404, err_msg\r\n assert err['exception_cls'] == 'NotFound', err_msg\r\n assert err['target'] == 'app', err_msg\r\n\r\n # Get an empty task\r\n url = '/api/app/%s/newtask?offset=1000' % app.id\r\n res = self.app.get(url)\r\n assert res.data == '{}', res.data", "def test_create_task_with_name_success(\n self,\n mock_background_tasks\n ):\n task_name = \"task_with_arbitrary_name\"\n task_category = \"DEFAULT\"\n\n rv = TEST_CLIENT.post(\n TASK_ROUTE, json={\"name\": task_name, \"category\": task_category}\n )\n result = rv.json()\n\n expected = {\n \"arguments\": None,\n \"category\": \"DEFAULT\",\n \"commands\": None,\n \"cpuLimit\": models.task.TASK_DEFAULT_CPU_LIMIT,\n \"cpuRequest\": models.task.TASK_DEFAULT_CPU_REQUEST,\n \"createdAt\": mock.ANY,\n \"dataIn\": None,\n \"dataOut\": None,\n \"description\": None,\n \"docs\": None,\n \"hasNotebook\": True,\n \"image\": models.task.TASK_DEFAULT_EXPERIMENT_IMAGE,\n \"memoryLimit\": models.task.TASK_DEFAULT_MEMORY_LIMIT,\n \"memoryRequest\": models.task.TASK_DEFAULT_MEMORY_REQUEST,\n \"name\": task_name,\n \"parameters\": [],\n \"readinessProbeInitialDelaySeconds\": 
models.task.TASK_DEFAULT_READINESS_INITIAL_DELAY_SECONDS,\n \"tags\": [\"DEFAULT\"],\n \"updatedAt\": mock.ANY,\n \"uuid\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def post(self):\n try:\n req = api.payload\n result = create_task(\n get_db(),\n req[\"task\"],\n date.fromisoformat(req[\"due_by\"]),\n Status[req[\"status\"]],\n )\n return task_to_dict(result), 201\n except ValueError:\n api.abort(422, \"Invalid request parameters\")", "def test_create_task_without_name_success(\n self,\n mock_background_tasks\n ):\n task_category = \"DEFAULT\"\n\n rv = TEST_CLIENT.post(TASK_ROUTE, json={\"category\": task_category})\n result = rv.json()\n\n expected = {\n \"arguments\": None,\n \"category\": \"DEFAULT\",\n \"commands\": None,\n \"cpuLimit\": \"2000m\",\n \"cpuRequest\": \"100m\",\n \"createdAt\": mock.ANY,\n \"dataIn\": None,\n \"dataOut\": None,\n \"description\": None,\n \"docs\": None,\n \"hasNotebook\": True,\n \"image\": models.task.TASK_DEFAULT_EXPERIMENT_IMAGE,\n \"memoryLimit\": models.task.TASK_DEFAULT_MEMORY_LIMIT,\n \"memoryRequest\": models.task.TASK_DEFAULT_MEMORY_REQUEST,\n \"name\": \"Tarefa em branco - 1\",\n \"parameters\": [],\n \"readinessProbeInitialDelaySeconds\": models.task.TASK_DEFAULT_READINESS_INITIAL_DELAY_SECONDS,\n \"tags\": [\"DEFAULT\"],\n \"updatedAt\": mock.ANY,\n \"uuid\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def test_create_task_empty_request_body_success(\n self,\n mock_background_tasks,\n ):\n rv = TEST_CLIENT.post(TASK_ROUTE, json={})\n self.assertEqual(rv.status_code, 200)", "def test_create_task_view(self):\n create_task_url = reverse('create_task')\n response = self.client.get(create_task_url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'tasks/task_form.html')\n user = User.objects.get(username='ragsagar')\n data = {\n 'title': 'Test task 2',\n 'priority': 1,\n 'module': 'HRMS',\n 'due_date': datetime.date(2014, 4, 5),\n 'type': 1,\n 'description': 'This is a description',\n 'assigned_user_id': user.pk,\n }\n old_count = Task.objects.all().count()\n response = self.client.post(create_task_url, data)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Task.objects.all().count(), old_count+1)", "def new_task():\n req = request.json\n if 'cmd' in req:\n id = mongo.db.tasks.insert({\n 'cmd' : req['cmd'],\n 'status' : 'Not started'\n })\n\n response = {'id' : str(id)}\n return response", "def create_task():", "def test_04_new_task(self):\r\n url = '/api/app/1/newtask'\r\n self.check_limit(url, 'get', 'app')", "def create_task():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/stories/{1}/tasks\".format(STORED_ID['project_id'], STORED_ID['story_id']))\n name = \"\".join(choices(string.ascii_letters, k=6))\n body = {\"description\": name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n try:\n STORED_ID['task_id'] = response.json()['id']\n except KeyError:\n LOGGER.info(response.json())", "def create(task):\n tname = task.get(\"tname\")\n # user cannot create task without name\n\n # Does the new task have a name? 
If no we can't insert it.\n # Can we insert it?\n if tname is not None:\n\n # Create a person instance using the schema and the passed in person\n schema = TaskListSchema()\n print(task)\n new_task = schema.load(task, session=db.session).data\n\n # Add the person to the database\n db.session.add(new_task)\n db.session.commit()\n\n # Serialize and return the newly created person in the response\n data = schema.dump(new_task).data\n\n return data, 201\n\n # Otherwise, nope, person exists already\n else:\n abort(409, \"Task needs a name\".format(tname=tname),)", "def test_taskrun_authenticated_post(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app)\r\n data = dict(\r\n app_id=app.id,\r\n task_id=task.id,\r\n info='my task result')\r\n\r\n # With wrong app_id\r\n data['app_id'] = 100000000000000000\r\n datajson = json.dumps(data)\r\n url = '/api/taskrun?api_key=%s' % app.owner.api_key\r\n tmp = self.app.post(url, data=datajson)\r\n err_msg = \"This post should fail as the app_id is wrong\"\r\n err = json.loads(tmp.data)\r\n assert tmp.status_code == 403, err_msg\r\n assert err['status'] == 'failed', err_msg\r\n assert err['status_code'] == 403, err_msg\r\n assert err['exception_msg'] == 'Invalid app_id', err_msg\r\n assert err['exception_cls'] == 'Forbidden', err_msg\r\n assert err['target'] == 'taskrun', err_msg\r\n\r\n # With wrong task_id\r\n data['app_id'] = task.app_id\r\n data['task_id'] = 100000000000000000000\r\n datajson = json.dumps(data)\r\n tmp = self.app.post(url, data=datajson)\r\n err_msg = \"This post should fail as the task_id is wrong\"\r\n err = json.loads(tmp.data)\r\n assert tmp.status_code == 403, err_msg\r\n assert err['status'] == 'failed', err_msg\r\n assert err['status_code'] == 403, err_msg\r\n assert err['exception_msg'] == 'Invalid task_id', err_msg\r\n assert err['exception_cls'] == 'Forbidden', err_msg\r\n assert err['target'] == 'taskrun', err_msg\r\n\r\n # Now with everything fine\r\n data = dict(\r\n app_id=task.app_id,\r\n task_id=task.id,\r\n info='my task result')\r\n datajson = json.dumps(data)\r\n tmp = self.app.post(url, data=datajson)\r\n r_taskrun = json.loads(tmp.data)\r\n assert tmp.status_code == 200, r_taskrun\r\n\r\n # If the user tries again it should be forbidden\r\n tmp = self.app.post(url, data=datajson)\r\n err_msg = (\"Authorized users should be only allowed to post \\\r\n one task_run per task\")\r\n task_runs = self.app.get('/api/taskrun')\r\n assert tmp.status_code == 403, tmp.data", "def test_task(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n json = self.generate_task_dictionary(tid, state=\"error\")\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, json=json)\n\n task = self.client.site(site).task(tid)\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"error\")", "def create_task(self, unused_parent, task, **kwargs):\n self.uri = task.get('app_engine_http_request').get('relative_uri')\n self.body = task.get('app_engine_http_request').get('body')\n logging.info('Task uri: %r', self.uri)\n logging.info('Task body: %r', self.body)\n return 'fake task'", "def post(self, request, format=None):\n feedback = {\n 'permission': True\n }\n try:\n post_data = request.data\n serializer = task_serializer.InstantTaskSerializer(data=post_data, group=self.get_group())\n if serializer.is_valid():\n task = serializer.save()\n feedback['data'] = {\n 'code': 200,\n 'message': 'Instant task 
creation successfully!',\n 'info': {\n 'task_id': task.pk\n }\n }\n else:\n logger.info('Instant task parameters is not available: {}'.format(serializer.format_errors()))\n feedback['data'] = ErrorCode.parameter_invalid('instant_task_creation',\n reason=serializer.format_errors())\n except natrix_exception.NatrixBaseException as e:\n feedback['data'] = ErrorCode.sp_code_bug('Create instant has a bug: {}'.format(e.get_log()))\n logger.error(e.get_log())\n except Exception as e:\n natrix_exception.natrix_traceback()\n feedback['data'] = ErrorCode.sp_db_fault(str(e))\n\n return JsonResponse(data=feedback)", "def test_create_a_todo(self):\n # hit the API endpoint\n response = self.make_a_request(\n kind=\"post\",\n version=\"v1\",\n data=self.valid_data\n )\n self.assertEqual(response.data, self.valid_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # test with invalid data\n response = self.make_a_request(\n kind=\"post\",\n version=\"v1\",\n data=self.invalid_data\n )\n self.assertEqual(\n response.data[\"message\"],\n \"TODO item requires state, due_date and text\"\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def tasks_create(self, name, labels, bug, resource_type, resources, image_quality, frame_filter, **kwargs):\n url = self.api.tasks\n data = {'name': name,\n 'labels': labels,\n 'bug_tracker': bug,\n 'image_quality': image_quality,\n 'frame_filter': frame_filter\n }\n response = self.session.post(url, json=data)\n response.raise_for_status()\n response_json = response.json()\n log.info('Created task ID: {id} NAME: {name}'.format(**response_json))\n log.info(str(response.json()))\n self.tasks_data(response_json['id'], resource_type, resources)", "def test_create_valid_submission(self):\n with self.client:\n # valid submission registration\n sub_response = register_ok_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['status']=='success')", "def test_taskrun_anonymous_post(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app)\r\n data = dict(\r\n app_id=app.id,\r\n task_id=task.id,\r\n info='my task result')\r\n\r\n # With wrong app_id\r\n data['app_id'] = 100000000000000000\r\n datajson = json.dumps(data)\r\n tmp = self.app.post('/api/taskrun', data=datajson)\r\n err_msg = \"This post should fail as the app_id is wrong\"\r\n err = json.loads(tmp.data)\r\n assert tmp.status_code == 403, tmp.data\r\n assert err['status'] == 'failed', err_msg\r\n assert err['status_code'] == 403, err_msg\r\n assert err['exception_msg'] == 'Invalid app_id', err_msg\r\n assert err['exception_cls'] == 'Forbidden', err_msg\r\n assert err['target'] == 'taskrun', err_msg\r\n\r\n # With wrong task_id\r\n data['app_id'] = task.app_id\r\n data['task_id'] = 100000000000000000000\r\n datajson = json.dumps(data)\r\n tmp = self.app.post('/api/taskrun', data=datajson)\r\n err = json.loads(tmp.data)\r\n assert tmp.status_code == 403, err_msg\r\n assert err['status'] == 'failed', err_msg\r\n assert err['status_code'] == 403, err_msg\r\n assert err['exception_msg'] == 'Invalid task_id', err_msg\r\n assert err['exception_cls'] == 'Forbidden', err_msg\r\n assert err['target'] == 'taskrun', err_msg\r\n\r\n # Now with everything fine\r\n data = dict(\r\n app_id=task.app_id,\r\n task_id=task.id,\r\n info='my task result')\r\n datajson = json.dumps(data)\r\n tmp = self.app.post('/api/taskrun', data=datajson)\r\n r_taskrun = json.loads(tmp.data)\r\n assert tmp.status_code == 200, 
r_taskrun\r\n\r\n # If the anonymous tries again it should be forbidden\r\n tmp = self.app.post('/api/taskrun', data=datajson)\r\n err_msg = (\"Anonymous users should be only allowed to post \\\r\n one task_run per task\")\r\n assert tmp.status_code == 403, err_msg", "def test_task_creation(self):\n Task.objects.filter(status=Task.Status.AWAITING_PROCESSING).delete()\n\n project = self.projects['test_human_and_machine']\n self.assertEqual(Task.objects.filter(project=project).count(),\n 0)\n create_subsequent_tasks(project)\n\n # Human Task was created\n self.assertEqual(Task.objects.filter(project=project).count(),\n 1)\n\n human_step = self.workflow_steps['test_workflow_2']['step4']\n task = Task.objects.get(step=human_step, project=project)\n data = {'submit_key1': 'submit_val1'}\n assign_task(self.workers[0].id, task.id)\n\n # user 0 submits a task\n response = self._submit_assignment(self.clients[0], task.id, data=data)\n self.assertEqual(response.status_code, 200)\n\n # Machine Task was created\n self.assertEqual(Task.objects.filter(project=project).count(),\n 2)\n machine_step = self.workflow_steps['test_workflow_2']['simple_machine']\n machine_task_assignment = (\n TaskAssignment.objects\n .filter(task__step=machine_step,\n task__project=project)[0])\n\n self.assertEqual(machine_task_assignment.status,\n TaskAssignment.Status.SUBMITTED)\n\n self.assertEqual(machine_task_assignment.in_progress_task_data,\n {'json': 'simple'})\n\n self.assertEqual(machine_task_assignment.task.status,\n Task.Status.COMPLETE)", "def add_task():\n # get values from user\n responses = accept_inputs([\"Task label\", \"Short task description\", \"Parent task label\"])\n # insert into db\n query_no_results(\"insert into task values(?, ?, ?)\",\n [responses[\"Task label\"], responses[\"Short task description\"], responses[\"Parent task label\"]])\n print(\"New task created\")", "def fusion_api_create_task(self, body, api=None, headers=None):\n return self.task.create(body, api, headers)", "def test_taskrun_post_with_bad_data(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app)\r\n app_id = app.id\r\n task_run = dict(app_id=app.id, task_id=task.id, info='my task result')\r\n url = '/api/taskrun?api_key=%s' % app.owner.api_key\r\n\r\n # POST with not JSON data\r\n res = self.app.post(url, data=task_run)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'taskrun', err\r\n assert err['action'] == 'POST', err\r\n assert err['exception_cls'] == 'ValueError', err\r\n\r\n # POST with not allowed args\r\n res = self.app.post(url + '&foo=bar', data=task_run)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'taskrun', err\r\n assert err['action'] == 'POST', err\r\n assert err['exception_cls'] == 'AttributeError', err\r\n\r\n # POST with fake data\r\n task_run['wrongfield'] = 13\r\n res = self.app.post(url, data=json.dumps(task_run))\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'taskrun', err\r\n assert err['action'] == 'POST', err\r\n assert err['exception_cls'] == 'TypeError', err", "def post(self):\n data = request.json\n create_testing_scenario(data)\n return None, 201", "def POST_task(self, task_data):\n\t\tif not self.room_id:\n\t\t\tself.POST_room()\n\t\trv = self.POST_data('/api/room/' + self.room_id + '/task', 
data=task_data)\n\t\tself.assertEqual(rv.status_code, 200)\n\t\treturn json.loads(rv.data)['_id']" ]
[ "0.7558174", "0.7519237", "0.7402585", "0.73293424", "0.72627306", "0.7140294", "0.70866376", "0.69574684", "0.6845608", "0.6843559", "0.6759246", "0.67584723", "0.6733653", "0.6723772", "0.6711094", "0.6681573", "0.65603113", "0.65247786", "0.6523374", "0.65187365", "0.6515333", "0.64964676", "0.6482581", "0.6441833", "0.6413659", "0.6395184", "0.63931906", "0.63715756", "0.6355111", "0.63547844" ]
0.84658504
0
Simplest train test with using named output
def test_simple_train_named_output(self): data_source = nemo.backends.pytorch.tutorials.RealFunctionDataLayer(n=10000, batch_size=128,) trainable_module = nemo.backends.pytorch.tutorials.TaylorNet(dim=4) loss = nemo.backends.pytorch.tutorials.MSELoss() data = data_source() self.assertEqual( first=type(data).__name__, second='RealFunctionDataLayerOutput', msg='Check output class naming coherence.', ) y_pred = trainable_module(x=data.x) loss_tensor = loss(predictions=y_pred, target=data.y) optimizer = nemo.backends.pytorch.actions.PtActions() optimizer.train( tensors_to_optimize=[loss_tensor], optimizer="sgd", optimization_params={"lr": 0.0003, "num_epochs": 1}, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_training(self):\n\t\tpass", "def test_machine_learning():", "def test_training(self):\n warnings.filterwarnings('ignore')\n example_args = example_args_parser()\n example_args.unittest = True\n # prepare data\n example_args.stage = 'prepare'\n example_wrapper(example_args)\n # train goalDNN model\n example_args.stage = 'train'\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # train cVAE model\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # train gcVAE model\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # cVAE harmonization\n example_args.stage = 'predict'\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # gcVAE harmonization\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # goalDNN prediction\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # XGBoost\n example_args.stage = 'train'\n example_args.model = 'XGBoost'\n example_wrapper(example_args)\n # compare with reference results\n check_args = check_results_args_parser()\n check_args.unittest = True\n check_reference_results(check_args)", "def test_predictor():", "def train():\n pass", "def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')", "def test_train(self):\n print \"x=\",self.trainer.train()", "def train():\n # YOUR TRAINING CODE GOES HERE", "def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)", "def model_pipeline_test(model_name):\n dir_path = config.DataDirectory.DEV_DIR\n model, path = load_model(model_name)\n model.load_state_dict(torch.load(path))\n model.eval()\n model.init_hidden()\n\n test_dir = os.path.join(\n dir_path, model_name.LABEL, config.TrainingTestingSplitDirectory.TEST_DIR\n )\n\n assert os.path.exists(test_dir)\n test_accuracy = {}\n for label in os.listdir(test_dir):\n test_set = os.path.join(test_dir, label)\n accuracy = 0\n for value in os.listdir(test_set):\n data = np.load(os.path.join(test_set, value))\n pred_label, _ = generate_pred(\n mel=data, model=model, label=model_name.OUTPUT, model_name=model_name\n )\n if pred_label.lower() == label.lower():\n accuracy += 1\n test_accuracy[label] = accuracy / len(os.listdir(test_set))\n\n _logger.info(test_accuracy)\n return test_accuracy", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = 
TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def train_and_test(self, train_fn, test_fn):\n logging.info(\"Training..\")\n self.train(train_fn)\n logging.info(\"Testing..\")\n return self.test(test_fn)\n logging.info(\"Done!\")", "def test_trainer(testsetting, w2vmodel, tweets, targets, labels, ids, tweets_test, targets_test, labels_test, ids_test, hidden_size, max_epochs, tanhOrSoftmax, dropout, modeltype=\"conditional\", targetInTweet={}, testid = \"test-1\", pretrain = \"pre_cont\", acc_thresh=0.9, sep = False):\n\n # parameters\n learning_rate = 0.0001\n batch_size = 70\n input_size = 100\n\n outfolder = \"_\".join([testid, modeltype, testsetting, \"hidden-\" + str(hidden_size), tanhOrSoftmax])\n\n # real data stance-semeval\n target_size = 3\n max_seq_length = len(tweets[0])\n if modeltype == \"conditional-reverse\":\n data = [np.asarray(targets), np.asarray(tweets), np.asarray(ids), np.asarray(labels)]\n else:\n data = [np.asarray(tweets), np.asarray(targets), np.asarray(ids), np.asarray(labels)]\n\n X = w2vmodel.syn0\n vocab_size = len(w2vmodel.vocab)\n\n if modeltype == \"concat\":\n model, placeholders = get_model_concat(batch_size, max_seq_length, input_size,\n hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"tweetonly\":\n model, placeholders = get_model_tweetonly(batch_size, max_seq_length, input_size,\n hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout)\n data = [np.asarray(tweets), np.asarray(ids), np.asarray(labels)]\n elif modeltype == \"conditional\" or modeltype == \"conditional-reverse\":\n # output of get_model(): model, [inputs, inputs_cond]\n model, placeholders = get_model_conditional(batch_size, max_seq_length, input_size,\n hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"bicond\":\n model, placeholders = get_model_bidirectional_conditioning(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"conditional-target-feed\":\n model, placeholders = get_model_conditional_target_feed(batch_size, max_seq_length, input_size, hidden_size,\n target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"bicond-sepembed\":\n model, placeholders = get_model_bicond_sepembed(batch_size, max_seq_length, input_size, hidden_size,\n target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout)\n sep = True\n\n ids = tf.placeholder(tf.float32, [batch_size, 1], \"ids\") #ids are so that the dev/test samples can be recovered later since we shuffle\n targets = tf.placeholder(tf.float32, [batch_size, target_size], \"targets\")\n\n\n loss = tf.nn.softmax_cross_entropy_with_logits(model, targets) # targets: labels (e.g. pos/neg/neutral)\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n batcher = BatchBucketSampler(data, batch_size)\n acc_batcher = BatchBucketSampler(data, batch_size)\n\n placeholders += [ids]\n placeholders += [targets]\n\n pad_nr = batch_size - (\n len(labels_test) % batch_size) + 1 # since train/test batches need to be the same size, add padding for test\n\n # prepare the testing data. 
Needs to be padded to fit the batch size.\n if modeltype == \"tweetonly\":\n data_test = [np.lib.pad(np.asarray(tweets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(ids_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(labels_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0))\n ]\n elif modeltype == \"conditional-reverse\":\n data_test = [np.lib.pad(np.asarray(targets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(tweets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(ids_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(labels_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0))\n ]\n else:\n data_test = [np.lib.pad(np.asarray(tweets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(targets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(ids_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(labels_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0))\n ]\n\n corpus_test_batch = BatchBucketSampler(data_test, batch_size)\n\n\n with tf.Session() as sess:\n summary_writer = tf.train.SummaryWriter(\"./out/save\", graph_def=sess.graph_def)\n\n hooks = [\n SpeedHook(summary_writer, iteration_interval=50, batch_size=batch_size),\n SaveModelHookDev(path=\"../out/save/\" + outfolder, at_every_epoch=1),\n SemEvalHook(corpus_test_batch, placeholders, 1),\n LossHook(summary_writer, iteration_interval=50),\n AccuracyHook(summary_writer, acc_batcher, placeholders, 2),\n AccuracyHookIgnoreNeutral(summary_writer, acc_batcher, placeholders, 2)\n ]\n\n trainer = Trainer(optimizer, max_epochs, hooks)\n epoch = trainer(batcher=batcher, acc_thresh=acc_thresh, pretrain=pretrain, embedd=X, placeholders=placeholders,\n loss=loss, model=model, sep=sep)\n\n print(\"Applying to test data, getting predictions for NONE/AGAINST/FAVOR\")\n\n predictions_detailed_all = []\n predictions_all = []\n ids_all = []\n\n load_model_dev(sess, \"../out/save/\" + outfolder + \"_ep\" + str(epoch), \"model.tf\")\n\n total = 0\n correct = 0\n for values in corpus_test_batch:\n total += len(values[-1])\n feed_dict = {}\n for i in range(0, len(placeholders)):\n feed_dict[placeholders[i]] = values[i]\n truth = np.argmax(values[-1], 1) # values[2] is a 3-length one-hot vector containing the labels\n if pretrain == \"pre\" and sep == True: # this is a bit hacky. To do: improve\n vars = tf.all_variables()\n emb_var = vars[0]\n emb_var2 = vars[1]\n sess.run(emb_var.assign(X))\n sess.run(emb_var2.assign(X))\n if pretrain == \"pre\": # this is a bit hacky. 
To do: improve\n vars = tf.all_variables()\n emb_var = vars[0]\n sess.run(emb_var.assign(X))\n predictions = sess.run(tf.nn.softmax(model), feed_dict=feed_dict)\n predictions_detailed_all.extend(predictions)\n ids_all.extend(values[-2])\n predicted = sess.run(tf.arg_max(tf.nn.softmax(model), 1),\n feed_dict=feed_dict)\n predictions_all.extend(predicted)\n correct += sum(truth == predicted)\n\n print(\"Num testing samples \" + str(total) +\n \"\\tAcc \" + str(float(correct)/total) +\n \"\\tCorrect \" + str(correct) + \"\\tTotal \" + str(total))\n\n\n # postprocessing\n if targetInTweet != {}:\n\n predictions_new = []\n ids_new = []\n it = 0\n for pred_prob in predictions_detailed_all:\n id = ids_all[it]\n if id == 0.0:\n it += 1\n continue\n inTwe = targetInTweet[id.tolist()[0]]\n if inTwe == True: #and (pred_prob[2] > 0.1 or pred_prob[1] > 0.1): #NONE/AGAINST/FAVOUR\n #print(str(id), \"inTwe!\")\n pred = 1\n if pred_prob[2] > pred_prob[1]:\n pred = 2\n predictions_new.append(pred)\n else:\n plist = pred_prob.tolist()\n pred = plist.index(max(plist))\n predictions_new.append(pred)\n it += 1\n ids_new.append(id)\n return predictions_new, predictions_detailed_all, ids_new\n\n return predictions_all, predictions_detailed_all, ids_all", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def main(cls, args):\n #cls.trainOfflineAndTest(100, 0.1, 0.1, 0.9);\n #cls.trainOfflineAndTest(500, 0.1, 0.1, 1.0);\n\n cls.trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10, 0.8, 1.0 ,1.0, 0.0, 0.3, True, True,True);\n cls.trainer.teachActiveAndSaveStatistics(\"path\", 10, 0.0, 0.0, 0.0, 0.0, 0.0, True, False, False)\n\n #trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,true, true, true);\n # \t\ttrainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,\n # \t\t\t\tfalse, true, true);\n # \t\t\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, true);\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10000, true);\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, False)\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10, False)\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1.net\", 10000, false);", "def test(self):\n self.training = False", "def test():\n return _make_modules(is_train=False)", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def generate_train_txt(name, path):\n with open(path + '/test.txt', 'a') as file:\n file.write('/content/YOLO_metric/data/obj/' + name + '\\n')", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are 
needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def run_tests():\n source1 = TextModel('CS111 Syllabus')\n source1.add_file('CS111_Syllabus.txt')\n\n source2 = TextModel('AR Syllabus')\n source2.add_file('AR_Syllabus.txt')\n\n new1 = TextModel('WR120 Syllabus')\n new1.add_file('WR120_Syllabus.txt')\n new1.classify(source1, source2)\n \n new2 = TextModel('CS131 Syllabus')\n new2.add_file('CS131_Syllabus.txt')\n new2.classify(source1, source2)\n \n new3 = TextModel('My Paper 2 for WR120')\n new3.add_file('WR_Paper_2.txt')\n new3.classify(source1, source2)\n \n new4 = TextModel('CS111 PS9PR0')\n new4.add_file('ps9pr0.txt')\n new4.classify(source1, source2)", "def cli(config, data, metrics, model, loader):\n runner = TestMlRunner(config=config, data=data, metrics=metrics, model=model, loader=loader)\n print(runner.run())", "def train(self, trainfile):", "def evaluate_training_on_testing(net_name, dobj, dir_path, t_start, batch_size=32, generator=g.DataGeneratorMultInput ,testing_files=None, **kwargs):\n opt_arg, kwargs = filter_keys(evaluate_net_defaults(), kwargs)\n \n wiki_data = {}\n for k, v in opt_arg.items():\n wiki_data[k] = str(v)\n \n t_string = date_to_file_string(t_start)\n \n ###\n \n if testing_files == None:\n global testing_file_names\n testing_files = testing_file_names\n \n tmp_files = []\n \n for f in testing_files:\n if os.path.isfile(os.path.join(dir_path, f)):\n tmp_files.append(f)\n \n testing_files = tmp_files\n \n ###\n \n print(\"Now loading the last model\")\n \n net_last = keras.models.load_model(os.path.join(dir_path, net_name + '.hf5'), custom_objects=custom_layers.get_custom_objects())\n \n print(\"Now loading the best model\")\n \n #Load networks\n if not opt_arg['best_epoch'] == 0:\n net_best = keras.models.load_model(os.path.join(dir_path, net_name + '_epoch_' + str(opt_arg['best_epoch']) + '.hf5'), custom_objects=custom_layers.get_custom_objects())\n else:\n net_best = None\n \n print(\"Now getting the data\")\n \n 
#Run predict generator on the test data for each net.\n tmp_prediction_paths_last = []\n tmp_prediction_paths_best = []\n for f in testing_files:\n tmp_prediction_paths_last.append(os.path.join(dir_path, os.path.splitext(f)[0] + '_predictions_last.hf5'))\n if not net_best == None:\n tmp_prediction_paths_best.append(os.path.join(dir_path, os.path.splitext(f)[0] + '_predictions_best.hf5'))\n \n dobj.set_file_path(f)\n dobj.unload_all()\n #dobj.get_set()\n print(\"dobj.shape: {}\".format(dobj.shape))\n dobj.get_formatted_data('testing', 'test_data')\n dobj.get_formatted_data('testing', 'test_labels')\n dobj.get_formatted_data('testing', 'test_snr_calculated')\n \n store_test_results(net_last, dobj, tmp_prediction_paths_last[-1], batch_size=batch_size, generator=generator)\n if not net_best == None:\n store_test_results(net_best, dobj, tmp_prediction_paths_best, batch_size=batch_size, generator=generator)\n \n prediction_path_last = os.path.join(dir_path, net_name + '_predictions_last_epoch_full_testing_' + t_string + '.hf5')\n join_test_results(tmp_prediction_paths_last, prediction_path_last, delete_copied_files=True)\n prediction_path_best = ''\n if not net_best == None:\n prediction_path_best = os.path.join(dir_path, net_name + '_predictions_best_epoch_full_testing_' + t_string + '.hf5')\n join_test_results(tmp_prediction_paths_best, prediction_path_best, delete_copied_files=True)\n \n #Make SNR plots\n SNR_plot_path_last = os.path.join(dir_path, net_name + '_snr_plot_last_epoch_full_testing_' + t_string + '.png')\n \n plot_true_and_calc_from_file(prediction_path_last, dobj, SNR_plot_path_last, show=opt_arg['show_snr_plot'], net_name=net_name + ' last epoch')\n \n SNR_plot_path_best = ''\n \n if not net_best == None:\n SNR_plot_path_best = os.path.join(dir_path, net_name + '_snr_plot_best_epoch_full_testing_' + t_string + '.png')\n \n plot_true_and_calc_from_file(prediction_path_best, dobj, SNR_plot_path_best, show=opt_arg['show_snr_plot'], net_name=net_name + ' best epoch')\n \n #Make false alarm plots\n false_alarm_plot_path_last = os.path.join(dir_path, net_name + '_false_alarm_plot_last_epoch_full_testing_' + t_string + '.png')\n \n tmp_false_alarm_path_last = plot_false_alarm(dobj, prediction_path_last, false_alarm_plot_path_last, show=opt_arg['show_false_alarm'])\n \n false_alarm_plot_prob_path_last = os.path.join(dir_path, net_name + '_false_alarm_plot_prob_last_epoch_full_testing_' + t_string + '.png')\n \n tmp_false_alarm_prob_path_last = plot_false_alarm_prob(dobj, prediction_path_last, false_alarm_plot_prob_path_last, show=opt_arg['show_false_alarm'])\n \n false_alarm_plot_path_best = ''\n \n false_alarm_plot_prob_path_best = ''\n \n tmp_false_alarm_path_best = ''\n \n tmp_false_alarm_prob_path_best = ''\n \n if not net_best == None:\n false_alarm_plot_path_best = os.path.join(dir_path, net_name + '_false_alarm_plot_best_epoch_full_testing_' + t_string + '.png')\n \n false_alarm_plot_prob_path_best = os.path.join(dir_path, net_name + '_false_alarm_plot_prob_best_epoch_full_testing_' + t_string + '.png')\n \n tmp_false_alarm_path_best = plot_false_alarm(dobj, prediction_path_best, false_alarm_plot_path_best, show=opt_arg['show_false_alarm'])\n \n tmp_false_alarm_prob_path_best = plot_false_alarm_prob(dobj, prediction_path_best, false_alarm_plot_prob_path_best, show=opt_arg['show_false_alarm'])\n \n #Make sensitivity plots\n snr_range = dobj.get_file_properties()['snr']\n \n sensitivity_plot_path_last = os.path.join(dir_path, net_name + '_sensitivity_plot_last_epoch_full_testing_' 
+ t_string + '.png')\n \n sensitivity_plot_prob_path_last = os.path.join(dir_path, net_name + '_sensitivity_plot_prob_last_epoch_full_testing_' + t_string + '.png')\n \n plot_sensitivity(dobj, prediction_path_last, tmp_false_alarm_path_last, sensitivity_plot_path_last, bins=(snr_range[0]+1, snr_range[1], 1), show=opt_arg['show_sensitivity_plot'])\n \n plot_sensitivity_prob_from_pred_file(prediction_path_last, sensitivity_plot_prob_path_last, bins=(snr_range[0]+1, snr_range[1], 1))\n #plot_sensitivity_prob(dobj, prediction_path_last, tmp_false_alarm_prob_path_last, sensitivity_plot_prob_path_last, show=opt_arg['show_sensitivity_plot'])\n \n sensitivity_plot_path_best = ''\n \n sensitivity_plot_prob_path_best = ''\n \n if not net_best == None:\n sensitivity_plot_path_best = os.path.join(dir_path, net_name + '_sensitivity_plot_best_epoch_full_testing_' + t_string + '.png')\n \n sensitivity_plot_prob_path_best = os.path.join(dir_path, net_name + '_sensitivity_plot_prob_best_epoch_full_testing_' + t_string + '.png')\n \n plot_sensitivity(dobj, prediction_path_best, tmp_false_alarm_path_best, sensitivity_plot_path_best, bins=(snr_range[0], snr_range[1], 1), show=opt_arg['show_sensitivity_plot'])\n \n plot_sensitivity_prob_from_pred_file(prediction_path_best, sensitivity_plot_prob_path_best, bins=(snr_range[0]+1, snr_range[1], 1))\n #plot_sensitivity_prob(dobj, prediction_path_best, tmp_false_alarm_prob_path_best, sensitivity_plot_prob_path_best, show=opt_arg['show_sensitivity_plot'])\n \n return((SNR_plot_path_last, false_alarm_plot_path_last, false_alarm_plot_prob_path_last, sensitivity_plot_path_last, sensitivity_plot_prob_path_last, SNR_plot_path_best, false_alarm_plot_path_best, false_alarm_plot_prob_path_best, sensitivity_plot_path_best, sensitivity_plot_prob_path_best))", "def test(ctx, input_file, model, output_file):\n # parse extra input args\n kwargs = {ctx.args[i][2:]: ctx.args[i+1].strip('\"') for i in range(0, len(ctx.args), 2)}\n if 'use_groups' in kwargs:\n if kwargs['use_groups']:\n no_groups = 0\n else:\n no_groups = 1\n else:\n no_groups = 1\n click.echo('Init model from: ' + model)\n model_class = MDCASClassifier.init(True, None, None)\n click.echo('Make prediction on: ' + input_file)\n pred_df = model_class.test(model_bundle_file = model, test_set_file=input_file, gt_set_file=None, input_format='joblib', verbose=True, prob=1, no_groups=no_groups)\n click.echo('Save predictions to: ' + output_file)\n model_class.export_test(output_file)\n click.echo('Saved')", "def train_and_test(\n env, envconfig, \n work, workconfig, \n powerpoints, \n agentconfig, \n trainconfig, testconfig\n ):\n from raytrain import train\n from raytest import test\n\n ## GENERAL PATHS\n trainpath = trainconfig['chkptpath']\n testpath = testconfig['logpath']\n\n ## TRAIN-TEST PER POWERPOINT\n results = {}\n for i, power in enumerate(powerpoints):\n ## ADAPT TRAINING\n envconfig['power'] = power\n trainconfig['chkptpath'] = trainpath + f\"/powerpoint-{i + 1}\"\n\n ## TRAIN\n train(\n env, envconfig,\n work, workconfig,\n agentconfig, trainconfig\n )\n\n ## TRAINED PATH\n epochs = trainconfig['epochs']\n agentpath = trainconfig['chkptpath'] + f\"/checkpoint-{epochs}\"\n\n ## ADAPT TEST\n testconfig['logpath'] = testpath + f\"/powerpoint-{i + 1}\"\n\n ## TEST\n count, _ = test(\n agentpath,\n env, envconfig,\n work, workconfig,\n testconfig\n )\n\n ## RECORD COUNT\n results[power] = count\n\n ## RESULTS\n generate_csv(results, testpath)", "def main(input_filepath, output_filepath):\n\n logging.info(\"reading 
%s\", input_filepath)\n train_test = pd.read_hdf(input_filepath, 'train_test')\n meta = pd.read_hdf(input_filepath, 'meta')\n meta_org = pd.read_hdf(input_filepath, 'meta_org')\n\n sel_series = train_test[train_test.entry_type.isin(['train', 'cold_start'])]\\\n ['series_id'].unique()\n train_series, validate_series = train_test_split(sel_series, random_state=1)\n\n logging.info(\"calc train_test\")\n train_test = calc_final_features(train_test, meta, meta_org=meta_org, verbose=True)\n\n sel = train_test[train_test.entry_type.isin(['train', 'cold_start'])]\n train = sel[sel.series_id.isin(train_series)]\n validate = sel[sel.series_id.isin(validate_series)]\n test = train_test[train_test.entry_type.isin(['test'])]\n\n logging.info(\"writing %s\", output_filepath)\n train.to_hdf(output_filepath, \"train\", mode=\"w\")\n validate.to_hdf(output_filepath, \"validate\", mode=\"a\")\n test.to_hdf(output_filepath, \"test\", mode=\"a\")\n for k in ['meta', 'submission']:\n df = pd.read_hdf(input_filepath, k)\n df.to_hdf(output_filepath, k, mode=\"a\")", "def train_test_model_stream():\n train=learning.Train_kmer_clf()\n train.run()\n #with open(os.path.join(cfg.pathtoxp, cfg.xp_name, cfg.id, f'{cfg.model}_CVresults.pkl'), 'rb') as f:\n # dic=pickle.load(f)\n #test=learning.Test_streaming(batchsize=1, kmer_to_index=dic['features'], clf=dic['classifier'])\n test=learning.Test_streaming(batchsize=1, kmer_to_index=train.kmer_to_index, clf=train.cv_clf)\n test.run()", "def train_test(in_path, train_out_path, test_out_path):\n df = pd.read_csv(in_path, sep='|')\n rng = RandomState()\n\n train = df.sample(frac=0.8, random_state=rng)\n test = df.loc[~df.index.isin(train.index)]\n\n train.to_csv(train_out_path, sep='|', index = None, header=True)\n test.to_csv(test_out_path, sep='|', index = None, header=True)" ]
[ "0.72303665", "0.6986525", "0.6882543", "0.68158686", "0.6709295", "0.6583969", "0.65741163", "0.6494195", "0.64419305", "0.6441741", "0.6437468", "0.6436363", "0.63998556", "0.6396058", "0.6387685", "0.63823533", "0.6376246", "0.63716936", "0.63709074", "0.63214844", "0.6311491", "0.63106525", "0.63092715", "0.6295231", "0.62821007", "0.62800837", "0.6276338", "0.6276096", "0.6275904", "0.62706894" ]
0.7193557
1
Checks multiline conditions ``if`` statement nodes.
def _check_multiline_conditions(self, node: ast.If) -> None: start_lineno = getattr(node, 'lineno', None) for sub_nodes in ast.walk(node.test): sub_lineno = getattr(sub_nodes, 'lineno', None) if sub_lineno is not None and sub_lineno > start_lineno: self.add_violation(MultilineConditionsViolation(node)) break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_statement(self, line):\n line = re.sub(\"^if *\", \"\", line)\n if '(' not in line or ')' not in line:\n self.print_error(\"Syntax error: If statements take the syntax if (condition) { ... }\",\n errorFunc=SyntaxError)\n\n\n # remove the brackets\n statement, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n\n\n # Check all variables have been declared\n any_vars = [i.strip('$') for i in re.findall(VAR_REGEX, statement)]\n for var_name in any_vars:\n if var_name not in self.variables:\n self.print_error(f\"Unknown variable: {var_name}\")", "def parse_if_cmd(self, line):\n line = re.sub(\"^if *\", \"\", line)\n\n # remove the brackets\n statement, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n\n # Check all variables have been declared\n any_vars = [i for i in re.findall(IN_STR_VAR_REGEX, statement)]\n # Get the variables declared\n _vars = []\n for var in any_vars:\n _Var = getattr(self, var.strip('$'))\n if type(_Var) == inp_types.Variable: _vars.append(_Var.data)\n else: _vars.append(_Var)\n\n for var_name, var_val in zip(any_vars, _vars):\n statement = statement.replace(var_name, str(var_val))\n\n # Evaluate the if statement\n try:\n var_container = {}\n exec(f\"val = {statement}\", var_container)\n val = var_container['val']\n except Exception as e:\n self.print_error(\"Couldn't parse the if statement\\n\\nError:\"\n + str(e))\n\n end_line = self.get_end_brace()\n\n self.line_num += 1\n if val is False:\n self.line_num = end_line", "def compile_if(self) -> None:\n self._consume('if')\n self._consume('(')\n self.compile_expression()\n self._consume(')')\n\n end_lbl = f'IF_END_{self._if_count}'\n false_lbl = f'IF_FALSE_{self._if_count}'\n self._if_count += 1\n\n self._consume('{')\n self.writer.write_if(false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(end_lbl)\n self.writer.write_label(false_lbl)\n\n self._consume('}')\n\n if self._get_current_token() == 'else':\n self._consume('else')\n self._consume('{')\n self.compile_statements()\n self._consume('}')\n\n self.writer.write_label(end_lbl)", "def _If(self, t):\n self.fill(\"if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # collapse nested ifs into equivalent elifs.\n while (t.orelse and len(t.orelse) == 1 and\n isinstance(t.orelse[0], ast.If)):\n t = t.orelse[0]\n self.fill(\"else if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # final else\n if t.orelse:\n self.fill(\"else\")\n self.enter()\n self.dispatch(t.orelse)\n self.leave()", "def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"ifStatement: \", tok )\n\tstart = match( \"if\" )\n\texpr = expression( )\n\tblk = parseBlock( )\n\telseblk = None\n\ttok = tokens.peek( )\n\tif tok == \"else\":\n\t\tmatch( \"else\" )\n\t\telseblk = parseBlock( )\n\treturn ifStatement(expr, blk, elseblk)", "def compile_if(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 'if' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.code_writer.write_if(lab1)\r\n self.compile_statements()\r\n 
self.code_writer.write_goto(lab2)\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab1)\r\n if (self.tokenizer.token_type() == JackTokenizer.KEYWORD_T and\r\n self.tokenizer.key_word() == \"else\"):\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n self.compile_statements()\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab2)", "def _process_if(self, node):\n creg = node.children[0].name\n cval = node.children[1].value\n self.backend.set_condition(creg, cval)\n self._process_node(node.children[2])\n self.backend.drop_condition()", "def newif(line):\n if not line.startswith(\"\\\\newif\"):\n return False\n pieces = line.split(\"\\\\\")\n if len(pieces) != 4 or pieces[0] != \"\" or pieces[1] != \"newif\":\n print(\"%Wrong number of pieces: \"+line)\n return False\n if not pieces[2].startswith(\"if\"):\n print(\"%Missing if: \"+line)\n return False\n name = pieces[2][2:]\n if not pieces[3].startswith(name):\n print(\"%Name missing: \"+line)\n return False\n value = pieces[3][len(name):]\n if not value in truth:\n print(\"Misunderstood truth value: \"+line)\n return False\n conditionals[\"\\\\if\"+name] = truth[value]\n return True", "def is_if(self, file, i):\n\n # Save line to local variable\n line = file[i].strip()\n\n # If line starts with if and ends with ':' return True, else False\n if (line[:2] == \"if\" or line[:4] in [\"elif\", \"else\"]) and \":\" in line:\n return True\n return False", "def get_if_condition(self, file, i):\n\n # Check if 'if function' is to run main function of program\n if re.match(\"if __name__ == [\\\"']__main__[\\\"']:\", file[i]) and \\\n re.match(r\"\\s*main\\(\\)\", file[i + 1]):\n\n # If yes, return None\n return \"omit\", 2, \n\n # Run super definition\n line = super().get_if_condition(file, i)\n\n # Strip ending colon\n line = line.split(\":\", 1)\n line, multi_statement = line[0], line[1]\n\n # Set if keyword for back translation\n ln_split = line.split(\" \")\n if ln_split[0] not in [\"elif\", \"else\"]:\n if_kw = \"if\"\n else:\n if_kw, line = ln_split[0], \" \".join(ln_split[1:]).strip()\n\n # Replace 'elif' with standard\n if if_kw == \"elif\":\n if_kw = \"else if\"\n\n # Replace logical operators\n line = self.replace_logical_ops(line, direction=\"to\")\n\n # Create start and end for while call\n start = []\n end = []\n\n # Check if multiple statements are declared in one line\n if multi_statement.strip():\n start += multi_statement.split(\";\")\n\n # Return if condition\n return line, if_kw, start, end", "def __EvaluateIf(self, countIf, line):\n countIf = countIf - 1\n i = self.__ifs[countIf]\n i.SetLinePointer(self.__linePointer)\n #s = self.ScanIfCond(self.__oc.GermanUmlautReplace(line))\n s = self.ScanIfCond(line)\n if s:\n i.Set(s[0])\n try:\n i.Eval()\n line = ''\n except:\n raise Core.Error.IfHasNoEndif(0, 'IF-EXPRESSION %i HAS HAD AN ERROR:' \\\n ' EITHER NO CORRESPONDING (endif) OR SYNTAX ERROR'\n % countIf)\n l1, l2 = i.GetNextLine(), line\n return l1, l2", "def visit_if(self: Parser, node: doc.If) -> None:\n with self.var_table.with_frame():\n with T.If(self.eval_expr(node.test)):\n with T.Then():\n with self.var_table.with_frame():\n self.visit_body(node.body)\n if node.orelse:\n with T.Else():\n with self.var_table.with_frame():\n self.visit_body(node.orelse)", "def _analyse_stmt_If(self, statement: ast.If, *, next: CFNode) -> CFNode:\n # Analyse both branches unconditionally: even if they're not reachable,\n # they still need to exist 
in the graph produced.\n if_branch = self._analyse_statements(statement.body, next=next)\n else_branch = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=if_branch)\n else:\n branches.update(else_=else_branch)\n else:\n branches.update(enter=if_branch, else_=else_branch, error=self._raise)\n\n return self._ast_node(statement, **branches)", "def compile_if(self):\n\n\t\txml = '<ifStatement>\\n' + self.tokenizer.keyword() + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)\n\n\t\tself.compile_expression()\n\n\t\txml = self.tokenizer.symbol() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\tself.outfile.write('</statements>\\n' + self.tokenizer.symbol())\n\n\t\tif self.tokenizer.get_token() == 'else':\n\t\t\tself.compile_else()\n\n\t\tself.outfile.write('</ifStatement>\\n')", "def visit_if(self, node):\n branches = 1\n # don't double count If nodes coming from some 'elif'\n if node.orelse and len(node.orelse) > 1:\n branches += 1\n self.inc_branch(branches)\n self.stmts += branches", "def IfStatement(self):\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n express = self.Expression()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n state = self.Statement()\n if self.currtok[1].name == \"else\":\n self.currtok = next(self.tg)\n state2 = self.Statement()\n return ifelseStmt(express, state, state2)\n else:\n return ifStmt(express, state)\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def test_if_elseif_paren_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif (foo and bar) or foo and (bar or (foo and bar))}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif (foo and bar) or foo and (bar or (foo and bar)) %}\\nfoo{% endif %}\"", "def test_if_elseif_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo{% endif %}\"", "def compile_if(self):\r\n else_label = \"ELSE_\" + str(self.__if_count)\r\n end_label = \"END_IF_\" + str(self.__if_count)\r\n self.__if_count += 1\r\n self.__advance(n=2)\r\n self.compile_expression()\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(else_label)\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__vmwriter.write_goto(end_label)\r\n self.__vmwriter.write_label(else_label)\r\n self.__advance()\r\n if self.__tokenizer.keyword() == TYPES_DIC[\"ELSE\"]:\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__advance()\r\n self.__vmwriter.write_label(end_label)", "def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()", "def test_if_elseif_else_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo\\n{else}bar{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo\\n{% else %}bar{% endif %}\"", "def filter_cond(line_dict):\n if(line_dict[\"if1\"] == ''):\n return False\n cond_match = (\n (int(line_dict[\"if1\"]) > 20 and 
int(line_dict[\"if1\"]) < 40)\n ) \n return True if cond_match else False", "def test_if_elseif_and_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif awesome.sauce[1] and blue and 'hello'}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif awesome.sauce[1] and blue and 'hello' %}\\nfoo{% endif %}\"", "def check_if(self, token: tokenize.TokenInfo) -> bool:\n if self._seen_for:\n self._seen_if_in_line = True\n\n self._potential_violation = (\n self._potential_violation or\n self.seen_clause_in_line\n )\n return self._check_violation(token)\n return True", "def find_if_blocks(self):\n if_idxs = [p[0] for p in _commands_with(\"if\", self.cmake)]\n endif_idxs = [p[0] for p in _commands_with(\"endif\", self.cmake)]\n self.if_blocks = list(zip(if_idxs, endif_idxs))", "def test_if_paren_statement():\n r = convert_code(\n \"{if (foo and bar) or foo and (bar or (foo and bar))}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if (foo and bar) or foo and (bar or (foo and bar)) %}\\nbar\\n{% else %}\\nfoo{% endif %}\"", "def test_if_else_statement():\n r = convert_code(\"{if foo}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% else %}\\nfoo{% endif %}\"", "def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(None)\"\n\t\tself.assertTrue(TestChecker.test(input,expect,428))", "def if_(self):\n initial_pos = self.pos\n try:\n self.match_value(Identifier, \"if\")\n expr = self.tokens[self.pos:]\n self.pos = len(self.tokens)\n\n return IfNode(expr)\n except ParseError:\n self.pos = initial_pos\n raise ParseError(\"Invalid if directive.\")", "def CheckEmptyBlockBody(fn, filename, clean_lines, linenum, error):\n from cpplint import CloseExpression\n\n # Search for loop keywords at the beginning of the line. Because only\n # whitespaces are allowed before the keywords, this will also ignore most\n # do-while-loops, since those lines should start with closing brace.\n #\n # We also check \"if\" blocks here, since an empty conditional block\n # is likely an error.\n line = clean_lines.elided[linenum]\n matched = Match(r'\\s*(for|while|if)\\s*\\(', line)\n if matched:\n # Find the end of the conditional expression\n (end_line, end_linenum, end_pos) = CloseExpression(\n clean_lines, linenum, line.find('('))\n\n # Output warning if what follows the condition expression is a\n # semicolon. No warning for all other cases, including\n # whitespace or newline, since we have a separate check for\n # semicolons preceded by whitespace.\n if end_pos >= 0 and Match(r';', end_line[end_pos:]):\n if matched.group(1) == 'if':\n error(filename, end_linenum,\n 'whitespace/empty_conditional_body', 5,\n 'Empty conditional bodies should use {}')\n elif matched.group(1) == 'while' and linenum is not 0 \\\n and \"}\" in clean_lines.elided[linenum-1]:\n # Don't report an error for ros style do-whiles. Works\n # by checking for a closing brace on the previous\n # line, since that means it's probably a do-while\n # loop.\n return\n else:\n error(filename, end_linenum, 'whitespace/empty_loop_body', 5,\n 'Empty loop bodies should use {} or continue')" ]
[ "0.69199467", "0.66166645", "0.64184743", "0.6394726", "0.63435435", "0.626644", "0.6216721", "0.6210071", "0.62085503", "0.61941326", "0.6121475", "0.6098061", "0.5982492", "0.59647274", "0.59607804", "0.59274286", "0.58617854", "0.58296543", "0.5819066", "0.5787807", "0.57736695", "0.56924343", "0.5674062", "0.5642704", "0.55720437", "0.55648595", "0.54924846", "0.54918396", "0.5453149", "0.54326016" ]
0.7710257
0
Commit `n` new people to the database. Return their IDs.
def create_multiple_people(sqla, n): person_schema = PersonSchema() new_people = [] for i in range(n): valid_person = person_schema.load(person_object_factory()) new_people.append(Person(**valid_person)) sqla.add_all(new_people) sqla.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_flush_pk_given(n):\n session = Session(bind=engine)\n for chunk in range(0, n, 1000):\n session.add_all(\n [\n Customer(\n id=i + 1,\n name=\"customer name %d\" % i,\n description=\"customer description %d\" % i,\n )\n for i in range(chunk, chunk + 1000)\n ]\n )\n session.flush()\n session.commit()", "def test_orm_bulk_insert(n):\n session = Session(bind=engine)\n session.execute(\n insert(Customer),\n [\n {\n \"name\": \"customer name %d\" % i,\n \"description\": \"customer description %d\" % i,\n }\n for i in range(n)\n ],\n )\n session.commit()", "def test_core_insert(n):\n with engine.begin() as conn:\n conn.execute(\n Customer.__table__.insert(),\n [\n dict(\n name=\"customer name %d\" % i,\n description=\"customer description %d\" % i,\n )\n for i in range(n)\n ],\n )", "def fill_in_db(cnx, n=100):\n cursor = cnx.cursor()\n add_employee = (\"INSERT INTO employees \"\n \"(firstname, lastname, hiredate, gender, birthdate) \"\n \"VALUES (%s, %s, %s, %s, %s)\")\n add_salary = (\"INSERT INTO salaries \"\n \"(empno, salary, fromdate, todate, commentary) \"\n \"VALUES (%(empno)s, %(salary)s, %(fromdate)s, %(todate)s,%(commentary)s)\")\n add_title = (\"INSERT INTO titles \"\n \"(empno, title, fromdate, todate,lotterychance,description) \"\n \"VALUES (%(empno)s, %(title)s, %(fromdate)s, %(todate)s,%(lotterychance)s,%(description)s)\")\n for _ in range(n):\n cursor.execute(add_employee, rand_fun.random_employee())\n empno = cursor.lastrowid\n cursor.execute(add_salary, rand_fun.random_salary(empno))\n cursor.execute(add_title, rand_fun.random_titles(empno))\n cnx.commit()", "def create_members(N):\n for _ in range(N):\n name = fake.name()\n phone = fake.phone_number()\n email = fake.email()\n address = fake.address()\n Member.objects.create(\n name=name,phone=phone,\n email=email,address=address\n )", "def generate_users(count=10):\n for i in range(count):\n user = generate_random_user()\n db.session.add(user)\n db.session.commit()", "def test_flush_no_pk(n):\n session = Session(bind=engine)\n for chunk in range(0, n, 1000):\n session.add_all(\n [\n Customer(\n name=\"customer name %d\" % i,\n description=\"customer description %d\" % i,\n )\n for i in range(chunk, chunk + 1000)\n ]\n )\n session.flush()\n session.commit()", "def fill_repo_with_random_persons(self, n=10, id_lb=1, id_ub=100):\r\n random_ids, random_names, random_phone_numbers = self.generate_random_persons(n, id_lb, id_ub)\r\n for id_, name, phone_num in zip(random_ids, random_names, random_phone_numbers):\r\n self.add_person(id_, ' '.join(name), phone_num)", "def _mint_new_ott_ids(self, how_many=1):\n first_minted_id = self._next_ott_id\n self._next_ott_id = first_minted_id + how_many\n content = u'{\"next_ott_id\": %d}\\n' % self._next_ott_id\n # The content is JSON, but we hand-rolled the string above\n # so that we can use it as a commit_msg\n self._write_master_branch_resource(content,\n self._id_minting_file,\n commit_msg=content,\n is_json=False)\n last_minted_id = self._next_ott_id - 1\n return first_minted_id, last_minted_id", "def test_many_insertions():\n # Connect to the database\n mongodb = get_database()\n\n expected_ids = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 76950,\n 43380, 26717, 70, 47561, 32800, 37021, 2449, 63555, 72987}\n try:\n ids = add_candidates(mongodb)\n print(\"received ids: \", ids)\n assert all(index in expected_ids for index in ids)\n finally:\n collection = mongodb[COLLECTION_NAME]\n collection.drop()", "def create_users(N):\n for _ in range(N):\n name = fake.name()\n phone = 
fake.phone_number()\n email = fake.email()\n role = random.choice([\"shepherd\",\"admin\"])\n password = fake.user_name\n User.objects.create(\n name=name,phone=phone,\n email=email,role=role,\n password=password\n )", "def create_n_items(n):\n total_objects = models.Item.objects.all().count()\n for i in range(n):\n models.Item.objects.create(\n name=\"Randomly generated object {}\".format(i+total_objects),\n value=random.random() * 1000000\n )", "def test_add_many_implicit_commit(self):\n\n # That one fails in r5 (<commit/> must be made on its own)\n\n doc_count = 10\n user_ids = [get_rand_string() for x in range(doc_count)]\n data = [get_rand_string() for x in range(doc_count)]\n ids = [get_rand_string() for x in range(doc_count)]\n documents = [dict(user_id=user_ids[x], data=data[x], id=ids[x])\n for x in range(doc_count)]\n\n # Pass in the commit flag.\n self.conn.add(documents, True)\n\n results = []\n for id in ids:\n res = self.conn.query(\"id:\" + id).results\n if not res:\n self.fail(\"Could not find document (id:%s)\" % id)\n results.append(res[0])", "def populate(N=5):\n for entry in range(N):\n # Create the fake data for the entry\n fake_name = fakegen.name().split()\n fake_first_name = fake_name[0]\n fake_last_name = fake_name[1]\n fake_email = fakegen.email()\n\n # Create the new User entry\n user = User.objects.get_or_create(first_name=fake_first_name, last_name=fake_last_name, email=fake_email)[0]", "def test_add_many_objects_implicit_commit(self):\n\n # That one fails in r5 (<commit/> must be made on its own)\n\n doc_count = 10\n user_ids = [get_rand_string() for x in range(doc_count)]\n data = [get_rand_string() for x in range(doc_count)]\n ids = [get_rand_string() for x in range(doc_count)]\n documents = []\n for x in range(doc_count):\n doc = Document()\n doc['user_id'] = user_ids[x]\n doc['data'] = data[x]\n doc['id'] = ids[x]\n documents.append(doc)\n\n # Pass in the commit flag.\n self.conn.add(documents, True)\n\n results = []\n for id in ids:\n res = self.conn.query(\"id:\" + id).results\n if not res:\n self.fail(\"Could not find document (id:%s)\" % id)\n results.append(res[0])", "def update(max_iterations):\n persons = get_persons()\n count = 0\n for person in persons:\n if count > max_iterations:\n return\n count += 1\n if choice([0, 1]):\n new_person = make_random('en')\n new_person['id'] = person['id']\n params = {\"event\": \"contact.update\",\n \"data\": new_person}\n request(params)", "def populate(iterations):\n for _ in range(iterations):\n person = make_random('en')\n params = {\"event\": \"contact.add\",\n \"data\": person}\n request(params)", "def test_006_add(self):\n HEADING()\n db = self.db\n\n count = 5\n\n db.connect()\n\n db.delete_jobs()\n\n for id in range(0,count):\n job = db.insert(\"job\" + str(id))\n\n assert len(db) == count", "def generate_fake(count=100, **kwargs):\n from sqlalchemy.exc import IntegrityError\n from random import seed, choice\n from faker import Faker\n\n fake = Faker()\n\n seed()\n for i in range(count):\n u = User(\n username=fake.first_name(),\n email=fake.email(),\n password='password',\n **kwargs)\n db.session.add(u)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()", "def generate_membership_numbers(apps, schema_editor):\n YouthProfile = apps.get_model(\"youths\", \"YouthProfile\")\n for profile in YouthProfile.objects.all():\n membership_number = str(profile.pk).zfill(6)\n YouthProfile.objects.filter(pk=profile.pk).update(\n membership_number=membership_number\n )", "def 
populate_persons():\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Person class')\n\n PERSON_NAME = 0\n LIVES_IN_TOWN = 1\n NICKNAME = 2\n\n people = [\n ('Andrew', 'Sultan', 'Andy'),\n ('Peter', 'Seattle', None),\n ('Susan', 'Boston', 'Beannie'),\n ('Pam', 'Coventry', 'PJ'),\n ('Steven', 'Stevens Pass', None),\n ('Ryan', 'New York', 'Private'),\n ('Pamela', 'Spokane', 'Patrol'),\n ('Monica', 'Portland', None),\n ]\n\n logger.info('Creating Person records: iterate through the list of tuples')\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for person in people:\n with database.transaction():\n new_person = Person.create(\n person_name = person[PERSON_NAME],\n lives_in_town = person[LIVES_IN_TOWN],\n nickname = person[NICKNAME])\n new_person.save()\n logger.info('Database add successful')\n\n logger.info('Print the Person records we saved...')\n for saved_person in Person:\n logger.info(f'{saved_person.person_name} lives in {saved_person.lives_in_town} ' +\\\n f'and likes to be known as {saved_person.nickname}')\n\n except Exception as e:\n logger.info(f'Error creating = {person[PERSON_NAME]}')\n logger.info(e)\n\n finally:\n logger.info('database closes')\n database.close()", "def generate_new_ids(ids: np.array, n_required: int) -> np.array:\n\n current_max_id = ids.max()\n new_ids = np.arange(n_required) + current_max_id + 1\n\n return new_ids", "def flush():\n with transaction.atomic():\n if voter_records:\n NCVoter.objects.bulk_create(voter_records)\n with transaction.atomic():\n # This looks weird. Let me explain.\n # All the unsaved ChangeTracker instances have references\n # to the NCVoter instances from *before* the NCVoter instances\n # were saved. So they do not know the voter instances now have\n # IDs from being inserted. 
This re-sets the voter on the change\n # object, ensuring it knows the ID of its voter and can be saved\n # properly.\n for c in change_records:\n c.voter = c.voter\n c.voter_id = c.voter.id\n ChangeTracker.objects.bulk_create(change_records)\n change_records.clear()\n voter_records.clear()", "def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))", "def insert_many_rows(self, row_count=10000):\n values = [\"('name_{}')\".format(i) for i in range(row_count)]\n values_string = \",\".join(values)\n self.run_sync(f\"INSERT INTO manager (name) VALUES {values_string};\")", "def test_orm_insert_returning(n):\n session = Session(bind=engine)\n\n customer_result = session.scalars(\n insert(Customer).returning(Customer),\n [\n {\n \"name\": \"customer name %d\" % i,\n \"description\": \"customer description %d\" % i,\n }\n for i in range(n)\n ],\n )\n\n # this step is where the rows actually become objects\n customers = customer_result.all() # noqa: F841\n\n session.commit()", "def add_person():\n # Find the last used PK\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"SELECT id FROM person ORDER BY id DESC;\")\n for row in cursor.fetchone():\n last_pk = row\n\n # Auto-increment the primary key for the person table.\n last_pk = last_pk + 1\n\n # Prompt the user for the rest of their information.\n first_name = input(\"Enter your first name: \")\n middle_name = input(\"Enter your middle name: \")\n last_name = input(\"Enter your last name: \")\n suffix_name = input(\"Enter your suffix: \")\n e_mail = input(\"Enter your email: \")\n # Default status of the person is active (1).\n status = 1\n\n # Store the input in a variable.\n person_data = (last_pk, first_name, middle_name, last_name, suffix_name,\n e_mail, status)\n\n # Connect and insert the data into the person table.\n with sqlite3.connect('skeevy.db') as connection:\n cursor = connection.cursor()\n cursor.execute(\"INSERT INTO person VALUES(?, ?, ?, ?, ?, ?, ?);\",\n person_data)\n connection.commit()", "def seed_User(number=5, overwrite=False):\n\n if overwrite:\n print('Overwriting all users')\n User.objects.all().delete()\n count = 0\n for i in range(number):\n username = fake.first_name()\n User.objects.create_user(\n email=username + \"@blogmail.com\",\n password=\"vns12345\",\n name=username,\n date_joined=datetime.datetime.now(),\n is_active=1,\n is_superadmin=0,\n avatar='',\n is_staff=1\n )\n count += 1\n percent_complete = count / number * 100\n print(\n \"Adding {} new Users: {:.2f}%\".format(\n number, percent_complete),\n end='\\r',\n flush=True\n )\n print()", "def test_get_people_list(self):\n person_1 = Person(\n first_name='Emilia',\n last_name='Clarke',\n aliases='Emi'\n )\n person_2 = Person(\n first_name='Peter',\n last_name='Dinklage',\n )\n person_3 = Person(\n first_name='Thomas',\n last_name='McCarthy',\n aliases='Thom'\n )\n\n Person.objects.bulk_create([person_1, person_2, person_3])\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('count'), Person.objects.count())", "def commit_games_to_db(self, games):\n print ' '\n \n num_games = len(games)\n game_num = 0\n \n for game in games:\n update_progress(game_num, num_games)\n game_num += 1\n self.add_game(game)\n self.add_stats(game)" ]
[ "0.6410909", "0.6035476", "0.5936561", "0.59001946", "0.58297455", "0.5781418", "0.57174855", "0.5668549", "0.55672234", "0.55669963", "0.5547272", "0.5537836", "0.5524978", "0.5519709", "0.54809123", "0.5450484", "0.5418881", "0.5374081", "0.53303605", "0.5317177", "0.53019476", "0.5234223", "0.5232751", "0.52261615", "0.5215276", "0.52007663", "0.5155847", "0.5135706", "0.51299274", "0.5126849" ]
0.7085406
0
Commit accounts for `fraction` of `people` in DB.
def create_multiple_accounts(sqla, fraction=0.75): if fraction < 0.1 or fraction > 1.0: raise RuntimeError(f"Fraction ({fraction}) is out of bounds") all_people = sqla.query(Person).all() sample_people = random.sample(all_people, math.floor(len(all_people) * fraction)) account_schema = AccountSchema() new_accounts = [] for person in sample_people: valid_account = account_schema.load(account_object_factory(person.id)) new_accounts.append(Account(**valid_account)) sqla.add_all(new_accounts) sqla.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commit(self):\n for user_name, user in self._users.items():\n self._execute(\n \"UPDATE users \\\n SET credits = ? \\\n WHERE name == ?\",\n (user.credits, user_name)\n )\n for item_name, amount in user.items.items():\n self._execute(\n \"REPLACE INTO users_items VALUES (?, ?, ?)\",\n (user_name, item_name, amount)\n )", "def update_balance_sheet(group_id):\n\n group_details = dbconn.get_collection('groups').find_one({\"_id\":group_id})\n print(\"Running Split Job on {0} after time {1}\".format(group_details['group_name'],group_details['last_settlement_on']))\n\n for expense in dbconn.get_collection('expenditures').find(\n {'group_id': group_id, 'time':{'$gt':group_details['last_settlement_on']} }):\n\n split_expense = expense['amount']/len(expense['shared_between'])\n lender = expense['member_id']\n borrower_set = set(expense['shared_between']) - set(lender)\n for borrower in borrower_set:\n\n '''\n db.members.update(\n {'_id':'nir', 'borrowing.group_id':'grp_tst','borrowing.member_id':'tst1'}\n ,{$inc:{'borrowing.$.amount':100}}\n\t\t\t\t,{upsert:true}\n\t\t\t\t)\n\t\t\t\t\n\t\t\tdb.members.update(\n\t\t\t\t{'_id':'nir'}\n\t\t\t\t,{'$addToSet': {'borrowing':{ 'group_id':'grp_tst','member_id':'tst1','amount':100}}}\n\t\t\t\t)\n\n '''\n\n try:\n dbconn.get_collection('members')\\\n .update_one(\n {'_id':borrower, 'borrowing.group_id':group_id,'borrowing.member_id':lender}\n ,{'$inc':{'borrowing.$.amount':split_expense}})\n except pymongoerrors.WriteError.code == 16836:\n print('You have never borrowed from this person. Running alternate update command.') # Added for testing\n dbconn.get_collection('members')\\\n .update_one(\n {\"_id\":borrower}\n ,{'$addToSet': {'borrowing':{'group_id':group_id,'member_id':lender,'amount':split_expense}}})\n\n dbconn.get_collection('expenditures').update_one({'_id':ObjectId(expense['_id'])},{'$set':{'settled':True}})\n dbconn.get_collection('groups').update_one({\"_id\":group_id}, {'$set': {'last_settlement_on':datetime.utcnow()}})", "def commit_games_to_db(self, games):\n print ' '\n \n num_games = len(games)\n game_num = 0\n \n for game in games:\n update_progress(game_num, num_games)\n game_num += 1\n self.add_game(game)\n self.add_stats(game)", "def commit():\n get_db().commit()", "def deposit(self, amount):\n connection = sqlite3.connect('/home/BorneAgain/Desktop/flasktest/accounts.db')\n\n cursor = connection.cursor()\n\n if self.getBalance() + amount > 0:\n cursor.execute(\"\"\"update accounts set amount=? where name =?;\"\"\", (amount+self.getBalance(), self.name))\n cursor.execute(\"\"\"insert into history (username,dt,amount) values (?,?,?);\"\"\", (self.name, datetime.utcnow(), amount))\n else:\n \n cursor.execute(\"\"\"update accounts set amount=? 
where name =?;\"\"\", (0, self.name))\n\n cursor.execute(\"\"\"insert into history (username,dt,amount) values (?,?,?);\"\"\", (self.name, datetime.utcnow(), amount))\n connection.commit()", "def tbr_to_total_people_ratio(cc,\n sql_time_specification=''): # pragma: no cover\n cc.execute(\"\"\"CREATE TEMPORARY TABLE addr_tbr_hash (email VARCHAR(200), tbr\n INT, hash VARCHAR(40), `timestamp` TIMESTAMP)\"\"\")\n try:\n cc.execute(\"\"\"INSERT INTO addr_tbr_hash (email, tbr, hash)\n SELECT commit_people.people_email_address, tbr_count.c,\n commit_people.git_commit_hash FROM commit_people\n LEFT JOIN (\n SELECT git_commit_hash, COUNT(*)\n AS c FROM commit_people\n INNER JOIN git_commit\n ON git_commit.hash = commit_people.git_commit_hash\n WHERE commit_people.type='tbr' %s\n GROUP BY git_commit_hash) tbr_count\n ON commit_people.git_commit_hash = tbr_count.git_commit_hash\n WHERE commit_people.type='author'\"\"\" % sql_time_specification)\n cc.execute(\"\"\"SELECT email,\n SUM(CASE WHEN tbr<>0 THEN 1 ELSE 0 END) num_tbrd, COUNT(*)\n AS num_total\n FROM addr_tbr_hash\n GROUP BY email\"\"\")\n people_tbr_data = cc.fetchall()\n people_tbr_data = [[data_item[0],\n round(float(data_item[1]) / data_item[2], 3),\n int(data_item[1]), int(data_item[2])] for data_item in\n people_tbr_data]\n sorted_people_tbr_data = sorted(people_tbr_data, key=lambda x: x[1],\n reverse=True)\n top_100 = sorted_people_tbr_data[:100]\n ordered_people_tbr_data = []\n last_ratio = None\n cur_rank = 0\n for i in range(len(top_100)):\n ratio = sorted_people_tbr_data[i][1]\n if ratio > 0:\n temp = {}\n temp['email'] = sorted_people_tbr_data[i][0]\n temp['ratio'] = ratio\n temp['suspicious'] = sorted_people_tbr_data[i][2]\n temp['total'] = sorted_people_tbr_data[i][3]\n if last_ratio != ratio:\n cur_rank += 1\n temp['rank'] = cur_rank\n last_ratio = ratio\n else:\n break\n ordered_people_tbr_data.append(temp)\n finally:\n cc.execute(\"\"\"DROP TABLE addr_tbr_hash\"\"\")\n return ordered_people_tbr_data", "def make_deposit(conn, userid, acctype, amount):\n print('\\n\\nUpdating account user:{}, type:{}, amount:{}'.format(userid, acctype, amount))\n with conn.cursor() as curs:\n res = curs.execute(\"\"\"UPDATE accounts\n SET balance=%s\n WHERE owner_id=%s AND type=%s\"\"\", (amount, userid, acctype))\n if res is not None:\n print(res)", "def commit(self):\n self.DB.commit()", "def commit(self):", "def deposit(holder):\n account = Account.query.filter_by(holder=holder).first()\n if not account:\n return jsonify({\"error\": \"Account does not exist\"})\n amount = request.json.get(\"amount\")\n account.balance += amount\n db.session.commit()\n return jsonify(\n {\n \"holder\": account.holder,\n \"balance\": account.balance,\n \"message\": \"The deposit has been processed\",\n }\n )", "def _deposit_coins(user_id: int, coins: int):\r\n if not Wealth.collection.find_one({\"_id\": user_id}):\r\n return\r\n Wealth.collection.update_one({\"_id\": user_id}, {\"$inc\": {\r\n \"Bank\": coins,\r\n \"coins\": -coins\r\n }})", "def commit(self):\n self._cur_batch.commit()\n self._cur_batch = None\n self._num_mutations = 0", "def commitChanges(self):\n \n ## User is prompted that changes are being committed\n print(\"Committing changes to the CRM and Mailings database...\")\n db_connection.executeQuery(\"COMMIT;\")", "def commit(self):\n self.db.commit()", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n self._size += 
self._size_uncommitted\n self._size_uncommitted = 0\n self.session.commit()", "def challenge(factor, min_donation=None, max_donation=None):\n logger.info('+++ Run Challane with factor: {}, min: {}, max: {}'.format(factor,\n min_donation,\n max_donation))\n logger.info('+++ Projection')\n Main.connect_db(\"Donation\")\n try:\n with database.transaction():\n query = (Donation\n .update(\n amount = (Donation.amount)*factor\n )\n .where(\n (Donation.amount > min_donation) &\n (Donation.amount < max_donation)\n )\n )\n logger.info('{} records updated in Donation Table'.format(query.execute()))\n except Exception as e:\n logger.info('Exception cought when updating {} donations DB'.format(name))\n logger.info(\"Exception: {}\".format(e))\n finally:\n logger.info('Closing database...')\n database.close()", "def store_contribution(post, category):\n contributions = constants.DB_UTEMPIAN.contributions.find(\n {\"author\": post.author, \"category\": category})\n\n scores = [contribution[\"score\"] for contribution in contributions\n if contribution[\"score\"]]\n\n if not scores:\n return\n\n rejected = scores.count(0)\n accepted = len(scores) - rejected\n upvote_percentage = percentage(accepted, len(scores))\n\n if upvote_percentage < 66.0:\n return\n\n average_score = sum(scores) / len(scores)\n weight = exponential_vote(average_score, category)\n new_weight = (weight / max(constants.MAX_VOTE.values()) * 100.0) / constants.SCALER\n\n collection = constants.DB_UTEMPIAN.pending_contributions\n age = post.time_elapsed()\n collection.insert({\n \"url\": post.authorperm,\n \"upvote_time\": datetime.now() + timedelta(minutes=10) - age,\n \"inserted\": datetime.now(),\n \"upvoted\": False,\n \"weight\": new_weight\n })", "def hit_team(cid):\n cur = db.cursor()\n cur.execute(\"SELECT balance FROM users WHERE cid='%s'\" % (cid))\n balance = cur.fetchone()\n balance = float(balance[0])\n balance = balance - 10\n cur.execute(\"UPDATE users SET balance='%s' WHERE cid='%s'\" %\n (balance, cid))\n db.commit()\n print \"%s new balance: %s\" % (str(cid), str(balance))", "def commit(self) -> None:\n pass", "def dbcommit(self):\n self.con.commit()", "def database_commit(connector):\n try:\n connector.commit()\n except Exception as e:\n raise Exception(\n \"An error occurred while committing the modifications in the database: %s\"\n % e\n )", "def commit(self, amount=None):\n if amount is None:\n amount = self.autocommit_amount\n\n self.autocommit_amount -= amount\n\n del self.buff[0:amount]", "def calculate_half_percent_interest_on_account(list_of_all_accounts_known, ID_account_to_give_interest):\n for account in list_of_all_accounts_known:\n if ID_account_to_give_interest == account.account_id:\n account.balance += (account.balance * 0.005)", "def commit(self):\n for db in self.values():\n db.commit()", "def _update_solved_count(delta, task, profile, save_task=True, save_profile=True):\n if delta == 0:\n return\n\n task.solved_count += delta\n if save_task:\n task.save()\n\n profile.solved_count += delta\n profile.update_diff_distribution(task, delta)\n if save_profile:\n profile.save()" ]
[ "0.55138946", "0.5332356", "0.52618074", "0.52338505", "0.52129024", "0.51808655", "0.5152565", "0.5076112", "0.506996", "0.50614417", "0.5041748", "0.5001631", "0.49909", "0.49831", "0.49751437", "0.49751437", "0.49751437", "0.49751437", "0.49751437", "0.49574712", "0.4956084", "0.49521905", "0.48771167", "0.48499173", "0.4842442", "0.48370704", "0.48190558", "0.48150763", "0.47921675", "0.4775358" ]
0.56495214
0
Prepare the database with a random number of people, some of which have accounts. Returns list of IDs of the new accounts.
def prep_database(sqla): create_multiple_people(sqla, random.randint(5, 15)) create_multiple_accounts(sqla) return [account.id for account in sqla.query(Account.id).all()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_users(count=10):\n for i in range(count):\n user = generate_random_user()\n db.session.add(user)\n db.session.commit()", "def create_multiple_accounts(sqla, fraction=0.75):\n if fraction < 0.1 or fraction > 1.0:\n raise RuntimeError(f\"Fraction ({fraction}) is out of bounds\")\n\n all_people = sqla.query(Person).all()\n sample_people = random.sample(all_people, math.floor(len(all_people) * fraction))\n\n account_schema = AccountSchema()\n new_accounts = []\n for person in sample_people:\n valid_account = account_schema.load(account_object_factory(person.id))\n new_accounts.append(Account(**valid_account))\n sqla.add_all(new_accounts)\n sqla.commit()", "def populate_random_phone_numbers(amount):\n phone_objects = [Phone(data=generate_random_phone())\n for _ in range(amount)]\n Phone.objects.bulk_create(phone_objects)", "def populate_database(num_patients, min_checkins, max_checkins):\n departments = [\n Department(department_name=\"Cardiology\"),\n Department(department_name=\"Emergency\"),\n Department(department_name=\"Gynecology\"),\n Department(department_name=\"Pediatrics\"),\n Department(department_name=\"Obstetrics\"),\n Department(department_name=\"Oncology\"),\n Department(department_name=\"Orthopedics\"),\n Department(department_name=\"Neurology\")\n ]\n\n for i in xrange(num_patients):\n patient = Patient(**generate_patient())\n patient.departments.append(choice(departments))\n db.add(patient)\n\n for j in xrange(randrange(min_checkins, max_checkins)):\n checkin = CheckIn(**generate_checkin())\n checkin.patient_nhi = patient.nhi\n\n lci = patient.latest_checkin_time\n vid = checkin.checkin_time\n\n lci = vid if lci is None or vid > lci else lci\n patient.latest_checkin_time = lci\n\n db.add(checkin)\n\n for k in xrange(randrange(0, 3)):\n appointment = Appointment(**generate_appointment())\n appointment.patient_nhi = patient.nhi\n\n db.add(appointment)\n\n db.commit()", "def getRandom( self ):\n import random \n count = Mysql.ex( \"SELECT count(*) AS c FROM `%s`.`people`;\" % self.db_name )\n the_id = random.randint( 1, count[0]['c'] )\n people = self.getByID( the_id )\n return people", "def fill_repo_with_random_persons(self, n=10, id_lb=1, id_ub=100):\r\n random_ids, random_names, random_phone_numbers = self.generate_random_persons(n, id_lb, id_ub)\r\n for id_, name, phone_num in zip(random_ids, random_names, random_phone_numbers):\r\n self.add_person(id_, ' '.join(name), phone_num)", "def populate_tables(connection: sqlite3.Connection) -> None:\n fake = Faker()\n Faker.seed(0)\n\n c = conn.cursor()\n\n number_of_courses = fake.pyint(min_value=5, max_value=20)\n\n for _ in range(number_of_courses):\n course_name = fake.word()\n\n insert_statement = f'insert into courses (name) values (\"{course_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n number_of_users = fake.pyint(min_value=1, max_value=23)\n\n Faker.seed()\n\n for _ in range(number_of_users):\n\n if fake.pybool():\n user_name = f'{fake.first_name_female()} {fake.last_name_female()}'\n else:\n user_name = f'{fake.first_name()} {fake.last_name()}'\n\n insert_statement = f'insert into users (name) values (\"{user_name}\");'\n c.execute(insert_statement)\n\n connection.commit()\n\n for _ in range(50000):\n Faker.seed()\n\n random_user_id = fake.pyint(1, number_of_users)\n random_course_id = fake.pyint(1, number_of_courses)\n Faker.seed()\n random_lesson_no = fake.pyint(3, 12)\n Faker.seed()\n random_exercise_no = fake.pyint(1, 50)\n random_data = fake.sentence()\n\n insert_statement = 
f\"\"\"insert into saves (user_id, course_id, lesson_no, exercise_no,data) \n values ({random_user_id}, {random_course_id}, {random_lesson_no}, \n {random_exercise_no}, '{random_data}');\"\"\"\n c.execute(insert_statement)\n\n connection.commit()", "def populate(N=5):\n for entry in range(N):\n # Create the fake data for the entry\n fake_name = fakegen.name().split()\n fake_first_name = fake_name[0]\n fake_last_name = fake_name[1]\n fake_email = fakegen.email()\n\n # Create the new User entry\n user = User.objects.get_or_create(first_name=fake_first_name, last_name=fake_last_name, email=fake_email)[0]", "def generate_fake(count=100, **kwargs):\n from sqlalchemy.exc import IntegrityError\n from random import seed, choice\n from faker import Faker\n\n fake = Faker()\n\n seed()\n for i in range(count):\n u = User(\n username=fake.first_name(),\n email=fake.email(),\n password='password',\n **kwargs)\n db.session.add(u)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()", "def seed_db():\n import cerbereapp.models as models\n con = engine.connect()\n con.execute(models.account_type.insert(), [\n {'Guest'},\n {'Premium'},\n {'Free'}])\n db_session.execute(models.profiles.insert(), [\n {'user_id': 1, 'profile_name' : '1recon'},\n {'user_id': 1, 'profile_name' : '1medic'},\n {'user_id': 2, 'profile_name' : '2recon'},\n {'user_id': 2, 'profile_name' : '2medic'}])\n db_session.commit()", "def fill_in_db(cnx, n=100):\n cursor = cnx.cursor()\n add_employee = (\"INSERT INTO employees \"\n \"(firstname, lastname, hiredate, gender, birthdate) \"\n \"VALUES (%s, %s, %s, %s, %s)\")\n add_salary = (\"INSERT INTO salaries \"\n \"(empno, salary, fromdate, todate, commentary) \"\n \"VALUES (%(empno)s, %(salary)s, %(fromdate)s, %(todate)s,%(commentary)s)\")\n add_title = (\"INSERT INTO titles \"\n \"(empno, title, fromdate, todate,lotterychance,description) \"\n \"VALUES (%(empno)s, %(title)s, %(fromdate)s, %(todate)s,%(lotterychance)s,%(description)s)\")\n for _ in range(n):\n cursor.execute(add_employee, rand_fun.random_employee())\n empno = cursor.lastrowid\n cursor.execute(add_salary, rand_fun.random_salary(empno))\n cursor.execute(add_title, rand_fun.random_titles(empno))\n cnx.commit()", "def __generateUserIDs(self,_count):\n return map(lambda x:self.__getNewUserID(),range(_count))", "def populate_donations():\n logger.info('Starting Donations table population')\n\n DONATION_DATE = 0\n DONATION_AMOUNT = 1\n DONATED_BY = 2\n\n d = datetime.today() - timedelta(days=random.randint(1, 301))\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n\n for donor in Donors:\n # Randomly generated number of donations\n #donation_times = random.randint(1, 10)\n for donation in range(random.randint(1, 10)):\n with database.transaction():\n # random date in last year\n # random donation amount converted to decimal\n # pulling donor fullname as id\n new_donation = Donations.create(\n donation_date=datetime.today() - timedelta(days=random.randint(1, 301)),\n donation_amount=decimal.Decimal(\n random.randrange(1, 9999999))/100,\n donated_by=donor.fullname,\n )\n new_donation.save()\n logger.info('Database add successful')\n\n logger.info('Print the Donors records we saved...')\n for don in Donations:\n logger.info(f'donation: {don.id} : {don.donation_date} : {don.donation_amount} : '\n + f' donor_id: {don.donated_by} has been added to the Donations table ')\n except Exception as e:\n logger.info(f'Error creating = {donation[DONATION_DATE]} 
{donation[DONATION_AMOUNT]}'\n + f'{donation[DONATED_BY]}')\n logger.info(e)\n logger.info('See how the database protects our data')\n finally:\n logger.info('closing database')\n database.close()", "def setup_sample_data(no_of_records):\n rows_in_database = [{'id': counter, 'name': get_random_string(string.ascii_lowercase, 20), 'dt': '2017-05-03'}\n for counter in range(0, no_of_records)]\n return rows_in_database", "def seed_all():\n seed_client()\n seed_staff()\n seed_request()\n seed_comment()", "def generate_fake(count=100, **kwargs):\n from sqlalchemy.exc import IntegrityError\n from random import seed, choice\n from faker import Faker\n\n fake = Faker()\n roles = Role.query.all()\n\n seed()\n for i in range(count):\n u = User(\n first_name=fake.first_name(),\n last_name=fake.last_name(),\n email=fake.email(),\n password='password',\n confirmed=True,\n role=choice(roles),\n **kwargs)\n db.session.add(u)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()", "def create_users(N):\n for _ in range(N):\n name = fake.name()\n phone = fake.phone_number()\n email = fake.email()\n role = random.choice([\"shepherd\",\"admin\"])\n password = fake.user_name\n User.objects.create(\n name=name,phone=phone,\n email=email,role=role,\n password=password\n )", "def create_multiple_people(sqla, n):\n person_schema = PersonSchema()\n new_people = []\n for i in range(n):\n valid_person = person_schema.load(person_object_factory())\n new_people.append(Person(**valid_person))\n sqla.add_all(new_people)\n sqla.commit()", "def seed_db():\n\n # TODO: Create user\n userNormal = User(username = 'usernormal', password = 'usernormal', role = 1)\n userAdmin = User(username = 'useradmin', password = 'useradmin', role = 10)\n User.insert(userNormal)\n User.insert(userAdmin)\n print('Seed User')\n\n # read accounts.json\n with open('./data/accounts.json') as f:\n accounts_data = json.load(f)\n mongo.db.accounts.insert_many(accounts_data)\n print('Seed Accounts')", "def create_match_database(self, summoner_name, count=1000):\n match_ids = set()\n\n match_id = self.get_matchlist_by_summoner_id(self.get_summoner_id(summoner_name))[0]\n players = self.get_summs_and_champs_from_match(match_id)\n m_ids = []\n while len(match_ids) < count:\n time.sleep(1)\n for player_id in players:\n m_ids = self.get_matchlist_by_summoner_id(player_id)\n print(m_ids)\n match_ids.update(m_ids)\n time.sleep(1)\n players = self.get_summs_and_champs_from_match(m_ids[-1])\n\n # save matches id to file\n with open(os.getcwd()+'/database/matchIds', 'w') as out:\n for id in match_ids:\n out.write(str(id) + '\\n')\n return match_ids", "def get_user_ids():\n TOTAL_USERS = 50\n return list(numpy.random.choice(\n TOTAL_USERS, random.randint(1, TOTAL_USERS), replace=False\n ))", "def _create_users(self):\r\n users = []\r\n for i in range(8):\r\n username = \"user{}\".format(i)\r\n email = \"test+user{}@edx.org\".format(i)\r\n user = User.objects.create_user(username, email, 'foo')\r\n user.is_active = True\r\n user.save()\r\n users.append(user)\r\n return users", "def populate(iterations):\n for _ in range(iterations):\n person = make_random('en')\n params = {\"event\": \"contact.add\",\n \"data\": person}\n request(params)", "def populate_db(dbase):\n # In this order: Iron, Blood, Shadow, Fel, Storm\n wowhead_ids = []\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-8))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-9))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-10))\n 
wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-11))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-17))\n item_ids = set(wowhead_ids)\n print(item_ids)\n\n pos = 0\n for item_id in item_ids:\n if pos % 10 == 0:\n print(\"Relic %d of %d\" % (pos, len(item_ids)))\n pos += 1\n import_relic(dbase, item_id)", "def createWordList():\n wordList = Word.objects.all()\n returnList = []\n ids = []\n while len(ids) < 5:\n foo = randomInt()\n if len(ids) < 5:\n randInt = randomInt()\n if randInt not in ids:\n ids.append(randInt)\n i = 0\n while i < 5:\n for j in ids:\n word = wordList[j]\n if word not in returnList:\n returnList.append(word)\n word.times_appeared += 1\n word.save()\n i += 1\n return returnList", "def createWordList():\n wordList = Word.objects.all()\n returnList = []\n ids = []\n while len(ids) < 5:\n foo = randomInt()\n if len(ids) < 5:\n randInt = randomInt()\n if randInt not in ids:\n ids.append(randInt)\n i = 0\n while i < 5:\n for j in ids:\n word = wordList[j]\n if word not in returnList:\n returnList.append(word)\n word.times_appeared += 1\n word.save()\n i += 1\n return returnList", "def set_random_group_list(self, num):\n try:\n self.cursor.execute(\"insert into group_list (group_id, fest_id) \"\n \"select rand.group_id, rand.fest_id \"\n \"from (select groups.id as group_id, festivals.id as fest_id \"\n \"from festivals, groups) as rand \"\n \"left join group_list on (rand.group_id=group_list.group_id \"\n \"and rand.fest_id=group_list.fest_id) \"\n f\"where group_list.id is NULL order by random() limit {num} \")\n self.connection.commit()\n if self.cursor.rowcount:\n return \"generated group_list\"\n else:\n return \"NULL\"\n except(Exception, psycopg2.Error) as error:\n self.connect.rollback()\n print(\"error in generate\", error)", "def generate_membership_numbers(apps, schema_editor):\n YouthProfile = apps.get_model(\"youths\", \"YouthProfile\")\n for profile in YouthProfile.objects.all():\n membership_number = str(profile.pk).zfill(6)\n YouthProfile.objects.filter(pk=profile.pk).update(\n membership_number=membership_number\n )", "def populate_db(count, dbname):\n\n path = os.path.dirname(os.path.abspath(__file__))\n\n if not dbname.endswith('.json'):\n dbname = '{0}.json'.format(dbname)\n\n click.echo('Creating database {0}'.format(dbname))\n\n db = TinyDB(os.path.join(path, dbname))\n users = V2ProfileFactory().create_batch(count, export_json=False)\n db.insert_multiple(users)\n\n click.echo('Added {0} profiles in database {1}.'.format(count, dbname))", "def newList(self):\n lst = []\n count = 0\n while count < 52:\n lst.append(randint(1, 1500))\n count += 1\n return lst" ]
[ "0.63409966", "0.62582266", "0.6153173", "0.6131511", "0.6050464", "0.5958763", "0.590086", "0.58537793", "0.58406174", "0.581963", "0.5800112", "0.5779376", "0.5766366", "0.5753821", "0.5740715", "0.57321405", "0.5722673", "0.57001567", "0.566019", "0.5607473", "0.55965966", "0.5582", "0.55526", "0.55437374", "0.55399466", "0.55399466", "0.5538957", "0.5536845", "0.55222744", "0.5520225" ]
0.7617684
0
Print list in pretty columns.
def print(listing: typing.Iterable[typing.Any]) -> None: listing = tuple(str(i) for i in listing) if not listing: return width = max(len(i) for i in listing) + 2 count = min(shutil.get_terminal_size().columns // width, len(listing)) for row in itertools.zip_longest(*(listing[i::count] for i in range(count)), fillvalue=''): print(*(f'{c:<{width}}' for c in row), sep='')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fmtcols(mylist, cols):\n maxwidth = max(list(map(lambda x: len(x), mylist)))\n justifyList = list(map(lambda x: x.ljust(maxwidth), mylist))\n lines = (' '.join(justifyList[i:i + cols])\n for i in range(0, len(justifyList), cols))\n print(\"\\n\".join(lines))", "def tabular_formatted_printing(data_list):\n n = len(data_list)\n max = 0\n for i in range(0,n):\n if int(len(data_list[i][0])) > max:\n max = len(data_list[i][0])\n for i in range(0,n):\n if int(len(data_list[i][0])) < max:\n space = max - len(data_list[i][0])\n else:\n space = 0\n print(data_list[i][0]+space*' '+' : '+str(data_list[i][1]))\n return", "def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()", "def print_list(self):\n self.print_avec_separateur(\" \")", "def print_list_to_columns(words, items_per_row=5):\n row = []\n width = max(map(len, words)) + 2 \n for idx, word in enumerate(words):\n if (idx + 1) % items_per_row == 0:\n print(\"\".join(word.ljust(width) for word in row))\n row = []\n row.append(word)\n # append one last time just in case\n if len(row) > 0:\n print(\"\".join(word.ljust(width) for word in row))", "def list_columns(obj, cols=4, columnwise=True, gap=4):\n\n sobj = [str(item) for item in obj]\n if cols > len(sobj): cols = len(sobj)\n max_len = max([len(item) for item in sobj])\n if columnwise: cols = int(math.ceil(float(len(sobj)) / float(cols)))\n plist = [sobj[i: i+cols] for i in range(0, len(sobj), cols)]\n if columnwise:\n if not len(plist[-1]) == cols:\n plist[-1].extend(['']*(len(sobj) - len(plist[-1])))\n plist = zip(*plist)\n printer = '\\n'.join([\n ''.join([c.ljust(max_len + gap) for c in p])\n for p in plist])\n print(printer)", "def list_columns(obj, cols=2, columnwise=True, gap=4):\n\n\tsobj = [str(item) for item in obj]\n\tif cols > len(sobj): cols = len(sobj)\n\tmax_len = max([len(item) for item in sobj])\n\tif columnwise: cols = int(math.ceil(float(len(sobj)) / float(cols)))\n\tplist = [sobj[i: i + cols] for i in range(0, len(sobj), cols)]\n\tif columnwise:\n\t\tif not len(plist[-1]) == cols:\n\t\t\tplist[-1].extend([''] * (len(sobj) - len(plist[-1])))\n\t\tplist = zip(*plist)\n\tprinter = '\\n'.join([\n\t\t''.join([c.ljust(max_len + gap) for c in p])\n\t\tfor p in plist])\n\tprint printer", "def print_list(self, items):\n\t\tstrtype = unicode if self.encoding else bytes\n\t\titems = map(strtype, items)\n\t\twidth = self.get_width()\n\t\tlines = []\n\t\tsep = strtype(' ')\n\t\tfor item in items:\n\t\t\tif lines:\n\t\t\t\tnew = lines[-1] + sep + item\n\t\t\t\tif len(new) <= width:\n\t\t\t\t\tlines[-1] = new\n\t\t\t\t\tcontinue\n\t\t\tlines.append(item)\n\t\tself.write(strtype('\\n').join(lines))", "def basic_print(lista):\n for item in lista:\n print(\"{} \\t\\t {}\".format(item[0], item[1]))", "def show(self, lst=None):\n\n def f(v):\n if np.size(v) == 1:\n return str(v)\n elif np.size(v) > 3:\n return str(np.shape(v))\n elif np.ndim(v) > 1:\n return str(np.shape(v))\n else:\n return str(v)\n\n def buffer(l, m, n=25):\n end = len(l) - 1\n buffered = []\n for i in range(m):\n if i > end:\n buffered.append(\"\".ljust(n))\n else:\n buffered.append(l[i].ljust(n))\n return buffered\n\n lst = self if lst is None else lst\n out = [IND.ljust(7) + INDEP.ljust(60) + DEP.ljust(60)]\n for row in lst:\n ind = [str(row[IND])]\n dep = [k + \": \" + f(v) for k, v in row[DEP].items()]\n indep = [k + \": \" + f(v) for k, v in row[INDEP].items()]\n m = max(len(dep), len(indep), 1)\n ind = buffer(ind, m, 7)\n dep = buffer(dep, m, 60)\n 
indep = buffer(indep, m, 60)\n for a, b, c in zip(ind, indep, dep):\n out.append(a + b + c)\n out.append(\"\")\n return \"\\n\".join(out)", "def print_bul_list(self, l):\n self.print_newline()\n for i in l:\n self._write(\" - %s\\n\" % i)\n self.print_newline()", "def pprint_table(out, table):\n\n\tcol_paddings = []\n\n\tfor i in range(len(table[0])):\n\t\tcol_paddings.append(get_max_width(table, i))\n\n\tfor row in table:\n\t\t# left col\n\t\tout.write(str(row[0]).ljust(col_paddings[0] + 1))\n\t\t\n\t\t# rest of the cols\n\t\tfor i in range(1, len(row)):\n\t\t\tout.write(str(row[i]).rjust(col_paddings[i] + 2))\n\t\t\n\t\tout.write('\\n')", "def print_list(l):\n print('[' + ', '.join([x.__str__() for x in l]) + ']')", "def display_col_dp(dp_list, attr_name):\n\n print()\n print(\"---------- {:s} ----------\".format(attr_name))\n print([getattr(dp, attr_name) for dp in dp_list])", "def columnize(L, indent=\"\", width=79):\n column_width = max(len(w) for w in L) + 1\n num_columns = (width - len(indent)) // column_width\n num_rows = len(L) // num_columns\n L = L + [\"\"] * (num_rows*num_columns - len(L))\n columns = [L[k*num_rows:(k+1)*num_rows] for k in range(num_columns)]\n lines = [\" \".join(\"%-*s\"%(column_width, entry) for entry in row)\n for row in zip(*columns)]\n output = indent + (\"\\n\"+indent).join(lines)\n return output", "def pretty_print(count_list):\n\tfor i in range(len(count_list)):\n\t\tif (count_list[i] > 0):\n\t\t\tprint(chr(i+ord('a')),count_list[i],sep = \": \", end =\"\\n\")", "def print_column():\n print('+----+----+----+----+')", "def columnize(items, displaywidth=80):\n if not items:\n print(\"<empty>\\n\")\n return\n\n nonstrings = [i for i in range(len(items))\n if not isinstance(items[i], str)]\n if nonstrings:\n raise TypeError(\"items[i] not a string for i in %s\" % \", \".join(map(str, nonstrings)))\n size = len(items)\n if size == 1:\n print('%s\\n' % str(items[0]))\n return\n # Try every row count from 1 upwards\n for nrows in range(1, len(items)):\n ncols = (size + nrows - 1) // nrows\n colwidths = []\n totwidth = -2\n for col in range(ncols):\n colwidth = 0\n for row in range(nrows):\n i = row + nrows * col\n if i >= size:\n break\n x = items[i]\n colwidth = max(colwidth, len(x))\n colwidths.append(colwidth)\n totwidth += colwidth + 2\n if totwidth > displaywidth:\n break\n if totwidth <= displaywidth:\n break\n else:\n nrows = len(items)\n ncols = 1\n colwidths = [0]\n for row in range(nrows):\n texts = []\n for col in range(ncols):\n i = row + nrows * col\n if i >= size:\n x = \"\"\n else:\n x = items[i]\n texts.append(x)\n while texts and not texts[-1]:\n del texts[-1]\n for col in range(len(texts)):\n texts[col] = texts[col].ljust(colwidths[col])\n print(\"%s\\n\" % str(\" \".join(texts)))", "def print_table(table):\n for row in table:\n # Header column left justified\n print(\"{:<19}\".format(row[0]), end='')\n # Remaining columns right justified\n for col in row[1:]:\n print(\"{:>4}\".format(col), end='')\n print(\"\", end='\\n')", "def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print", "def print_column():\n print('+----+----+')", "def printlist(x, width=70, indent=4, file=None):\n\n blanks = ' ' * indent\n # Print the sorted list: 'x' may be a '--random' list or a 
set()\n print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,\n initial_indent=blanks, subsequent_indent=blanks),\n file=file)", "def prettyprint(board):\n print(\"\")\n for row in range(3,0,-1):\n print(\"\" + str(row) + \" |\" + board[(row-1)*3:(row)*3].replace(\"\",\" \"))\n print(\" +------\")\n print(\" A B C\")", "def pprint(self, data):\n self._assert(data)\n data = self._render(data) # make elements ascii\n fmats = self._fmats(data) # get array of padding formats)\n for row in data:\n print(fmats.format(*row))", "def prettyPrintListHelper_ (l, stream, indent, pretty_print=True, indent_additive=4) :\r\n \r\n # Base case, empty table\r\n entries = len(l)\r\n if entries==0 :\r\n stream.write(\"[ ]\")\r\n return\r\n \r\n # Recursive case\r\n stream.write(\"[\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n for ii in xrange(0,entries) :\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n specialStream_(l[ii], stream, indent, pretty_print, indent_additive)\r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n if pretty_print : indentOut_(stream, indent); \r\n stream.write(\"]\")", "def pretty_display(self):\n\t\tpretty_space = PrettyTable()\n\t\tpretty_space.field_names = range(self.space.shape[1])\n\t\tcount = 0\n\t\tpretty_row = []\n\t\tfor cell in self.space.flat:\n\t\t\tcount = count + 1\n\t\t\tpretty_row.append(cell.state)\n\t\t\tif count >= self.space.shape[1]:\n\t\t\t\tpretty_space.add_row(pretty_row)\n\t\t\t\tcount = 0\n\t\t\t\tpretty_row = []\n\t\tprint(pretty_space)", "def out(lst, max_width=100, index=False, spaces=3, ret=False):\n # Not even a list - just print\n if not isinstance(lst, (list,tuple)):\n print lst\n return\n\n # List of lists of same size\n strs = []\n if all([isinstance(l, (list,tuple)) for l in lst]) and all([len(l) == len(lst[0]) for l in lst]):\n L = len(lst[0])\n temp_strs = []\n for l in lst:\n temp_line = []\n for x in l:\n temp_line.append(str(x))\n temp_strs.append(temp_line)\n fields_sizes = []\n for i in range(L):\n temp_size = []\n for ts in temp_strs:\n temp_size.append(len(ts[i]))\n fields_sizes.append(temp_size)\n widths = [min(max(fs),max_width) for fs in fields_sizes]\n for i,l in enumerate(lst):\n temp = ''\n for j,x in enumerate(l):\n temp += temp_strs[i][j].ljust(widths[j])+' '*spaces\n strs.append(temp)\n\n else:\n for l in lst:\n strs.append(str(l))\n\n if index:\n index_width=len(str(len(strs)))\n for i in range(len(strs)):\n strs[i] = str(i).rjust(index_width)+':'+' '*spaces + strs[i]\n\n s = '\\n'.join(strs)\n\n if (ret == False):\n print s\n else:\n return s", "def pretty_print(self):\n for dtr in self.dtrs:\n dtr.pretty_print(indent=2)", "def tabulate(items: typing.List[str]):\n rows, columns = find_shape(len(items))\n extra = (rows * columns) - len(items)\n items += [' '] * extra\n items = [\n [f'{items[i][0]}-{items[i + columns - 1][0]}', *items[i:i + columns]]\n for i in range(0, len(items), columns)\n ]\n items = [[column[i] for column in items] for i in range(columns + 1)]\n items = ['| ' + ' | '.join(row) + ' |' for row in items]\n items.insert(1, ('| --- ' * rows) + '|')\n return '\\n'.join(items)", "def prettyTable(self, heads, rows): \n # First calculate the maximum lengths for each column.\n lengths = map(len, heads)\n for row in rows:\n lengths = map(max, lengths, map(len, row))\n\n # Create a format string for the maximum lengths.\n formatString = (\"|{{:^{}}}\" * len(heads) + 
\"|\").format(*lengths)\n\n # Print the heads, then the contents.\n headLine = formatString.format(*heads)\n border = \"-\" * len(headLine)\n print(border)\n print(headLine)\n print(border)\n\n # Remake the format string right-justified.\n formatString = (\"|{{:>{}}}\" * len(heads) + \"|\").format(*lengths)\n for row in rows:\n print(formatString.format(*row))\n print(border)" ]
[ "0.7253442", "0.716318", "0.705893", "0.6965506", "0.6951886", "0.6873508", "0.6842252", "0.6768106", "0.67344517", "0.669688", "0.6666301", "0.66652215", "0.66576207", "0.6651075", "0.662986", "0.65690386", "0.64982414", "0.6490559", "0.64825016", "0.64640856", "0.64574146", "0.6454528", "0.643656", "0.6433023", "0.6404255", "0.6365221", "0.6362512", "0.6354242", "0.6351476", "0.6327567" ]
0.7227835
1
Cli wrapper for handling ForML exceptions.
def cli() -> None:
    try:
        group()  # pylint: disable=no-value-for-parameter
    except forml.AnyError as err:
        print(err, file=sys.stderr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n try:\n args.handler()\n except pymoira.BaseError as err:\n error( err )", "def main(self):\n args = self.parse_cli()\n\n def _resource_error_message(emsg):\n return \"{prog} {subcmd}: {id}: {msg}\".format(\n prog=self.parser.prog, subcmd=args.subcmd, id=args.id, msg=emsg)\n\n try:\n if not hasattr(args, 'func'):\n self.parser.print_usage()\n return 1\n\n self._post_parser_init(args)\n args.func(args)\n return 0\n except UsageException as e:\n eprint(\"{prog} {subcmd}: {msg}\".format(prog=self.parser.prog, subcmd=args.subcmd, msg=e))\n except ConnectionError as e:\n eprint(\"{prog}: Connection error occurred\".format(prog=self.parser.prog))\n except DerivaPathError as e:\n eprint(e)\n except HTTPError as e:\n if e.response.status_code == requests.codes.unauthorized:\n msg = 'Authentication required'\n elif e.response.status_code == requests.codes.forbidden:\n msg = 'Permission denied'\n else:\n msg = e\n logging.debug(format_exception(e))\n eprint(_resource_error_message(msg))\n except ResourceException as e:\n logging.debug(format_exception(e.cause))\n eprint(_resource_error_message(e))\n except RuntimeError as e:\n logging.warning(format_exception(e))\n eprint('Unexpected runtime error occurred')\n except:\n eprint('Unexpected error occurred')\n traceback.print_exc()\n return 1", "def handle_exceptions(self, exception: Exception):\n try:\n raise exception\n\n except clisys.InvalidCommand as e:\n print(f'Invalid option: \"{str(e.command_name)}\" .')\n\n except clisys.InvalidArgument as e:\n print(f'Invalid argument: \"{str(e.argument_name)}\" .')\n\n except clisys.InvalidArgumentCount:\n print('Invalid argument count.')\n\n except ValueError as e:\n print(e)", "def test_with_unknown_args(self):\n with self.assertRaises(ratd.cliargs.CliArgError):\n CliArgs('foomonkey')", "def main() -> None:\n try:\n run()\n except errors.BaseError as e:\n sys.stderr.write(f'{str(e)}\\n')\n sys.exit(e.code)", "def __handle_except(inst):\n return __standardize_result(\n False,\n \"Docker-compose command {} failed\".format(inspect.stack()[1][3]),\n \"{}\".format(inst),\n None,\n )", "def cli() -> None:", "def cli() -> None:", "def err_handler(gui):\n\n try:\n yield\n except (errs.PortError,\n errs.IpError,\n errs.SelectionError,\n errs.ZBrushServerError) as err:\n print err.msg\n gui(err.msg)\n except Exception as err:\n print err\n gui(err)\n finally:\n pass", "def handle_exception(e):\n print(e)\n return error()", "def bad_args(args):\n PARSER.print_help()\n exit(0)", "def StandViz_ReportError( errorobj, args, Header = None ): # error reporting and traceback function\n (MyPath, MyFile) = os.path.split( args[0] ) # retrieve filename and path of running python script\n (MyBaseName, MyExt) = os.path.splitext( MyFile ) # separate basefilename from extension\n errorfilename = \"{}.txt\".format(MyBaseName) # create new error filename based on base of script filename\n ERRFILE = open( errorfilename, 'w' ) # open text file for writting\n if( Header != None ): ERRFILE.write( '%s\\n' % Header ) # if Header defined, write Header to file\n ERRFILE.write( \"Error running '{}'\\n\".format(MyFile) ) # write error message with filename\n MyTrace = errorobj[2] # retrieve error object\n while( MyTrace != None ): # loop through stack trace\n (line, file, name) = ( MyTrace.tb_lineno, MyTrace.tb_frame.f_code.co_filename, MyTrace.tb_frame.f_code.co_name ) # extract line, file, and error name\n F = open( file, 'r' ) # open source file of Python script\n L = F.readlines() # read scripot source into memory\n 
F.close() # close script file\n code = L[line-1].strip() # extract line of source code that caused error\n ERRFILE.write( \" File '{}', line {}, in {}\\n {}\\n\".format(file, line, name, code) ) # write filename, source code line, error name, and error code\n MyTrace = MyTrace.tb_next # step to next level of call stack trace\n ERRFILE.write( \"errorobj: {}\\n\".format(errorobj) ) # write error object and arguments for call\n ERRFILE.write( \"Calling Argument Vector: {}\\n\".format(args) ) # write calling arguments\n ERRFILE.close() # close text file with error stack trace\n os.system( \"notepad.exe {}\".format(errorfilename) ) # display error log file with notepad.exe", "def error_wrapper(f, ctx, *args, **kwargs):\n\n if ctx.raw:\n return f(*args, **kwargs)\n else:\n try:\n return f(*args, **kwargs)\n\n except (OSError, IOError) as e:\n result = {\n 'error_code': ErrorConstants.OS_ERROR.value,\n 'strerror': e.strerror,\n 'errno': e.errno,\n 'filename': e.filename,\n 'msg': str(e)\n }\n\n except GContainerException as e:\n result = {\n 'error_code': e.error_code,\n 'msg': e.message\n }\n\n except Exception as e:\n result = {\n 'msg': e.message\n }\n\n if hasattr(e, 'error_code'):\n result['error_code'] = e.error_code\n else:\n result['error_code'] = ErrorConstants.GENERAL_ERROR.value\n\n res = ctx.format_function(result, ctx=ctx)\n if res is not None:\n click.echo(res)\n\n sys.exit(1)", "def cli(ctx):", "def cli(ctx):", "def handle_err(self):\n pass", "def __call__(self, argv ):\n \n try:\n self.apply( argv )\n return None\n except:\n exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()\n exception_stack = traceback.format_exc(exceptionTraceback)\n exception_name = exceptionType.__module__ + '.' + exceptionType.__name__\n exception_value = str(exceptionValue)\n return (exception_name, exception_value, exception_stack)", "def cli(args): # noqa; pylint: disable=unused-argument", "def main():\n cause_a_bunch_of_exceptions_to_happen()", "def cli(ctx):\n pass", "def cli(ctx):\n pass", "def test_raises_missing_data_error():\n\n for ds_path in ds_list:\n # default is to raise, even if not specified\n with raises((MissingDataException,)):\n sys.argv = shlex.split('neuropredict -e svm -y {} -o {}'.format(ds_path, out_dir))\n run_cli()\n\n # checking explicit raise\n with raises((MissingDataException,)):\n sys.argv = shlex.split('neuropredict -y {} -o {} --impute_strategy raise '\n ''.format(ds_path, out_dir))\n run_cli()", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():" ]
[ "0.60021865", "0.5985003", "0.5935742", "0.5924656", "0.58899945", "0.5881254", "0.5846594", "0.5846594", "0.5834598", "0.58008087", "0.5768631", "0.5730561", "0.57305557", "0.5700776", "0.5700776", "0.5693197", "0.5686788", "0.5674693", "0.5662413", "0.56506747", "0.56506747", "0.5632284", "0.5630377", "0.5630377", "0.5630377", "0.5630377", "0.5630377", "0.5630377", "0.5630377", "0.5630377" ]
0.6522494
0
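A minimal, self-contained sketch of the pattern captured by the record above: a CLI entry point that traps the library's base error and reports it on stderr. `AppError` and the stand-in `group()` are invented for illustration, and the nonzero exit code is an assumption not present in the original snippet:

import sys

class AppError(Exception):
    """Hypothetical stand-in for a library base error such as forml.AnyError."""

def group() -> None:
    # Stand-in command group; raises so the wrapper has something to catch.
    raise AppError("invalid project descriptor")

def cli() -> None:
    try:
        group()
    except AppError as err:
        print(err, file=sys.stderr)
        sys.exit(1)  # assumption: exit nonzero on a handled error

if __name__ == "__main__":
    cli()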
Validates that initial and next state have the same log prob shape.
def assert_log_prob_shape_compliance(self, initial_state, next_state):
    for key in initial_state.as_dict.keys():
        i_value = initial_state.get(key)
        is_i_random_var = isinstance(i_value, ed.RandomVariable)
        n_value = next_state.get(key)
        is_n_random_var = isinstance(n_value, ed.RandomVariable)
        # Either both or neither fields have to be a RV.
        self.assertEqual(is_i_random_var, is_n_random_var)
        if is_i_random_var:
            i_log_prob = i_value.distribution.log_prob(i_value)
            n_log_prob = n_value.distribution.log_prob(n_value)
            # the initial and next state have to have the same log prob shape.
            self.assertAllEqual(i_log_prob.shape, n_log_prob.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_start_probs(self, initial: np.ndarray) -> None:\n if not isinstance(initial, np.ndarray):\n raise TypeError('Initial state distribution must be a numpy.ndarray')\n if not initial.shape == (self.n_states,):\n raise ValueError('Initial state distribution must be of shape (n_states,)')\n if not np.isclose(initial.sum(), 1):\n raise ValueError('Initial state distribution must sum to one')\n return initial", "def test_posterior_logprobs(self):\n x = list(product([True, False], repeat=2))\n xs = list(e for e in product(x, repeat=3))\n all_obs = list(o for o in xs\n if all(any(e) and not all(e) for e in o))\n total = logsumexp(list(posterior_logprobs(np.array(obs), self.S, self.A, self.E)[1]\n for obs in all_obs))\n assert_allclose(total, np.log(1))", "def initial_state(self):\n r = np.full((self.xlen, 2), self.log0, dtype=np.float32)\n r[0, 1] = self.log_probs[0, self.blank]\n for i in range(1, self.xlen):\n r[i, 1] = r[i - 1, 1] + self.log_probs[i, self.blank]\n return r", "def _update_logprobs(self):\n #self._logp_src = self._log_lim(self.p_source)\n self._logp_I0 = self._log_lim(self.p_source)\n self._logp_R0 = self._log_lim(self.p_source/(self.p_rec_div))\n self._logp_S_fin = self._log_lim(self.p_S_fin)\n self._logp_inf_fin = self._log_lim(self.p_infect_fin)", "def log_prob(self):", "def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True", "def _logprob(self, sample):\n return 0, 0", "def validate_log_prob_and_transforms(\n self,\n model,\n sample_transformation_shapes,\n check_ground_truth_mean=False,\n check_ground_truth_mean_standard_error=False,\n check_ground_truth_standard_deviation=False,\n check_ground_truth_standard_deviation_standard_error=False,\n seed=None,\n ):\n batch_size = 16\n\n if seed is None:\n seed = test_util.test_seed(sampler_type='stateless')\n\n def _random_element(shape, dtype, default_event_space_bijector, seed):\n unconstrained_shape = default_event_space_bijector.inverse_event_shape(\n shape)\n unconstrained_shape = tf.TensorShape([batch_size\n ]).concatenate(unconstrained_shape)\n return default_event_space_bijector.forward(\n tf.random.stateless_normal(\n unconstrained_shape, dtype=dtype, seed=seed))\n\n num_seeds = len(tf.nest.flatten(model.dtype))\n flat_seed = tf.unstack(tfp.random.split_seed(seed, num_seeds), axis=0)\n seed = tf.nest.pack_sequence_as(model.dtype, flat_seed)\n\n test_points = tf.nest.map_structure(_random_element, model.event_shape,\n model.dtype,\n model.default_event_space_bijector,\n seed)\n log_prob = self.evaluate(model.unnormalized_log_prob(test_points))\n\n self.assertAllFinite(log_prob)\n self.assertEqual((batch_size,), log_prob.shape)\n\n for name, sample_transformation in model.sample_transformations.items():\n transformed_points = self.evaluate(sample_transformation(test_points))\n\n def _assertions_part(expected_shape, expected_dtype, transformed_part):\n self.assertAllFinite(transformed_part)\n self.assertEqual(\n (batch_size,) + tuple(expected_shape),\n tuple(list(transformed_part.shape)))\n self.assertEqual(expected_dtype, transformed_part.dtype)\n\n self.assertAllAssertsNested(\n _assertions_part,\n sample_transformation_shapes[name],\n sample_transformation.dtype,\n 
transformed_points,\n shallow=transformed_points,\n msg='Checking outputs of: {}'.format(name))\n\n def _ground_truth_shape_check_part(expected_shape, ground_truth):\n self.assertEqual(\n tuple(expected_shape),\n tuple(ground_truth.shape))\n\n if check_ground_truth_mean:\n self.assertAllAssertsNested(\n _ground_truth_shape_check_part,\n sample_transformation_shapes[name],\n sample_transformation.ground_truth_mean,\n shallow=transformed_points,\n msg='Checking ground truth mean of: {}'.format(name))\n\n if check_ground_truth_mean_standard_error:\n self.assertAllAssertsNested(\n _ground_truth_shape_check_part,\n sample_transformation_shapes[name],\n sample_transformation.ground_truth_mean_standard_error,\n shallow=transformed_points,\n msg='Checking ground truth mean standard error: {}'.format(name))\n\n if check_ground_truth_standard_deviation:\n self.assertAllAssertsNested(\n _ground_truth_shape_check_part,\n sample_transformation_shapes[name],\n sample_transformation.ground_truth_standard_deviation,\n shallow=transformed_points,\n msg='Checking ground truth standard deviation: {}'.format(name))\n\n if check_ground_truth_standard_deviation_standard_error:\n self.assertAllAssertsNested(\n _ground_truth_shape_check_part,\n sample_transformation_shapes[name],\n sample_transformation\n .ground_truth_standard_deviation_standard_error,\n shallow=transformed_points,\n msg='Checking ground truth standard deviation strandard error: {}'\n .format(name))", "def log_check(w_in: np.ndarray, w_log: np.ndarray) -> None:\n w_log[:] = np.nan\n\n if np.isnan(w_in).any():\n return\n\n if np.any(w_in <= 0):\n return\n\n w_log[:] = np.log(w_in[:])", "def check_prior_batch_behavior(prior):\n\n # Check for correct batch size in .sample and .log_prob\n num_samples = 1\n theta = prior.sample((num_samples,))\n log_probs = prior.log_prob(theta)\n\n assert (\n len(theta.shape) >= 2\n ), f\"\"\"A parameter batch sampled from the prior must be at least 2D, \n (num_samples, parameter_dim), but is {len(theta.shape)}\"\"\"\n\n num_sampled, parameter_dim = theta.shape\n # Using len here because log_prob could be np.ndarray or torch.Tensor\n num_log_probs = len(log_probs)\n\n assert (\n num_sampled == num_samples\n ), \"prior.sample((batch_size, )) must return batch_size parameters.\"\n\n assert (\n num_log_probs == num_samples\n ), \"prior.log_prob must return as many log probs as samples.\"", "def statePosteriors(log_alpha, log_beta):", "def test_validate_self_nonfinal_initial_state(self):\n with nose.assert_raises(exceptions.InitialStateError):\n self.dtm1.final_states.add('q0')\n self.dtm1.validate_self()", "def _check_log_params(self):\n steps_per_stats = self.configs['steps_per_stats']\n if not steps_per_stats or steps_per_stats < 0:\n steps_per_stats = 100\n steps_per_eval = self.configs['steps_per_eval']\n if not steps_per_eval:\n steps_per_eval = 10 * steps_per_stats\n steps_per_external_eval = self.configs['steps_per_external_eval']\n if not steps_per_external_eval:\n steps_per_external_eval = 5 * steps_per_eval\n self.configs['steps_per_stats'] = steps_per_stats\n self.configs['steps_per_eval'] = steps_per_eval\n self.configs['steps_per_external_eval'] = steps_per_external_eval", "def MH_step(log_like, log_prior, model_func, prop_params, curr_params,\\\n curr_like, curr_prior, max_like, maxL_params):\n # proposed model:\n prop_model = model_func(prop_params)\n prop_like = log_like(prop_model)\n prop_prior = log_prior(prop_params)\n\n # posterior:\n post_old = curr_like + curr_prior\n post_new = prop_like + 
prop_prior\n \n # acceptance testing:\n a = np.exp(post_new - post_old)\n draw = np.random.uniform(0, 1)\n \n if (a > draw) and (a < np.inf):\n accept = True\n curr_params = prop_params\n #print(curr_like, max_like)\n if prop_like > max_like:\n max_like = prop_like\n maxL_params = curr_params\n else:\n accept = False\n curr_params = curr_params\n \n return(accept, curr_params, maxL_params, max_like)", "def _validate_parameters(self, epochs, log_interval):\n\n if not epochs > 0:\n msg = (\n \"The number of training epochs = {} should be strictly\"\n \" positive.\"\n )\n self.logger.error(msg.format(epochs))\n raise ValueError(msg.format(epochs))\n\n if not log_interval > 0:\n msg = (\n \"The number of batches to wait before printting the\"\n \" training status should be strictly positive, but got {}\"\n \" instead.\"\n )\n self.logger.error(msg.format(log_interval))\n raise ValueError(msg.format(log_interval))\n\n if not 0 < self.shrinkage_rate <= 1:\n msg = (\n \"The shrinkage rate should be in the range (0, 1], but got\"\n \" {} instead.\"\n )\n self.logger.error(msg.format(self.shrinkage_rate))\n raise ValueError(msg.format(self.shrinkage_rate))", "def log_pseudo_joint(self, data: Tensor, states: Tensor) -> Tensor: # type: ignore\n K = states\n Y = data\n assert K.dtype == to.uint8 and Y.dtype == to.uint8\n pi = self.theta[\"pies\"]\n W = self.theta[\"W\"]\n batch_size, S, H = K.shape\n D = W.shape[0]\n dev = pi.device\n\n logPriors = to.matmul(K.type_as(pi), to.log(pi / (1 - pi)))\n\n logPy = to.empty((batch_size, S), device=dev, dtype=self.precision)\n # We will manually set the lpjs of all-zero states to the appropriate value.\n # For now, transform all-zero states in all-one states, to avoid computation of log(0).\n zeroStatesInd = to.nonzero((K == 0).all(dim=2))\n # https://discuss.pytorch.org/t/use-torch-nonzero-as-index/33218\n zeroStatesInd = (zeroStatesInd[:, 0], zeroStatesInd[:, 1])\n K[zeroStatesInd] = 1\n # prods_nsd = prod{h}{1-W_dh*K_nkh}\n prods = (W * K.type_as(W).unsqueeze(2)).neg_().add_(1).prod(dim=-1)\n to.clamp(prods, self.eps, 1 - self.eps, out=prods)\n # logPy_nk = sum{d}{y_nd*log(1/prods_nkd - 1) + log(prods_nkd)}\n f1 = to.log(1.0 / prods - 1.0)\n indeces = 1 - Y[:, None, :].expand(batch_size, S, D)\n # convert to BoolTensor in pytorch>=1.2, leave it as ByteTensor in earlier versions\n indeces = indeces.type_as(to.empty(0) < 0)\n f1[indeces] = 0.0\n logPy[:, :] = to.sum(f1, dim=-1) + to.sum(to.log(prods), dim=2)\n K[zeroStatesInd] = 0\n\n lpj = logPriors + logPy\n # for all-zero states, set lpj to arbitrary very low value if y!=0, 0 otherwise\n # in the end we want exp(lpj(y,s=0)) = 1 if y=0, 0 otherwise\n lpj[zeroStatesInd] = -1e30 * data[zeroStatesInd[0]].any(dim=1).type_as(lpj)\n assert (\n not to.isnan(lpj).any() and not to.isinf(lpj).any()\n ), \"some NoisyOR lpj values are invalid!\"\n return lpj.to(device=states.device) # (N, S)", "def calc_errors_from_state(problem, state, nshown=50, random=True):\n points, _logp = state.sample()\n if points.shape[0] < nshown: nshown = points.shape[0]\n # randomize the draw; skip the last point since state.keep_best() put\n # the best point at the end.\n if random: points = points[numpy.random.permutation(len(points)-1)]\n return calc_errors(problem, points[-nshown:-1])", "def _check_params(self):\n if self.k_initial <= 0 :\n raise ValueError('Initial K should be 1 or more.')", "def check_param(self):\n if scipy.ndim(self.param['initial_heading'].shape) > 1:\n raise(ValueError, 'initial_heading must have ndim=1')\n\n 
equal_shape_list = ['x_start_position','y_start_position','flight_speed','release_time']\n for item in equal_shape_list:\n if self.param[item].shape != self.param['initial_heading'].shape:\n raise(ValueError, '{0}.shape must equal initial_heading.shape'.format(item))", "def logprob(self):\n assert len(self._added_rows) == self._num_rows", "def viterbi(p_observations_given_state, p_transition, p_initial):\n p_observations_given_state = numpy.asarray(p_observations_given_state)\n p_transition = numpy.asarray(p_transition)\n p_initial = numpy.asarray(p_initial)\n N, S = p_observations_given_state.shape\n assert p_transition.shape in {(S, S), (N-1, S, S)}\n if p_transition.shape == (S, S):\n p_transition = numpy.array([p_transition for i in range(N-1)])\n assert numpy.allclose(numpy.sum(p_transition, axis=2), 1)\n assert p_initial.shape == (S,)\n assert numpy.allclose(numpy.sum(p_initial), 1)\n\n # convert all probabilities to log probabilities so we can sum instead of\n # multiplying, which better controls numerical error.\n err = numpy.seterr(divide='ignore') # allow log(0) to go to -inf, as desired\n lp_observations_given_state = numpy.log(p_observations_given_state)\n lp_transition = numpy.log(p_transition)\n lp_initial = numpy.log(p_initial)\n numpy.seterr(**err)\n\n states = numpy.arange(S)\n # path[i] always contains the maximum likelihood sequence of states ending at state i\n path = [[i] for i in states]\n # lp_state contains the current log probability of being in the state given the sequence\n # of observations thus far considered.\n lp_state = lp_observations_given_state[0] + lp_initial\n\n for lp_obs, lp_trans in zip(lp_observations_given_state[1:], lp_transition):\n # For each observation after the first timepoint, construct an (S, S)\n # shape array where [si, sj] contains the log probability of going from\n # state si to state sj between time t and t+1.\n # Assume we know for each state si prob(si at time t), the probability\n # of being in that state at that time, then we can calculate the probability\n # of being in any given state sj at time t+1:\n # prob(transition from si at time t to sj at time t+1) = prob(si at t) *\n # prob(si->sj between t and t+1) *\n # prob(observation at t+1 given state sj)\n # prob(j at time t+1) = max_i(prob(i at time t -> j at time t+1))\n #\n # Thus we merely need to keep updating our estimates for the probability\n # of being in each state at each time, and keep a list of the path that\n # lead to each state.\n #\n # The actual code in use is 100% equivalent to the code below; however it\n # is rather more efficient.\n #\n # lp_transition_t = numpy.zeros((s, s), dtype=float)\n # new_path = []\n # lp_state = []\n # for s_to in states:\n # best_from_lp = -numpy.inf\n # for s_from in states:\n # lp_transition_t[s_from, s_to] = lp_state[s_from] + lp_trans[s_from, s_to] + lp_obs[s_to]\n # if lp_transition_t[s_from, s_to] > best_from_lp:\n # best_from = s_from\n # best_from_lp = lp_transition_t[s_from, s_to]\n # lp_state.append(best_from_lp)\n # new_path.append(path[best_from] + [s_to])\n # path = new_path\n lp_transition_t = lp_state[:,numpy.newaxis] + lp_trans + lp_obs[numpy.newaxis,:]\n best_from = numpy.argmax(lp_transition_t, axis=0)\n path = [path[s_from]+[s_to] for s_to, s_from in enumerate(best_from)]\n lp_state = lp_transition_t[best_from, states]\n last_state = numpy.argmax(lp_state)\n return numpy.array(path[last_state])", "def log_prob(self, x):\n n_samples = x.shape[0].value\n log_prob = 0\n for i in range(self.time_steps):\n obs_dim = 
self.transition.dim_x\n # Current time step latent state and observation\n x_t = tf.slice(x, [0, i * obs_dim], [-1, obs_dim])\n if i == 0:\n log_prob += tf.reduce_sum(\n self.init_state_p.log_prob(x_t), axis=1)\n else:\n log_prob += self.transition.log_prob(x_t, x_tminus)\n # Update the previous latent state for next iteration\n x_tminus = x_t\n return log_prob", "def initialize_state(self):\n accepted = False\n while not accepted:\n self.state = self.net.sample(self.evidence)\n accepted = self.net.log_probability(self.state) != utils.LOG_PROB_0", "def check_state(self):\n if not self.__is_valid:\n raise GmParamError(\"Parameters of the model has not been\"\\\n \"set yet, please set them using self.set_param()\")\n\n # Check condition number for cov matrix\n if self.mode == 'diag':\n tinfo = N.finfo(self.va.dtype)\n if N.any(self.va < tinfo.eps):\n raise GmParamError(\"variances are singular\")\n elif self.mode == 'full':\n try:\n d = self.d\n for i in range(self.k):\n N.linalg.cholesky(self.va[i*d:i*d+d, :])\n except N.linalg.LinAlgError:\n raise GmParamError(\"matrix %d is singular \" % i)\n\n else:\n raise GmParamError(\"Unknown mode\")\n\n return True", "def on_train_begin(self, logs={}):\n self.losses = []\n self.val_losses = []", "def validate(cls, params):\n if np.isnan(params['loc']).sum():\n raise InvalidParamsError(\n \"Real location (mu) values are required for\"\n \" lognormal uncertainties.\"\n )\n if np.isnan(params['scale']).sum() or (params['scale'] <= 0).sum():\n raise InvalidParamsError(\n \"Real, positive scale (sigma) values are required for\"\n \" lognormal uncertainties.\"\n )", "def _train(self, log_prob):\n raise NotImplementedError", "def test_validate_self_invalid_final_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.dtm1.final_states = {'q5'}\n self.dtm1.validate_self()", "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def test_logging(train_logger, valid_logger):\n\n global_step = 0\n acc_list = []\n acc_val_list = []\n #acc_list = np.array([])\n #acc_val_list = np.array([])\n # This is a strongly simplified training loop\n for epoch in range(10):\n torch.manual_seed(epoch)\n for iteration in range(20):\n dummy_train_loss = 0.9**(epoch+iteration/20.)\n dummy_train_accuracy = epoch/10. + torch.randn(10)\n \n #log the training loss\n train_logger.add_scalar('loss', dummy_train_loss, global_step = global_step)\n global_step += 1\n\n #append the training accuracy to a list\n acc_list.append(dummy_train_accuracy)\n \n #take the average of the training accuract \n #average = torch.mean(torch.stack(acc_list))\n #log the taining accuracy\n acc_new = [x.cpu().detach().numpy() for x in acc_list]\n train_logger.add_scalar('accuracy', np.mean(acc_new), global_step = global_step)\n \n torch.manual_seed(epoch)\n for iteration in range(10):\n dummy_validation_accuracy = epoch / 10. + torch.randn(10)\n \n #append the accuracy to a list\n acc_val_list.append(dummy_validation_accuracy)\n \n #take the average and log the accuracy\n averageValid = torch.mean(torch.stack(acc_val_list))\n valid_logger.add_scalar('accuracy', averageValid, global_step = global_step)" ]
[ "0.5958672", "0.58740187", "0.58280957", "0.5819362", "0.5771447", "0.56955814", "0.5686055", "0.56681377", "0.56341374", "0.56278366", "0.5553349", "0.548919", "0.5487075", "0.5450516", "0.5438275", "0.54236823", "0.53835535", "0.53824335", "0.5376346", "0.535134", "0.53166044", "0.53124243", "0.52811766", "0.52176934", "0.51705235", "0.5146061", "0.5138471", "0.5137847", "0.51374215", "0.51333" ]
0.8014728
0
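A standalone sketch of the check performed by the record above, using scipy.stats in place of Edward2 random variables (an assumption made so the example runs without TensorFlow); the invariant is the same: log-probabilities evaluated at the initial and the next state must share a shape.

import numpy as np
from scipy import stats

def assert_same_log_prob_shape(initial_sample, next_sample, dist=stats.norm()):
    # Hypothetical analogue of the test helper above.
    i_log_prob = dist.logpdf(initial_sample)
    n_log_prob = dist.logpdf(next_sample)
    assert i_log_prob.shape == n_log_prob.shape, (i_log_prob.shape, n_log_prob.shape)

assert_same_log_prob_shape(np.zeros((3, 2)), np.ones((3, 2)))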
Implements generic MGF1 Mask Generation function as described in Appendix B.2.1 of RFC 3447. The hash function is passed by name. valid values are 'md2', 'md4', 'md5', 'sha1', 'tls', 'sha256', 'sha384' and 'sha512'. Returns None on error.
def pkcs_mgf1(mgfSeed, maskLen, h):
    # steps are those of Appendix B.2.1
    if not _hashFuncParams.has_key(h):
        warning("pkcs_mgf1: invalid hash (%s) provided" % h)
        return None
    hLen = _hashFuncParams[h][0]
    hFunc = _hashFuncParams[h][2]
    if maskLen > 2**32 * hLen:                               # 1)
        warning("pkcs_mgf1: maskLen > 2**32 * hLen")
        return None
    T = ""                                                   # 2)
    maxCounter = math.ceil(float(maskLen) / float(hLen))     # 3)
    counter = 0
    while counter < maxCounter:
        C = pkcs_i2osp(counter, 4)
        T += hFunc(mgfSeed + C)
        counter += 1
    return T[:maskLen]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _hash_function(self, n):\n # Get the mask for this n, or make a new one of 32 random bits.\n mask = self._memomask.get(n)\n if mask is None:\n random.seed(n ^ self.seed_mask)\n mask = self._memomask[n] = int(random.getrandbits(32))\n # Now return a function that uses Jenkins Hash\n #\n def somehash(x):\n return hashlittle(x, mask)\n return somehash", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def fnv1(self, key, seed=0):\n # def fnv1(self, key):\n\n # Your code here\n \"\"\"\n Returns: The FNV-1 hash (64-bit) of a given string. \n \"\"\"\n #Constants : Fails the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a Hash Function\n # hash = offset_basis + seed\n # # hash = offset_basis\n # for c in key:\n # hash = hash * FNV_prime\n # hash = hash ^ ord(c)\n # return hash\n\n \"\"\"\n Returns: The FNV-1a (alternate) hash of a given string\n \"\"\"\n # #Constants : Passes the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a alternate Hash Function\n # hash = offset_basis + seed\n # for c in key:\n # hash = hash ^ ord(c)\n # hash = hash * FNV_prime\n # return hash", "def FNV1Hash(filename):\n \n FNV1_32_INIT = 0x811c9dc5\n FNV1_PRIME_32 = 16777619\n\n lowerName = filename.lower()\n \n _hash = FNV1_32_INIT\n uint32_max = 2 ** 32\n \n for c in lowerName:\n _hash = (_hash * FNV1_PRIME_32) % uint32_max\n _hash = _hash ^ ord(c)\n return format(_hash, 'x')", "def checksum_from_sha1(value):\n # More constrained regex at lexer level\n CHECKSUM_RE = re.compile('SHA1:\\\\s*([\\\\S]+)', re.UNICODE)\n match = CHECKSUM_RE.match(value)\n if match:\n return checksum.Algorithm(identifier='SHA1', value=match.group(1))\n else:\n return None", "def my_md5(inp):\n # https://en.wikipedia.org/wiki/MD5#Pseudocode\n global s, K # `s` and `K` are global\n\n # Initialize variables\n a0 = 0x67452301 # A\n b0 = 0xefcdab89 # B\n c0 = 0x98badcfe # C\n d0 = 0x10325476 # D\n\n # Convert input string to bit string\n msg = ''.join(f'{ord(i):08b}' for i in inp)\n\n # append \"1\" bit to message\n msg += '1'\n\n # append \"0\" bit until message length in bits = 448 (mod 512)\n msg += '0'*(448 - len(msg))\n\n # append original length in bits mod 2**64 to message\n msg += '{0:064b}'.format(ch_endian64(len(inp)*8))\n\n assert len(msg) == 512\n\n # Process the message in successive 512-bit chunks:\n # for each 512-bit chunk of padded message do\n # break chunk into sixteen 32-bit words M[j], 0 <= j <= 15\n #\n # ~> We have 1 chunk, so no need for that\n\n # Initialize hash value for this chunk:\n A, B, C, D = a0, b0, c0, d0 \n b_values = []\n\n # Main loop:\n for i in range(64):\n if 0 <= i and i <= 15:\n F = (B & C) | (~B & D)\n g = i\n elif 16 <= i and i <= 31:\n F = (D & B) | (~D & C)\n g = (5*i + 1) % 16\n elif 32 <= i and i <= 47:\n F = B ^ C ^ D\n g = (3*i + 5) % 16\n elif 48 <= i <= 63:\n F = C ^ (B | ~D)\n g = (7*i) % 16\n\n F &= 0xFFFFFFFF\n\n inp_chunk = ch_endian(int(msg[32*g:32*g + 32], 2))\n\n # Be wary of the below definitions of a,b,c,d\n F = (F + A + K[i] + inp_chunk) & 0xFFFFFFFF # M[g] must be a 32-bits block\n A = D\n D = C\n C = B\n B = (B + rol(F, s[i])) & 0xFFFFFFFF\n\n print(f'{i:2d}: A:{A:08X}, B:{B:08X}, C:{C:08X}, D:{D:08X} ~> g:{g} $ {inp_chunk:08X} $ X:{B & 0x3FF:03X}')\n\n b_values.append(B & 0x3FF) # Get the leak.\n\n # Add this chunk's hash to result so far:\n a0 = (a0 + A) & 0xFFFFFFFF\n b0 = (b0 + B) & 0xFFFFFFFF\n c0 = (c0 + C) & 0xFFFFFFFF\n d0 = (d0 + D) & 0xFFFFFFFF\n # end for\n\n a0 = ch_endian(a0)\n b0 = 
ch_endian(b0)\n c0 = ch_endian(c0)\n d0 = ch_endian(d0)\n\n print(f'{a0:08X}-{b0:08X}-{c0:08X}-{d0:08X}')\n \n # var char digest[16] := a0 append b0 append c0 append d0 // (Output is in little-endian)\n print(f'{a0:08x}{b0:08x}{c0:08x}{d0:08x}')\n\n return b_values", "def MD5(self) -> _n_0_t_3[_n_0_t_9]:", "def h1(data1=None, data2=None, data3=None, data4=None, data5=None, data6=None):\n\n hsh = SHA512.new()\n hsh.update(b\"4\")\n hsh.update(data1)\n hsh.update(data2)\n hsh.update(data3)\n hsh.update(data4)\n hsh.update(data5)\n hsh.update(data6)\n return hsh", "def _compute_hal9000_md5(observable: Observable) -> str:\n md5_hasher = md5()\n md5_hasher.update(observable.type.encode('utf-8', errors='ignore'))\n md5_hasher.update(observable.value.encode('utf-8', errors='ignore'))\n return md5_hasher.hexdigest()", "def __md5_hash(txt) -> str:\n\n return md5_crypt.hash(txt)", "def fnv1(self, key):\n # hash = 0xff\n hash = 0xcbf29ce484222325\n for n in key.encode():\n # print(n)\n hash = hash ^ n\n hash = hash * 0x100000001b3\n\n # print(hash)\n return hash", "def convert_to_premis_hash_function(hash_type):\n if hash_type.lower().startswith(\"sha\") and \"-\" not in hash_type:\n hash_type = \"SHA-\" + hash_type.upper()[3:]\n elif hash_type.lower() == \"md5\":\n return \"MD5\"\n\n return hash_type", "def getHashLfn(lfn):\n return hashlib.sha224(lfn).hexdigest()", "def __hash_md5__(self, text):\n key = hashlib.md5()\n key.update(text.encode('utf-8'))\n return key.digest()", "def get_md5(text):\n return hashlib.md5(text).hexdigest()", "def _hash_name(self, name, length=None):\n if not length:\n length = self.header_size\n hashed = name[:min(length, len(name))]\n for x in range(length, len(name), length):\n rem = min(x+length,len(name))-x\n for i in range(rem):\n hashed = hashed[:i] + chr(ord(name[x + i]) ^ ord(hashed[i])) + hashed[i+1:]\n if len(hashed) < length:\n hashed += '\\x00' * (length-len(hashed))\n return hashed", "def sdbm_hash(name):\n ret = 0\n for ii in name:\n ret = (ret * 65599 + ord(ii)) & 0xFFFFFFFF\n return hex(ret)", "def get_md5(string):\r\n byte_string = string.encode(\"utf-8\")\r\n md5 = hashlib.md5()\r\n md5.update(byte_string)\r\n result = md5.hexdigest()\r\n return 'M'+result", "def HashAlgorithm(self) -> _n_7_t_0:", "def compute_gzip_md5(fqfn):\n md5 = hashlib.md5()\n file_obj = gzip.open(fqfn, 'rb')\n for chunk in iter(lambda: file_obj.read(8192), ''):\n md5.update(chunk)\n\n file_obj.close()\n return md5.hexdigest()", "def getmd5(image: Image):\n return hashlib.md5(image.tobytes()).hexdigest()", "def detect_md5hash(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"detect_md5hash\")", "def detect_md5hash(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"detect_md5hash\")", "def _hash_function(self, x):\n return hashlib.sha1(x).hexdigest()", "def default_md5(key: KeyT, *args, **kwargs) -> bytes:\n return md5(key).digest() # type: ignore", "def hash_functions(self):\n def hash_factory(n):\n return lambda x: hash(\"salt\" + str(n) + str(x) + \"salt\")\n return [ hash_factory(_) for _ in range(self.dim) ]", "def pool_hash(path_list):\n return pool_process(md5_tuple, path_list, 'MD5 hashing')", "def get_md5(f: BinaryIO) -> str:\n BLOCKSIZE = 65536\n hasher = hashlib.md5()\n buf = f.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = f.read(BLOCKSIZE)\n return hasher.hexdigest()", "def _sample_using_md5(\n self,\n column_name: str,\n hash_digits: int = 1,\n hash_value: str = \"f\",\n ):\n return (\n sa.func.right(\n 
sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits\n )\n == hash_value\n )", "def pre_compute_hashes(s, M1, M2, X):\n n = len(s)\n h1 = [0 for _ in range(n+1)]\n h2 = [0 for _ in range(n+1)]\n for i in range(1, n+1):\n ch = ord(s[i-1])\n h1[i] = (X*h1[i-1] + ch) % M1\n h2[i] = (X*h2[i-1] + ch) % M2\n return h1, h2" ]
[ "0.5720204", "0.5584942", "0.5408497", "0.51387376", "0.5122226", "0.5117155", "0.51049674", "0.506334", "0.50571454", "0.501058", "0.49770173", "0.49719885", "0.49459144", "0.4913112", "0.49088615", "0.49074224", "0.48908415", "0.48510462", "0.48508447", "0.484109", "0.48351482", "0.48287296", "0.48287296", "0.48277074", "0.48211858", "0.48122966", "0.48106056", "0.47730252", "0.476722", "0.47596198" ]
0.75723803
0
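For reference, a Python 3 restatement of the MGF1 loop in the document above, using hashlib in place of the `_hashFuncParams` table; the `hash_name` default and the byte-level I2OSP via `int.to_bytes` are assumptions of this sketch, not part of the original code.

import hashlib
import math

def mgf1(seed: bytes, mask_len: int, hash_name: str = "sha1") -> bytes:
    h_len = hashlib.new(hash_name).digest_size
    if mask_len > (2 ** 32) * h_len:
        raise ValueError("mask too long")
    T = b""
    for counter in range(math.ceil(mask_len / h_len)):
        C = counter.to_bytes(4, "big")   # I2OSP(counter, 4)
        T += hashlib.new(hash_name, seed + C).digest()
    return T[:mask_len]

assert len(mgf1(b"seed", 38)) == 38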
Implements EMSA-PSS-ENCODE() function described in Sect. 9.1.1 of RFC 3447
def pkcs_emsa_pss_encode(M, emBits, h, mgf, sLen):
    # 1) is not done
    hLen = _hashFuncParams[h][0]                   # 2)
    hFunc = _hashFuncParams[h][2]
    mHash = hFunc(M)
    emLen = int(math.ceil(emBits/8.))
    if emLen < hLen + sLen + 2:                    # 3)
        warning("encoding error (emLen < hLen + sLen + 2)")
        return None
    salt = randstring(sLen)                        # 4)
    MPrime = '\x00'*8 + mHash + salt               # 5)
    H = hFunc(MPrime)                              # 6)
    PS = '\x00'*(emLen - sLen - hLen - 2)          # 7)
    DB = PS + '\x01' + salt                        # 8)
    dbMask = mgf(H, emLen - hLen - 1)              # 9)
    maskedDB = strxor(DB, dbMask)                  # 10)
    l = (8*emLen - emBits)/8                       # 11)
    rem = 8*emLen - emBits - 8*l                   # additionnal bits
    andMask = l*'\x00'
    if rem:
        j = chr(reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem))))
        andMask += j
        l += 1
    maskedDB = strand(maskedDB[:l], andMask) + maskedDB[l:]
    EM = maskedDB + H + '\xbc'                     # 12)
    return EM                                      # 13)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_email(email, key):\n return", "def test_encode():\n enig = Enigma(534, 16, 8, [4, 6, 0, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5])\n string = \"\"\"Hello, this is a test string. I will follow this with a return\nbringing it onto a new line. I can do this forever, but I won't. Just\nfor a while.\"\"\"\n encoded = \"\"\"-)m>&)IKp[1`Sro$82[@_`TV&`f%}|<]a1R*\\W4IEb6j@+':`R[.(1$vV4rTJ2\n6V?5.;8q r%0p@+[Ir7-?rzIl;nV<4W7,PD[5-?;RE+~vR5-`i}>=z@S \"eJ`8g:S:1ir\nE0=<F0~/;6).\"\"\"\n\n assert_equal(encoded, enig.encode(string))\n\n endsettings = [5, 2, 2, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5]\n assert_equal(endsettings, enig.getrotsettings())", "def encrypt_message(message,public_key,symetric_key):\n\tif message != None:\n\t\tnonce = os.urandom(12)\n\t\tmessage = AESCCM(symetric_key).encrypt(nonce,message.encode(\"iso-8859-1\"),None)\n\t\tnonce, *_ = encrypt(public_key,nonce)\n\t\tmessage ={'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\n\treturn message", "def encode_affine(msg, a, b):\n \n #Code to numbers\n encoded_message = [ RVALUES[(a * VALUES[i] + b) % 26] for i in msg ]\n \n return ''.join(encoded_message)", "def encode(self, strs):", "def encode(self, strs):", "def pkcs_emsa_pkcs1_v1_5_encode(M, emLen, h): # section 9.2 of RFC 3447\n hLen = _hashFuncParams[h][0] # 1)\n hFunc = _hashFuncParams[h][2]\n H = hFunc(M)\n hLeadingDigestInfo = _hashFuncParams[h][3] # 2)\n T = hLeadingDigestInfo + H\n tLen = len(T)\n if emLen < tLen + 11: # 3)\n warning(\"pkcs_emsa_pkcs1_v1_5_encode:\"\n \"intended encoded message length too short\")\n return None\n PS = '\\xff'*(emLen - tLen - 3) # 4)\n EM = '\\x00' + '\\x01' + PS + '\\x00' + T # 5)\n return EM # 6)", "def encode(self):\n self.preprocess_msg()\n self._find_e()\n\n self.__encoded_msg = self._calculate(self.e)", "def _encode_message(message):\n aes_key = get_settings()['aes_key'].encode('utf-8')\n hmac_key = get_settings()['hmac_key'].encode('utf-8')\n\n pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(\n AES.block_size - len(s) % AES.block_size)\n init_vector = Random.new().read(AES.block_size)\n cipher = AES.new(aes_key, AES.MODE_CBC, init_vector)\n padded_message = pad(message)\n aes_message = init_vector + cipher.encrypt(padded_message)\n hmac_digest = hmac.new(bytes(hmac_key), bytes(aes_message), hashlib.sha1)\n\n return aes_message, hmac_digest", "def encode(self, text):", "def _transform_message(self, message):\n serialized = ev_envelope.serialize_envelope(message)\n return encodeutils.safe_encode(serialized, 'utf-8')", "def encodeMessage(self, key, message):\n # Make sure pure ascii, and replace bullshit\n message = message.encode('ascii', 'replace')\n # Any message needs to be a multiple of 8.\n dsize = len(message)\n #message = struct.pack('!q', int(binascii.crc32(message)& 0xffffffff)) + struct.pack('!q', dsize) + message\n # Encode two dsize parity blocks and check them against eachother on the receiving end\n message = struct.pack('!q', dsize) + struct.pack('!q', dsize) + message\n key = self.createKey(key)\n short = (math.ceil(len(message)/8.0) * 8) - len(message)\n space = \"\\x00\" * int(short)\n enc = DES.new(key, DES.MODE_ECB)\n return enc.encrypt(message+space)", "def encode_message(self, message):\n\n self.validate_machine_config()\n self.validate_message(message)\n encoded_message = ''\n\n for char in message:\n encoded_message += self.encode_char(char)\n\n return encoded_message", "def doEncode(self):\n raise CipherError(\"override this func and return the 
encoded msg\")", "def encode(self, strs):\n encoded_str = \"\"\n for s in strs:\n encoded_str += \"%0*x\" % (8, len(s)) + s\n return encoded_str", "def encode(self, message):\n return message.encode()", "def encode(self, seq):", "def encode_data ( data ) :\n firstpass = base64.b64encode( data )\n cipher = get_cipher( firstpass )\n\n index = 0\n datalen = len( firstpass )\n encoded_data = []\n while index < datalen :\n if index % 2 == 0 :\n encoded_data.append( chr( ord( firstpass[ index ] ) + cipher ) )\n else :\n encoded_data.append( chr( ord( firstpass[ index ] ) - cipher ) )\n index += 1\n\n encoded_data[ 0 ] = firstpass[ 0 ]\n encoded_data[ -1 ] = firstpass[ -1 ]\n encoded_data[ -2 ] = firstpass[ -2 ]\n return ''.join( encoded_data )", "def encryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n encoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code+key\n new = chr(change)\n string += new\n key += key_increment\n \n encoded = ''.join(string)\n return ('Encoded Message:\\t' + encoded)", "def bech32_encode(hrp, data):\n combined = data + bech32_create_checksum(hrp, data)\n return hrp + '1' + ''.join([CHARSET[d] for d in combined])", "def encode(rosMsg): #@NoSelf", "def testEncode(self):\n test_cases = [\n ('\\n', '1\\n'),\n (' ', '1 '),\n ('aaabbb', '3a 3b'),\n ('a b', '1a 1 1b'),\n ('\\n\\n\\n', '3\\n'),\n ('122333', '11 22 33'),\n ('aaaaaaaaaa', '10a'),\n ('aaaaaaaaaabbbbbbbbbbb', '10a 11b'),\n ('a'*1001, '1001a'),\n (''.join(['a'*1001, 'b'*909, 'c'*65, 'd'*2]), '1001a 909b 65c 2d'),\n ]\n for data, expected in test_cases:\n encoded_result = ASCIITransportFormat.encode_data(data)\n self.assertEqual(encoded_result, expected)", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def strEnc(data, *keys):\n r = len(data) % 4\n data += (4 - r if r else 0) * chr(0)\n encData = \"\"\n for i in range(len(data) // 4):\n tempBt = strToBt(data[i * 4: i * 4 + 4])\n for key in keys:\n for b in getKeyBytes(key):\n tempBt = enc(tempBt, b)\n encData += bt64ToHex(tempBt)\n return encData", "def __encrypt(self, plaintext):\n iv = get_random_bytes(16)\n try:\n encryption_envelope = {'ciphertext':'', \n 'keyid':esn_manifest + '_' + str(self.sequence_number), 'sha256':'AA==', \n 'iv':base64.standard_b64encode(iv).decode('utf-8')}\n except Exception:\n print('ESN is invalid.')\n sys.exit(0)\n\n plaintext = Padding.pad(plaintext.encode('utf-8'), 16)\n cipher = AES.new(self.encryption_key, AES.MODE_CBC, iv)\n ciphertext = cipher.encrypt(plaintext)\n encryption_envelope['ciphertext'] = base64.standard_b64encode(ciphertext).decode('utf-8')\n return json.dumps(encryption_envelope)", "def _encode_supplement(self):", "def _encode_supplement(self):", "def encode(self, decoded):", "def test_encode():", "def test_encode(self):\n pass # TODO(tlarsen)" ]
[ "0.6101852", "0.60834146", "0.6082749", "0.60545015", "0.5945272", "0.5945272", "0.5905637", "0.5859622", "0.5806776", "0.58012116", "0.57811916", "0.57620645", "0.5729991", "0.5668424", "0.56242007", "0.55970365", "0.55773354", "0.55689085", "0.5524341", "0.5515865", "0.5503759", "0.5502754", "0.5495894", "0.54836076", "0.545903", "0.54271775", "0.54271775", "0.5419458", "0.5411966", "0.5406822" ]
0.6408031
0
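A small, illustrative helper (not part of the record above) that only does the length bookkeeping from steps 2)-3) and 7): given emBits, hLen and sLen it reports emLen, the PS padding length and the DB length before masking. The 2048-bit/SHA-1 example values are an assumption chosen for demonstration.

import math

def emsa_pss_layout(em_bits: int, h_len: int, s_len: int) -> dict:
    em_len = math.ceil(em_bits / 8)
    if em_len < h_len + s_len + 2:
        raise ValueError("encoding error (emLen < hLen + sLen + 2)")
    return {"emLen": em_len,
            "psLen": em_len - s_len - h_len - 2,
            "dbLen": em_len - h_len - 1}

# e.g. a 2048-bit modulus signed with SHA-1 (hLen = sLen = 20), so emBits = 2047:
print(emsa_pss_layout(2047, 20, 20))   # {'emLen': 256, 'psLen': 214, 'dbLen': 235}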
Implements EMSA-PSS-VERIFY() function described in Sect. 9.1.2 of RFC 3447
def pkcs_emsa_pss_verify(M, EM, emBits, h, mgf, sLen):
    # 1) is not done
    hLen = _hashFuncParams[h][0]                   # 2)
    hFunc = _hashFuncParams[h][2]
    mHash = hFunc(M)
    emLen = int(math.ceil(emBits/8.))              # 3)
    if emLen < hLen + sLen + 2:
        return False
    if EM[-1] != '\xbc':                           # 4)
        return False
    l = emLen - hLen - 1                           # 5)
    maskedDB = EM[:l]
    H = EM[l:l+hLen]
    l = (8*emLen - emBits)/8                       # 6)
    rem = 8*emLen - emBits - 8*l                   # additionnal bits
    andMask = l*'\xff'
    if rem:
        val = reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem)))
        j = chr(~val & 0xff)
        andMask += j
        l += 1
    if strand(maskedDB[:l], andMask) != '\x00'*l:
        return False
    dbMask = mgf(H, emLen - hLen - 1)              # 7)
    DB = strxor(maskedDB, dbMask)                  # 8)
    l = (8*emLen - emBits)/8                       # 9)
    rem = 8*emLen - emBits - 8*l                   # additionnal bits
    andMask = l*'\x00'
    if rem:
        j = chr(reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem))))
        andMask += j
        l += 1
    DB = strand(DB[:l], andMask) + DB[l:]
    l = emLen - hLen - sLen - 1                    # 10)
    if DB[:l] != '\x00'*(l-1) + '\x01':
        return False
    salt = DB[-sLen:]                              # 11)
    MPrime = '\x00'*8 + mHash + salt               # 12)
    HPrime = hFunc(MPrime)                         # 13)
    return H == HPrime                             # 14)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ecssa_verify(ec: EC, hf, m: bytes, P: Point, sig: ECSS) -> bool:\n\n # the bitcoin proposed standard is only valid for curves\n # whose prime p = 3 % 4\n if not ec.pIsThreeModFour:\n errmsg = 'curve prime p must be equal to 3 (mod 4)'\n raise ValueError(errmsg)\n\n # Let r = int(sig[ 0:32]); fail if r is not [0, p-1].\n # Let s = int(sig[32:64]); fail if s is not [0, n-1].\n r, s = to_ssasig(ec, sig)\n\n # The message m: a 32-byte array\n if len(m) != hf().digest_size:\n errmsg = f'message of wrong size: {len(m)}'\n errmsg += f' instead of {hf().digest_size}'\n raise ValueError(errmsg)\n\n # Let P = point(pk); fail if point(pk) fails.\n ec.requireOnCurve(P)\n if P[1] == 0:\n raise ValueError(\"public key is infinite\")\n\n # Let e = int(hf(bytes(r) || bytes(P) || m)) mod n.\n e = _ecssa_e(ec, hf, r, P, m)\n\n # Let R = sG - eP.\n R = DblScalarMult(ec, s, ec.G, -e, P)\n\n # Fail if infinite(R).\n if R[1] == 0:\n raise ValueError(\"sG - eP is infinite\")\n\n # Fail if jacobi(y(R)) โ‰  1.\n if legendre_symbol(R[1], ec._p) != 1:\n raise ValueError(\"y(sG - eP) is not a quadratic residue\")\n\n # Fail if x(R) โ‰  r.\n return R[0] == r", "def ecssa_verify(ec: EC, hf, m: bytes, P: Point, sig: ECSS) -> bool:\n\n # this is just a try/except wrapper\n # _ecssa_verify raises Errors\n try:\n return _ecssa_verify(ec, hf, m, P, sig)\n except Exception:\n return False", "def ecdsa_verify(G, pub_verify, message, sig):\n plaintext = message.encode(\"utf8\")\n digest = sha256(plaintext).digest()\n res = do_ecdsa_verify(G,pub_verify,sig,digest) \n\n return res", "def _rsassa_pss_verify(self, M, S, h=None, mgf=None, sLen=None):\n\n # Set default parameters if not provided\n if h is None: # By default, sha1\n h = \"sha1\"\n if not _hashFuncParams.has_key(h):\n warning(\"Key._rsassa_pss_verify(): unknown hash function \"\n \"provided (%s)\" % h)\n return False\n if mgf is None: # use mgf1 with underlying hash function\n mgf = lambda x,y: pkcs_mgf1(x, y, h)\n if sLen is None: # use Hash output length (A.2.3 of RFC 3447)\n hLen = _hashFuncParams[h][0]\n sLen = hLen\n\n # 1) Length checking\n modBits = self._modulusLen\n k = modBits / 8\n if len(S) != k:\n return False\n\n # 2) RSA verification\n s = pkcs_os2ip(S) # 2.a)\n m = self._rsavp1(s) # 2.b)\n emLen = math.ceil((modBits - 1) / 8.) 
# 2.c)\n EM = pkcs_i2osp(m, emLen)\n\n # 3) EMSA-PSS verification\n result = pkcs_emsa_pss_verify(M, EM, modBits - 1, h, mgf, sLen)\n\n return result # 4)", "def _rsassa_pkcs1_v1_5_verify(self, M, S, h):\n\n # 1) Length checking\n k = self._modulusLen / 8\n if len(S) != k:\n warning(\"invalid signature (len(S) != k)\")\n return False\n\n # 2) RSA verification\n s = pkcs_os2ip(S) # 2.a)\n m = self._rsavp1(s) # 2.b)\n EM = pkcs_i2osp(m, k) # 2.c)\n\n # 3) EMSA-PKCS1-v1_5 encoding\n EMPrime = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)\n if EMPrime is None:\n warning(\"Key._rsassa_pkcs1_v1_5_verify(): unable to encode.\")\n return False\n\n # 4) Comparison\n return EM == EMPrime", "def verify(self, message, sig):\n\n assert len(message) == 32\n lr, r, ls, s = unpack(\"H32sH32s\", sig)\n sig = Bn.from_binary(r[:lr]), Bn.from_binary(s[:ls])\n return do_ecdsa_verify(self.G, self.pub, sig, message)", "def verify(self):\n if not self.public_key:\n self.fetch_public_key()\n data = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}data\").text\n sig = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}sig\").text\n sig_contents = '.'.join([\n data,\n b64encode(b\"application/xml\").decode(\"ascii\"),\n b64encode(b\"base64url\").decode(\"ascii\"),\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n ])\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key))\n if not cipher.verify(sig_hash, urlsafe_b64decode(sig)):\n raise SignatureVerificationError(\"Signature cannot be verified using the given public key\")", "def verify():", "def checkvalid(s: bytes, m: bytes, pk: bytes) -> None:\n if len(s) != b // 4:\n raise ValueError(\"signature length is wrong\")\n\n if len(pk) != b // 8:\n raise ValueError(\"public-key length is wrong\")\n\n R = decodepoint(s[: b // 8])\n A = decodepoint(pk)\n S = decodeint(s[b // 8 : b // 4])\n h = Hint(encodepoint(R) + pk + m)\n\n (x1, y1, z1, _) = P = scalarmult_B(S)\n (x2, y2, z2, _) = Q = edwards_add(R, scalarmult(A, h))\n\n if (\n not isoncurve(P)\n or not isoncurve(Q)\n or (x1 * z2 - x2 * z1) % q != 0\n or (y1 * z2 - y2 * z1) % q != 0\n ):\n raise SignatureMismatch(\"signature does not pass verification\")", "def verify_rsa(sig_hex, message, public_key):\n sig_int = int(sig_hex , 16)\n m_int = pow(sig_int, public_key.e, public_key.n)\n m_hex = \"%0512x\" % m_int\n h = SHA.new(message).hexdigest()\n return re.match('0001f*' + ASN1_MAGIC + h, m_hex) is not None", "def validate(msg, pubkey: dict, signature):\n if signature is None:\n print(\"Signature is None. 
probably cause something other than a string or byte being passed to signer\")\n return False\n try:\n x_int = base64.b85decode(pubkey[\"x\"].encode())\n x_int = int.from_bytes(x_int, \"big\")\n\n y_int = base64.b85decode(pubkey[\"y\"].encode())\n y_int = int.from_bytes(y_int, \"big\")\n except KeyError:\n return False\n\n signature = signature.encode()\n signature = base64.b85decode(signature)\n\n # if it a string\n try:\n hash_of_message = SHA256.new(msg)\n except TypeError:\n hash_of_message = SHA256.new(msg.encode())\n\n try:\n pubkey = ECC.construct(point_x=x_int, point_y=y_int, curve=\"P-256\").public_key()\n verifier = DSS.new(pubkey, mode=\"fips-186-3\")\n verifier.verify(hash_of_message, signature=signature)\n except ValueError:\n return False\n else:\n return True", "def verify_str(message):\n filename = f'/tmp/{get_temp_filename()}'\n filename_in = f'{filename}.pem'\n filename_plain = f'{filename}.plain'\n filename_certs = f'{filename}.crt'\n with open(filename_in, 'w') as in_file:\n in_file.write(message)\n in_file.close()\n\n cmd = [\n \"openssl\",\n \"cms\",\n \"-verify\",\n \"-inform\", \"PEM\",\n \"-in\", f'{filename_in}',\n \"-inkey\", server_key_files[\"key\"],\n \"-recip\", server_key_files[\"crt\"],\n \"-CAfile\", server_key_files[\"ca\"],\n \"-out\", f'{filename_plain}',\n \"-certsout\", f'{filename_certs}'\n ]\n try:\n res = exec_cmd(cmd)\n with open(filename_plain, \"r\") as f_plain_text:\n plain_text = f_plain_text.read()\n f_plain_text.close()\n with open(filename_certs, \"r\") as f_certs:\n certs = f_certs.read()\n f_certs.close()\n return {\"content\": plain_text,\n \"certs\": certs,\n \"result\": res.stderr.decode(\"utf8\").find(\"Verification successful\") != -1}\n except OSError as err:\n logging.error(\"verify_str failed: %s\", err)\n finally:\n unlink_filenames = [filename_in, filename_plain, filename_certs]\n for unlink_filename in unlink_filenames:\n os.unlink(unlink_filename)", "def verify(public_key, message, signature):\n hasher = SHA256.new(message)\n verifier = PKCS1_v1_5.new(public_key)\n return verifier.verify(hasher, signature)", "async def verify(self, data, signature):\n\t\tsignature_struct = NTLMSSP_MESSAGE_SIGNATURE.from_bytes(signature)\n\t\tcalc_sig = self.MAC(self.crypthandle_server.encrypt, self.SignKey_server, signature_struct.SeqNum, data)\n\t\t#print('server signature : %s' % signature)\n\t\t#print('calculates signature: %s' % calc_sig)\n\t\treturn signature == calc_sig", "def verify(self, signature, body, external_aad, public_key):", "def verify_legacy(self, M, S, t=None, h=None, mgf=None, sLen=None):\n if t is None: # RSAVP1\n S = pkcs_os2ip(S)\n n = self._modulus\n if S > n-1:\n warning(\"Signature to be verified is too long for key modulus\")\n return False\n m = self._rsavp1(S)\n if m is None:\n return False\n l = int(math.ceil(math.log(m, 2) / 8.)) # Hack\n m = pkcs_i2osp(m, l)\n return M == m\n elif t == \"pkcs\": # RSASSA-PKCS1-v1_5-VERIFY\n if h is None:\n h = \"sha1\"\n return self._rsassa_pkcs1_v1_5_verify(M, S, h)\n elif t == \"pss\": # RSASSA-PSS-VERIFY\n return self._rsassa_pss_verify(M, S, h, mgf, sLen)\n else:\n warning(\"Key.verify(): Unknown signature type (%s) provided\" % t)\n return None", "def salt_sign_and_verify(self, msg, salt):\n ciphertext, tag = self.signer.encrypt_and_digest((msg+salt).encode('utf-8'))\n plaintext = self.verifier.decrypt(ciphertext).replace(salt.encode(), \"\".encode())\n try:\n self.verifier.verify(tag)\n print(\"The message is authentic: \", plaintext)\n except ValueError:\n print(\"Key 
incorrect or message corrupted\")", "def verify_plaintext(request):\n sig = plaintext_signature(request.client_secret, request.token_secret)\n return hmac.compare_digest(sig, request.signature)", "def verify(self, text):\n\n components = text.split(self.HASHSEP)\n if len(components) != 2:\n print 'verify: cannot parse text [%s]' % text\n return False\n\n body, digest = components\n check = self.digest(body)\n\n if check == digest:\n return True\n else:\n print 'verify: Expected [%s] got [%s] text [%s]' % (\n digest, check, text)\n return False", "def verifySignature(self, message: bytes, signature: bytes, sigAlgo: SignatureAlgorithm) -> bool:\n\n # Convert parent type algos.SignedDigestAlgorithm to SignatureAlgorithm\n if not isinstance(sigAlgo, SignatureAlgorithm):\n sigAlgo.__class__ = SignatureAlgorithm\n\n # Convert plain ECDSA sig to x9.62 format\n if sigAlgo.isPlain:\n signature = ECDSA_X962_Signature.fromPlain(signature).dump()\n\n hash_algo = algo_utils.get_hash_algo_by_name(sigAlgo.hashAlgo)\n\n class Verifier:\n def __init__(self, vf):\n self._vf = vf\n def verify(self):\n return self._vf()\n\n def get_rsa_verifier(pub_key: rsa.RSAPublicKey):\n if sigAlgo.signature_algo == 'rsassa_pss':\n sig_algo_params = sigAlgo['parameters']\n assert 'mask_gen_algorithm' in sig_algo_params\n assert 'salt_length' in sig_algo_params\n\n mgf = sig_algo_params['mask_gen_algorithm']['algorithm'].native\n if 'mgf1' != mgf:\n raise ValueError(\"Invalid mask generation algorithm: {}\".format(mgf))\n\n mgf1_hash_algo = sig_algo_params['mask_gen_algorithm']['parameters']['algorithm'].native\n mgf1_hash_algo = algo_utils.get_hash_algo_by_name(mgf1_hash_algo)\n return Verifier(lambda:\n pub_key.verify(\n signature,\n message,\n padding.PSS(\n mgf = padding.MGF1(mgf1_hash_algo),\n salt_length = sig_algo_params['salt_length'].native\n ),\n hash_algo\n ))\n else:\n return Verifier(lambda:\n pub_key.verify(signature, message, padding.PKCS1v15(), hash_algo)\n )\n\n def get_ecdsa_verifier(pub_key: ecc.EllipticCurvePublicKey):\n return Verifier(lambda:\n pub_key.verify(signature, message, ecc.ECDSA(hash_algo))\n )\n\n def get_eddsa_verifier(pub_key: ed25519.Ed25519PublicKey):\n return Verifier(lambda:\n pub_key.verify(signature, message)\n )\n\n def get_dsa_verifier(pub_key: ecc.EllipticCurvePublicKey):\n return Verifier(lambda:\n pub_key.verify(signature, message, hash_algo)\n )\n\n # Get signature verifier\n if self.isRsaKey():\n verifier = get_rsa_verifier(self._pub_key)\n elif self.isEcKey():\n verifier = get_ecdsa_verifier(self._pub_key)\n elif self.isEdKey():\n verifier = get_eddsa_verifier(self._pub_key)\n else:\n verifier = get_dsa_verifier(self._pub_key)\n\n # Verify sig\n try:\n verifier.verify()\n except cryptography_exceptions.InvalidSignature:\n return False\n return True", "def verifies_ok(email, val, verification):\r\n if verification.get(\"VerifyMethod\") != \"FoldItVerify\":\r\n log.debug(\"VerificationMethod in %r isn't FoldItVerify\", verification)\r\n return False\r\n hash_str = verification.get(\"Verify\")\r\n\r\n return verify_code(email, val) == hash_str", "def rsa_verify(cypher, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions. 
\r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.verify)", "def sign_and_verify(self, msg):\n ciphertext, tag = self.signer.encrypt_and_digest(msg.encode('utf-8'))\n plaintext = self.verifier.decrypt(ciphertext)\n try:\n self.verifier.verify(tag)\n print(\"The message is authentic: \", plaintext)\n except ValueError:\n print(\"Key incorrect or message corrupted\")", "def check(self):\n if self.is_signed():\n data = self._document.read()\n hash_value = data[-self._append_size+1:-1]\n data = data[:-self._append_size]\n\n encrypted = self._encryptor.encrypt_cbc(data, self._init_vector)\n current_hash_value = encrypted[-16:]\n\n if current_hash_value != hash_value:\n print(\"Hash values did not matched!\")\n else:\n print(\"Hash values matched!\")\n else:\n print(\"The document is not signed!\")", "def Verify(self, msg, sig):\n try:\n return self.key.verify(util.MakeEmsaMessage(msg, self.size), (util.BytesToLong(sig),))\n except ValueError:\n # if sig is not a long, it's invalid\n return False", "def verify(signature: Signature, pub_key: rsa.RSAPublicKey, msg: bytes) -> bool:\n try:\n pub_key.verify(signature, msg, PADDING, HASH)\n except:\n return False\n return True", "def verify(self, assoc_handle, message):\n assoc = self.getAssociation(assoc_handle, dumb=True)\n if not assoc:\n logger.error(\"failed to get assoc with handle %r to verify \"\n \"message %r\" % (assoc_handle, message))\n return False\n\n try:\n valid = assoc.checkMessageSignature(message)\n except ValueError as ex:\n logger.exception(\"Error in verifying %s with %s: %s\" %\n (message, assoc, ex))\n return False\n return valid", "def verifySignature(self, message: bytes, signature: bytes, sigAlgo: Optional[SignatureAlgorithm] = None) -> bool:\n\n if self.isRsaKey():\n v = iso9796e2.Dss1Verifier(self._pub_key)\n return v.verifySignature(message, signature)\n elif self.isEcKey():\n # WARNING: THIS SCOPE WAS TESTED WITH ECDSA SIGNATURE NOT FROM eMRTD IC\n if sigAlgo is None:\n raise ValueError(\"Missing required param 'sigAlgo'\")\n return super().verifySignature(message, signature, sigAlgo)\n else:\n raise ValueError(\"Unsupported digital signature scheme\")", "def verifyImageVerification( imageVerification ):\n if \"hash-algorithm\" in imageVerification:\n assert imageVerification[ \"hash-algorithm\" ] == \\\n \"ietf-sztp-conveyed-info:sha-256\",\\\n \"Unsupported hash-algorithm\"\n assert \"hash-value\" in imageVerification, \\\n \"Expected hash-value not present\"\n hashValue = imageVerification[ \"hash-value\" ]\n # Verify hashValue appears to be a yang:hex-string\n assert len( hashValue ) == 32 * 3 - 1 and \\\n all( c == ':' or c in string.hexdigits for c in hashValue ), \\\n \"hash-value invalid\"", "def Verify(self, msg, sig):\n try:\n (r, s) = util.ParseDsaSig(sig)\n return self.key.verify(util.Hash(msg), (r, s))\n except errors.KeyczarError:\n # if signature is not in correct format\n return False" ]
[ "0.67954147", "0.657173", "0.64441437", "0.6231932", "0.6160214", "0.61599946", "0.6157672", "0.6145806", "0.5965935", "0.5962814", "0.5930591", "0.5899652", "0.58821315", "0.58375406", "0.57815605", "0.57596624", "0.5742779", "0.5730758", "0.57255775", "0.57254857", "0.5716272", "0.5708308", "0.5686308", "0.56643844", "0.56490564", "0.564298", "0.56227005", "0.5601429", "0.5597701", "0.5555328" ]
0.6674336
1
Implements EMSA-PKCS1-V1_5-ENCODE() function described in Sect. 9.2 of RFC 3447.
def pkcs_emsa_pkcs1_v1_5_encode(M, emLen, h): # section 9.2 of RFC 3447
    hLen = _hashFuncParams[h][0]                             # 1)
    hFunc = _hashFuncParams[h][2]
    H = hFunc(M)
    hLeadingDigestInfo = _hashFuncParams[h][3]               # 2)
    T = hLeadingDigestInfo + H
    tLen = len(T)
    if emLen < tLen + 11:                                    # 3)
        warning("pkcs_emsa_pkcs1_v1_5_encode:"
                "intended encoded message length too short")
        return None
    PS = '\xff'*(emLen - tLen - 3)                           # 4)
    EM = '\x00' + '\x01' + PS + '\x00' + T                   # 5)
    return EM                                                # 6)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _rsassa_pkcs1_v1_5_sign(self, M, h):\n\n # 1) EMSA-PKCS1-v1_5 encoding\n k = self._modulusLen / 8\n EM = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)\n if EM is None:\n warning(\"Key._rsassa_pkcs1_v1_5_sign(): unable to encode\")\n return None\n\n # 2) RSA signature\n m = pkcs_os2ip(EM) # 2.a)\n s = self._rsasp1(m) # 2.b)\n S = pkcs_i2osp(s, k) # 2.c)\n\n return S # 3)", "def encryptAESCBCPKCS5(key, iv, pt):\n\tpt = padPKCS5(pt)\n\tct = b''\n\txorSource = iv\n\tfor ptBlock in chunks(pt, 16):\n\t\tb = xor(ptBlock, xorSource)\n\t\tctBlock = encryptAESBlock(key, b)\n\t\tct += ctBlock\n\t\txorSource = ctBlock\n\treturn ct", "def test_encode():\n enig = Enigma(534, 16, 8, [4, 6, 0, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5])\n string = \"\"\"Hello, this is a test string. I will follow this with a return\nbringing it onto a new line. I can do this forever, but I won't. Just\nfor a while.\"\"\"\n encoded = \"\"\"-)m>&)IKp[1`Sro$82[@_`TV&`f%}|<]a1R*\\W4IEb6j@+':`R[.(1$vV4rTJ2\n6V?5.;8q r%0p@+[Ir7-?rzIl;nV<4W7,PD[5-?;RE+~vR5-`i}>=z@S \"eJ`8g:S:1ir\nE0=<F0~/;6).\"\"\"\n\n assert_equal(encoded, enig.encode(string))\n\n endsettings = [5, 2, 2, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5]\n assert_equal(endsettings, enig.getrotsettings())", "def cipher_feedback_mode_encode(msg, CEK, IV = int(0).to_bytes(8, 'big')):\n assert(len(CEK) == 32)\n assert(len(IV) == 8)\n last_block = IV\n res = b''\n for i in range(0, len(msg), 8):\n gamma = GOST2814789ECB_encode(last_block, CEK)\n block = msg[i: min(i + 8, len(msg))]\n encrypted_block = b''\n for j in range(len(block)):\n encrypted_block += int(block[j] ^ gamma[j]).to_bytes(1, 'big')\n res += encrypted_block\n last_block = encrypted_block\n return res", "def pkcs5_pad(self,s):\n return s + (self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE) * chr(self.BLOCK_SIZE - len(s) % self.BLOCK_SIZE)", "def encode(self):\n if self.ciphered:\n raise CipherError(\"already encoded.\")\n try:\n self.result = self.doEncode(self.msg,self.shift)\n except Exception as e:\n raise CipherError(\"encoding failure: {}.\".format(e))\n self.ciphered = True\n return self.result", "def encode(self):\n # Encoding Join-accept:\n # MAC Header\n # 3 bytes appnonce\n # 3 bytes netid \n # 4 bytes devaddr\n # 1 byte dlsettings\n # 1 byte rxdelay\n # Optional cflist\n \n # Create the message\n header = self.mhdr.encode()\n msg = intPackBytes(self.appnonce, 3, endian='little') + \\\n intPackBytes(self.netid, 3, endian='little') + \\\n struct.pack('<L', self.devaddr) + \\\n struct.pack('B', self.dlsettings) + \\\n struct.pack('B', self.rxdelay)\n # CFList is not used in a Join Accept message for US/AU bands\n if self.cflist:\n pass\n # Create the MIC over the entire message\n self.mic = aesEncrypt(intPackBytes(self.appkey, 16), header + msg,\n mode='CMAC')[0:4]\n msg += self.mic\n # Add the header and encrypt the message using AES-128 decrypt\n data = header + aesDecrypt(intPackBytes(self.appkey, 16), msg)\n return data", "def encryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n encoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code+key\n new = chr(change)\n string += new\n key += key_increment\n \n encoded = ''.join(string)\n return ('Encoded Message:\\t' + encoded)", "def strEnc(data, *keys):\n r = len(data) % 4\n data += (4 - r if r else 0) * chr(0)\n encData = \"\"\n for i in range(len(data) // 4):\n tempBt = strToBt(data[i * 4: i * 4 + 4])\n for key in keys:\n for b in getKeyBytes(key):\n tempBt = enc(tempBt, b)\n 
encData += bt64ToHex(tempBt)\n return encData", "def encode(self):\n self.preprocess_msg()\n self._find_e()\n\n self.__encoded_msg = self._calculate(self.e)", "def doEncode(self):\n raise CipherError(\"override this func and return the encoded msg\")", "def encode(key, value, ber_length=0):\n return bytearray(key) + encode_ber(len(value), ber_length) + bytearray(value)", "def encode_message(self, message):\n\n self.validate_machine_config()\n self.validate_message(message)\n encoded_message = ''\n\n for char in message:\n encoded_message += self.encode_char(char)\n\n return encoded_message", "def encode(self, seq):", "def test_encode_latin1(self):\n\n sm = binascii.a2b_hex(self.latin1_sm)\n pdu = self.buildSubmitSmTest(sm) \n\n # SM shall not be altered since it is not sliced (not too long)\n self.assertEquals(pdu.params['short_message'], sm)", "def encrypt_message(message,public_key,symetric_key):\n\tif message != None:\n\t\tnonce = os.urandom(12)\n\t\tmessage = AESCCM(symetric_key).encrypt(nonce,message.encode(\"iso-8859-1\"),None)\n\t\tnonce, *_ = encrypt(public_key,nonce)\n\t\tmessage ={'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\n\treturn message", "def test_encode_pair():\n\tassert encode_pair(0, 0) == 0\n\tassert encode_pair(1, 0) == 1\n\tassert encode_pair(0, 1) == 2\n\tassert encode_pair(4, 6) == 207", "def encodeMessage(self, key, message):\n # Make sure pure ascii, and replace bullshit\n message = message.encode('ascii', 'replace')\n # Any message needs to be a multiple of 8.\n dsize = len(message)\n #message = struct.pack('!q', int(binascii.crc32(message)& 0xffffffff)) + struct.pack('!q', dsize) + message\n # Encode two dsize parity blocks and check them against eachother on the receiving end\n message = struct.pack('!q', dsize) + struct.pack('!q', dsize) + message\n key = self.createKey(key)\n short = (math.ceil(len(message)/8.0) * 8) - len(message)\n space = \"\\x00\" * int(short)\n enc = DES.new(key, DES.MODE_ECB)\n return enc.encrypt(message+space)", "def enc(elements):\n encoded = ''\n for key, dtype, value in elements:\n binary = enc_elem(dtype, value)\n encoded += struct.pack('>HBH', key, dtype, len(binary)) + binary\n return encoded", "def encode(self):\n return asn1.encode(self, './data_groups/configs/dg1.json')", "def encode(self, strs):", "def encode(self, strs):", "def encode_email(email, key):\n return", "def bcur_encode(data):\n cbor = cbor_encode(data)\n enc = bc32encode(cbor)\n h = hashlib.sha256(cbor).digest()\n enc_hash = bc32encode(h)\n return enc, enc_hash", "def _encode_message(message):\n aes_key = get_settings()['aes_key'].encode('utf-8')\n hmac_key = get_settings()['hmac_key'].encode('utf-8')\n\n pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(\n AES.block_size - len(s) % AES.block_size)\n init_vector = Random.new().read(AES.block_size)\n cipher = AES.new(aes_key, AES.MODE_CBC, init_vector)\n padded_message = pad(message)\n aes_message = init_vector + cipher.encrypt(padded_message)\n hmac_digest = hmac.new(bytes(hmac_key), bytes(aes_message), hashlib.sha1)\n\n return aes_message, hmac_digest", "def rc4_encode(data, key, encode=base64.b64encode, salt_length=16):\n salt = ''\n for n in range(salt_length):\n salt += chr(random.randrange(256))\n data = salt + crypt(data, sha1(key + salt).digest())\n if encode:\n data = encode(data)\n return data", "def _encode(self, boxes, anchors):\n pass", "def encoder(self):\n prefixe = pack('=BHB', VERSION_PROTOCOLE, self.type_message, self.node_id)\n \n return 
prefixe", "def testEncode(self):\n test_cases = [\n ('\\n', '1\\n'),\n (' ', '1 '),\n ('aaabbb', '3a 3b'),\n ('a b', '1a 1 1b'),\n ('\\n\\n\\n', '3\\n'),\n ('122333', '11 22 33'),\n ('aaaaaaaaaa', '10a'),\n ('aaaaaaaaaabbbbbbbbbbb', '10a 11b'),\n ('a'*1001, '1001a'),\n (''.join(['a'*1001, 'b'*909, 'c'*65, 'd'*2]), '1001a 909b 65c 2d'),\n ]\n for data, expected in test_cases:\n encoded_result = ASCIITransportFormat.encode_data(data)\n self.assertEqual(encoded_result, expected)", "def _encode(klass, pwm_str_key):\n\t\t#print(\"Encoding pwm key %s\" % (pwm_str_key,))\n\t\tdec_pwm_key = int(pwm_str_key, 2)\n\t\t#print \"Decimal (PWN) key:\",dec_pwm_key\n\t\tkey_packed = ''\n\t\tfor byte in Lirc._chunk(pwm_str_key, 8, '0'):\n\t\t\tdec_pwm_key = int(byte, 2)\n\t\t\tkey_packed = key_packed + struct.pack(\">B\", dec_pwm_key)\n\t\treturn key_packed" ]
[ "0.58637017", "0.57749647", "0.5767254", "0.562831", "0.5544573", "0.5455732", "0.54434717", "0.5439919", "0.54337126", "0.5427922", "0.54122305", "0.5409033", "0.53953534", "0.533786", "0.5249663", "0.52453196", "0.52303314", "0.5200437", "0.5186495", "0.5167307", "0.5165996", "0.5165996", "0.51415575", "0.5133114", "0.5124316", "0.512196", "0.5109032", "0.5100206", "0.5084373", "0.508353" ]
0.66770023
0
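As a quick cross-check of the encoding record above, the same EMSA-PKCS1-v1_5 layout (0x00, 0x01, 0xff padding, 0x00, then DigestInfo || hash) can be reproduced independently in a few lines of modern Python; this is a hedged sketch using hashlib and the well-known SHA-256 DigestInfo prefix, not one of the retrieved implementations.

    import hashlib

    # ASN.1 DigestInfo header for SHA-256, prepended to the raw digest
    SHA256_DIGESTINFO = bytes.fromhex("3031300d060960864801650304020105000420")

    def emsa_pkcs1_v1_5_encode_sha256(message, em_len):
        t = SHA256_DIGESTINFO + hashlib.sha256(message).digest()
        if em_len < len(t) + 11:
            raise ValueError("intended encoded message length too short")
        ps = b"\xff" * (em_len - len(t) - 3)
        return b"\x00\x01" + ps + b"\x00" + t

    # For a 2048-bit modulus, emLen is 256 bytes
    em = emsa_pkcs1_v1_5_encode_sha256(b"hello", 256)
    assert len(em) == 256 and em.startswith(b"\x00\x01")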
Implements RSASSA-PKCS1-v1_5-VERIFY() function as described in Sect. 8.2.2 of RFC 3447.
def _rsassa_pkcs1_v1_5_verify(self, M, S, h):

    # 1) Length checking
    k = self._modulusLen / 8
    if len(S) != k:
        warning("invalid signature (len(S) != k)")
        return False

    # 2) RSA verification
    s = pkcs_os2ip(S)                          # 2.a)
    m = self._rsavp1(s)                        # 2.b)
    EM = pkcs_i2osp(m, k)                      # 2.c)

    # 3) EMSA-PKCS1-v1_5 encoding
    EMPrime = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)
    if EMPrime is None:
        warning("Key._rsassa_pkcs1_v1_5_verify(): unable to encode.")
        return False

    # 4) Comparison
    return EM == EMPrime
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_rsa_sha1(request):\n from .rsa import verify_sha1\n base_string = generate_signature_base_string(request)\n sig = binascii.a2b_base64(to_bytes(request.signature))\n return verify_sha1(sig, to_bytes(base_string), request.rsa_public_key)", "def verify(public_key, message, signature):\n hasher = SHA256.new(message)\n verifier = PKCS1_v1_5.new(public_key)\n return verifier.verify(hasher, signature)", "def verify_legacy(self, M, S, t=None, h=None, mgf=None, sLen=None):\n if t is None: # RSAVP1\n S = pkcs_os2ip(S)\n n = self._modulus\n if S > n-1:\n warning(\"Signature to be verified is too long for key modulus\")\n return False\n m = self._rsavp1(S)\n if m is None:\n return False\n l = int(math.ceil(math.log(m, 2) / 8.)) # Hack\n m = pkcs_i2osp(m, l)\n return M == m\n elif t == \"pkcs\": # RSASSA-PKCS1-v1_5-VERIFY\n if h is None:\n h = \"sha1\"\n return self._rsassa_pkcs1_v1_5_verify(M, S, h)\n elif t == \"pss\": # RSASSA-PSS-VERIFY\n return self._rsassa_pss_verify(M, S, h, mgf, sLen)\n else:\n warning(\"Key.verify(): Unknown signature type (%s) provided\" % t)\n return None", "def verify(self):\n if not self.public_key:\n self.fetch_public_key()\n data = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}data\").text\n sig = self.doc.find(\".//{http://salmon-protocol.org/ns/magic-env}sig\").text\n sig_contents = '.'.join([\n data,\n b64encode(b\"application/xml\").decode(\"ascii\"),\n b64encode(b\"base64url\").decode(\"ascii\"),\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n ])\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(RSA.importKey(self.public_key))\n if not cipher.verify(sig_hash, urlsafe_b64decode(sig)):\n raise SignatureVerificationError(\"Signature cannot be verified using the given public key\")", "def _rsassa_pkcs1_v1_5_sign(self, M, h):\n\n # 1) EMSA-PKCS1-v1_5 encoding\n k = self._modulusLen / 8\n EM = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)\n if EM is None:\n warning(\"Key._rsassa_pkcs1_v1_5_sign(): unable to encode\")\n return None\n\n # 2) RSA signature\n m = pkcs_os2ip(EM) # 2.a)\n s = self._rsasp1(m) # 2.b)\n S = pkcs_i2osp(s, k) # 2.c)\n\n return S # 3)", "def verify_rsa(sig_hex, message, public_key):\n sig_int = int(sig_hex , 16)\n m_int = pow(sig_int, public_key.e, public_key.n)\n m_hex = \"%0512x\" % m_int\n h = SHA.new(message).hexdigest()\n return re.match('0001f*' + ASN1_MAGIC + h, m_hex) is not None", "def verify(self, sigBytes, bytes):\r\n if len(sigBytes) != numBytes(self.n):\r\n return False\r\n paddedBytes = self._addPKCS1Padding(bytes, 1)\r\n c = bytesToNumber(sigBytes)\r\n if c >= self.n:\r\n return False\r\n m = self._rawPublicKeyOp(c)\r\n checkBytes = numberToByteArray(m, numBytes(self.n))\r\n return checkBytes == paddedBytes", "def verify(self, message, sig):\n\n assert len(message) == 32\n lr, r, ls, s = unpack(\"H32sH32s\", sig)\n sig = Bn.from_binary(r[:lr]), Bn.from_binary(s[:ls])\n return do_ecdsa_verify(self.G, self.pub, sig, message)", "def _rsassa_pss_verify(self, M, S, h=None, mgf=None, sLen=None):\n\n # Set default parameters if not provided\n if h is None: # By default, sha1\n h = \"sha1\"\n if not _hashFuncParams.has_key(h):\n warning(\"Key._rsassa_pss_verify(): unknown hash function \"\n \"provided (%s)\" % h)\n return False\n if mgf is None: # use mgf1 with underlying hash function\n mgf = lambda x,y: pkcs_mgf1(x, y, h)\n if sLen is None: # use Hash output length (A.2.3 of RFC 3447)\n hLen = _hashFuncParams[h][0]\n sLen = hLen\n\n # 1) Length checking\n modBits = self._modulusLen\n k = 
modBits / 8\n if len(S) != k:\n return False\n\n # 2) RSA verification\n s = pkcs_os2ip(S) # 2.a)\n m = self._rsavp1(s) # 2.b)\n emLen = math.ceil((modBits - 1) / 8.) # 2.c)\n EM = pkcs_i2osp(m, emLen)\n\n # 3) EMSA-PSS verification\n result = pkcs_emsa_pss_verify(M, EM, modBits - 1, h, mgf, sLen)\n\n return result # 4)", "def Verify(self, signed_bytes, signature_b64):\r\n # Generate the PKCS1-v1_5 compatible message, which includes\r\n # magic ASN.1 bytes and padding:\r\n emsa_msg = self._MakeEmsaMessageSha256(signed_bytes,\r\n self.keypair.size())\r\n\r\n # Get putative signature:\r\n putative_signature = base64.urlsafe_b64decode(signature_b64.encode('utf-8'))\r\n putative_signature = number.bytes_to_long(putative_signature)\r\n\r\n # Verify signature given public key:\r\n return self.keypair.verify(emsa_msg, (putative_signature,))", "def verify(self, message, signature):\n symkey = self.gen_symkey(message)\n\n # v is the verification value, X is the ring of signatures\n v, X = signature[0], signature[1:]\n\n # permute an X value to a Y value using the g function\n mapper = lambda i: self.g(X[i], self.public_keys[i].e, self.public_keys[i].n)\n\n # map the array of x -> array of y\n Y = map(mapper, range(len(X)))\n\n # XOR the cumulative hash with the next value, then hash that\n reducer = lambda x, i: self.concat_hash(x ^ Y[i], symkey)\n\n # now do the verification:\n # C(k, v, y[]) = E(k, y[r] ^ E(k, y[r-1] ^ E(... ^ E(k, y[0] ^ v)...)))\n r = reduce(reducer, range(self.n_keys), v)\n return r == v", "def Verify(self, msg, sig):\n try:\n (r, s) = util.ParseDsaSig(sig)\n return self.key.verify(util.Hash(msg), (r, s))\n except errors.KeyczarError:\n # if signature is not in correct format\n return False", "def rsa_verify(cypher, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions. 
\r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.verify)", "def _ecssa_verify(ec: EC, hf, m: bytes, P: Point, sig: ECSS) -> bool:\n\n # the bitcoin proposed standard is only valid for curves\n # whose prime p = 3 % 4\n if not ec.pIsThreeModFour:\n errmsg = 'curve prime p must be equal to 3 (mod 4)'\n raise ValueError(errmsg)\n\n # Let r = int(sig[ 0:32]); fail if r is not [0, p-1].\n # Let s = int(sig[32:64]); fail if s is not [0, n-1].\n r, s = to_ssasig(ec, sig)\n\n # The message m: a 32-byte array\n if len(m) != hf().digest_size:\n errmsg = f'message of wrong size: {len(m)}'\n errmsg += f' instead of {hf().digest_size}'\n raise ValueError(errmsg)\n\n # Let P = point(pk); fail if point(pk) fails.\n ec.requireOnCurve(P)\n if P[1] == 0:\n raise ValueError(\"public key is infinite\")\n\n # Let e = int(hf(bytes(r) || bytes(P) || m)) mod n.\n e = _ecssa_e(ec, hf, r, P, m)\n\n # Let R = sG - eP.\n R = DblScalarMult(ec, s, ec.G, -e, P)\n\n # Fail if infinite(R).\n if R[1] == 0:\n raise ValueError(\"sG - eP is infinite\")\n\n # Fail if jacobi(y(R)) โ‰  1.\n if legendre_symbol(R[1], ec._p) != 1:\n raise ValueError(\"y(sG - eP) is not a quadratic residue\")\n\n # Fail if x(R) โ‰  r.\n return R[0] == r", "def verifySig(pub, inHash, r, s):\n # See [NSA] 3.4.2\n N = Curve.N\n\n if r <= 0 or s <= 0:\n return False\n\n if r >= N or s >= N:\n return False\n\n e = hashToInt(inHash)\n\n w = crypto.modInv(s, N)\n\n u1 = (e * w) % N\n u2 = (r * w) % N\n\n x1, y1 = Curve.scalarBaseMult(u1)\n x2, y2 = Curve.scalarMult(pub.x, pub.y, u2)\n x, y = Curve.add(x1, y1, x2, y2)\n\n if x == 0 and y == 0:\n return False\n x = x % N\n return x == r", "def Verify(self, msg, sig_bytes):\n correctMac = self.Sign(msg)\n if len(sig_bytes) != len(correctMac):\n return False\n result = 0\n for x, y in zip(correctMac, sig_bytes):\n result |= ord(x) ^ ord(y)\n return result == 0", "def verify(signature: Signature, pub_key: rsa.RSAPublicKey, msg: bytes) -> bool:\n try:\n pub_key.verify(signature, msg, PADDING, HASH)\n except:\n return False\n return True", "def checkvalid(s: bytes, m: bytes, pk: bytes) -> None:\n if len(s) != b // 4:\n raise ValueError(\"signature length is wrong\")\n\n if len(pk) != b // 8:\n raise ValueError(\"public-key length is wrong\")\n\n R = decodepoint(s[: b // 8])\n A = decodepoint(pk)\n S = decodeint(s[b // 8 : b // 4])\n h = Hint(encodepoint(R) + pk + m)\n\n (x1, y1, z1, _) = P = scalarmult_B(S)\n (x2, y2, z2, _) = Q = edwards_add(R, scalarmult(A, h))\n\n if (\n not isoncurve(P)\n or not isoncurve(Q)\n or (x1 * z2 - x2 * z1) % q != 0\n or (y1 * z2 - y2 * z1) % q != 0\n ):\n raise SignatureMismatch(\"signature does not pass verification\")", "def Verify(self, msg, sig):\n try:\n (r, s) = util.ParseDsaSig(sig)\n return self.key.verify(util.Hash(msg), (r, s))\n except errors.KeyczarError:\n # if signature is not in correct format\n return False", "async def verify(self, data, signature):\n\t\tsignature_struct = NTLMSSP_MESSAGE_SIGNATURE.from_bytes(signature)\n\t\tcalc_sig = self.MAC(self.crypthandle_server.encrypt, self.SignKey_server, signature_struct.SeqNum, data)\n\t\t#print('server signature : %s' % signature)\n\t\t#print('calculates signature: %s' % calc_sig)\n\t\treturn signature == calc_sig", "def salt_sign_and_verify(self, msg, salt):\n ciphertext, tag = self.signer.encrypt_and_digest((msg+salt).encode('utf-8'))\n plaintext = self.verifier.decrypt(ciphertext).replace(salt.encode(), \"\".encode())\n try:\n 
self.verifier.verify(tag)\n print(\"The message is authentic: \", plaintext)\n except ValueError:\n print(\"Key incorrect or message corrupted\")", "def sident_verify(self, connection):\n sident_verify_msg = {'type':'sident_verify',\n 'timestamp':calendar.timegm(time.gmtime())}\n self._send_queue.put((sident_verify_msg, connection))\n return True", "def verify(self, msg, sig, key):\n if not isinstance(key, ec.EllipticCurvePublicKey):\n raise TypeError(\"The public key must be an instance of \" \"ec.EllipticCurvePublicKey\")\n self._cross_check(key)\n\n num_bits = key.curve.key_size\n num_bytes = (num_bits + 7) // 8\n if len(sig) != 2 * num_bytes:\n raise ValueError(\"Invalid signature\")\n\n try:\n # cryptography uses ASN.1-encoded signature data; split JWS\n # signature (r||s) and encode before verification\n (r, s) = self._split_raw_signature(sig)\n asn1sig = encode_dss_signature(r, s)\n key.verify(asn1sig, msg, ec.ECDSA(self.hash_algorithm()))\n except InvalidSignature as err:\n raise BadSignature(err)\n else:\n return True", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def verification(file_name: str) -> None:\n print(\"Verification process...\")\n file_name = os.path.join('data', file_name)\n\n file1 = open(\"data/key.txt\", \"r\")\n file2 = open(\"data/signature.txt\", \"r\")\n p = int(file1.readline().rstrip())\n q = int(file1.readline().rstrip())\n g = int(file1.readline().rstrip())\n h = int(file1.readline().rstrip())\n\n c1 = int(file2.readline().rstrip())\n c2 = int(file2.readline().rstrip())\n print('c1 = ', c1)\n print('c2 = ', c2)\n\n t1 = sha_hash(file_name)\n print('hash = ', t1)\n inverseC2 = compute_inverse(c2, q)\n t1 = (t1 * inverseC2) % q\n\n t2 = compute_inverse(c2, q)\n t2 = (t2 * c1) % q\n\n valid1 = square_multiply(g, t1, p)\n valid2 = square_multiply(h, t2, p)\n valid = ((valid1 * valid2) % p) % q\n if valid == c1:\n print(\"Valid signature\")\n else:\n print(\"Invalid signature\")", "def verify_signature(self, key, data):\n verify_signature(self, key, data)", "def verify(pk: PublicKey, signature: Signature, msgs: List[bytes]) -> bool:\n # Check that generator is not 1\n if signature.gen == G1.unity():\n return False\n else:\n assert(len(msgs) == len(pk.Y2)\n ), f\"Message length: {len(msgs)}, pk.Y2 length: {len(pk.Y2)}\"\n accum = pk.X2\n for Y2_i, m_i in zip(pk.Y2.values(), msgs):\n accum = accum * Y2_i**Bn.from_binary(m_i)\n return signature.gen.pair(accum) == signature.sig.pair(pk.g2)", "def _verify(pubkey: SupportedKeyTypes, sig: bytes, filehash: bytes, hashfunc: hashes.HashAlgorithm) -> None:\n if isinstance(pubkey, RSAPublicKey):\n pubkey.verify(sig, filehash, padding.PKCS1v15(), Prehashed(hashfunc))\n elif isinstance(pubkey, EllipticCurvePublicKey):\n pubkey.verify(sig, filehash, ec.ECDSA(Prehashed(hashfunc)))", "def Verify(self, publicKey: str) -> bool:\r\n if not publicKey:\r\n raise ValueError(str(publicKey))\r\n isValid = False\r\n if self.m_Licence1:\r\n signature = self.m_Licence1.find('Code').text\r\n self.m_Licence1.find('Code').text = ''\r\n try:\r\n isValid = RSAVerify().Verify(self.m_Licence1, signature, publicKey)\r\n self.m_Licence1.find('Code').text = signature\r\n except Exception as ex:\r\n print(\"Exception 22\")\r\n print(ex)\r\n isValid = False\r\n return isValid", "def verify(self, signature, body, external_aad, public_key):" ]
[ "0.63578653", "0.63091177", "0.62384", "0.6164231", "0.61427945", "0.61369234", "0.592434", "0.58686256", "0.58176625", "0.5731193", "0.5705756", "0.5680333", "0.5656388", "0.56533843", "0.5642347", "0.56344414", "0.56185615", "0.5606178", "0.5587348", "0.5557569", "0.5531679", "0.5530506", "0.5502381", "0.5498383", "0.5497129", "0.54910773", "0.5482942", "0.54615813", "0.5449119", "0.5445912" ]
0.7382296
0
Implements RSASSA-PSS-SIGN() function described in Sect. 8.1.1 of RFC 3447.
def _rsassa_pss_sign(self, M, h=None, mgf=None, sLen=None):

    # Set default parameters if not provided
    if h is None: # By default, sha1
        h = "sha1"
    if not _hashFuncParams.has_key(h):
        warning("Key._rsassa_pss_sign(): unknown hash function "
                "provided (%s)" % h)
        return None
    if mgf is None: # use mgf1 with underlying hash function
        mgf = lambda x,y: pkcs_mgf1(x, y, h)
    if sLen is None: # use Hash output length (A.2.3 of RFC 3447)
        hLen = _hashFuncParams[h][0]
        sLen = hLen

    # 1) EMSA-PSS encoding
    modBits = self._modulusLen
    k = modBits / 8
    EM = pkcs_emsa_pss_encode(M, modBits - 1, h, mgf, sLen)
    if EM is None:
        warning("Key._rsassa_pss_sign(): unable to encode")
        return None

    # 2) RSA signature
    m = pkcs_os2ip(EM)                         # 2.a)
    s = self._rsasp1(m)                        # 2.b)
    S = pkcs_i2osp(s, k)                       # 2.c)

    return S                                   # 3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rsa_sign(message, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_chopstring(message, temp_key_obj, temp_key_obj.sign)", "def _rsa_sign(blob, private_key_pem):\n # Lazy import crypto. It is not available in unit tests outside of sandbox.\n from Crypto.Hash import SHA256\n from Crypto.PublicKey import RSA\n from Crypto.Signature import PKCS1_v1_5\n pkey = RSA.importKey(private_key_pem)\n return PKCS1_v1_5.new(pkey).sign(SHA256.new(blob))", "def sign(self, message):\n\n assert len(message) == 32\n assert self.sec is not None\n r, s = do_ecdsa_sign(self.G, self.sec, message, self.optim)\n r0, s0 = r.binary(), s.binary()\n assert len(r0) <= 32 and len(s0) <= 32\n sig = pack(\"H32sH32s\", len(r0), r0, len(s0), s0)\n return sig", "def sign_with_rsa(private_key, data):\n if isinstance(data, six.text_type):\n encoded_data = data.encode('utf8')\n else:\n encoded_data = data\n\n signed_data = OpenSSL.crypto.sign(private_key, encoded_data, \"sha1\")\n return base64.b64encode(signed_data).decode('ascii')", "def sign(priv_key: rsa.RSAPrivateKey, msg: bytes) -> Signature:\n return priv_key.sign(msg, PADDING, HASH)", "def sign_certificate(csr):\n unique_filename = str(uuid.uuid4().hex)\n\n file = open(\"./csr_req/%s.csr\" % unique_filename, \"w\")\n file.write(csr.decode(\"utf-8\"))\n file.close()\n\n subprocess.run([\"../ca/scripts/sign.sh\", unique_filename], check=False)\n\n file = open(\"./csr_req/%s.p7b\" % unique_filename, \"r\")\n cert = file.read()\n\n os.remove(\"./csr_req/%s.csr\" % unique_filename)\n os.remove(\"./csr_req/%s.p7b\" % unique_filename)\n\n return cert", "def Sign(self, msg):\n emsa_encoded = util.MakeEmsaMessage(msg, self.size)\n return util.BigIntToBytes(self.key.sign(emsa_encoded, None)[0])", "def sendPublicKey(g, p, s):\r\n status = \"120 PubKey \" + str(computePublicKey(g, p, s))\r\n return status", "def rsa_sha1_signature(base_string, rsa_private_key):\n from .rsa import sign_sha1\n base_string = to_bytes(base_string)\n s = sign_sha1(to_bytes(base_string), rsa_private_key)\n sig = binascii.b2a_base64(s)[:-1]\n return to_unicode(sig)", "def sign(self, msg, key):\n\n if not isinstance(key, ec.EllipticCurvePrivateKey):\n raise TypeError(\"The private key must be an instance of \" \"ec.EllipticCurvePrivateKey\")\n\n self._cross_check(key.public_key())\n num_bits = key.curve.key_size\n num_bytes = (num_bits + 7) // 8\n asn1sig = key.sign(msg, ec.ECDSA(self.hash_algorithm()))\n # Cryptography returns ASN.1-encoded signature data; decode as JWS\n # uses raw signatures (r||s)\n (r, s) = decode_dss_signature(asn1sig)\n return int.to_bytes(r, num_bytes, \"big\") + int.to_bytes(s, num_bytes, \"big\")", "def sign(message, priv_key):\n\n if not isinstance(priv_key, key.PrivateKey):\n raise TypeError(\"You must use the private key with sign\")\n\n return chopstring(message, priv_key.d, priv_key.n, encrypt_int)", "def _ecssa_pubkey_recovery(ec: EC, hf, e: int, sig: ECSS) -> Point:\n\n r, s = to_ssasig(ec, sig)\n\n # could be obtained from to_ssasig...\n K = r, ec.yQuadraticResidue(r, True)\n\n if e == 0:\n raise ValueError(\"invalid (zero) challenge e\")\n e1 = mod_inv(e, ec.n)\n P = DblScalarMult(ec, e1*s, ec.G, -e1, K)\n assert P[1] != 0, \"how did you do that?!?\"\n return P", "def pkcs_emsa_pss_encode(M, emBits, h, mgf, sLen):\n\n # 1) is not done\n hLen = 
_hashFuncParams[h][0] # 2)\n hFunc = _hashFuncParams[h][2]\n mHash = hFunc(M)\n emLen = int(math.ceil(emBits/8.))\n if emLen < hLen + sLen + 2: # 3)\n warning(\"encoding error (emLen < hLen + sLen + 2)\")\n return None\n salt = randstring(sLen) # 4)\n MPrime = '\\x00'*8 + mHash + salt # 5)\n H = hFunc(MPrime) # 6)\n PS = '\\x00'*(emLen - sLen - hLen - 2) # 7)\n DB = PS + '\\x01' + salt # 8)\n dbMask = mgf(H, emLen - hLen - 1) # 9)\n maskedDB = strxor(DB, dbMask) # 10)\n l = (8*emLen - emBits)/8 # 11)\n rem = 8*emLen - emBits - 8*l # additionnal bits\n andMask = l*'\\x00'\n if rem:\n j = chr(reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem))))\n andMask += j\n l += 1\n maskedDB = strand(maskedDB[:l], andMask) + maskedDB[l:]\n EM = maskedDB + H + '\\xbc' # 12)\n return EM # 13)", "def sign(self, message, randombytes=urandom):\r\n int_header = 0x30 + logn[self.n]\r\n header = int_header.to_bytes(1, \"little\")\r\n\r\n salt = randombytes(SALT_LEN)\r\n hashed = self.hash_to_point(message, salt)\r\n\r\n # We repeat the signing procedure until we find a signature that is\r\n # short enough (both the Euclidean norm and the bytelength)\r\n '''\r\n print(\"---------Inside sign----------\")\r\n '''\r\n while(1):\r\n if (randombytes == urandom):\r\n s = self.sample_preimage(hashed)\r\n '''\r\n print(\"s: \", s)\r\n '''\r\n else:\r\n seed = randombytes(SEED_LEN)\r\n s = self.sample_preimage(hashed, seed=seed)\r\n norm_sign = sum(coef ** 2 for coef in s[0])\r\n norm_sign += sum(coef ** 2 for coef in s[1])\r\n # Check the Euclidean norm\r\n if norm_sign <= self.signature_bound:\r\n\r\n enc_s = compress(s[1], self.sig_bytelen - HEAD_LEN - SALT_LEN)\r\n # Check that the encoding is valid (sometimes it fails)\r\n if (enc_s is not False):\r\n return header + salt + enc_s\r\n '''\r\n else:\r\n print(\"-------------INVALID encoding---------------\")\r\n\r\n else:\r\n print(\"-------------NOT within signature bound---------------\")\r\n '''", "def sign(private_key: RsaKey, content: dict) -> None:\n\n signer = PKCS1_v1_5.new(private_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n signature = signer.sign(h)\n\n return binascii.hexlify(signature).decode('ascii')", "def sign_rsa_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return rsa_sha1_signature(base_string, client.rsa_key)", "def publickey_unsafe(sk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n A = scalarmult_B(a)\n return encodepoint(A)", "def Sign(self, msg):\n # Need to chose a random k per-message, SystemRandom() is available\n # since Python 2.4.\n k = random.SystemRandom().randint(2, self.key.q-1)\n (r, s) = self.key.sign(util.Hash(msg), k)\n return util.MakeDsaSig(r, s)", "def sign(self, bytes):\r\n if not self.hasPrivateKey():\r\n raise AssertionError()\r\n paddedBytes = self._addPKCS1Padding(bytes, 1)\r\n m = bytesToNumber(paddedBytes)\r\n if m >= self.n:\r\n raise ValueError()\r\n c = self._rawPrivateKeyOp(m)\r\n sigBytes = numberToByteArray(c, numBytes(self.n))\r\n return sigBytes", "def ecdsa_sign(G, priv_sign, message):\n plaintext = message.encode(\"utf8\")\n digest = sha256(plaintext).digest()\n sig = do_ecdsa_sign(G,priv_sign,digest)\n\n return sig", "def signature_unsafe(m: bytes, sk: bytes, pk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n r = Hint(h[b // 8 : b // 4] + m)\n R = scalarmult_B(r)\n S = (r + Hint(encodepoint(R) + pk + m) * a) % l\n return encodepoint(R) + encodeint(S)", "def sign(data):\n return _make.sign(data)", "def 
sign_message(message: bytes, sender_private_key: RsaKey) -> bytes:\n return pkcs1_15.new(sender_private_key).sign(SHA256.new(message))", "def sign(self, message):\n\n # if not already a byte string turn it to making sure\n if not isinstance(message, (bytes, str)):\n return None\n elif isinstance(message, str):\n message = message.encode()\n\n hash_of_message = SHA256.new(message)\n\n signer = DSS.new(self.privkey, mode=\"fips-186-3\")\n\n digital_signature = signer.sign(hash_of_message)\n digital_signature = base64.b85encode(digital_signature).decode()\n\n return digital_signature", "def payToStakePKHScript(pkh, stakeCode):\n if len(pkh) != crypto.RIPEMD160_SIZE:\n raise DecredError(\n f\"pubkey hash must be {crypto.RIPEMD160_SIZE} bytes but is {len(pkh)}\"\n )\n if stakeCode not in (opcode.OP_SSTX, opcode.OP_SSTXCHANGE, opcode.OP_SSRTX):\n raise DecredError(\n f\"stake code is not sstx, sstxchange, or ssrtx: {hex(stakeCode)}\"\n )\n script = ByteArray(stakeCode)\n script += opcode.OP_DUP\n script += opcode.OP_HASH160\n script += addData(pkh)\n script += opcode.OP_EQUALVERIFY\n script += opcode.OP_CHECKSIG\n return script", "def sign(self, privateKey, message): \n signArray = self._winternitzHash(message)\n \n filler = '0' * (m / 8)\n retSig, retPub = [filler,] * l, [filler,] * l\n# benchmarkCount=0\n\n for i in xrange(0, l):\n signedValue = signArray[i]\n iterateHash = self._iterateHashInit(privateKey, i)\n# benchmarkCount += 1\n \n for j in xrange(0, w):\n if j == signedValue:\n retSig[i] = iterateHash\n iterateHash = hashAlgorithm(iterateHash).digest()\n # ^^ hash after potential push, therefore\n # no fear of leaking private key.\n# benchmarkCount += 1\n \n retPub[i] = iterateHash\n\n# print '# hashed %d times' % benchmarkCount\n\n return [retPub, retSig]", "def _rsassa_pkcs1_v1_5_sign(self, M, h):\n\n # 1) EMSA-PKCS1-v1_5 encoding\n k = self._modulusLen / 8\n EM = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)\n if EM is None:\n warning(\"Key._rsassa_pkcs1_v1_5_sign(): unable to encode\")\n return None\n\n # 2) RSA signature\n m = pkcs_os2ip(EM) # 2.a)\n s = self._rsasp1(m) # 2.b)\n S = pkcs_i2osp(s, k) # 2.c)\n\n return S # 3)", "def sign(self):\n private_key = serialization.load_pem_private_key(\n binascii.unhexlify(self.sender_private_key.encode('utf8')),\n password=None,\n backend=default_backend()\n )\n signature = private_key.sign(\n str(self.to_dict()).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n return signature", "def sign(self, data):\n from base64 import urlsafe_b64encode\n\n if self.sign_private == \"\":\n raise ValueError(\"Error signing: No private signing key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.sign_private)\n signature = key_private.Sign(data)\n return urlsafe_b64encode(signature)", "def pkcs_emsa_pss_verify(M, EM, emBits, h, mgf, sLen):\n\n # 1) is not done\n hLen = _hashFuncParams[h][0] # 2)\n hFunc = _hashFuncParams[h][2]\n mHash = hFunc(M)\n emLen = int(math.ceil(emBits/8.)) # 3)\n if emLen < hLen + sLen + 2:\n return False\n if EM[-1] != '\\xbc': # 4)\n return False\n l = emLen - hLen - 1 # 5)\n maskedDB = EM[:l]\n H = EM[l:l+hLen]\n l = (8*emLen - emBits)/8 # 6)\n rem = 8*emLen - emBits - 8*l # additionnal bits\n andMask = l*'\\xff'\n if rem:\n val = reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem)))\n j = chr(~val & 0xff)\n andMask += j\n l += 1\n if strand(maskedDB[:l], andMask) != '\\x00'*l:\n return False\n dbMask = mgf(H, emLen - hLen - 
1) # 7)\n DB = strxor(maskedDB, dbMask) # 8)\n l = (8*emLen - emBits)/8 # 9)\n rem = 8*emLen - emBits - 8*l # additionnal bits\n andMask = l*'\\x00'\n if rem:\n j = chr(reduce(lambda x,y: x+y, map(lambda x: 1<<x, range(8-rem))))\n andMask += j\n l += 1\n DB = strand(DB[:l], andMask) + DB[l:]\n l = emLen - hLen - sLen - 1 # 10)\n if DB[:l] != '\\x00'*(l-1) + '\\x01':\n return False\n salt = DB[-sLen:] # 11)\n MPrime = '\\x00'*8 + mHash + salt # 12)\n HPrime = hFunc(MPrime) # 13)\n return H == HPrime # 14)" ]
[ "0.65902674", "0.6372968", "0.59697056", "0.5959437", "0.5933867", "0.5871692", "0.5871691", "0.5791218", "0.57825357", "0.57733977", "0.5738437", "0.56678075", "0.5650177", "0.56263316", "0.5613606", "0.56071854", "0.5593046", "0.5567222", "0.5563613", "0.55477333", "0.5540616", "0.5533691", "0.5532212", "0.5528541", "0.548697", "0.5473628", "0.5443333", "0.5433671", "0.541784", "0.5415685" ]
0.68218243
0
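The RSASSA-PSS defaults in the record above (MGF1 over the same hash, salt length equal to the hash output length) map directly onto what current libraries expose. As a hedged illustration only, using SHA-256 rather than the record's SHA-1 default, the third-party cryptography package can produce and check an equivalent PSS signature:

    from cryptography.hazmat.primitives.asymmetric import rsa, padding
    from cryptography.hazmat.primitives import hashes

    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    pss = padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
                      salt_length=hashes.SHA256().digest_size)  # sLen = hLen
    sig = key.sign(b"message", pss, hashes.SHA256())
    key.public_key().verify(sig, b"message", pss, hashes.SHA256())  # raises on failure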
Implements RSASSA-PKCS1-v1_5-SIGN() function as described in Sect. 8.2.1 of RFC 3447.
def _rsassa_pkcs1_v1_5_sign(self, M, h):

    # 1) EMSA-PKCS1-v1_5 encoding
    k = self._modulusLen / 8
    EM = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)
    if EM is None:
        warning("Key._rsassa_pkcs1_v1_5_sign(): unable to encode")
        return None

    # 2) RSA signature
    m = pkcs_os2ip(EM)                         # 2.a)
    s = self._rsasp1(m)                        # 2.b)
    S = pkcs_i2osp(s, k)                       # 2.c)

    return S                                   # 3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign_rsa_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return rsa_sha1_signature(base_string, client.rsa_key)", "def Sign(self, bytes_to_sign, logf=None):\r\n # Implements PKCS1-v1_5 w/SHA256 over the bytes, and returns\r\n # the result as a base64url encoded bignum.\r\n\r\n self._Log(logf, 'bytes_to_sign = [%s]' % bytes_to_sign.encode('hex'))\r\n\r\n self._Log(logf, 'keypair size : %s' % self.keypair.size())\r\n\r\n # Generate the PKCS1-v1_5 compatible message, which includes\r\n # magic ASN.1 bytes and padding:\r\n emsa_msg = self._MakeEmsaMessageSha256(bytes_to_sign, self.keypair.size(), logf)\r\n # TODO(jpanzer): Check whether we need to use max keysize above\r\n # or just keypair.size\r\n\r\n self._Log(logf, 'emsa_msg = [%s]' % emsa_msg.encode('hex'))\r\n\r\n # Compute the signature:\r\n signature_long = self.keypair.sign(emsa_msg, None)[0]\r\n\r\n # Encode the signature as armored text:\r\n signature_bytes = number.long_to_bytes(signature_long)\r\n\r\n self._Log(logf, 'signature_bytes = [%s]' % signature_bytes.encode('hex'))\r\n\r\n return base64.urlsafe_b64encode(signature_bytes).encode('utf-8')", "def _rsassa_pkcs1_v1_5_verify(self, M, S, h):\n\n # 1) Length checking\n k = self._modulusLen / 8\n if len(S) != k:\n warning(\"invalid signature (len(S) != k)\")\n return False\n\n # 2) RSA verification\n s = pkcs_os2ip(S) # 2.a)\n m = self._rsavp1(s) # 2.b)\n EM = pkcs_i2osp(m, k) # 2.c)\n\n # 3) EMSA-PKCS1-v1_5 encoding\n EMPrime = pkcs_emsa_pkcs1_v1_5_encode(M, k, h)\n if EMPrime is None:\n warning(\"Key._rsassa_pkcs1_v1_5_verify(): unable to encode.\")\n return False\n\n # 4) Comparison\n return EM == EMPrime", "def rsa_sha1_signature(base_string, rsa_private_key):\n from .rsa import sign_sha1\n base_string = to_bytes(base_string)\n s = sign_sha1(to_bytes(base_string), rsa_private_key)\n sig = binascii.b2a_base64(s)[:-1]\n return to_unicode(sig)", "def Sign(self, data):\n return self.rsa_key.sign(data, padding.PKCS1v15(), utils.Prehashed(hashes.SHA1()))", "def sign(self, bytes):\r\n if not self.hasPrivateKey():\r\n raise AssertionError()\r\n paddedBytes = self._addPKCS1Padding(bytes, 1)\r\n m = bytesToNumber(paddedBytes)\r\n if m >= self.n:\r\n raise ValueError()\r\n c = self._rawPrivateKeyOp(m)\r\n sigBytes = numberToByteArray(c, numBytes(self.n))\r\n return sigBytes", "def sign(self, msg):\n z = int.from_bytes(helper.hash256(msg), \"big\")\n k = self.deterministic_k(z)\n k_inv = pow(k, N-2, N)\n r = (k*G).x.num\n s = (z + r * self.secret) * k_inv % N\n if s > N/2:\n s = N - s\n\n return Signature(r, s)", "def _rsassa_pss_sign(self, M, h=None, mgf=None, sLen=None):\n\n # Set default parameters if not provided\n if h is None: # By default, sha1\n h = \"sha1\"\n if not _hashFuncParams.has_key(h):\n warning(\"Key._rsassa_pss_sign(): unknown hash function \"\n \"provided (%s)\" % h)\n return None\n if mgf is None: # use mgf1 with underlying hash function\n mgf = lambda x,y: pkcs_mgf1(x, y, h)\n if sLen is None: # use Hash output length (A.2.3 of RFC 3447)\n hLen = _hashFuncParams[h][0]\n sLen = hLen\n\n # 1) EMSA-PSS encoding\n modBits = self._modulusLen\n k = modBits / 8\n EM = pkcs_emsa_pss_encode(M, modBits - 1, h, mgf, sLen)\n if EM is None:\n warning(\"Key._rsassa_pss_sign(): unable to encode\")\n return None\n\n # 2) RSA signature\n m = pkcs_os2ip(EM) # 2.a)\n s = self._rsasp1(m) # 2.b)\n S = pkcs_i2osp(s, k) # 2.c)\n\n return S # 3)", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def sign(self, message, randombytes=urandom):\r\n int_header = 
0x30 + logn[self.n]\r\n header = int_header.to_bytes(1, \"little\")\r\n\r\n salt = randombytes(SALT_LEN)\r\n hashed = self.hash_to_point(message, salt)\r\n\r\n # We repeat the signing procedure until we find a signature that is\r\n # short enough (both the Euclidean norm and the bytelength)\r\n '''\r\n print(\"---------Inside sign----------\")\r\n '''\r\n while(1):\r\n if (randombytes == urandom):\r\n s = self.sample_preimage(hashed)\r\n '''\r\n print(\"s: \", s)\r\n '''\r\n else:\r\n seed = randombytes(SEED_LEN)\r\n s = self.sample_preimage(hashed, seed=seed)\r\n norm_sign = sum(coef ** 2 for coef in s[0])\r\n norm_sign += sum(coef ** 2 for coef in s[1])\r\n # Check the Euclidean norm\r\n if norm_sign <= self.signature_bound:\r\n\r\n enc_s = compress(s[1], self.sig_bytelen - HEAD_LEN - SALT_LEN)\r\n # Check that the encoding is valid (sometimes it fails)\r\n if (enc_s is not False):\r\n return header + salt + enc_s\r\n '''\r\n else:\r\n print(\"-------------INVALID encoding---------------\")\r\n\r\n else:\r\n print(\"-------------NOT within signature bound---------------\")\r\n '''", "def sign(self, message):\n\n assert len(message) == 32\n assert self.sec is not None\n r, s = do_ecdsa_sign(self.G, self.sec, message, self.optim)\n r0, s0 = r.binary(), s.binary()\n assert len(r0) <= 32 and len(s0) <= 32\n sig = pack(\"H32sH32s\", len(r0), r0, len(s0), s0)\n return sig", "def sign(private_key: RsaKey, content: dict) -> None:\n\n signer = PKCS1_v1_5.new(private_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n signature = signer.sign(h)\n\n return binascii.hexlify(signature).decode('ascii')", "def _rsa_sign(blob, private_key_pem):\n # Lazy import crypto. It is not available in unit tests outside of sandbox.\n from Crypto.Hash import SHA256\n from Crypto.PublicKey import RSA\n from Crypto.Signature import PKCS1_v1_5\n pkey = RSA.importKey(private_key_pem)\n return PKCS1_v1_5.new(pkey).sign(SHA256.new(blob))", "def sign(self, msg, key):\n\n if not isinstance(key, ec.EllipticCurvePrivateKey):\n raise TypeError(\"The private key must be an instance of \" \"ec.EllipticCurvePrivateKey\")\n\n self._cross_check(key.public_key())\n num_bits = key.curve.key_size\n num_bytes = (num_bits + 7) // 8\n asn1sig = key.sign(msg, ec.ECDSA(self.hash_algorithm()))\n # Cryptography returns ASN.1-encoded signature data; decode as JWS\n # uses raw signatures (r||s)\n (r, s) = decode_dss_signature(asn1sig)\n return int.to_bytes(r, num_bytes, \"big\") + int.to_bytes(s, num_bytes, \"big\")", "def Sign(self, msg):\n # Need to chose a random k per-message, SystemRandom() is available\n # since Python 2.4.\n k = random.SystemRandom().randint(2, self.key.q-1)\n (r, s) = self.key.sign(util.Hash(msg), k)\n return util.MakeDsaSig(r, s)", "def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id", "def Sign(self, msg):\n emsa_encoded = util.MakeEmsaMessage(msg, self.size)\n return util.BigIntToBytes(self.key.sign(emsa_encoded, None)[0])", "def sign_hmac_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return 
hmac_sha1_signature(\n base_string, client.client_secret, client.token_secret)", "def sign(self):\n private_key = serialization.load_pem_private_key(\n binascii.unhexlify(self.sender_private_key.encode('utf8')),\n password=None,\n backend=default_backend()\n )\n signature = private_key.sign(\n str(self.to_dict()).encode('utf8'),\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n return signature", "def signature_unsafe(m: bytes, sk: bytes, pk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n r = Hint(h[b // 8 : b // 4] + m)\n R = scalarmult_B(r)\n S = (r + Hint(encodepoint(R) + pk + m) * a) % l\n return encodepoint(R) + encodeint(S)", "def sign(self, msg: Dict) -> Dict:\n ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm,\n f.SIGS.nm])\n bsig = self.naclSigner.signature(ser)\n sig = base58.b58encode(bsig).decode(\"utf-8\")\n return sig", "def sign(self, payload):\n raise NotImplementedError", "def Sign(self, msg):\n # Need to chose a random k per-message, SystemRandom() is available\n # since Python 2.4.\n k = random.SystemRandom().randint(2, self.key.q - 1)\n (r, s) = self.key.sign(util.Hash(msg), k)\n return util.MakeDsaSig(r, s)", "def _sign(self, data, salt):\r\n strBuffer = \"\"\r\n # print data.keys()\r\n for k in sorted(data.iterkeys()):\r\n\r\n # Handle the BOOL special case\r\n v = data[k]\r\n if type(v) == bool:\r\n if v:\r\n v = 1\r\n else:\r\n v = 0\r\n data[k] = v\r\n\r\n # Update buffer\r\n strBuffer += \"%s=%s\\n\" % (str(k).lower(), vmcp.myquote(str(v)))\r\n\r\n # Append salt\r\n strBuffer += salt\r\n return strBuffer", "def pkcs_emsa_pkcs1_v1_5_encode(M, emLen, h): # section 9.2 of RFC 3447\n hLen = _hashFuncParams[h][0] # 1)\n hFunc = _hashFuncParams[h][2]\n H = hFunc(M)\n hLeadingDigestInfo = _hashFuncParams[h][3] # 2)\n T = hLeadingDigestInfo + H\n tLen = len(T)\n if emLen < tLen + 11: # 3)\n warning(\"pkcs_emsa_pkcs1_v1_5_encode:\"\n \"intended encoded message length too short\")\n return None\n PS = '\\xff'*(emLen - tLen - 3) # 4)\n EM = '\\x00' + '\\x01' + PS + '\\x00' + T # 5)\n return EM # 6)", "def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()", "def gen_sig():\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()", "def sign(key, file, sign):\n\n try:\n key = TomlKeyFormatter().from_string(key.read())\n signature = Signature.sign(SignableBinaryIO(file), Md5, key)\n\n sign.write(TomlSignatureFormatter().to_string(signature))\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")", "def test_tte5_short_write_tile_signature(self):\n filename = str(self.temp_j2k_filename)\n xtx5_setup(filename, short_sig=True)\n self.assertTrue(True)", "def sign(sk: SecretKey, msgs: List[bytes]) -> Signature:\n assert(len(msgs) == len(sk.y))\n\n # pick generator\n h = G1.generator()\n exponent = sk.x + sum([y_i * Bn.from_binary(m_i)\n for (y_i, m_i) in zip(sk.y.values(), msgs)])\n\n return Signature(h, h**exponent) # type:ignore" ]
[ "0.6281883", "0.59832203", "0.5783482", "0.5714475", "0.5635768", "0.5571883", "0.5559704", "0.5497805", "0.54522115", "0.5442058", "0.54414326", "0.5421102", "0.5405827", "0.5400036", "0.53347284", "0.53216994", "0.53019977", "0.527801", "0.5203799", "0.51961136", "0.51927143", "0.51252556", "0.5111036", "0.51110214", "0.5110419", "0.5103539", "0.5100121", "0.50986046", "0.5076585", "0.5052523" ]
0.75837
0
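Taken together, the verify and sign records above form a deterministic round trip: signing encodes M to EM and applies RSASP1, verification applies RSAVP1 and re-encodes M, and the two encodings must match byte for byte. A minimal sketch of the same round trip with the third-party cryptography package (an assumption about tooling, not the code shown in these records):

    from cryptography.hazmat.primitives.asymmetric import rsa, padding
    from cryptography.hazmat.primitives import hashes

    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    sig = key.sign(b"message", padding.PKCS1v15(), hashes.SHA256())
    # verify() raises InvalidSignature if the re-encoded EM does not match
    key.public_key().verify(sig, b"message", padding.PKCS1v15(), hashes.SHA256())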
Concatenate all the certificates (PEM format for the export) in 'anchor_list' and write the result to file 'filename'. On success 'filename' is returned, None otherwise. If you are used to OpenSSL tools, this function builds a CAfile that can be used for certificate and CRL check. Also see create_temporary_ca_file().
def create_ca_file(anchor_list, filename):
    try:
        f = open(filename, "w")
        for a in anchor_list:
            s = a.output(fmt="PEM")
            f.write(s)
        f.close()
    except:
        return None
    return filename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_temporary_ca_file(anchor_list):\n try:\n f, fname = tempfile.mkstemp()\n for a in anchor_list:\n s = a.output(fmt=\"PEM\")\n l = os.write(f, s)\n os.close(f)\n except:\n return None\n return fname", "def create_temporary_ca_path(anchor_list, folder):\n # We should probably avoid writing duplicate anchors and also\n # check if they are all certs.\n try:\n if not os.path.isdir(folder):\n os.makedirs(folder)\n except:\n return None\n\n l = len(anchor_list)\n if l == 0:\n return None\n fmtstr = \"%%0%sd.pem\" % math.ceil(math.log(l, 10))\n i = 0\n try:\n for a in anchor_list:\n fname = os.path.join(folder, fmtstr % i)\n f = open(fname, \"w\")\n s = a.output(fmt=\"PEM\")\n f.write(s)\n f.close()\n i += 1\n except:\n return None\n\n r,w=popen2.popen2(\"c_rehash %s\" % folder)\n r.close(); w.close()\n\n return l", "def ca_file(tmpdir):\n key = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n public_key = key.public_key()\n\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n builder = builder.issuer_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n one_day = datetime.timedelta(1, 0, 0)\n builder = builder.not_valid_before(datetime.datetime.today() - one_day)\n builder = builder.not_valid_after(datetime.datetime.today() + one_day)\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = builder.public_key(public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None),\n critical=True,\n )\n\n certificate = builder.sign(private_key=key, algorithm=hashes.SHA256())\n\n ca_file = tmpdir.join(\"test.pem\")\n ca_file.write_binary(\n certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n )\n )\n\n return str(ca_file).encode(\"ascii\")", "def __build_anchors(anchor_parameters, features):\n anchors = [\n layers.Anchors(\n size=anchor_parameters.sizes[i],\n stride=anchor_parameters.strides[i],\n ratios=anchor_parameters.ratios,\n scales=anchor_parameters.scales,\n name='anchors_{}'.format(i)\n )(f) for i, f in enumerate(features)\n ]\n\n return keras.layers.Concatenate(axis=1, name='anchors')(anchors)", "def add_ca_certs(s3_client, certs):\n logger.info(\"Fetching CA certs and writing to filesystem\")\n\n # Determine which update-ca command to use and directory to store CAs in\n if command_exists(\"update-ca-trust\"):\n logger.info(\"update-ca-trust available\")\n update_ca_cmd = \"update-ca-trust\"\n ca_dir = \"/etc/pki/ca-trust/source/anchors/\"\n elif command_exists(\"update-ca-certificates\"):\n logger.info(\"update-ca-certificates available\")\n update_ca_cmd = \"update-ca-certificates\"\n ca_dir = \"/usr/local/share/ca-certificates/\"\n else:\n logger.error(\"Environment is missing required CA commands\")\n raise OSError(\n \"OS is missing a required command for CA trust. 
Either update-ca-trust or \"\n \"update-ca-certificates is required.\"\n )\n\n for cert_entry in certs:\n alias = cert_entry[\"alias\"]\n entry = cert_entry[\"cert\"]\n source = cert_entry[\"source\"]\n logger.info(\"...Processing cert with alias = {} from {}\".format(alias, source))\n\n pem_cert_body = fetch_cert(source, entry, s3_client)\n logger.debug(\"...cert body = {}\".format(pem_cert_body))\n\n with open(ca_dir + alias + \".crt\", \"a\") as f:\n f.write(str(pem_cert_body))\n\n logger.info(\"Updating CA trust\")\n os.system(update_ca_cmd)", "def save_ca():\n cert_file = os.environ.get('HOME') + '/.cat_installer/ca.pem'\n debug(\"saving cert\")\n with open(cert_file, 'w') as cert:\n cert.write(Config.CA + \"\\n\")", "def store_anchors(base_url, anchors, path=\"logs/\"):\n\n url_filename = url_to_filename(base_url)\n filename = f\"{path}ANCHORS-{url_filename}.txt\"\n\n if os.path.isfile(filename):\n with open(filename, \"rb\") as fp:\n all_anchors = pickle.load(fp)\n all_anchors.append(anchors)\n else:\n all_anchors = anchors\n\n with open(filename, \"wb\") as fp:\n pickle.dump(all_anchors, fp)", "def write_anchor(args, synteny_parent=None, mailbox_reader=None):\n idx = args[0]\n with mailbox_reader(idx) as file_handle:\n anchor_frame = pd.read_csv(\n file_handle, sep=\"\\t\", index_col=0\n ).convert_dtypes()\n in_anchor = len(anchor_frame)\n if in_anchor == 0:\n return None\n # drop any duplicated ID's--normally shouldn't happen\n anchor_frame.drop(\n anchor_frame[anchor_frame.index.duplicated()].index, inplace=True\n )\n anchor_frame.sort_values(\n by=[\"syn.anchor.sub_id\", \"frag.idx\", \"frag.pos\"], inplace=True\n )\n # Make a dictionary of common anchor properties, order will be kept\n anchor_props = {\n \"anchor.id\": idx,\n \"sub\": None,\n \"code\": None,\n \"count\": None,\n \"n\": None,\n \"n_ambig\": None,\n \"n_adj\": None,\n \"adj_groups\": None,\n \"frag.direction\": None,\n \"syn.anchor.direction\": None,\n \"anchor.subframe.ok\": True,\n \"hash\": None,\n }\n code_set = set(anchor_frame[\"syn.code\"])\n for test_code in CODE_DICT.keys():\n if test_code in code_set:\n anchor_props[\"code\"] = test_code\n break\n bad_subframe = False\n prop_list = []\n for sub_no, subframe in anchor_frame.groupby(by=[\"syn.anchor.sub_id\"]):\n (subanchor_props, anchor_subframe, bad_subframe) = _subframe_props(\n anchor_props, subframe, sub_no\n )\n if bad_subframe:\n break\n write_tsv_or_parquet(\n anchor_subframe,\n synteny_parent / f\"{idx}.{sub_no}.{SYNTENY_FILETYPE}\",\n sort_cols=False,\n )\n prop_list.append(subanchor_props)\n if bad_subframe: # Probably means a hash collision\n logger.error(f\"bad anchor set {idx}\")\n prop_list = []\n sub_no = 0\n anchor_props[\"anchor.subframe.ok\"] = False\n for cluster_id, subframe in anchor_frame.groupby(by=[\"hom.cluster\"]):\n (\n subanchor_props,\n anchor_subframe,\n unused_bad_subframe,\n ) = _subframe_props(anchor_props, subframe, sub_no)\n write_tsv_or_parquet(\n anchor_subframe,\n synteny_parent / f\"{idx}.{sub_no}.{SYNTENY_FILETYPE}\",\n sort_cols=False,\n )\n sub_no += 1\n prop_list.append(subanchor_props)\n return prop_list", "def add_cert_and_key(priv_key, cert_list, alias):\n logger.info(\"Writing certificate and private key to filesystem\")\n\n # Determine which directory to store certs in\n if command_exists(\"update-ca-trust\"):\n ca_dir = \"/etc/pki/tls\"\n elif command_exists(\"update-ca-certificates\"):\n ca_dir = \"/etc/ssl\"\n else:\n logger.error(\"Cannot determine certs directory\")\n raise OSError(\n \"OS is missing a 
required command for CA trust. Either update-ca-trust or \"\n \"update-ca-certificates is required.\"\n )\n\n logger.info(\"Using cert directory:\" + ca_dir)\n\n with open(ca_dir + \"/private/\" + alias + \".key\", \"a\") as f:\n f.write(str(priv_key))\n\n for cert in cert_list:\n with open(ca_dir + \"/certs/\" + alias + \".crt\", \"a\") as f:\n f.write(cert)", "def test_set_one_ca_list(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.set_client_ca_list([cadesc])\n return [cadesc]\n\n self._check_client_ca_list(single_ca)", "def write_CA_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n ca_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM' and line[12:16] == \" CA \":\n line_split = line.split()[6:9]\n ca_list.append(line_split)\n choice1 = input('Enter name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in ca_list:\n outfile.writelines(i)\n print('Done!')\n print(i)", "def gen_ca():\n require_root()\n\n config.proxy.gen_ca_certs()\n log.info('OK')", "def _generate_certificates(certfile_path: str, keyfile_path: str,\n common_name: str) -> None:\n ca_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,\n _CA_KEY)\n ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,\n _CA_CERT)\n\n k = OpenSSL.crypto.PKey()\n k.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)\n\n cert = OpenSSL.crypto.X509()\n cert.get_subject().C = 'US'\n cert.get_subject().CN = common_name\n cert.set_serial_number(random.randint(0, 2**64))\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)\n cert.set_issuer(ca_cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(ca_key, 'sha512')\n with open(certfile_path, \"w\") as f:\n f.write(\n OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,\n cert).decode(\"utf-8\"))\n f.write(_CA_CERT)\n with open(keyfile_path, \"w\") as f:\n f.write(\n OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,\n k).decode(\"utf-8\"))", "def test_one_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.add_client_ca(cacert)\n return [cadesc]\n\n self._check_client_ca_list(single_ca)", "def combine_fasta_files(fastas_paths, out_file):\n with open(out_file, 'w') as out:\n for filename in fastas_paths:\n for seq_record in SeqIO.parse(filename, \"fasta\"):\n out.write('>' + str(seq_record.id) + '\\n' + str(seq_record.seq) + '\\n')", "def fusion_api_import_external_ca_certificates(self, body, api=None, headers=None):\n return self.ca.add(body, api=api, headers=headers)", "def get_aws_certificate_chain(all_aws_data):\n downloaded_cert = all_aws_data[\"Certificate\"]\n cert_chain = [downloaded_cert]\n\n downloaded_chain = all_aws_data[\"CertificateChain\"].split(certificate_suffix)\n downloaded_chain.pop()\n\n for index in range(len(downloaded_chain)):\n downloaded_chain[index] = (downloaded_chain[index] + certificate_suffix).strip()\n\n cert_chain.extend(downloaded_chain)\n return cert_chain", "def one_file(file_list, file_path, aa, n, idx=None,raa=None):\r\n \r\n if os.path.isdir(file_path):\r\n file_name = f'{idx}_{n}n.csv'\r\n file_path = os.path.join(file_path, file_name)\r\n elif os.path.isfile(file_path):\r\n 
file_path = file_path\r\n with open(file_path, 'w') as handle:\r\n h = csv.writer(handle)\r\n for idx, file in enumerate(file_list):\r\n f = open(file, 'r')\r\n seq = read_fasta(f)\r\n simple_seq = reduce(seq, aa, raa)\r\n if not raa:\r\n raa = [i[0] for i in aa]\r\n base_aac = seq_aac(simple_seq, raa, n)\r\n for a in base_aac:\r\n line0 = [v for v in a[1]]\r\n line1 = [idx] + line0\r\n h.writerow(line1)\r\n f.close()", "def handle_cacert(self):\n\n file = open(\"./certs/cacert.p7b\", \"r\")\n ca_certs = file.read()\n\n self.set_est_rsp_header(len(ca_certs))\n\n self.wfile.write(ca_certs.encode('utf-8'))", "def generate_root_CA():\n\n ##generating root key\n\n root_private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend())\n\n\n ##self-sign and generate the root certificate\n\n root_public_key = root_private_key.public_key()\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, u'Northeastern SSL Test CA'),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'Northeastern'),\n x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'SSL Clock Skews'),\n ]))\n\n builder = builder.issuer_name(x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, u'Northeastern SSL Test CA'),\n ]))\n builder = builder.not_valid_before(datetime.datetime.today() - datetime.timedelta(days=1))\n builder = builder.not_valid_after(datetime.datetime(2019, 12, 31))\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = builder.public_key(root_public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None), critical=True,)\n\n root_certificate = builder.sign(\n private_key=root_private_key, algorithm=hashes.SHA256(),\n backend=default_backend()\n )\n\n\n ##write to disk\n \n\n\n with open(\"rootCA.key\", \"wb\") as f:\n f.write(root_private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n ))\n\n with open(\"rootCA.crt\", \"wb\") as f:\n f.write(root_certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n ))\n\n return root_private_key, root_certificate", "def create_CA(dn):\n cmd_genrsa = [\"openssl\",\n \"genrsa\",\n \"-aes256\",\n \"-out\", f'{pki_dir}/ca.key',\n \"-passout\", f'pass:{ca_password}',\n f'{rsa_keysize}']\n cmd_req = [\"openssl\",\n \"req\",\n \"-new\",\n \"-x509\",\n \"-days\", \"999999\",\n \"-sha256\",\n \"-key\", f'{pki_dir}/ca.key',\n \"-out\", server_key_files[\"ca\"],\n \"-subj\", f'{dn}',\n \"-passin\", f'pass:{ca_password}']\n cmds = [cmd_genrsa, cmd_req]\n for cmd in cmds:\n exec_cmd(cmd)", "def zip_files(file_list, output_path):\n bname = os.path.basename # for efficiency\n with zipfile.ZipFile(output_path, mode='w') as zf:\n # adding all fasta files\n for file_name in file_list:\n zf.write(file_name, bname(file_name))\n return output_path", "def write_fasta(sequences_hash, output_fasta, concatenate_duplicates=True):\n with open(output_fasta, \"w+\") as fasta_object:\n for sequence in sequences_hash:\n if concatenate_duplicates:\n sequence_id = \"__\".join(sequences_hash[sequence])\n fasta_object.write(\">{}\\n{}\\n\".format(sequence_id, sequence))\n else:\n sequence_id = sequence\n sequence = sequences_hash[sequence_id][0]\n fasta_object.write(\">{}\\n{}\\n\".format(sequence_id, sequence))", "def test_multiple_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = 
load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n\n def multiple_ca(ctx):\n ctx.add_client_ca(cacert)\n ctx.add_client_ca(secert)\n return [cadesc, sedesc]\n\n self._check_client_ca_list(multiple_ca)", "def write_cert(filename, content):\r\n with open(filename, 'w') as cert_file:\r\n cert_file.write(content)", "def _write_certs_to_files(self):\n # pylint: disable=consider-using-with\n self.temp_dir_object = tempfile.TemporaryDirectory()\n temp_dir = self.temp_dir_object.name\n\n # store as temporary files for the mesh client\n self.client_cert_file = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)\n client_cert = self.params[MeshMailbox.MESH_CLIENT_CERT]\n self.client_cert_file.write(client_cert.encode(\"utf-8\"))\n self.client_cert_file.seek(0)\n\n self.client_key_file = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)\n client_key = self.params[MeshMailbox.MESH_CLIENT_KEY]\n self.client_key_file.write(client_key.encode(\"utf-8\"))\n self.client_key_file.seek(0)\n\n self.ca_cert_file = None\n if self.params.get(\"MESH_VERIFY_SSL\", False) == \"True\":\n self.ca_cert_file = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)\n ca_cert = self.params[MeshMailbox.MESH_CA_CERT]\n self.ca_cert_file.write(ca_cert.encode(\"utf-8\"))\n self.ca_cert_file.seek(0)\n # pylint: enable=consider-using-with", "def export_list_to_xacro(list, filename):\n global robot, OUTPUT\n doc = Document()\n root = doc.createElement('robot')\n doc.appendChild(root)\n root.setAttribute(\"xmlns:xacro\", \"http://www.ros.org/wiki/xacro\")\n print ('exporting ' + os.path.basename(filename))\n for string in list:\n for link in robot.links:\n if robot.links[link].name.find(string) != -1:\n root.appendChild(robot.links[link].to_xml(doc))\n for joint in robot.joints:\n if robot.joints[joint].child == robot.links[link].name:\n root.appendChild(robot.joints[joint].to_xml(doc))\n write_comments_in_xacro(doc, filename)", "def merge(self, output_file: Path = None, prepend_file_name: bool = False) -> None:\n if output_file is None:\n output_file = self.input_dir / \"merged.fasta\"\n else:\n output_file = Path(output_file)\n logger.info(\"Merging FASTA files in input directory\")\n cmd_str = f\"printf '%s\\\\0' * | xargs -0 cat > {output_file}\"\n if prepend_file_name:\n with tempfile.TemporaryDirectory() as tempdir:\n self.prepend_filename_to_record_names(output_dir=tempdir)\n utils.terminal_execute(cmd_str, work_dir=tempdir)\n else:\n utils.terminal_execute(cmd_str, work_dir=self.input_dir)\n logging.shutdown()", "def JoinFiles(files):\n configlet = ''\n for f in files:\n # Let IOErrors happen naturally.\n configlet = configlet + (open(f).read())\n return configlet", "def create_fasta(self):\n with open(generate_path(\"tmp/validate.fasta\"), \"w\") as file_:\n for (accession_name, seq) in self.seqdata.contigs:\n file_.write(\">%s\\n%s\\n\" %(self.seqdata.accession, seq))" ]
[ "0.7813592", "0.7130165", "0.54865235", "0.5233345", "0.52280974", "0.5154805", "0.51458985", "0.50976825", "0.5081456", "0.5056192", "0.49436203", "0.49168116", "0.48983476", "0.48318136", "0.482673", "0.48083445", "0.480649", "0.4801994", "0.4781368", "0.47468302", "0.47389206", "0.47004485", "0.46779212", "0.4639706", "0.4637127", "0.46318322", "0.45918667", "0.45847988", "0.4543791", "0.45435235" ]
0.84687245
0
Concatenate all the certificates (PEM format for the export) in 'anchor_list' and write the result to file to a temporary file using mkstemp() from tempfile module. On success 'filename' is returned, None otherwise. If you are used to OpenSSL tools, this function builds a CAfile that can be used for certificate and CRL check.
def create_temporary_ca_file(anchor_list): try: f, fname = tempfile.mkstemp() for a in anchor_list: s = a.output(fmt="PEM") l = os.write(f, s) os.close(f) except: return None return fname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_ca_file(anchor_list, filename):\n try:\n f = open(filename, \"w\")\n for a in anchor_list:\n s = a.output(fmt=\"PEM\")\n f.write(s)\n f.close()\n except:\n return None\n return filename", "def create_temporary_ca_path(anchor_list, folder):\n # We should probably avoid writing duplicate anchors and also\n # check if they are all certs.\n try:\n if not os.path.isdir(folder):\n os.makedirs(folder)\n except:\n return None\n\n l = len(anchor_list)\n if l == 0:\n return None\n fmtstr = \"%%0%sd.pem\" % math.ceil(math.log(l, 10))\n i = 0\n try:\n for a in anchor_list:\n fname = os.path.join(folder, fmtstr % i)\n f = open(fname, \"w\")\n s = a.output(fmt=\"PEM\")\n f.write(s)\n f.close()\n i += 1\n except:\n return None\n\n r,w=popen2.popen2(\"c_rehash %s\" % folder)\n r.close(); w.close()\n\n return l", "def ca_file(tmpdir):\n key = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n public_key = key.public_key()\n\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n builder = builder.issuer_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n one_day = datetime.timedelta(1, 0, 0)\n builder = builder.not_valid_before(datetime.datetime.today() - one_day)\n builder = builder.not_valid_after(datetime.datetime.today() + one_day)\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = builder.public_key(public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None),\n critical=True,\n )\n\n certificate = builder.sign(private_key=key, algorithm=hashes.SHA256())\n\n ca_file = tmpdir.join(\"test.pem\")\n ca_file.write_binary(\n certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n )\n )\n\n return str(ca_file).encode(\"ascii\")", "def __build_anchors(anchor_parameters, features):\n anchors = [\n layers.Anchors(\n size=anchor_parameters.sizes[i],\n stride=anchor_parameters.strides[i],\n ratios=anchor_parameters.ratios,\n scales=anchor_parameters.scales,\n name='anchors_{}'.format(i)\n )(f) for i, f in enumerate(features)\n ]\n\n return keras.layers.Concatenate(axis=1, name='anchors')(anchors)", "def add_ca_certs(s3_client, certs):\n logger.info(\"Fetching CA certs and writing to filesystem\")\n\n # Determine which update-ca command to use and directory to store CAs in\n if command_exists(\"update-ca-trust\"):\n logger.info(\"update-ca-trust available\")\n update_ca_cmd = \"update-ca-trust\"\n ca_dir = \"/etc/pki/ca-trust/source/anchors/\"\n elif command_exists(\"update-ca-certificates\"):\n logger.info(\"update-ca-certificates available\")\n update_ca_cmd = \"update-ca-certificates\"\n ca_dir = \"/usr/local/share/ca-certificates/\"\n else:\n logger.error(\"Environment is missing required CA commands\")\n raise OSError(\n \"OS is missing a required command for CA trust. 
Either update-ca-trust or \"\n \"update-ca-certificates is required.\"\n )\n\n for cert_entry in certs:\n alias = cert_entry[\"alias\"]\n entry = cert_entry[\"cert\"]\n source = cert_entry[\"source\"]\n logger.info(\"...Processing cert with alias = {} from {}\".format(alias, source))\n\n pem_cert_body = fetch_cert(source, entry, s3_client)\n logger.debug(\"...cert body = {}\".format(pem_cert_body))\n\n with open(ca_dir + alias + \".crt\", \"a\") as f:\n f.write(str(pem_cert_body))\n\n logger.info(\"Updating CA trust\")\n os.system(update_ca_cmd)", "def store_anchors(base_url, anchors, path=\"logs/\"):\n\n url_filename = url_to_filename(base_url)\n filename = f\"{path}ANCHORS-{url_filename}.txt\"\n\n if os.path.isfile(filename):\n with open(filename, \"rb\") as fp:\n all_anchors = pickle.load(fp)\n all_anchors.append(anchors)\n else:\n all_anchors = anchors\n\n with open(filename, \"wb\") as fp:\n pickle.dump(all_anchors, fp)", "def _write_certs_to_files(self):\n # pylint: disable=consider-using-with\n self.temp_dir_object = tempfile.TemporaryDirectory()\n temp_dir = self.temp_dir_object.name\n\n # store as temporary files for the mesh client\n self.client_cert_file = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)\n client_cert = self.params[MeshMailbox.MESH_CLIENT_CERT]\n self.client_cert_file.write(client_cert.encode(\"utf-8\"))\n self.client_cert_file.seek(0)\n\n self.client_key_file = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)\n client_key = self.params[MeshMailbox.MESH_CLIENT_KEY]\n self.client_key_file.write(client_key.encode(\"utf-8\"))\n self.client_key_file.seek(0)\n\n self.ca_cert_file = None\n if self.params.get(\"MESH_VERIFY_SSL\", False) == \"True\":\n self.ca_cert_file = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)\n ca_cert = self.params[MeshMailbox.MESH_CA_CERT]\n self.ca_cert_file.write(ca_cert.encode(\"utf-8\"))\n self.ca_cert_file.seek(0)\n # pylint: enable=consider-using-with", "def add_cert_and_key(priv_key, cert_list, alias):\n logger.info(\"Writing certificate and private key to filesystem\")\n\n # Determine which directory to store certs in\n if command_exists(\"update-ca-trust\"):\n ca_dir = \"/etc/pki/tls\"\n elif command_exists(\"update-ca-certificates\"):\n ca_dir = \"/etc/ssl\"\n else:\n logger.error(\"Cannot determine certs directory\")\n raise OSError(\n \"OS is missing a required command for CA trust. 
Either update-ca-trust or \"\n \"update-ca-certificates is required.\"\n )\n\n logger.info(\"Using cert directory:\" + ca_dir)\n\n with open(ca_dir + \"/private/\" + alias + \".key\", \"a\") as f:\n f.write(str(priv_key))\n\n for cert in cert_list:\n with open(ca_dir + \"/certs/\" + alias + \".crt\", \"a\") as f:\n f.write(cert)", "def write_anchor(args, synteny_parent=None, mailbox_reader=None):\n idx = args[0]\n with mailbox_reader(idx) as file_handle:\n anchor_frame = pd.read_csv(\n file_handle, sep=\"\\t\", index_col=0\n ).convert_dtypes()\n in_anchor = len(anchor_frame)\n if in_anchor == 0:\n return None\n # drop any duplicated ID's--normally shouldn't happen\n anchor_frame.drop(\n anchor_frame[anchor_frame.index.duplicated()].index, inplace=True\n )\n anchor_frame.sort_values(\n by=[\"syn.anchor.sub_id\", \"frag.idx\", \"frag.pos\"], inplace=True\n )\n # Make a dictionary of common anchor properties, order will be kept\n anchor_props = {\n \"anchor.id\": idx,\n \"sub\": None,\n \"code\": None,\n \"count\": None,\n \"n\": None,\n \"n_ambig\": None,\n \"n_adj\": None,\n \"adj_groups\": None,\n \"frag.direction\": None,\n \"syn.anchor.direction\": None,\n \"anchor.subframe.ok\": True,\n \"hash\": None,\n }\n code_set = set(anchor_frame[\"syn.code\"])\n for test_code in CODE_DICT.keys():\n if test_code in code_set:\n anchor_props[\"code\"] = test_code\n break\n bad_subframe = False\n prop_list = []\n for sub_no, subframe in anchor_frame.groupby(by=[\"syn.anchor.sub_id\"]):\n (subanchor_props, anchor_subframe, bad_subframe) = _subframe_props(\n anchor_props, subframe, sub_no\n )\n if bad_subframe:\n break\n write_tsv_or_parquet(\n anchor_subframe,\n synteny_parent / f\"{idx}.{sub_no}.{SYNTENY_FILETYPE}\",\n sort_cols=False,\n )\n prop_list.append(subanchor_props)\n if bad_subframe: # Probably means a hash collision\n logger.error(f\"bad anchor set {idx}\")\n prop_list = []\n sub_no = 0\n anchor_props[\"anchor.subframe.ok\"] = False\n for cluster_id, subframe in anchor_frame.groupby(by=[\"hom.cluster\"]):\n (\n subanchor_props,\n anchor_subframe,\n unused_bad_subframe,\n ) = _subframe_props(anchor_props, subframe, sub_no)\n write_tsv_or_parquet(\n anchor_subframe,\n synteny_parent / f\"{idx}.{sub_no}.{SYNTENY_FILETYPE}\",\n sort_cols=False,\n )\n sub_no += 1\n prop_list.append(subanchor_props)\n return prop_list", "def save_ca():\n cert_file = os.environ.get('HOME') + '/.cat_installer/ca.pem'\n debug(\"saving cert\")\n with open(cert_file, 'w') as cert:\n cert.write(Config.CA + \"\\n\")", "def gen_ca():\n require_root()\n\n config.proxy.gen_ca_certs()\n log.info('OK')", "def generate_root_CA():\n\n ##generating root key\n\n root_private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend())\n\n\n ##self-sign and generate the root certificate\n\n root_public_key = root_private_key.public_key()\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, u'Northeastern SSL Test CA'),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'Northeastern'),\n x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'SSL Clock Skews'),\n ]))\n\n builder = builder.issuer_name(x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, u'Northeastern SSL Test CA'),\n ]))\n builder = builder.not_valid_before(datetime.datetime.today() - datetime.timedelta(days=1))\n builder = builder.not_valid_after(datetime.datetime(2019, 12, 31))\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = 
builder.public_key(root_public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None), critical=True,)\n\n root_certificate = builder.sign(\n private_key=root_private_key, algorithm=hashes.SHA256(),\n backend=default_backend()\n )\n\n\n ##write to disk\n \n\n\n with open(\"rootCA.key\", \"wb\") as f:\n f.write(root_private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n ))\n\n with open(\"rootCA.crt\", \"wb\") as f:\n f.write(root_certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n ))\n\n return root_private_key, root_certificate", "def test_set_one_ca_list(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.set_client_ca_list([cadesc])\n return [cadesc]\n\n self._check_client_ca_list(single_ca)", "def _generate_certificates(certfile_path: str, keyfile_path: str,\n common_name: str) -> None:\n ca_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,\n _CA_KEY)\n ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,\n _CA_CERT)\n\n k = OpenSSL.crypto.PKey()\n k.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)\n\n cert = OpenSSL.crypto.X509()\n cert.get_subject().C = 'US'\n cert.get_subject().CN = common_name\n cert.set_serial_number(random.randint(0, 2**64))\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)\n cert.set_issuer(ca_cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(ca_key, 'sha512')\n with open(certfile_path, \"w\") as f:\n f.write(\n OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,\n cert).decode(\"utf-8\"))\n f.write(_CA_CERT)\n with open(keyfile_path, \"w\") as f:\n f.write(\n OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,\n k).decode(\"utf-8\"))", "def write_CA_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n ca_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM' and line[12:16] == \" CA \":\n line_split = line.split()[6:9]\n ca_list.append(line_split)\n choice1 = input('Enter name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in ca_list:\n outfile.writelines(i)\n print('Done!')\n print(i)", "def get_aws_certificate_chain(all_aws_data):\n downloaded_cert = all_aws_data[\"Certificate\"]\n cert_chain = [downloaded_cert]\n\n downloaded_chain = all_aws_data[\"CertificateChain\"].split(certificate_suffix)\n downloaded_chain.pop()\n\n for index in range(len(downloaded_chain)):\n downloaded_chain[index] = (downloaded_chain[index] + certificate_suffix).strip()\n\n cert_chain.extend(downloaded_chain)\n return cert_chain", "def zip_files(file_list, output_path):\n bname = os.path.basename # for efficiency\n with zipfile.ZipFile(output_path, mode='w') as zf:\n # adding all fasta files\n for file_name in file_list:\n zf.write(file_name, bname(file_name))\n return output_path", "def write_fasta(sequences_hash, output_fasta, concatenate_duplicates=True):\n with open(output_fasta, \"w+\") as fasta_object:\n for sequence in sequences_hash:\n if concatenate_duplicates:\n sequence_id = \"__\".join(sequences_hash[sequence])\n fasta_object.write(\">{}\\n{}\\n\".format(sequence_id, sequence))\n else:\n sequence_id = 
sequence\n sequence = sequences_hash[sequence_id][0]\n fasta_object.write(\">{}\\n{}\\n\".format(sequence_id, sequence))", "def one_file(file_list, file_path, aa, n, idx=None,raa=None):\r\n \r\n if os.path.isdir(file_path):\r\n file_name = f'{idx}_{n}n.csv'\r\n file_path = os.path.join(file_path, file_name)\r\n elif os.path.isfile(file_path):\r\n file_path = file_path\r\n with open(file_path, 'w') as handle:\r\n h = csv.writer(handle)\r\n for idx, file in enumerate(file_list):\r\n f = open(file, 'r')\r\n seq = read_fasta(f)\r\n simple_seq = reduce(seq, aa, raa)\r\n if not raa:\r\n raa = [i[0] for i in aa]\r\n base_aac = seq_aac(simple_seq, raa, n)\r\n for a in base_aac:\r\n line0 = [v for v in a[1]]\r\n line1 = [idx] + line0\r\n h.writerow(line1)\r\n f.close()", "def combine_fasta_files(fastas_paths, out_file):\n with open(out_file, 'w') as out:\n for filename in fastas_paths:\n for seq_record in SeqIO.parse(filename, \"fasta\"):\n out.write('>' + str(seq_record.id) + '\\n' + str(seq_record.seq) + '\\n')", "def create_fasta(self):\n with open(generate_path(\"tmp/validate.fasta\"), \"w\") as file_:\n for (accession_name, seq) in self.seqdata.contigs:\n file_.write(\">%s\\n%s\\n\" %(self.seqdata.accession, seq))", "def create_CA(dn):\n cmd_genrsa = [\"openssl\",\n \"genrsa\",\n \"-aes256\",\n \"-out\", f'{pki_dir}/ca.key',\n \"-passout\", f'pass:{ca_password}',\n f'{rsa_keysize}']\n cmd_req = [\"openssl\",\n \"req\",\n \"-new\",\n \"-x509\",\n \"-days\", \"999999\",\n \"-sha256\",\n \"-key\", f'{pki_dir}/ca.key',\n \"-out\", server_key_files[\"ca\"],\n \"-subj\", f'{dn}',\n \"-passin\", f'pass:{ca_password}']\n cmds = [cmd_genrsa, cmd_req]\n for cmd in cmds:\n exec_cmd(cmd)", "def test_one_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.add_client_ca(cacert)\n return [cadesc]\n\n self._check_client_ca_list(single_ca)", "def _create_certificate_chain():\n caext = X509Extension(b\"basicConstraints\", False, b\"CA:true\")\n not_after_date = datetime.date.today() + datetime.timedelta(days=365)\n not_after = not_after_date.strftime(\"%Y%m%d%H%M%SZ\").encode(\"ascii\")\n\n # Step 1\n cakey = PKey()\n cakey.generate_key(TYPE_RSA, 2048)\n cacert = X509()\n cacert.set_version(2)\n cacert.get_subject().commonName = \"Authority Certificate\"\n cacert.set_issuer(cacert.get_subject())\n cacert.set_pubkey(cakey)\n cacert.set_notBefore(b\"20000101000000Z\")\n cacert.set_notAfter(not_after)\n cacert.add_extensions([caext])\n cacert.set_serial_number(0)\n cacert.sign(cakey, \"sha256\")\n\n # Step 2\n ikey = PKey()\n ikey.generate_key(TYPE_RSA, 2048)\n icert = X509()\n icert.set_version(2)\n icert.get_subject().commonName = \"Intermediate Certificate\"\n icert.set_issuer(cacert.get_subject())\n icert.set_pubkey(ikey)\n icert.set_notBefore(b\"20000101000000Z\")\n icert.set_notAfter(not_after)\n icert.add_extensions([caext])\n icert.set_serial_number(0)\n icert.sign(cakey, \"sha256\")\n\n # Step 3\n skey = PKey()\n skey.generate_key(TYPE_RSA, 2048)\n scert = X509()\n scert.set_version(2)\n scert.get_subject().commonName = \"Server Certificate\"\n scert.set_issuer(icert.get_subject())\n scert.set_pubkey(skey)\n scert.set_notBefore(b\"20000101000000Z\")\n scert.set_notAfter(not_after)\n scert.add_extensions(\n [X509Extension(b\"basicConstraints\", True, b\"CA:false\")]\n )\n scert.set_serial_number(0)\n scert.sign(ikey, \"sha256\")\n\n return [(cakey, cacert), (ikey, icert), (skey, scert)]", "def handle_cacert(self):\n\n 
file = open(\"./certs/cacert.p7b\", \"r\")\n ca_certs = file.read()\n\n self.set_est_rsp_header(len(ca_certs))\n\n self.wfile.write(ca_certs.encode('utf-8'))", "def createSequenceFile(sequences, tmpDir, filename='seq.fa'):\n seqfile = os.path.join(tmpDir, filename)\n with open(seqfile, 'w') as f:\n for name, sequence in sequences.iteritems():\n f.write(\">{}\\n{}\\n\".format(name, sequence))\n subprocess.call(\"pyfasta flatten {}\".format(seqfile), shell=True)\n return seqfile", "def _make_archive(file_list, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in file_list:\n zipf.write(f, os.path.relpath(f, root))", "def merge(self, output_file: Path = None, prepend_file_name: bool = False) -> None:\n if output_file is None:\n output_file = self.input_dir / \"merged.fasta\"\n else:\n output_file = Path(output_file)\n logger.info(\"Merging FASTA files in input directory\")\n cmd_str = f\"printf '%s\\\\0' * | xargs -0 cat > {output_file}\"\n if prepend_file_name:\n with tempfile.TemporaryDirectory() as tempdir:\n self.prepend_filename_to_record_names(output_dir=tempdir)\n utils.terminal_execute(cmd_str, work_dir=tempdir)\n else:\n utils.terminal_execute(cmd_str, work_dir=self.input_dir)\n logging.shutdown()", "def generate_ca(properties, host, isoverwrite):\n java_home = read_conf_file(properties, \"env\", \"JAVA_HOME\")\n java = java_home+'/bin/java'\n logger.info(\"Using JAVA {0}...\".format(java))\n\n try:\n os.path.exists(CA_DIR)\n except OSError:\n raise\n logger.info(\"Using {0} as base path.\".format(CA_DIR))\n if os.path.exists(properties):\n ca_props = read_ca_conf_file(properties, \"caprops\")\n logger.debug(\"CA properties are:\".format(ca_props))\n opdir = os.path.abspath(read_conf_file(properties, \"caprops\", \"outputDirectory\"))\n toolkit_cmd = [java, '-jar', '-Xms12m', '-Xmx24m', CA_DIR + '/lib/ssl_manager-1.5.0-jar-with-dependencies.jar'\n , 'standalone', '--certificateAuthorityHostname', read_conf_file(properties, \"caprops\", \"caName\")]\n if isoverwrite is True:\n toolkit_cmd.append(\"--isOverwrite\")\n create_ca = toolkit_cmd + ca_props\n logger.debug(\"tls toolkit args are : {0}\".format(create_ca))\n cacmd = subprocess.Popen(create_ca)\n cacmd.communicate()\n returncode = cacmd.poll()\n if not returncode == 0:\n logger.error(\"Unable to execute: {0}\".format(create_ca))\n sys.exit(1)\n generate_ambari_specific(properties, host, opdir)\n return", "def fusion_api_import_external_ca_certificates(self, body, api=None, headers=None):\n return self.ca.add(body, api=api, headers=headers)" ]
[ "0.8418291", "0.75909305", "0.5836558", "0.5250865", "0.51880836", "0.51657206", "0.5152744", "0.50840783", "0.50522465", "0.501845", "0.50167364", "0.498353", "0.49797028", "0.4977977", "0.49645388", "0.4925362", "0.48608357", "0.48578507", "0.4818017", "0.48157322", "0.47802362", "0.47580248", "0.4729135", "0.47175476", "0.46535182", "0.4643914", "0.46342555", "0.4614988", "0.46083343", "0.4583851" ]
0.83903056
1
Create a CA path folder as defined in OpenSSL terminology, by storing all certificates in 'anchor_list' list in PEM format under provided 'folder' and then creating the associated links using the hash as usually done by c_rehash. Note that you can also include CRL in 'anchor_list'. In that case, they will also be stored under 'folder' and associated links will be created. In folder, the files are created with names of the form 0...ZZ.pem. If you provide an empty list, folder will be created if it does not already exist, but that's all. The number of certificates written to folder is returned on success, None on error.
def create_temporary_ca_path(anchor_list, folder): # We should probably avoid writing duplicate anchors and also # check if they are all certs. try: if not os.path.isdir(folder): os.makedirs(folder) except: return None l = len(anchor_list) if l == 0: return None fmtstr = "%%0%sd.pem" % math.ceil(math.log(l, 10)) i = 0 try: for a in anchor_list: fname = os.path.join(folder, fmtstr % i) f = open(fname, "w") s = a.output(fmt="PEM") f.write(s) f.close() i += 1 except: return None r,w=popen2.popen2("c_rehash %s" % folder) r.close(); w.close() return l
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_ca_file(anchor_list, filename):\n try:\n f = open(filename, \"w\")\n for a in anchor_list:\n s = a.output(fmt=\"PEM\")\n f.write(s)\n f.close()\n except:\n return None\n return filename", "def create_temporary_ca_file(anchor_list):\n try:\n f, fname = tempfile.mkstemp()\n for a in anchor_list:\n s = a.output(fmt=\"PEM\")\n l = os.write(f, s)\n os.close(f)\n except:\n return None\n return fname", "def store_anchors(base_url, anchors, path=\"logs/\"):\n\n url_filename = url_to_filename(base_url)\n filename = f\"{path}ANCHORS-{url_filename}.txt\"\n\n if os.path.isfile(filename):\n with open(filename, \"rb\") as fp:\n all_anchors = pickle.load(fp)\n all_anchors.append(anchors)\n else:\n all_anchors = anchors\n\n with open(filename, \"wb\") as fp:\n pickle.dump(all_anchors, fp)", "def create_folder(self, foldername, parents=''):\r\n formatted_parents = (parents + '/').replace('/', '%2F')\r\n\r\n return self.yandex_requests.create_folder(\r\n foldername, formatted_parents)", "def svn_client_mkdir3(svn_commit_info_t_commit_info_p, apr_array_header_t_paths, svn_boolean_t_make_parents, apr_hash_t_revprop_table, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def makeLinks(files, folderName='SimFiles'):\n\n from os import symlink\n from os import chdir\n\n groups = { 1 : [\"Cos0.5\",\"Cos0.7\"],\n 2 : [\"Cos0.6\",\"Cos0.9\"],\n 3 : [\"Cos0.8\",\"Cos1.0\"]}\n\n for filename in files:\n for group,angles in groups.iteritems():\n if any(x in filename for x in angles):\n chdir(folderName + str(group))\n symlink('../'+filename, filename)\n chdir('../')", "def create_for_folder_subcommand(root_path, verbose, hash_format, no_directory_hashes, single_file, ignore_list=None, ignore_spec_file=None):\n # command formerly known as \"seal\"\n logger.verbose_logging = verbose\n\n if not os.path.isabs(root_path):\n root_path = os.path.join(os.getcwd(), root_path)\n\n logger.verbose(f'Sealing folder at path: {root_path} ...')\n\n existing_history = MHLHistory.load_from_path(root_path)\n\n # we collect all paths we expect to find first and remove every path that we actually found while\n # traversing the file system, so this set will at the end contain the file paths not found in the file system\n not_found_paths = existing_history.set_of_file_paths()\n\n # create the ignore specification\n ignore_spec = ignore.MHLIgnoreSpec(existing_history.latest_ignore_patterns(), ignore_list, ignore_spec_file)\n\n # start a verification session on the existing history\n session = MHLGenerationCreationSession(existing_history, ignore_spec)\n\n num_failed_verifications = 0\n # store the directory hashes of sub folders so we can use it when calculating the hash of the parent folder\n dir_hash_mappings = {}\n\n for folder_path, children in post_order_lexicographic(root_path, session.ignore_spec.get_path_spec()):\n # generate directory hashes\n dir_hash_context = None\n if not no_directory_hashes:\n dir_hash_context = DirectoryHashContext(hash_format)\n for item_name, is_dir in children:\n file_path = os.path.join(folder_path, item_name)\n not_found_paths.discard(file_path)\n if is_dir:\n if not dir_hash_context:\n continue\n hash_string = dir_hash_mappings.pop(file_path)\n else:\n hash_string, success = seal_file_path(existing_history, file_path, hash_format, session)\n if not success:\n num_failed_verifications += 1\n if dir_hash_context:\n dir_hash_context.append_hash(hash_string, item_name)\n dir_hash = None\n if dir_hash_context:\n dir_hash = 
dir_hash_context.final_hash_str()\n dir_hash_mappings[folder_path] = dir_hash\n modification_date = datetime.datetime.fromtimestamp(os.path.getmtime(folder_path))\n session.append_directory_hash(folder_path, modification_date, hash_format, dir_hash)\n\n commit_session(session)\n\n exception = test_for_missing_files(not_found_paths, root_path, ignore_spec)\n if num_failed_verifications > 0:\n exception = errors.VerificationFailedException()\n\n if exception:\n raise exception", "def _init_dir(self):\n for directory in ['', CERT_DIR_NAME, CRL_DIR_NAME, NEWCERT_DIR_NAME,\n PRIVATE_DIR_NAME]:\n mode = 0o755 if directory != PRIVATE_DIR_NAME else 0o700\n os.mkdir(self.ca_dir + directory, mode=mode)", "def create_folder(folder_path, retries=10, interval=1):\n\tfor i in range(retries):\n\t\ttry:\n\t\t\tfolder = client.folder(pth(folder_path)).create(ignore_if_exists=True)\n\t\texcept egnyte.exc.NotAuthorized:\n\t\t\telog(f'Create Folder Attempt {i} NotAuthorized:'+str(folder_path).replace(\"{\",\"[\").replace(\"}\",\"]\"))\n\t\t\ttime.sleep(interval)\n\t\telse:\n\t\t\tbreak\n\treturn folder", "def create_pki():\n os.mkdir(pki_dir)\n os.mkdir(f'{pki_dir}/newcerts')\n Path(f'{pki_dir}/index.txt').touch()\n with open(f'{pki_dir}/serial', 'w') as serial_file:\n serial_file.write('00000000')\n serial_file.close()\n create_CA('/CN=My cool CA/O=Honest Achmed/OU=Used Cars/C=EU')", "def create_folder(folder_path: List[str]) -> str:\n drive = _drive_gen()\n return _create_or_find_folder(folder_path, drive)", "def create_dirs(keys, file):\n if not os.path.exists(file):\n os.mkdir(file)\n \n folders = [re.split(\"/\", key)[:-1] for key in keys]\n unique_folders = [list(x) for x in set(tuple(x) for x in folders)]\n success = 0\n for folders in unique_folders:\n path = os.path.join(file,\"/\".join(folders))\n if not os.path.exists(path):\n os.makedirs(path)\n success += 1\n return print(\"{} Folders were created\".format(success))", "def make_folder(l: str) -> None:\n\n Path(l).mkdir(parents=True, exist_ok=True)\n\n return", "def create_folder(self, c_path):\n raise NotImplementedError", "def generate_root_CA():\n\n ##generating root key\n\n root_private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend())\n\n\n ##self-sign and generate the root certificate\n\n root_public_key = root_private_key.public_key()\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, u'Northeastern SSL Test CA'),\n x509.NameAttribute(NameOID.ORGANIZATION_NAME, u'Northeastern'),\n x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, u'SSL Clock Skews'),\n ]))\n\n builder = builder.issuer_name(x509.Name([\n x509.NameAttribute(NameOID.COMMON_NAME, u'Northeastern SSL Test CA'),\n ]))\n builder = builder.not_valid_before(datetime.datetime.today() - datetime.timedelta(days=1))\n builder = builder.not_valid_after(datetime.datetime(2019, 12, 31))\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = builder.public_key(root_public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None), critical=True,)\n\n root_certificate = builder.sign(\n private_key=root_private_key, algorithm=hashes.SHA256(),\n backend=default_backend()\n )\n\n\n ##write to disk\n \n\n\n with open(\"rootCA.key\", \"wb\") as f:\n f.write(root_private_key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n 
encryption_algorithm=serialization.NoEncryption()\n ))\n\n with open(\"rootCA.crt\", \"wb\") as f:\n f.write(root_certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n ))\n\n return root_private_key, root_certificate", "def create_CA(dn):\n cmd_genrsa = [\"openssl\",\n \"genrsa\",\n \"-aes256\",\n \"-out\", f'{pki_dir}/ca.key',\n \"-passout\", f'pass:{ca_password}',\n f'{rsa_keysize}']\n cmd_req = [\"openssl\",\n \"req\",\n \"-new\",\n \"-x509\",\n \"-days\", \"999999\",\n \"-sha256\",\n \"-key\", f'{pki_dir}/ca.key',\n \"-out\", server_key_files[\"ca\"],\n \"-subj\", f'{dn}',\n \"-passin\", f'pass:{ca_password}']\n cmds = [cmd_genrsa, cmd_req]\n for cmd in cmds:\n exec_cmd(cmd)", "def create_dir_struct(self, create_first_rev_folder=\"True\"):\n # | - create_dir_struct\n for Job_i in self.Job_list:\n\n # | - FOR LOOP BODY\n # if create_first_rev_folder == \"True\":\n # path = os.path.join(Job_i.full_path, \"_1\")\n # elif create_first_rev_folder == \"False\":\n # path = Job_i.full_path\n\n path = Job_i.full_path\n\n if os.path.exists(path):\n # mess = \"Path already exists: \" + str(path)\n # print(mess)\n pass\n\n elif not os.path.exists(path):\n os.makedirs(path)\n # __|\n\n # | - folders_exist attribute should be True from now on\n # file_name = self.root_dir + \"/jobs_bin/.folders_exist\"\n file_name = os.path.join(\n self.root_dir,\n self.working_dir,\n \"jobs_bin/.folders_exist\"\n )\n\n with open(file_name, \"w\") as fle:\n fle.write(\"\\n\")\n\n self.folders_exist = self.__folders_exist__(True)\n # __|\n\n # __|", "def generate_ca(properties, host, isoverwrite):\n java_home = read_conf_file(properties, \"env\", \"JAVA_HOME\")\n java = java_home+'/bin/java'\n logger.info(\"Using JAVA {0}...\".format(java))\n\n try:\n os.path.exists(CA_DIR)\n except OSError:\n raise\n logger.info(\"Using {0} as base path.\".format(CA_DIR))\n if os.path.exists(properties):\n ca_props = read_ca_conf_file(properties, \"caprops\")\n logger.debug(\"CA properties are:\".format(ca_props))\n opdir = os.path.abspath(read_conf_file(properties, \"caprops\", \"outputDirectory\"))\n toolkit_cmd = [java, '-jar', '-Xms12m', '-Xmx24m', CA_DIR + '/lib/ssl_manager-1.5.0-jar-with-dependencies.jar'\n , 'standalone', '--certificateAuthorityHostname', read_conf_file(properties, \"caprops\", \"caName\")]\n if isoverwrite is True:\n toolkit_cmd.append(\"--isOverwrite\")\n create_ca = toolkit_cmd + ca_props\n logger.debug(\"tls toolkit args are : {0}\".format(create_ca))\n cacmd = subprocess.Popen(create_ca)\n cacmd.communicate()\n returncode = cacmd.poll()\n if not returncode == 0:\n logger.error(\"Unable to execute: {0}\".format(create_ca))\n sys.exit(1)\n generate_ambari_specific(properties, host, opdir)\n return", "def _generate_certificates(certfile_path: str, keyfile_path: str,\n common_name: str) -> None:\n ca_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,\n _CA_KEY)\n ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,\n _CA_CERT)\n\n k = OpenSSL.crypto.PKey()\n k.generate_key(OpenSSL.crypto.TYPE_RSA, 4096)\n\n cert = OpenSSL.crypto.X509()\n cert.get_subject().C = 'US'\n cert.get_subject().CN = common_name\n cert.set_serial_number(random.randint(0, 2**64))\n cert.gmtime_adj_notBefore(0)\n cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)\n cert.set_issuer(ca_cert.get_subject())\n cert.set_pubkey(k)\n cert.sign(ca_key, 'sha512')\n with open(certfile_path, \"w\") as f:\n f.write(\n OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,\n 
cert).decode(\"utf-8\"))\n f.write(_CA_CERT)\n with open(keyfile_path, \"w\") as f:\n f.write(\n OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,\n k).decode(\"utf-8\"))", "def gen_ca():\n require_root()\n\n config.proxy.gen_ca_certs()\n log.info('OK')", "def createFolders(self, *args):\n for folder in args:\n mkdir(folder)", "def createFolders(self, *args):\n for folder in args:\n mkdir(folder)", "def create_cell_anchors():\n k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL\n scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE\n aspect_ratios = cfg.RETINANET.ASPECT_RATIOS\n anchor_scale = cfg.RETINANET.ANCHOR_SCALE\n A = scales_per_octave * len(aspect_ratios)\n anchors = {}\n for lvl in range(k_min, k_max + 1):\n # create cell anchors array\n stride = 2. ** lvl\n cell_anchors = np.zeros((A, 4))\n a = 0\n for octave in range(scales_per_octave):\n octave_scale = 2 ** (octave / float(scales_per_octave))\n for aspect in aspect_ratios:\n anchor_sizes = (stride * octave_scale * anchor_scale, )\n anchor_aspect_ratios = (aspect, )\n cell_anchors[a, :] = generate_anchors(\n stride=stride, sizes=anchor_sizes,\n aspect_ratios=anchor_aspect_ratios)\n a += 1\n anchors[lvl] = cell_anchors\n return anchors", "def ca_file(tmpdir):\n key = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n public_key = key.public_key()\n\n builder = x509.CertificateBuilder()\n builder = builder.subject_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n builder = builder.issuer_name(\n x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, \"pyopenssl.org\")])\n )\n one_day = datetime.timedelta(1, 0, 0)\n builder = builder.not_valid_before(datetime.datetime.today() - one_day)\n builder = builder.not_valid_after(datetime.datetime.today() + one_day)\n builder = builder.serial_number(int(uuid.uuid4()))\n builder = builder.public_key(public_key)\n builder = builder.add_extension(\n x509.BasicConstraints(ca=True, path_length=None),\n critical=True,\n )\n\n certificate = builder.sign(private_key=key, algorithm=hashes.SHA256())\n\n ca_file = tmpdir.join(\"test.pem\")\n ca_file.write_binary(\n certificate.public_bytes(\n encoding=serialization.Encoding.PEM,\n )\n )\n\n return str(ca_file).encode(\"ascii\")", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def create_required_folders(conn_obj, path_list):\n path_list = [path_list] if type(path_list) is str else list([str(e) for e in path_list])\n for path in path_list:\n basic_obj.make_dir(conn_obj, path, \"server\")\n basic_obj.change_permissions(conn_obj, path, 777, \"server\")", "def create_folder(folder):\n import errno\n try:\n os.makedirs(folder)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e", "def ensure_folder(*arg):\n if len(arg) == 0:\n raise Exception(\"No input to ensure_folder\")\n path = get_dir(Path(*arg))\n path.mkdir(parents=True, exist_ok=True)", "def create_dir(link_dir):\n if not os.path.exists(link_dir):\n os.makedirs(link_dir)", "def mkdirs(cls, folder_path):\n response = v2.folders.create({\"path\": folder_path})\n return response.status_code" ]
[ "0.5929768", "0.56557614", "0.5049341", "0.49274954", "0.49231866", "0.48154494", "0.4807038", "0.47962224", "0.4793785", "0.47824243", "0.47587633", "0.47089493", "0.4689932", "0.46597952", "0.46543935", "0.46317053", "0.4611708", "0.45996815", "0.45926845", "0.45742983", "0.45652154", "0.45652154", "0.45542684", "0.45386374", "0.45356482", "0.45314705", "0.45278966", "0.45259348", "0.4524165", "0.45201898" ]
0.8136269
0
Returns collection of charts that are part of the worksheet
def charts(self): return self.properties.get('charts', EntityCollection(self.context, WorkbookChart, ResourcePath("charts", self.resource_path)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def charts(self):\n return self._charts", "def charts(self):\n return self.container['charts']", "def get_weekly_chart_list(self) -> ListModel[Chart]:\n return self.retrieve(\n bind=Chart,\n flatten=\"chart\",\n params=dict(method=\"user.getWeeklyChartList\", user=self.name),\n )", "def list_charts():\n charts_root = Path(R\".\\charm\\data\\charts\")\n charts = list(charts_root.rglob(\"*.chart\"))\n return charts", "def getCharts(self):\n \n # code from Jerry to strip off irrelevant headings\n results = []\n flag = False\n for line in self.ResultsForCSVfile:\n if flag:\n results.append(line)\n if len(line) == 0:\n flag = True\n # create charts\n charts = {}\n for (eachFrameSize,eachILOAD) in map(None,self.FrameSizeList,self.ILOADlist):\n c = self.CreateRateVsRangeGraph( eachFrameSize, eachILOAD, results )\n t = c.title\n charts[t] = c\n return charts", "def make_charts(self):\n\n def _insert_pie_chart(wbook, wsheet, title, cell_pos, series):\n piechart = wbook.add_chart({\"type\": \"pie\"})\n piechart.set_title({\"name\": title})\n piechart.set_style(10)\n piechart.add_series(series)\n wsheet.insert_chart(cell_pos, piechart, {\"x_offset\": 25, \"y_offset\": 10})\n\n def _data_frame_days_to_excel(writer, sheet_name, data_frame_days):\n data_frame_days.to_excel(writer, sheet_name=sheet_name, startrow=1, header=False)\n self._set_workbook_layout(writer.book, (writer.sheets[sheet_name]), data_frame_days)\n\n with pd.ExcelWriter(\"Hive Metrics.xlsx\", engine=\"xlsxwriter\", options={\"strings_to_urls\": False}) as writer:\n workbook = writer.book\n worksheet = workbook.add_worksheet(\"Summary Charts\")\n worksheet.hide_gridlines(2)\n\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"New vs. Closed Cases\",\n cell_pos=\"D2\",\n series={\n \"name\": \"Open vs. 
Closed Cases Last 30\",\n \"categories\": \"=Tracking!$B$1:$C$1\",\n \"values\": \"=Tracking!$B$2:$C$2\",\n },\n )\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"Case Ownership\",\n cell_pos=\"M19\",\n series={\n \"name\": \"Case Ownership Last 30\",\n \"categories\": \"=Tracking!$A$3:$A$9\",\n \"values\": \"=Tracking!$D$3:$D$9\",\n },\n )\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"Case Resolution\",\n cell_pos=\"D19\",\n series={\n \"name\": \"Case Resolution Last 30\",\n \"categories\": \"=Tracking!$A$10:$A$12\",\n \"values\": \"=Tracking!$E$10:$E$12\",\n },\n )\n _insert_pie_chart(\n workbook,\n worksheet,\n title=\"Case Severities\",\n cell_pos=\"M2\",\n series={\n \"name\": \"Severity Last 30\",\n \"categories\": \"=Tracking!$A$13:$A$15\",\n \"values\": \"=Tracking!$F$13:$F$15\",\n },\n )\n\n _data_frame_days_to_excel(\n writer, sheet_name=\"Cases newer than 30 Days\", data_frame_days=self._data_frame_30days,\n )\n _data_frame_days_to_excel(\n writer, sheet_name=\"Cases older than 60 days\", data_frame_days=self._data_frame_60days,\n )\n _data_frame_days_to_excel(\n writer, sheet_name=\"Cases newer than 90 Days\", data_frame_days=self._data_frame_90days,\n )\n\n self._data_frame_counts.to_excel(writer, sheet_name=\"Tracking\")\n writer.save()", "def list_charts(self, app):\n return self._list(self._path() + '?app_name=' + app, 'charts')", "def _charts(self):\n # lazy instantiation here to avoid creating the charts object unless needed.\n if self.__charts is None:\n self.__charts = Charts(self)\n self.AddObserver(\"StartEvent\", partial(try_callback, self._before_render_event))\n return self.__charts", "def charts(self, charts):\n\n self.container['charts'] = charts", "def get_plots(self):\n return list(self.plots.values())", "def spreadsheets(self):\r\n return resource.Spreadsheets(self)", "def getAllWidgets(self):\n \n visualisations = Visualisation.objects.filter(dataSource=self)\n widgets = []\n for vis in visualisations:\n widgets.append(vis.getWidget())\n return widgets", "def list(self, **params):\n\n _, _, account_charts = self.http_client.get(\"/accountcharts\", params=params)\n return account_charts", "def worksheets(self):\n return self.properties.get('worksheets',\n WorkbookWorksheetCollection(self.context,\n ResourcePath(\"worksheets\", self.resource_path)))", "def cells_charts_get_worksheet_charts_with_http_info(self, name, sheet_name, **kwargs):\n\n all_params = ['name', 'sheet_name', 'folder', 'storage']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method cells_charts_get_worksheet_charts\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `cells_charts_get_worksheet_charts`\")\n # verify the required parameter 'sheet_name' is set\n if ('sheet_name' not in params) or (params['sheet_name'] is None):\n raise ValueError(\"Missing the required parameter `sheet_name` when calling `cells_charts_get_worksheet_charts`\")\n\n\n collection_formats = {}\n\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n if 'sheet_name' in params:\n path_params['sheetName'] = 
params['sheet_name']\n\n query_params = []\n if 'folder' in params:\n query_params.append(('folder', params['folder']))\n if 'storage' in params:\n query_params.append(('storage', params['storage']))\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api('/cells/{name}/worksheets/{sheetName}/charts', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ChartsResponse',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def charts(self, charts):\n\n self._charts = charts", "def getDependenciesCharts(self) -> Mapping[str, 'ChartVersionInfo']:\n deps = self.getDependenciesList()\n ret: Dict[str, 'ChartVersionInfo'] = {}\n for dep in deps:\n ret[dep['name']] = self.getDependencyChart(dep['name'])\n return ret", "def my_charts(request):\n\n logger.debug('called')\n\n context = {}\n\n simulations = request.user.simulations.all().exclude(\n name__icontains=settings.STANDARD_CHART_NAME\n ).select_related(\n 'fight_style',\n 'result',\n 'simulation_type',\n 'wow_class',\n 'wow_spec',\n 'queue',\n )\n\n context['charts'] = simulations\n\n return render(request, 'general_website/my_charts.html', context=context)", "def get_worksheets(self) -> list or None:\n if self.connected:\n cursor = self.workbook.cursor()\n if cursor:\n worksheet_names = []\n for table in cursor.tables():\n worksheet_names.append(table['table_name'])\n cursor.close()\n return worksheet_names\n return None", "def has_charts(self):\n return self.__charts is not None", "def get_worksheet(self, workbook):\n for worksheet_name in workbook.sheet_names():\n return workbook.sheet_by_name(worksheet_name)", "def my_charts(page_num=1):\n # Download charts that belong to the current user\n charts = Chart.query.filter_by(owner_id=current_user.id).paginate(page_num)\n return render_template('reports/my_charts.html', charts=charts)", "def chart_finder(self, keyword):\n\n data, _ = self.helm_client.search(keyword)\n return data", "def sheets(self):\n result = []\n recordset = self.connection.OpenSchema(20)\n while not recordset.EOF:\n result.append(recordset.Fields[2].Value)\n recordset.MoveNext()\n recordset.Close()\n del recordset\n return result", "def test_read_charts(self, chart, charts):\n self.chart = charts\n chart_objects = chart.objects.all()\n if not chart_objects:\n raise AssertionError(\"Could not read charts.\")", "def get_charts(self, period=\"d\", size=\"l\", chart_type=\"c\", ta=\"1\"):\n\n encoded_payload = urlencode(\n {\"ty\": chart_type, \"ta\": ta, \"p\": period, \"s\": size}\n )\n\n sequential_data_scrape(\n scrape.download_chart_image,\n [\n f\"https://finviz.com/chart.ashx?{encoded_payload}&t={row.get('Ticker')}\"\n for row in self.data\n ],\n self._user_agent,\n )", "def get_rows(self) -> WellsByDimension:\n return self._rows", "def result(self):\n\n chart_series = [] # will hold all the series created\n\n # determine the sensor to plot 
from the sensor selected by the user.\n the_sensor = bmsapp.models.Sensor.objects.get(pk=self.request_params['select_sensor'])\n\n # get the requested averaging interval in hours\n averaging_hours = float(self.request_params['averaging_time'])\n\n # determine the start time for selecting records\n st_ts, end_ts = self.get_ts_range()\n\n # get the database records\n df = self.reading_db.dataframeForOneID(the_sensor.sensor_id, st_ts, end_ts, pytz.timezone(self.timezone))\n\n if not df.empty:\n\n # info needed to create each series (selection list, series name, visible)\n if self.schedule:\n occupied_times = df.ts.apply(self.schedule.is_occupied)\n unoccupied_times = -occupied_times\n\n series_info = [(None, 'All Data', True),\n (occupied_times, 'Occupied Periods', False),\n (unoccupied_times, 'Unoccupied Periods', False)]\n else:\n # no schedule, so just return the 'All Data' series\n series_info = [(None, 'All Data', True)]\n\n for mask, series_name, visibility in series_info:\n if mask is None:\n select_df = df\n else:\n select_df = df[mask]\n\n if averaging_hours:\n select_df = bmsapp.data_util.resample_timeseries(select_df, averaging_hours)\n\n histogram_series = bmsapp.data_util.histogram_from_series(select_df.val)\n\n chart_series.append({'x': [x for x,y in histogram_series],\n 'y': [y for x,y in histogram_series],\n 'type': 'scatter',\n 'mode': 'lines', \n 'name': series_name, \n 'visible': 'true' if visibility else 'legendonly'\n })\n\n opt = self.get_chart_options('plotly')\n opt['data'] = chart_series\n opt['layout']['title'] = the_sensor.title + ' Histogram: ' + self.building.title\n opt['layout']['xaxis']['title'] = the_sensor.unit.label\n opt['layout']['xaxis']['type'] = 'linear'\n opt['layout']['yaxis']['title'] = '% of Readings'\n opt['layout']['yaxis']['rangemode'] = 'tozero'\n\n html = basechart.chart_config.chart_container_html(opt['layout']['title'])\n\n return {'html': html, 'objects': [('plotly', opt)]}", "def worksheets(self, visibility, projection):\r\n return Worksheets(self, visibility=visibility, projection=projection)", "def available_plots(self):\n return self.visualizer.available_plots()" ]
[ "0.76351535", "0.75301296", "0.6606785", "0.6591396", "0.6510425", "0.643841", "0.6303004", "0.6279748", "0.6144825", "0.60105306", "0.58883137", "0.5710741", "0.56871444", "0.5678899", "0.5657876", "0.5635193", "0.5595577", "0.5593473", "0.54826975", "0.5446992", "0.5428346", "0.5417936", "0.5404482", "0.539962", "0.5386766", "0.53765416", "0.5348541", "0.53222615", "0.53153604", "0.5306823" ]
0.79114956
0
Collection of tables that are part of the worksheet.
def tables(self): return self.properties.get('tables', WorkbookTableCollection(self.context, ResourcePath("tables", self.resource_path)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tables(self) -> list:\n return self.list_tables()", "def _tables(self):\n tabs = set()\n for cond in self._andalso + self._orelse:\n tabs = tabs.union(cond._tables())\n return tabs", "def make_tables(self):\n return [XLSRowSet(name, self.workbook.sheet_by_name(name), self.window)\n for name in self.workbook.sheet_names()]", "def tables(cls):\n if not hasattr(cls, '_tables'):\n cls.parse_attributes()\n return cls._tables", "def tables(self):\n result = self.execute(self.commands.get_tables(self.name))\n return [x[0] for x in result]", "def _tables(self):\n assert False, \"subclass responsibility\"", "def get_tables(self):\n return self._get_types_from_default_ns(Table)", "def get_tables(self):\n return list(self._metadata['tables'].keys())", "def tables(self):\n cursor = self.connection.cursor()\n\n # not returning an iterator: just fetch everything.\n # I'm guessing this will be fine for any realistic database\n # size, and avoids issues of having multiple open cursors\n # at the same time.\n cursor.execute('SHOW TABLES')\n table_names = []\n for result_data in cursor:\n for table_name in result_data.values():\n table_names.append(table_name)\n\n definitions = OrderedDict()\n for table_name in table_names:\n cursor.execute('SHOW CREATE TABLE %s' % table_name)\n if not cursor.rowcount:\n raise ValueError(\"Failed to execute SHOW CREATE TABLE command on table %s\" % table_name)\n\n result_data = cursor.fetchone()\n definitions[table_name] = result_data['Create Table']\n\n cursor.close()\n\n return definitions", "def make_tables(self):\n for t in self.tables:\n self.add_table(groupname=t['groupname'],\n tablename=t['tablename'],\n description=t['description'],\n tabletitle=t['tabletitle'])", "def tables(self):\n return Table.objects.filter(schema__database=self)", "def get_my_tables(self):\n qnum = self.master('sql', att={'type': 'table'}) # it's a Table._call_() function call\n if self.run():\n return (self.table_factory(self.get_table_info(result[0])) for result in self.results[qnum])\n else:\n print('An error has occurred when initializing the database.')", "def load_tables(self):\n if not len(self._meta):\n raise BufrTableError(\"No table loaded!\")\n if self._tables is None or self._tables.differs(\n self._meta['master'], self._meta['mver'], self._meta['lver'],\n self._meta['center'], self._meta['subcenter']):\n self._tables = tab.load_tables.load_all(\n self._meta['master'], self._meta['center'], self._meta['subcenter'], self._meta['mver'],\n self._meta['lver'], self._tab_p, self._tab_f\n )\n else:\n logger.debug(\"Table loading not neccessary\")\n if self._tables is None:\n raise BufrTableError(\"No table loaded!\")\n return self._tables", "def list_tables(self):\n return LIST_TABLES(db=self.db)", "def get_tables(self) -> List[Table]:\n # Assemble API calls for concurrent execution\n calls = []\n for (year, table_name), variables in self.get_variables_by_year_and_table_name().items():\n # Handle multiple for_geo values by year\n chunked_variables_by_for_geo = product(self.for_geo, chunk_variables(variables))\n for for_geo, chunk in chunked_variables_by_for_geo:\n call = self._census_api.fetch_table(\n self.estimate, year, table_name, chunk, for_geo, self.in_geo\n )\n calls.append(call)\n # Make concurrent API calls\n results = asyncio.run(self._census_api.gather_calls(calls))\n tables = list(results)\n return tables", "def tabulate(self):\n\n self.tables = []\n\n for sim in tqdm.tqdm(self.simulations):\n self.tables.append(pd.read_csv(sim.get_table()))\n\n return self.tables", 
"def tables(self):\n if self.table is None:\n raise GiraffeError(\"Target table has not been set.\")\n return [\n \"{}_wt\".format(self.table),\n \"{}_log\".format(self.table),\n \"{}_e1\".format(self.table),\n \"{}_e2\".format(self.table),\n ]", "def refresh_tables(self):\n\n if self.key is None:\n raise AttributeError('Can not refresh tables on uninitialised db')\n\n self.tables = self.client.ssclient.GetWorksheetsFeed(self.key)", "def get_tables(self):\n query = mssqlqueries.get_tables()\n logger.info(u'Tables query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])", "def set_up_tables(self):\n tables = []\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_info',\n 'description': desc.SimInfoRow,\n 'tabletitle': 'Simulation Information'})\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_timeseries',\n 'description': desc.SimTimeseriesRow,\n 'tabletitle': 'Simulation Power Data'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_params',\n 'description': desc.ThMetadataRow,\n 'tabletitle': 'TH Component Parameters'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_timeseries',\n 'description': desc.ThTimeseriesRow,\n 'tabletitle': 'TH Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_timeseries',\n 'description': desc.NeutronicsTimeseriesRow,\n 'tabletitle': 'Neutronics Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_params',\n 'description': desc.NeutronicsParamsRow,\n 'tabletitle': 'Neutronics Metadata'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'zetas',\n 'description': desc.ZetasTimestepRow,\n 'tabletitle': 'Neutron Precursor Concentrations'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'omegas',\n 'description': desc.OmegasTimestepRow,\n 'tabletitle': 'Decay Heat Fractions'})\n return tables", "def get_tables(self):\n r = self.client.query(\"show tables\")\n if r:\n tables = [tuple(reversed(x.split(','))) for x in filter(None, r.split('\\n'))][1:]\n FT.table_cache = dict(tables)\n return tables\n else:\n logging.error(\"get_tables: no response\")", "def get_tables(self):\n\t\tbuild = 'SELECT * FROM pg_catalog.pg_tables WHERE schemaname != \\'pg_catalog\\' AND schemaname != \\'information_schema\\';'\n\t\tself.cur.execute(build)\n\t\ttotal = self.cur.fetchall()\n\t\ttable_list = []\n\t\tfor a in total:\n\t\t\ttable_list.append(a[1])\n\t\treturn table_list", "def show_tables(self) -> List[str]:\n return list(self.tb.keys())", "def table(self):\n if self._table is None:\n self._table = list(self._iter_rows())\n\n return self._table", "def pivot_tables(self):\n return self.properties.get('pivotTables',\n EntityCollection(self.context, WorkbookPivotTable,\n ResourcePath(\"pivotTables\", self.resource_path)))", "def create_all_tables(self):\n pass", "def create_tables( self ) :\n return self._create_tables", "def get_tables(self):\n logging.debug(f\"\"\"get_tables\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name,server1_select,server2_select,schema1,\n schema2,tips from {self.schemaRepo}.tablediff\n where step = 0 and result = 'init' order by id\"\"\"\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")\n rows = curs.fetchall()\n return rows", "def get_tables():\n page_html = requests.get(conf.PAGE_URL).text\n soup = 
BeautifulSoup(page_html, 'html.parser')\n tables = soup.find_all(\"table\", {\"class\": conf.TABLE_CLASS_NAME})\n if not tables:\n raise ValueError(\"Table class not found\")\n return tables", "def get_table_list(self, cursor):\n\n cursor.execute(\n \"\"\"\n SELECT c.relname, c.relkind\n FROM pg_catalog.pg_class c\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE c.relkind IN ('r', 'v', '')\n AND n.nspname = '%s'\n AND pg_catalog.pg_table_is_visible(c.oid)\"\"\"\n % get_current_schema().schema_name\n )\n\n return [\n TableInfo(row[0], {\"r\": \"t\", \"v\": \"v\"}.get(row[1]))\n for row in cursor.fetchall()\n if row[0] not in self.ignored_tables\n ]" ]
[ "0.7461289", "0.7429046", "0.7311719", "0.7223998", "0.70689285", "0.6959644", "0.6953568", "0.69121426", "0.675924", "0.670654", "0.66978776", "0.66874295", "0.6667738", "0.66497767", "0.6632957", "0.6626624", "0.6615419", "0.6606073", "0.659836", "0.65665925", "0.656333", "0.6545557", "0.65451795", "0.6527234", "0.65028983", "0.6422974", "0.641761", "0.63961345", "0.638277", "0.6380469" ]
0.79417855
0
Collection of PivotTables that are part of the worksheet.
def pivot_tables(self): return self.properties.get('pivotTables', EntityCollection(self.context, WorkbookPivotTable, ResourcePath("pivotTables", self.resource_path)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tables(self):\n return self.properties.get('tables',\n WorkbookTableCollection(self.context, ResourcePath(\"tables\", self.resource_path)))", "def make_tables(self):\n return [XLSRowSet(name, self.workbook.sheet_by_name(name), self.window)\n for name in self.workbook.sheet_names()]", "def _tables(self):\n tabs = set()\n for cond in self._andalso + self._orelse:\n tabs = tabs.union(cond._tables())\n return tabs", "def tables(self) -> list:\n return self.list_tables()", "def sheets(self):\n result = []\n recordset = self.connection.OpenSchema(20)\n while not recordset.EOF:\n result.append(recordset.Fields[2].Value)\n recordset.MoveNext()\n recordset.Close()\n del recordset\n return result", "def tabulate(self):\n\n self.tables = []\n\n for sim in tqdm.tqdm(self.simulations):\n self.tables.append(pd.read_csv(sim.get_table()))\n\n return self.tables", "def get_worksheets(self) -> list or None:\n if self.connected:\n cursor = self.workbook.cursor()\n if cursor:\n worksheet_names = []\n for table in cursor.tables():\n worksheet_names.append(table['table_name'])\n cursor.close()\n return worksheet_names\n return None", "def tables(self):\n return Table.objects.filter(schema__database=self)", "def tables(self):\n result = self.execute(self.commands.get_tables(self.name))\n return [x[0] for x in result]", "def get_tables(self):\n\t\tbuild = 'SELECT * FROM pg_catalog.pg_tables WHERE schemaname != \\'pg_catalog\\' AND schemaname != \\'information_schema\\';'\n\t\tself.cur.execute(build)\n\t\ttotal = self.cur.fetchall()\n\t\ttable_list = []\n\t\tfor a in total:\n\t\t\ttable_list.append(a[1])\n\t\treturn table_list", "def get_tables(self):\n return list(self._metadata['tables'].keys())", "def _get_tables(self) -> pd.DataFrame:\n return self.server._execute_extract(\n \"SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = '{}'\".format(\n self.name\n )\n )", "def worksheets(self):\n return self.properties.get('worksheets',\n WorkbookWorksheetCollection(self.context,\n ResourcePath(\"worksheets\", self.resource_path)))", "def spreadsheets(self):\r\n return resource.Spreadsheets(self)", "def facets(self):\n return self._facets", "def table(self):\n if self._table is None:\n self._table = list(self._iter_rows())\n\n return self._table", "def get_tables(self):\n return self._get_types_from_default_ns(Table)", "def tables(cls):\n if not hasattr(cls, '_tables'):\n cls.parse_attributes()\n return cls._tables", "def get_table_list(self, cursor):\n\n cursor.execute(\n \"\"\"\n SELECT c.relname, c.relkind\n FROM pg_catalog.pg_class c\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE c.relkind IN ('r', 'v', '')\n AND n.nspname = '%s'\n AND pg_catalog.pg_table_is_visible(c.oid)\"\"\"\n % get_current_schema().schema_name\n )\n\n return [\n TableInfo(row[0], {\"r\": \"t\", \"v\": \"v\"}.get(row[1]))\n for row in cursor.fetchall()\n if row[0] not in self.ignored_tables\n ]", "def all(self) -> List[ModelledTable]:\n\n return self.model.all(self.cursor)", "def rows(self):\n\n return [self._meta.row_class(self, o) for o in self.paginator.page(self._meta.page).object_list]", "def _agg_tables(self):\n tabs = set()\n for cond in self._agg_andalso + self._agg_orelse:\n tabs = tabs.union(cond._tables())\n return tabs", "def recalculate_pivots(self):\n pass", "def refresh_tables(self):\n\n if self.key is None:\n raise AttributeError('Can not refresh tables on uninitialised db')\n\n self.tables = self.client.ssclient.GetWorksheetsFeed(self.key)", "def load_tables(self):\n if not 
len(self._meta):\n raise BufrTableError(\"No table loaded!\")\n if self._tables is None or self._tables.differs(\n self._meta['master'], self._meta['mver'], self._meta['lver'],\n self._meta['center'], self._meta['subcenter']):\n self._tables = tab.load_tables.load_all(\n self._meta['master'], self._meta['center'], self._meta['subcenter'], self._meta['mver'],\n self._meta['lver'], self._tab_p, self._tab_f\n )\n else:\n logger.debug(\"Table loading not neccessary\")\n if self._tables is None:\n raise BufrTableError(\"No table loaded!\")\n return self._tables", "def as_players(self):\n self._assert_no_aggregate()\n\n self._sort_tables = [types.Player]\n ids = self._ids('player', self._sorter)\n results = []\n q = 'SELECT %s FROM player %s %s'\n with Tx(self._db) as cur:\n q = q % (\n types.select_columns(types.Player),\n _prefix_and(_sql_pkey_in(cur, ['player_id'], ids['player'])),\n self._sorter.sql(tabtype=types.Player),\n )\n cur.execute(q)\n\n for row in cur.fetchall():\n results.append(types.Player.from_row(self._db, row))\n return results", "def get_table_list(self, tables):\n statmt = \"SELECT tablename FROM pg_catalog.pg_tables where tablename not like 'pg_%' and tablename not like 'sql_%'\"\n self.cur.execute(statmt)\n rows = [table[0] for table in list(self.cur.fetchall())]\n\n if len(tables) != 0:\n rows = list(map(str, set(rows).intersection(tables)))\n return rows", "def get_rows(self) -> WellsByDimension:\n return self._rows", "def GetTabContainer(self):\r\n\r\n return self._tabs", "def partitions(self):\n return self._partitions" ]
[ "0.6552649", "0.65002507", "0.5966005", "0.5962965", "0.5830784", "0.5787573", "0.56597066", "0.56334597", "0.5631033", "0.5583295", "0.5544617", "0.5506703", "0.54890263", "0.5459125", "0.5382791", "0.5318419", "0.52870756", "0.5273004", "0.5272081", "0.5182937", "0.516016", "0.51574963", "0.5145011", "0.51342624", "0.50940037", "0.50937325", "0.5077889", "0.505474", "0.50187415", "0.5014495" ]
0.788829
0
Returns sheet protection object for a worksheet.
def protection(self): return self.properties.get('protection', WorkbookWorksheetProtection(self.context, ResourcePath("protection", self.resource_path)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sheet():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('sheets', 'v4', credentials=creds)\n\n # Call the Sheets API\n sheet = service.spreadsheets()\n return sheet", "def get_drive_worksheet(spreadsheet_key, worksheet_name):\n gspread = get_authenticated_gspread()\n spreadsheet = gspread.open_by_key(spreadsheet_key)\n return spreadsheet.worksheet(worksheet_name)", "def _get_sheet(self, ws_name):\n return self._spreadsheet.sheet_by_name(ws_name)", "def getSheet(self, sheet_name):\r\n return self.workbook.Sheets(sheet_name)", "def get_worksheet(sheet_id, sheet_name):\n if (sheet_id, sheet_name) in WORKSHEET_CACHE:\n return WORKSHEET_CACHE[(sheet_id, sheet_name)]\n\n sheet = get_spreadsheet(sheet_id)\n worksheet = sheet.worksheet(sheet_name)\n\n WORKSHEET_CACHE[(sheet_id, sheet_name)] = worksheet\n return worksheet", "def get_worksheet(self):\n return self.worksheet", "def worksheet(self, worksheet_id, visibility, projection):\r\n return Worksheet(self, worksheet_id, visibility, projection)", "def get_worksheet(self, workbook):\n for worksheet_name in workbook.sheet_names():\n return workbook.sheet_by_name(worksheet_name)", "def google_sheets_connector():\n scope = ['https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)\n client = gspread.authorize(credentials)\n sheet = client.open('backend').sheet1\n return sheet", "def load_sheet(sheet_name):\n workbook_path = get_workbook_path()\n wb = openpyxl.load_workbook(workbook_path)\n sheet_obj = wb[sheet_name]\n return sheet_obj, wb", "def sheet(self, name, encoding=None, order_by=None):\n return _ExcelSheet(self, name, encoding, order_by)", "def get_excel(exceldocument):\r\n\r\n sheet = xlrd.open_workbook(exceldocument).sheet_by_index(0)\r\n return sheet", "def get_specsheet(self):\n if hasattr(self, 'specsheet'):\n return self.specsheet", "def get_sheet(sheet, doc):\n scope = [\"https://spreadsheets.google.com/feeds\", \"https://www.googleapis.com/auth/drive\"]\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(SECRET_FILE, scope)\n\n gc = gspread.authorize(credentials)\n wks = gc.open(doc)\n sheet = wks.worksheet(sheet)\n data = sheet.get_all_values()\n h1 = ffill(data[0])\n\n # remove extra whitespace\n h1 = [k.strip() for k in h1]\n h2 = [k.strip() for k in data[1]]\n\n # create a multiindex\n columns = MultiIndex.from_tuples(zip(h1, h2))\n\n # populate the dataframe\n df = DataFrame(data[2:], columns=columns)\n return df", "def get_sheetsclient(config, project=\"cscap\"):\n return get_googleapiclient(config, project, \"sheets\", \"v4\")", "def login_open_sheet(oauth_key_file, spreadsheet):\n try:\n scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\n credentials = 
ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\n gc = gspread.authorize(credentials)\n worksheet = gc.open(spreadsheet).sheet1 # pylint: disable=redefined-outer-name\n return worksheet\n except Exception as ex: # pylint: disable=bare-except, broad-except\n print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, \\\n and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\n print('Google sheet login failed with error:', ex)\n sys.exit(1)", "def get_sheet(excel_fname, sheet_name=None):\r\n book = xlrd.open_workbook(excel_fname)\r\n\r\n if sheet_name:\r\n\r\n if sheet_name in book.sheet_names():\r\n sheet = book.sheet_by_name(sheet_name)\r\n return sheet\r\n else:\r\n print(\"ERROR: Sheet '{0}' cannot be found in workbook '{1}'\".format(\r\n sheet_name, excel_fname))\r\n sys.exit(1)\r\n\r\n else:\r\n # Get the first worksheet.\r\n sheet = book.sheet_by_index(0)\r\n return sheet", "def sheets_service() -> object:\n g_sheets_service = build('sheets', 'v4', credentials=google_creds())\n\n return g_sheets_service", "def google_sheets_connector():\n print(\"Connecting to Google Sheets\")\n scope = ['https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)\n client = gspread.authorize(credentials)\n sheet = client.open('backend').sheet1\n return sheet", "def get_sheet_by_name(book, name):\n i = 0\n for sheetname in book.sheetnames:\n if sheetname == name:\n return book.worksheets[i]\n i += 1\n raise ValidationError(_(\"'%s' sheet not found\") % (name,))", "def login_open_sheet(oauth_key_file, spreadsheet):\n\ttry:\n\t\tjson_key = json.load(open(oauth_key_file))\n\t\tcredentials = SignedJwtAssertionCredentials(json_key['client_email'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tjson_key['private_key'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t['https://spreadsheets.google.com/feeds'])\n\t\tgc = gspread.authorize(credentials)\n\t\tworksheet = gc.open(spreadsheet).sheet1\n\t\treturn worksheet\n\texcept Exception as ex:\n\t\tprint 'Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!'\n\t\tprint 'Google sheet login failed with error:', ex\n\t\tsys.exit(1)", "def login_open_sheet(oauth_key_file, spreadsheet):\r\n try:\r\n scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\r\n gc = gspread.authorize(credentials)\r\n worksheet = gc.open(spreadsheet).sheet1\r\n return worksheet\r\n except Exception as ex:\r\n print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n print('Google sheet login failed with error:', ex)\r\n print(datetime.datetime.now())\r\n sys.exit(1)", "def login_open_sheet(oauth_key_file, spreadsheet):\r\n try:\r\n scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\r\n gc = gspread.authorize(credentials)\r\n worksheet = gc.open(spreadsheet).sheet1\r\n return worksheet\r\n\r\n except Exception as ex:\r\n print('Unable to login and get spreadsheet. 
Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n print('Google sheet login failed with error:', ex)\r\n sys.exit(1)", "def get_or_create_worksheet(sh, name):\n try:\n return sh.worksheet(name)\n except WorksheetNotFound:\n return sh.add_worksheet(title=name, rows=1, cols=1)", "def create_worksheet(self, workbook: Workbook, worksheet_name: str) -> Worksheet:\n worksheet = workbook.create_sheet(worksheet_name)\n workbook_name = self.get_workbook_name(workbook)\n workbook.save(workbook_name)\n\n return worksheet", "def get_ssclient(config):\n return smartsheet.Smartsheet(config[\"ss_access_token\"])", "def init_worksheet(SPREADSHEET_ID, ws_name):\n\n creds = oauth_file.Storage(f\"{os.environ['HOME']}/token.json\").get()\n gc = gspread.authorize(creds)\n wb = gc.open_by_key(SPREADSHEET_ID)\n\n try:\n ws = wb.worksheet(ws_name)\n except WorksheetNotFound:\n ws = wb.add_worksheet(ws_name, rows=1, cols=1)\n return ws", "def worksheets(self, visibility, projection):\r\n return Worksheets(self, visibility=visibility, projection=projection)", "def spreadsheet(self, key):\r\n return resource.Spreadsheet(self, key)", "def from_excel(self, path, worksheet=0):\n reader, release_resources = _from_excel(path, worksheet=worksheet)\n return Reader(reader, closefunc=release_resources)" ]
[ "0.6435777", "0.6164468", "0.61453366", "0.60792303", "0.57868165", "0.57408464", "0.5719054", "0.5713475", "0.56248987", "0.5560803", "0.5474494", "0.54677033", "0.5372001", "0.534923", "0.5348853", "0.53312427", "0.52593756", "0.5227131", "0.520839", "0.5198447", "0.5190751", "0.51660085", "0.516525", "0.5164346", "0.5151545", "0.51234674", "0.51147056", "0.50453246", "0.5035945", "0.503158" ]
0.64826155
0
for portal_catalog to index enableAutopublishing field
def _enableautopublishing(obj, **kwargs): from collective.autopublishing.behavior import IAutoPublishing if IAutoPublishing.providedBy(obj): return getattr(obj, "enableAutopublishing", True) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeATCTTypesAutoPublishAware():\n makeTypesAutoPublishAware(atct_types)\n print \"---------- PATCH: ADDED enableAutopublishing field TO ATCT TYPES ----------\"", "def enable_index_update_feature(settings):\n settings.FEATURES[INDEX_UPDATES] = True", "def supports_catalog_admin(self):\n return False", "def addCatalogIndexes(portal):\n catalog = getToolByName(portal, 'portal_catalog')\n indexes = catalog.indexes()\n wanted = (('standardTags', 'KeywordIndex'),\n ('iamTags', 'KeywordIndex'),\n ('isearchTags', 'KeywordIndex'),\n ('hiddenTags', 'KeywordIndex'))\n indexables = []\n for name, meta_type in wanted:\n if name not in indexes:\n catalog.addIndex(name, meta_type)\n indexables.append(name)\n logger.info(\"Added %s for field %s.\", meta_type, name)\n if len(indexables) > 0:\n logger.info(\"Indexing new indexes %s.\", ', '.join(indexables))\n catalog.manage_reindexIndex(ids=indexables)", "def isEnabled(self):", "def enable(self):", "def create_index():", "def _compute_export_in_woo(self):\n for rec in self:\n rec.ks_export_in_woo = bool(rec.ks_woo_id)", "def is_enabled(self):", "def Enabled(self) -> bool:", "def _setEnabled(self, indexlist):\n for index in self._items.keys():\n self.enable(index, index in indexlist)", "def test_update_deployment_visibility_query(self):\n pass", "def enablePackageInternal(self, *args):\n return _libsbml.SpeciesTypeComponentIndex_enablePackageInternal(self, *args)", "def add_catalog_indexes(context, logger):\n if logger is None:\n logger = logging.getLogger('bungenicms.membershipdirectory')\n \n # Run the catalog.xml step as that may have defined new metadata columns. \n # We could instead add <depends name=\"catalog\"/> to the registration of our \n # import step in zcml, but doing it in code makes this method usable as \n # upgrade step as well. 
Note that this silently does nothing when there is \n # no catalog.xml, so it is quite safe.\n setup = getToolByName(context, 'portal_setup')\n setup.runImportStepFromProfile(PROFILE_ID, 'catalog')\n \n catalog = getToolByName(context, 'portal_catalog')\n indexes = catalog.indexes()\n \n # Specify the indexes you want, with ('index_name', 'index_type')\n wanted = (('county', 'FieldIndex'),\n ('constituency', 'FieldIndex'),\n ('priority_number', 'FieldIndex'), \n ('political_party', 'FieldIndex'),\n ('elected_nominated', 'FieldIndex'),\n ('member_status', 'FieldIndex'),\n ('special_interest', 'FieldIndex'),\n ('other_names', 'FieldIndex'),\n ('member_role', 'FieldIndex'),\n ('member_title', 'FieldIndex'),\n ('body_text', 'FieldIndex'),\n ('member_full_names', 'ZCTextIndex'),\n )\n\n indexables = []\n for (name, meta_type) in wanted:\n if meta_type and name not in indexes:\n if meta_type == 'ZCTextIndex':\n item_extras = Empty()\n item_extras.doc_attr = name\n item_extras.index_type = 'Okapi BM25 Rank'\n item_extras.lexicon_id = 'plone_lexicon'\n catalog.addIndex(name, meta_type, item_extras)\n else:\n catalog.addIndex(name, meta_type)\n \n indexables.append(name)\n logger.info('Added %s for field %s.', meta_type, name)\n if len(indexables) > 0:\n logger.info('Indexing new indexes %s.', ', '.join(indexables))\n catalog.manage_reindexIndex(ids=indexables)", "def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})", "def reindex_licence_permissions(container, event):\n if IUrbanEvent.providedBy(container):\n licence = container.aq_parent\n licence.reindexObject(idxs=['allowedRolesAndUsers'])", "def search_engine_index(request):\n return {'NO_INDEX': settings.NO_INDEX}", "def manual(self):\n\n\t\tfilter = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\", \"World War II\"],\n\n\t\tself.index[\"authorities\"] = [auth for auth in self.index[\"authorities\"] if auth not in filter]\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"items\"][item] = [auth for auth in self.index[\"items\"][item] if auth in self.index[\"authorities\"]]", "def build_index():\n pass", "def _SetupIndexes(self, _open=open):\n pass", "def index(self):\n\n if self.cluster:\n self.cluster.index()\n else:\n super().index()", "def enable(self) -> None:", "def enabled(self):\n raise NotImplementedError", "def define_index_field(DomainName=None, IndexField=None):\n pass", "def enableAccessedFlags(self):\n pass", "def isEnabled(self) -> bool:\n ...", "def index_queryset(self, using=None):\n # return self.get_model().objects.filter(hack_published__lte=datetime.datetime.now())\n return self.get_model().objects.all()", "def index_queryset(self, using=None):\n return self.get_model().published.all()", "def index_queryset(self, using=None):\n return self.get_model().published.all()" ]
[ "0.5977186", "0.5692152", "0.518013", "0.5164247", "0.50872403", "0.5072901", "0.50575805", "0.5032113", "0.50128716", "0.4981646", "0.49514616", "0.49275663", "0.4918709", "0.49117836", "0.49029917", "0.4898081", "0.48666328", "0.48662493", "0.4820428", "0.4810867", "0.48106065", "0.48036543", "0.47814035", "0.47713396", "0.47578475", "0.47558194", "0.4753297", "0.47518924", "0.47505", "0.47505" ]
0.5994194
0
Display schedules of upcoming content.
def schedule(request): return render(request, 'editorial/schedule.html', {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_schedule(self):\n for entry in self.entries:\n print(entry.get_entry_string())", "def tentative_schedule(request):\n\n\tshows_dict = {\n\t\t0: [],\n\t\t1: [],\n\t\t2: [],\n\t\t3: [],\n\t\t4: [],\n\t\t5: [],\n\t\t6: []\n\t}\n\n\tfor i in range(7):\n\t\tfor show in Show.objects.filter(day=i).order_by('time'):\n\t\t\t\tshow_time = show.time\n\t\t\t\tdj = str(show.dj)\n\t\t\t\tif show.co_dj and str(show.co_dj) != \"Unknown Dj\":\n\t\t\t\t\tdj += \" & \" + str(show.co_dj)\n\t\t\t\tshows_dict[i].append([dj, show_time.strftime('%I:%M %p')])\n\n\treturn render(request, 'tentative_schedule.html', {\n\t\t\t'shows_dict': shows_dict\n\t})", "def list(request, template='events/list.html'):\n return render(request, template, {\n 'events': Event.objects.get_upcoming().order_by('start_date'),\n })", "def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"", "def schedule(request):\n return render(request, 'vaxcharts/schedule.html')", "def printSchedule():\r\n print(\"{0:^45}\".format(\"Your Schedule:\\n\"))\r\n print(\" Day Class Time\")\r\n if(len(classes) == 0):\r\n print(\"\\nThere are no classes\\n\")\r\n return\r\n for class_ in classes:\r\n print(class_.scheduleString())\r\n print()", "def _view_schedule(self):\n def plus_top_attach(f):\n\n def plus(*args, **kwargs):\n top_attach, left_attach = f(*args, **kwargs)\n return top_attach + 1, left_attach + 1\n\n return plus\n\n @plus_top_attach\n def create_label(text, left_attach, right_attach,\n top_attach, bottom_attach, align=None):\n label = gtk.Label('<span font=\"%s\">%s</span>' %\n (Params().get_default_font(), text))\n label.set_use_markup(True)\n if align == 'left':\n label.set_alignment(xalign=0.0, yalign=0.5)\n elif align == 'right':\n label.set_alignment(xalign=1.0, yalign=0.5)\n self.table.attach(label, left_attach, right_attach,\n top_attach, bottom_attach, xoptions=gtk.FILL, yoptions=False)\n label.show()\n return top_attach, left_attach\n\n @plus_top_attach\n def create_separator(left_attach, right_attach,\n top_attach, bottom_attach):\n separator = gtk.HSeparator()\n self.table.attach(separator, left_attach, right_attach,\n top_attach, bottom_attach, xoptions=gtk.FILL, yoptions=False)\n separator.show()\n return top_attach, left_attach\n\n tattach, tlen, view_sch = 0, 0, Params().get_view_sch()\n for i in view_sch:\n if i:\n tlen += 1\n for day in ['Monday', 'Tuesday', 'Wednesday',\n 'Thursday', 'Friday', 'Saturday']:\n tattach = create_label('<b><span color=\"%s\">%s</span></b>' %\n (Params().get_day_color(), day), 0, tlen,\n tattach, tattach + 1, 'left')[0]\n tattach = create_separator(0, tlen, tattach, tattach + 1)[0]\n\n schedule = Schedule().get_schedule(day,\n Schedule().get_current_week() - 1)\n for i in range(8):\n if not schedule[i][1] == '' and \\\n (schedule[i][0] == Schedule().get_subgroup() or\n schedule[i][0] == 2):\n if not schedule[i][2]:\n label_color = '%s' % str(Params().get_lecture_color())\n elif schedule[i][2] == 1:\n label_color = '%s' % \\\n str(Params().get_laboratory_color())\n elif schedule[i][2] == 2:\n label_color = '%s' % str(Params().get_practice_color())\n else:\n label_color = '%s' % str(Params().get_non_color())\n\n label_template = '<span color=\"%s\">%s</span>'\n lattach = 0\n if view_sch[0]:\n lattach = create_label('<span color=\"%s\">%d.</span>' %\n (label_color, i),\n lattach, lattach + 1, tattach, tattach + 1)[1]\n if view_sch[1]:\n lattach = create_label(label_template % (label_color,\n 
'-'.join(Schedule().get_lessons_time()[i])),\n lattach, lattach + 1, tattach, tattach + 1)[1]\n if view_sch[2]:\n lattach = create_label(label_template %\n (label_color, schedule[i][1]),\n lattach, lattach + 1,\n tattach, tattach + 1, 'left')[1]\n if view_sch[3]:\n lattach = create_label(label_template %\n (label_color, schedule[i][3]),\n lattach, lattach + 1, tattach, tattach + 1)[1]\n if view_sch[4]:\n create_label(label_template %\n (label_color, schedule[i][4]),\n lattach, lattach + 1,\n tattach, tattach + 1, 'right')\n tattach += 1", "def schedule_text():", "def test_list_schedules(self):\n pass", "def print_list(schedule):\n START_TIME = 0\n END_TIME = 1\n MEETING_TITLE = 2\n print(\"\\nYour schedule for the day:\")\n if len(schedule) == 0:\n print(\"(empty)\\n\")\n else:\n for row in schedule:\n print(\n f\"{row[START_TIME]} - {row[END_TIME]} {row[MEETING_TITLE]}\")\n print(\"\\n\")", "def index():\n # return render_template('index.html', events=get_calendar_events_today(CALENDAR_URL))\n return render_template('index.html', events=get_calendar_events_limit(CALENDAR_URL), events_sorted=True)", "def schedule_content(request):\r\n\r\n stories = Story.objects.filter(organization=request.user.organization).exclude(archived=True)\r\n\r\n # data = {}\r\n # data['success'] = 1\r\n # data['result'] = []\r\n data = []\r\n\r\n for story in stories:\r\n # Facet Schedules\r\n for facet in story.facetstory.all():\r\n credit = {}\r\n for user in facet.credit.all():\r\n credit['id'] = []\r\n credit['id'].append(user.credit_name)\r\n credit['id'].append(user.get_absolute_url())\r\n editor = {}\r\n for user in facet.editor.all():\r\n editor['id'] = []\r\n editor['id'].append(user.credit_name)\r\n editor['id'].append(user.get_absolute_url())\r\n print credit\r\n if facet.due_edit:\r\n edit_event_dict = {}\r\n edit_event_dict['id'] = facet.id\r\n edit_event_dict['title'] = facet.name.encode('utf-8')\r\n edit_event_dict['description'] = facet.description.encode('utf-8')\r\n edit_event_dict['due-edit'] = time.mktime(facet.due_edit.timetuple())\r\n edit_event_dict['editor'] = facet.editor.credit_name\r\n edit_event_dict['credit'] = credit\r\n edit_event_dict['url'] = facet.get_absolute_url()\r\n edit_event_dict['start'] = time.mktime(facet.due_edit.timetuple()) * 1000\r\n edit_event_dict['end'] = (time.mktime(facet.due_edit.timetuple()) * 1000) + 60\r\n edit_event_dict['overlap'] = True\r\n edit_event_dict['allDay'] = False\r\n edit_event_dict['backgroundColor'] = '#00aced'\r\n edit_event_dict['textColor'] = '#fff'\r\n data.append(edit_event_dict)\r\n if facet.run_date:\r\n run_event_dict = {}\r\n run_event_dict['id'] = facet.id\r\n run_event_dict['title'] = facet.name.encode('utf-8')\r\n run_event_dict['description'] = facet.description.encode('utf-8')\r\n run_event_dict['due-edit'] = time.mktime(facet.due_edit.timetuple())\r\n run_event_dict['editor'] = facet.editor.credit_name\r\n run_event_dict['credit'] = credit\r\n run_event_dict['url'] = facet.get_absolute_url()\r\n run_event_dict['class'] = 'event_run'\r\n run_event_dict['start'] = time.mktime(facet.run_date.timetuple()) * 1000\r\n run_event_dict['end'] = (time.mktime(facet.run_date.timetuple()) * 1000) + 60\r\n run_event_dict['overlap'] = True\r\n run_event_dict['backgroundColor'] = '#5cb85c'\r\n run_event_dict['textColor'] = '#fff'\r\n data.append(run_event_dict)\r\n\r\n # print \"DATA: \", data\r\n\r\n return HttpResponse(json.dumps(data), content_type='application/json')", "def show_daySchedule(doc_user, date, logger):\n ret = []\n my_calendar 
= col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n if my_calendar != None:\n ret = my_calendar[\"schedules\"]\n\n show_events = []\n if ret:\n for schedule in ret:\n if schedule[\"date\"] == date:\n show_events += schedule[\"event\"]\n logger.info('{}: show chosen date schedule list = {}'.format(date, show_events))\n\n return show_events", "def schedule(request,status):\n\tnow = datetime.today()\n\tsched = Sample.objects.all()\n\tif status:\n\t\tsched = Sample.objects.filter(status=status)\n\tsched = sched.order_by('date_added')\n\tsched = [(now + timedelta(i+1), s) for i,s in enumerate(sched)]\n\treturn render_to_response('schedule/schedule.html', \n\t\t\t\t\t\t\t{'sched': sched},\n\t\t\t\t\t\t\tcontext_instance=RequestContext(request))", "def timeline(self, **kwargs):\n\n def rtm(n, multiple=10):\n \"\"\"Round to multiple.\"\"\"\n return int(multiple * round(float(n) / multiple))\n\n beginning_minutes = 7 * 60 + 20 # starting time is 7:20\n end_minutes = 21 * 60 # ending time is 21:00\n\n interval = 100 # 100 minutes for each period (90 + 10)\n\n total_minutes = ((end_minutes - beginning_minutes) // interval + 1) * interval\n number_of_intervals = total_minutes // interval\n\n segments = total_minutes // 10\n days = {i: [[' '] * segments + ['โ”‚']] for i in range(5)}\n\n for course in self.get_sorted_courses(include_unscheduled=False):\n i = (rtm(course.time.start) - beginning_minutes) // 10\n width = (rtm(course.time.end) - rtm(course.time.start)) // 10\n\n day = 0\n for j in range(i, i + width):\n if days[course.weekday()][day][j] != ' ':\n day += 1\n if len(days[course.weekday()]) == day:\n days[course.weekday()].append([' '] * segments + ['โ”‚'])\n\n days[course.weekday()][day][i] = '{'\n days[course.weekday()][day][i + width - 1] = '}'\n\n space = width - 2 # width minus { and }\n\n name = Ansi.color(\n course.abbreviation\n if len(course.abbreviation) <= space\n else course.abbreviation[: space - 1] + \".\",\n course_types[course.type].color,\n )\n\n # TODO: this doesn't center correctly, for some reason\n name = Ansi.center(name, space)\n\n days[course.weekday()][day][i + 1] = name\n for j in range(i + 2, i + width - 1):\n days[course.weekday()][day][j] = ''\n\n # print the header\n print(\n (\" โ•ญ\" + \"โ”€\" * (total_minutes // 10) + \"โ•ฎ\\n โ”‚\")\n + \"\".join(\n Ansi.bold(\n minutes_to_HHMM(beginning_minutes + interval * i)\n .strip()\n .ljust(10, \" \")\n )\n for i in range(number_of_intervals)\n )\n + \"โ”‚\\nโ•ญโ”€โ”€โ”€โ”€โ”ผโ”€\"\n + \"\".join(\n \"โ”€\" * number_of_intervals\n + (\"โ”€\" if i != number_of_intervals - 1 else \"โ”ค\")\n for i in range(number_of_intervals)\n )\n )\n\n for i in range(5):\n x = f\"โ”‚ {WD_EN[i][:2].capitalize()} โ”‚\"\n\n for j, day in enumerate(days[i]):\n if j == 0:\n print(x, end=\"\")\n else:\n print(\"โ”‚ โ”‚\", end=\"\")\n\n print(\"\".join(day))\n\n # print the very last line\n print(\n \"โ•ฐโ”€โ”€โ”€โ”€โ”ดโ”€\"\n + \"\".join(\n \"โ”€\" * number_of_intervals\n + (\"โ”€\" if i != number_of_intervals - 1 else \"โ•ฏ\")\n for i in range(number_of_intervals)\n )\n )", "def show_today_tasks(self):\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline == today.strftime('%Y-%m-%d')).all()\n print(f'Today {today.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. 
{task.task}')\n else:\n print('Nothing to do!')\n print()", "def baron_schedule(request):\n assert isinstance(request, HttpRequest)\n\n return render(\n request,\n 'AscensionESports_Baseline/schedule.html',\n {\n 'background': getBaronBackground(),\n 'color': getBaronColor(),\n 'title':'Baron League Schedule',\n 'query_results': Baron_Match_Report_Request(request),\n 'year': datetime.now().year,\n }\n )", "def _create_schedules(self):\n\n ''''''", "def index(request):\n\n\treturn render(request, 'index.html', {})\n\n\t# uncomment this line vvv and comment the above ^^^ line once we cut off scheduling\n\t#return render(request, 'cannot_schedule_anymore.html', {})", "def schedule_paragraph():", "def getSchedules(self) :\n return self.schedules", "def _PrintUserCalendars(self):\n\n feed = self.cal_client.GetAllCalendarsFeed()\n print 'Printing allcalendars: %s' % feed.title.text\n for i, a_calendar in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, a_calendar.title.text,)", "def _PrintOwnCalendars(self):\n\n feed = self.cal_client.GetOwnCalendarsFeed()\n print 'Printing owncalendars: %s' % feed.title.text\n for i, a_calendar in zip(xrange(len(feed.entry)), feed.entry):\n print '\\t%s. %s' % (i, a_calendar.title.text,)", "def my_schedule(request,username):\n\n user = get_object_or_404(User, username=username)\n user_profile = UserProfile.objects.get_or_create(user=user)[0]\n weekly_schedule = WeeklySchedule.objects.filter(user_profile=user_profile)\n\n userScheduleInlineFormSet = inlineformset_factory(UserProfile, WeeklySchedule,\n fields=('day_of_week', 'time_from', 'time_to'),\n extra=1, can_delete=True)\n\n # prepare data for rendering in table\n user_schedule = weekly_schedule.values_list('day_of_week','time_from','time_to')\n rows = pivot_schedule(user_schedule)\n\n if request.method == 'POST':\n formset = userScheduleInlineFormSet(request.POST, instance=user_profile,)\n if formset.is_valid():\n formset.save()\n return redirect('my_schedule', user.username)\n else:\n formset = userScheduleInlineFormSet(instance=user_profile,)\n\n return render(\n request,\n 'schedule/myschedule.html',\n {\n 'formset': formset,\n 'days_of_week': WeeklySchedule.DAY_OF_WEEK,\n 'data': rows,\n }\n )", "def print_schedule():\n clear_screen()\n print(\"====Current Schedule====\")\n days = ['sun', 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat']\n with open('current_courses.json', 'r') as current_file:\n schedule = json.load(current_file)\n for day in days:\n for val, val2 in schedule.items():\n if day in val2[0]:\n print(day, val, str(val2[1])+'-'+str(val2[2])+\" Presumed Grade: \"+ val2[3])\n return 0", "def get_schedules():\n return json.dumps(calendar.get_schedules())", "def main(to_be_scheduled):\n\n tasks = order_by_ftime(to_be_scheduled)\n print select_activity(tasks)", "def touragenda(request):\n active_events = TourAgendaModel.objects.order_by('number')\n friday_events = TourAgendaModel.objects.all().filter(day='FRIDAY')\n saturday_events = TourAgendaModel.objects.all().filter(day='SATURDAY')\n sunday_events = TourAgendaModel.objects.all().filter(day='SUNDAY')\n\n context = {\n 'active_events': active_events,\n 'friday_events': friday_events,\n 'saturday_events': saturday_events,\n 'sunday_events': sunday_events,\n }\n\n return render(request, 'tourAgenda.html', context=context)", "def timesheet_all(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet_all.html'\r\n )", "def all():\n schedule = Scheduler()\n schedule.committees()\n schedule.legislators()\n schedule.bills()" ]
[ "0.66718227", "0.64995277", "0.6440209", "0.6389953", "0.63754326", "0.6360611", "0.63576716", "0.632596", "0.62293255", "0.62261754", "0.6072113", "0.60381263", "0.6003317", "0.5986058", "0.5982519", "0.596582", "0.5961603", "0.5935291", "0.59298605", "0.5909142", "0.5903294", "0.58917093", "0.58813995", "0.58545196", "0.58133125", "0.5801927", "0.5778918", "0.5765591", "0.57617646", "0.57578486" ]
0.71254635
0
Test case for variables_get
def test_variables_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_variablepresentations_get(self):\n pass", "def test_variables_id_get(self):\n pass", "def test_extracting_one_value(self):\n\t\tself.assertEqual([\"b\"], au.extract_variables(bf.Var(\"b\")), \"Invalid variables extracted, expected [b].\")", "def test_variables_post(self):\n pass", "def get_variable_values(self, vars):\n raise NotImplementedError()", "def test_retrieving_variables(self):\n\t\turl = reverse('variables', args = ('b'))\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.data, {'b': 567})", "def _get_test_variables():\n if __name__ == \"__main__\":\n return _MOCKUP_TEST_VARIABLES\n else:\n return BuiltIn().get_variables()", "def test_simple_extraction_of_values(self):\n\t\tself.assertEqual([\"a\", \"b\"], au.extract_variables(bf.And([bf.Var(\"b\"), bf.Var(\"a\")])), \"Invalid variables extracted, expected [a, b].\")", "def test_variablepresentations_id_get(self):\n pass", "def test_load_variables_correct_string_01(self):\n var_dict = load_variables(self.correct_string_multiple_vars)\n self.assertIsNotNone(var_dict)\n self.assertEqual(len(var_dict), 2)", "def _get_vars(self, variables, required=None):\n return_dict = {}\n for variable in variables:\n return_dict[variable] = self.module.params.get(variable)\n else:\n if isinstance(required, list):\n for var_name in required:\n check = return_dict.get(var_name)\n if check is None:\n self.failure(\n error='Missing [ %s ] from Task or found a None'\n ' value' % var_name,\n rc=000,\n msg='variables %s - available params [ %s ]'\n % (variables, self.module.params)\n )\n return return_dict", "def test_variablepresentations_post(self):\n pass", "def test_variables_id_put(self):\n pass", "def query_variables(md):\n\n # save as dictionaries with searchers as keys\n x_searchers = {}\n b_target = {}\n\n t_max = 0\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n # print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2:my_var_name.find(\",\")])\n v = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\",\")])\n t = int(my_var_name[my_var_name.rfind(\",\") + 1:-1])\n\n # print('%s = %f ' % (my_var_name, my_var_value))\n x_searchers[(s, v, t)] = my_var_value\n\n if t > t_max:\n t_max = t\n\n elif 'beta' in my_var_name and '_s' not in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember: b[0] is probability of capture\n v = int(my_var_name[5:my_var_name.find(\",\")])\n t = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\"]\")])\n b_target[(v, t)] = my_var_value\n\n # make sure x is binary\n x_searchers = enforce_binary(x_searchers, t_max)\n b_target = enforce_sum_1(b_target, t_max)\n\n # x_searchers[(s, v, t)] and b_target[(v, t)]\n return x_searchers, b_target", "def test_load_variables_correct_string_00(self):\n var_dict = load_variables(self.correct_string_single_var)\n self.assertIsNotNone(var_dict)\n self.assertEqual(len(var_dict), 1)", "def Var(key):\n return vars[key]", "def _var(self, name=None, context=None):\n\t\tif name is None: name = None\n\t\tif context is None: context = self.context\n\t\tif (not name):\n\t\t\treturn context.getVariables().keys()\n\t\telif True:\n\t\t\treturn context.getVariables().get(name)", "def get_var(my_vars: dict, name: str):\n desired_var = my_vars.get(name)\n if desired_var is not None:\n return desired_var\n else:\n var_names = 'x, y, alpha, beta, zeta, psi'\n print('No variable with this name, current model accepts only:' + var_names)\n 
return None", "def test_get(self):\n self.assertEqual(self.tester.get('SEASON_ENVIRONMENT'), 'winter')\n self.assertEqual(self.tester.get('depth'), 0.15)", "def test_get_variable_non_existent_key(self):\n param = {'Name': '/airflow/variables/hello', 'Type': 'String', 'Value': 'world'}\n\n ssm_backend = SystemsManagerParameterStoreBackend()\n ssm_backend.client.put_parameter(**param)\n\n assert ssm_backend.get_variable(\"test_mysql\") is None", "def testTurntableVariables(self):\n crawler = Crawler.create(PathHolder(self.__exrFile))\n self.assertEqual(crawler.var(\"type\"), \"turntable\")\n self.assertEqual(crawler.var(\"category\"), \"render\")\n self.assertEqual(crawler.var(\"renderType\"), \"tt\")\n self.assertEqual(crawler.var(\"assetName\"), \"ass\")\n self.assertEqual(crawler.var(\"step\"), \"lookdev\")\n self.assertEqual(crawler.var(\"pass\"), \"beauty\")\n self.assertEqual(crawler.var(\"renderName\"), \"ass-default-beauty\")", "def __getitem__(self, key: str) -> Any:\n return self.variables[key]", "def __getitem__(self, key):\n return self.variables[key]", "def getVariables(self)->Dict[str,str]:\n pass", "def test_get_varnames(self):\n the_names = self.get_list_of_varnames()\n # I know that the current test file should contain the following\n ok_(\"I5\" in the_names)\n ok_(\">>\" in the_names)\n ok_(\"^^\" in the_names)", "def get_variables(self):\n\t\treturn self.variables", "def test_variables(self):\n with HTTMock(spark_cloud_mock):\n self.assertEqual(self.device.variables, self.cloud_device.variables)", "def test_load_variables_incorrect_string_00(self):\n var_dict = load_variables(self.incorrect_string)\n self.assertIsNotNone(var_dict)", "def query_and_print_variables(md):\n\n # save x variable as dictionary with keys (s, v, t)\n x_searchers = {}\n # save beta variable as dictionary with keys (v, t)\n b_target = {}\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2])\n v = int(my_var_name[4])\n t = int(my_var_name[6])\n\n if my_var_value >= 0.5:\n x_searchers[(s, v, t)] = 1\n else:\n x_searchers[(s, v, t)] = 0\n\n elif 'beta' in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember b[0] is probability of capture\n v = int(my_var_name[5])\n t = int(my_var_name[7])\n b_target[v, t] = my_var_value\n\n obj = md.getObjective()\n print(obj.getValue())\n\n return x_searchers, b_target", "def test_variables(x, y, z):\n a = x * y\n b = y * a\n c = a + b\n return c / z" ]
[ "0.8215227", "0.7764042", "0.6906656", "0.69032335", "0.6889915", "0.688419", "0.6710159", "0.6670743", "0.6668918", "0.66338056", "0.655833", "0.6541936", "0.6481389", "0.6392791", "0.63722074", "0.63574237", "0.6326553", "0.63067013", "0.62641114", "0.6202791", "0.61986876", "0.6181659", "0.6175901", "0.6160077", "0.61453265", "0.61399704", "0.61358476", "0.61288995", "0.6122308", "0.6078585" ]
0.90671307
0
Test case for variables_id_delete
def test_variables_id_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_variablepresentations_id_delete(self):\n pass", "def test_variables_id_put(self):\n pass", "def test_variables_id_get(self):\n pass", "def delete(self, _id):", "def test_data_source_soaps_id_dynamic_datas_delete(self):\n pass", "def test_delete_identity(self):\n pass", "def test_data_source_soaps_id_delete(self):\n pass", "def delete(thing, id_):\n pass", "def test_workflows_id_delete(self):\n pass", "def test_delete_run(self):\n pass", "def test_variable_delete(self):\n self.trace('del x', env={'x': 1})\n\n events = self.variable_events\n self.assertEqual(len(events), 1)\n event = events[0]\n self.assertIsInstance(event, TraceDelete)\n self.assertEqual(event.name, 'x')", "def delete_variable(self, id):\n\n\t\tif not isinstance(id, int):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: id EXPECTED TYPE: int', None, None)\n\t\t\n\t\thandler_instance = CommonAPIHandler()\n\t\tapi_path = ''\n\t\tapi_path = api_path + '/crm/v2/settings/variables/'\n\t\tapi_path = api_path + str(id)\n\t\thandler_instance.set_api_path(api_path)\n\t\thandler_instance.set_http_method(Constants.REQUEST_METHOD_DELETE)\n\t\thandler_instance.set_category_method(Constants.REQUEST_METHOD_DELETE)\n\t\ttry:\n\t\t\tfrom zcrmsdk.src.com.zoho.crm.api.variables.action_handler import ActionHandler\n\t\texcept Exception:\n\t\t\tfrom .action_handler import ActionHandler\n\t\treturn handler_instance.api_call(ActionHandler.__module__, 'application/json')", "def test_delete1(self):\n pass", "def test_datatransformationsetups_id_delete(self):\n pass", "def test_user_id_delete(self):\n pass", "def test_coupledmodels_id_delete(self):\n pass", "def test_variablepresentations_id_put(self):\n pass", "def test_delete_occurrence(self):\n pass", "def test_delete7(self):\n pass", "def test_delete(self):\n pass", "def test_data_source_soaps_id_dynamic_datas_fk_delete(self):\n pass", "def test_delete_alert_by_id(self):\n pass", "def test_delete_on_background_response_descriptor_variables_library_variable_set_library_variable_set_resource(self):\n pass", "def test_delete_case(self):\n pass", "def DeleteVariable(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete():", "def test_delete_delete_and_delete_id_not_equal(self):\n doc = TestDoc(\"1\", \"test\")\n self.assertNotEqual(\n BulkActionItem.delete(doc),\n BulkActionItem.delete_id(doc.id),\n )", "def _delete():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no deleting id input')\n\t\treturn 1\n\n\tfor ID in IDs:\n\t\tmyTask = query.get(ID)\n\t\tmyTaskSession.delete(myTask)\n\n\t\n\tmyTaskSession.commit()\n\n\treturn 0", "def test_delete(self):\n SampleTemplate.create(self.metadata, self.new_study)\n SampleTemplate.delete(2)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.required_sample_info WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.study_sample_columns WHERE study_id=2\")\n exp = []\n self.assertEqual(obs, exp)\n with self.assertRaises(QiitaDBExecutionError):\n self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.sample_2\")", "def test_delete_goal(self):\n pass" ]
[ "0.8686963", "0.6904678", "0.6892248", "0.67542267", "0.6735828", "0.66662467", "0.66563714", "0.6654981", "0.6652009", "0.6647733", "0.662836", "0.66211337", "0.6588618", "0.6571248", "0.65261865", "0.6509833", "0.6506774", "0.64923763", "0.6465943", "0.6455365", "0.64396185", "0.64268523", "0.6397001", "0.6382152", "0.6381842", "0.6331393", "0.63154393", "0.63081235", "0.6304422", "0.6302722" ]
0.92804
0
Test case for variables_id_get
def test_variables_id_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_variablepresentations_id_get(self):\n pass", "def test_variables_id_put(self):\n pass", "def test_variablepresentations_id_put(self):\n pass", "def test_variables_get(self):\n pass", "def test_variables_id_delete(self):\n pass", "def test_prefectures_id_get(self):\n pass", "def test_variablepresentations_get(self):\n pass", "def test_drugs_id_get(self):\n pass", "def getVar(self, id):\n if id in self.variables:\n return self.variables[id]", "def test_intercommunalitys_id_get(self):\n pass", "def test_brains_id_get(self):\n pass", "def test_solareclipses_id_get(self):\n pass", "def test_metrostations_id_get(self):\n pass", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def test_get_measure_parameters_by_id(self):\n pass", "def test_cyclingleagues_id_get(self):\n pass", "def test_austriansettlements_id_get(self):\n pass", "def getID():", "def test_racetracks_id_get(self):\n pass" ]
[ "0.8552639", "0.7692867", "0.71789104", "0.6928761", "0.6813583", "0.67081404", "0.6625167", "0.65886503", "0.64681137", "0.641009", "0.6402178", "0.6359477", "0.6327255", "0.63117224", "0.63117224", "0.63117224", "0.63117224", "0.63117224", "0.63117224", "0.63117224", "0.63117224", "0.63117224", "0.63117224", "0.63117224", "0.63117224", "0.61903626", "0.61828494", "0.6144922", "0.6116843", "0.610476" ]
0.91430855
0
Test case for variables_id_put
def test_variables_id_put(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_variablepresentations_id_put(self):\n pass", "def test_variables_id_get(self):\n pass", "def test_variables_id_delete(self):\n pass", "def test_variablepresentations_id_get(self):\n pass", "def test_data_source_soaps_id_put(self):\n pass", "def test_user_id_put(self):\n pass", "def test_workflows_id_put(self):\n pass", "def test_variablepresentations_id_delete(self):\n pass", "def test_data_source_soaps_id_replace_post(self):\n pass", "def test_datatransformationsetups_id_put(self):\n pass", "def test_prefectures_id_get(self):\n pass", "def test_variables_post(self):\n pass", "def test_coupledmodels_id_put(self):\n pass", "def test_adding_variable(self):\n\t\turl = reverse('variables')\n\t\tdata = {'variableName': 'a', 'variableValue': 123}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\ts = self.client.session\n\t\tself.assertEqual(s['variables'], {'a': 123, 'b':567, 'c': 936})\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data, {'variableName': 'a', 'variableValue': 123})", "def test_convert_id():", "def test_variables_get(self):\n pass", "def test_workflows_id_replace_post(self):\n pass", "def test_id(self):\n node = Node()\n node.id = \"1234\"\n self.assertEqual(node.getId(), node.id)", "def test_get_variable_non_existent_key(self):\n param = {'Name': '/airflow/variables/hello', 'Type': 'String', 'Value': 'world'}\n\n ssm_backend = SystemsManagerParameterStoreBackend()\n ssm_backend.client.put_parameter(**param)\n\n assert ssm_backend.get_variable(\"test_mysql\") is None", "def test_team_template_folders_id_put(self):\n pass", "def test_api_v1_groups_id_put(self):\n pass", "def check_id(self, id):", "def updateVar(self, id, value, type_):\n if id in self.variables:\n symbol = self.variables[id]\n symbol = sym.Symbol(id, value, type_, symbol.row, symbol.column)\n self.variables[id] = symbol\n return True", "def test_groups_group_id_state_put(self):\n pass", "def test_id_creation(self):\n user_1_id = eval(\"uuid.UUID('\" + self.user_1.id + \"')\")\n self.assertIsInstance(user_1_id, uuid.UUID)", "def test_variablepresentations_post(self):\n pass", "def test_id_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_team_swimmer_id(input_val)\n self.assertEqual(output_val, self.line.team_swimmer_id)", "def test_update_telegram_id_success(self):\n test_data = {'telegram_id': 100}\n url = reverse('telegram_id')\n response = self.client.put(url, json.dumps(test_data), content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def testSetId(self):\n ele = WorkQueueElement(RequestName='testIdImmutable')\n before_id = ele.id\n ele.id = 'something_new'\n self.assertEqual('something_new', ele.id)\n self.assertNotEqual(before_id, ele.id)", "def test_update_car_valid_id():\n car_data = {\n \"id\": 1,\n \"make\": \"BMW\",\n \"model\": \"3 Series New\",\n \"year\": 2019,\n \"vin\": \"JH4CU2F60AC794232\",\n }\n response = client.put(\"/1\", data=car_data)\n assert response.status_code == STATUS_OK\n assert response.json() == car_data\n\n # Checking data persistence with get\n response = client.get(\"/1\")\n assert response.json() == car_data" ]
[ "0.8485972", "0.7542932", "0.69008446", "0.6871325", "0.64289016", "0.6362585", "0.6187098", "0.60498357", "0.5927218", "0.58996814", "0.58913213", "0.58510417", "0.5832242", "0.5727545", "0.5676216", "0.5657101", "0.5649096", "0.55572104", "0.55464125", "0.5542622", "0.55351985", "0.5487976", "0.5468195", "0.5467136", "0.54353505", "0.5432118", "0.5400532", "0.5389785", "0.5359635", "0.535925" ]
0.90912896
0
Test case for variables_post
def test_variables_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_variablepresentations_post(self):\n pass", "def vefi_postprocessing(variables):\n return variables", "def test_adding_variable(self):\n\t\turl = reverse('variables')\n\t\tdata = {'variableName': 'a', 'variableValue': 123}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\ts = self.client.session\n\t\tself.assertEqual(s['variables'], {'a': 123, 'b':567, 'c': 936})\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data, {'variableName': 'a', 'variableValue': 123})", "def test_variables_get(self):\n pass", "def fpi_postprocessing(variables):\n return variables", "def mag_postprocessing(variables):\n return variables", "def rpa_postprocessing(variables):\n return variables", "def idm_postprocessing(variables):\n return variables", "def test_post(self):\n pass", "def extract_variables(expected_variables, _request):\n extracted_variables = {}\n for variable in expected_variables:\n form_var = _request.form.get(variable)\n args_var = _request.args.get(variable)\n if form_var and args_var:\n extracted_variables[variable] = [form_var, args_var]\n else:\n extracted_variables[variable] = form_var if form_var else args_var\n return extracted_variables", "def nacs_postprocessing(variables):\n return variables", "def wats_postprocessing(variables):\n return variables", "def lang_postprocessing(variables):\n return variables", "def test_v2_recognize_post(self):\n pass", "def setvariables(self, request, contextvars, thevars):\n postdata = {}\n if request.POST:\n postdata = dict(request.POST.dict())\n for var in thevars:\n if postdata.get(\"custom_\"+var):\n contextvars[var] = postdata.get(\"custom_\"+var)\n else:\n try:\n contextvars[var] = thevars[var]\n except Exception:\n pass\n return contextvars", "def test_simple_extraction_of_values(self):\n\t\tself.assertEqual([\"a\", \"b\"], au.extract_variables(bf.And([bf.Var(\"b\"), bf.Var(\"a\")])), \"Invalid variables extracted, expected [a, b].\")", "def test_instance_vars_with_values(question):\n whitelist = {\n (\"edges\", \"edgeType\"),\n (\"neighbors\", \"neighborTypes\"),\n (\"neighbors\", \"style\"),\n (\"routes\", \"rib\"),\n (\"routes\", \"prefixMatchType\"),\n (\"bgpRib\", \"prefixMatchType\"),\n (\"evpnRib\", \"prefixMatchType\"),\n }\n instance = question[\"instance\"]\n qname = instance[\"instanceName\"]\n for name, var in instance.get(\"variables\", {}).items():\n assert (\n \"allowedValues\" not in var\n ), \"variable {} should migrate to values\".format(name)\n if (qname, name) in whitelist:\n # Whitelisted, skip check that description is present\n continue\n\n for value in var.get(\"values\", []):\n assert (\n \"description\" in value\n ), \"add description to {} or whitelist it\".format(name)", "def test_variables_id_put(self):\n pass", "def test_dict_for_request_in_method_post(self):\n self.request.POST = {\"foo\": \"bar\"}\n response = self.panel.process_request(self.request)\n self.panel.generate_stats(self.request, response)\n # ensure the panel POST request data is processed correctly.\n content = self.panel.content\n self.assertIn(\"foo\", content)\n self.assertIn(\"bar\", content)", "def test_extracting_one_value(self):\n\t\tself.assertEqual([\"b\"], au.extract_variables(bf.Var(\"b\")), \"Invalid variables extracted, expected [b].\")", "def parse_post_values(self): \n self.parse_values(sys.stdin.read())", "def post(self):\n p = json.loads(self.request.body)\n\n if not p or 'config' not in p:\n self.NotFound('Unable to find pipeline config in json request.')\n 
else:\n logging.info('config is:\\n%r', p['config'])\n variable_names = GetVariableAttributes(p['config'])\n logging.info('var names is %r', variable_names)\n variables = p.get('variables', [])\n variables = dict([(v.get('name', ''), v) for v in variables])\n\n for v in set(variables.keys()) - variable_names:\n del variables[v] # remove vars not in variable_names\n for v in variable_names:\n variables.setdefault(v, {'name': v}) # add missing variables\n p['variables'] = variables.values()\n logging.info('returning variables %r from %r', variables, variable_names)\n self.SendJson(p)", "def verify_post_data ( ):\n # check every field is present\n try:\n request.json[ 'source_lang' ]\n request.json[ 'target_lang' ]\n request.json[ 'text' ]\n\n TranslatorApp.verify_rpc_value ( request.json )\n\n except KeyError: # All the values are not present\n # 400 Bad Request\n abort ( 400, \"All mandatory fields are not provided\" )\n except ValueError as err:\n # 422 Unprocessable Entity\n abort ( 422, \"Unprocessable value: {0}\".format ( err.args ) )\n except BadRequest:\n # 400 Bad Request\n abort ( 400, \"Provided values are having malformed syntax\" )", "def test_variablepresentations_get(self):\n pass", "def test_retrieving_variables(self):\n\t\turl = reverse('variables', args = ('b'))\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.data, {'b': 567})", "def _validate_post(self, value, name, result):\n return result", "def test_data_object_post(self):\n pass", "def post():\n pass", "def test_data_parse_vanilla_postdat(self):\n lines = [\"var=val&var2=val2\"]\n dat, dat_type = parser._parse_data(lines)\n self.assertEqual(\"var=val&var2=val2\", dat)", "def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)" ]
[ "0.81294644", "0.6594173", "0.65466845", "0.6435603", "0.6416398", "0.6214588", "0.6195658", "0.61302716", "0.6070885", "0.59662485", "0.5965029", "0.5921644", "0.5803392", "0.57740813", "0.57717896", "0.5768627", "0.57051903", "0.56980747", "0.56764126", "0.56758875", "0.56173724", "0.55823", "0.55816644", "0.55784833", "0.55679", "0.5545125", "0.55240345", "0.5510972", "0.55010766", "0.5469154" ]
0.8911665
0