field            dtype              range
query            stringlengths      9 - 9.05k
document         stringlengths      10 - 222k
metadata         dict               -
negatives        sequencelengths    30 - 30
negative_scores  sequencelengths    30 - 30
document_score   stringlengths      4 - 10
document_rank    stringclasses      2 values
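A minimal sketch of loading rows with this schema via the Hugging Face datasets library; the dataset identifier below is a placeholder, not the actual repository name.

# Hypothetical loading example; "user/code-retrieval-triplets" is a
# placeholder identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")
row = ds[0]
print(row["query"])              # natural-language docstring query
print(len(row["negatives"]))     # 30 hard-negative code snippets
print(row["document_score"], row["document_rank"])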
Do a NetInf PUBLISH for one file; file_name is the file to publish
def pubone(file_name, alg, host):

    hash_alg = alg
    scheme = "ni"
    rform = "json"
    ext = "{ \"meta\": { \"pubdirs\" : \"yep\" } }"

    # record start time of this
    stime = time.time()

    # Create NIdigester for use with form encoder and StreamingHTTP
    ni_digester = NIdigester()
    # Install the template URL built from the scheme, the authority and the digest algorithm
    rv = ni_digester.set_url((scheme, host, "/%s" % hash_alg))
    if rv != ni_errs.niSUCCESS:
        nilog("Cannot construct valid ni URL: %s" % ni_errs_txt[rv])
        return
    debug(ni_digester.get_url())
    # Open the file if possible
    try:
        f = open(file_name, "rb")
    except Exception, e:
        debug("Cannot open file %s: Error: %s" % (file_name, str(e)))
        return
    # Guess the mimetype of the file
    m = magic.Magic(mime=True)
    ctype = m.from_file(file_name)
    debug("Content-Type: %s" % ctype)
    if ctype is None:
        # Guessing didn't work - default
        ctype = "application/octet-stream"
    # Set up HTTP form data for publish request
    # Make parameter for file with digester
    octet_param = MultipartParam("octets",
                                 fileobj=f,
                                 filetype=ctype,
                                 filename=file_name,
                                 digester=ni_digester)
    # Make dictionary that will dynamically retrieve ni URI when it has been made
    uri_dict = {"generator": octet_param.get_url,
                "length": (len(ni_digester.get_url()) + len(";") +
                           ni_digester.get_b64_encoded_length())}
    msgid = str(random.randint(1, 2**64))
    param_list = [octet_param,
                  ("URI", uri_dict),
                  ("msgid", msgid),
                  ("ext", ext),
                  ("fullPut", "yes"),
                  ("rform", rform)]
    # Construct data generator and header strings
    datagen, headers = multipart_encode(param_list)
    if verbose:
        debug("Parameters prepared: %s" % "".join(datagen))

    # Set up streaming HTTP mechanism - register handlers with urllib2
    # get out for now, don't do it
    opener = streaminghttp.register_openers()
    # Where to send the publish request.
    http_url = "http://%s/netinfproto/publish" % host
    # debug("Accessing: %s" % http_url)
    # Send POST request to destination server
    fsize = os.path.getsize(file_name)
    nilog("%s,PUBLISH tx,file,%s,size,%d,to,%s" % (msgid, file_name, fsize, host))
    try:
        req = urllib2.Request(http_url, datagen, headers)
    except Exception, e:
        nilog("%s,PUBLISH tx error" % msgid)
        if verbose:
            nilog("Error: Unable to create request for http URL %s: %s" %
                  (http_url, str(e)))
        f.close()
        return
    # Get HTTP results
    try:
        http_object = urllib2.urlopen(req)
    except Exception, e:
        nilog("%s,PUBLISH rx error" % msgid)
        if verbose:
            nilog("Error: Unable to access http URL %s: %s" % (http_url, str(e)))
        f.close()
        return
    f.close()
    if verbose:
        nilog("Digester result: %s" % octet_param.get_url())
    # Get message headers
    http_info = http_object.info()
    http_result = http_object.getcode()
    if verbose:
        debug("HTTP result: %d" % http_result)
        debug("Response info: %s" % http_info)
        debug("Response type: %s" % http_info.gettype())

    # Read results into buffer
    payload = http_object.read()
    http_object.close()
    # debug(payload)
    # Report outcome
    if http_result != 200:
        if verbose:
            debug("Unsuccessful publish request returned HTTP code %d" %
                  http_result)
        nilog("%s,PUBLISH rx error bad response status,%d" % (msgid, http_result))
        return
    # Check content type of returned message matches requested response type
    ct = http_object.headers["content-type"]
    if ct != "application/json":
        if verbose:
            debug("Error: Expecting JSON coded (application/json) "
                  "response but received Content-Type: %s" % ct)
        nilog("%s,PUBLISH rx error bad content type,%s" % (msgid, ct))
        return
    # If output of response is expected, print in the requested format
    if verbose:
        nilog("Publication of %s successful:" % file_name)

    # JSON cases
    try:
        json_report = json.loads(payload)
    except Exception, e:
        if verbose:
            nilog("Error: Could not decode JSON report '%s': %s" % (payload, str(e)))
        nilog("%s,PUBLISH rx error bad json decode" % msgid)
        return

    if verbose:
        print json.dumps(json_report, indent=4)
    etime = time.time()
    duration = etime - stime
    niuri = json_report["ni"]
    nilog("%s,PUBLISH rx fine,ni,%s,size,%d,time,%10.10f" % (msgid, niuri, fsize, duration * 1000))

    return niuri
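A usage sketch for pubone() above, assuming the surrounding module's helpers (nilog, NIdigester, streaminghttp) are importable and a NetInf node is reachable; the host name is illustrative only.

# Hypothetical invocation; "netinf.example.org" is a placeholder node.
niuri = pubone("photo.jpg", "sha-256", "netinf.example.org")
if niuri is not None:
    print("Published as %s" % niuri)   # e.g. ni://netinf.example.org/sha-256;...
else:
    print("Publish failed; check the nilog output for the msgid and error")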
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, filename):\n # 1) Encrypt file\n # 2) Publish to remote cloud server\n # 3) Wait for the result\n # 4) Store results in files located inside RAM folder", "def detect(self, filename):\n self.publish(filename)", "def publishUploads(self, manualVerify = True):\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.publishRepoItem(key, manualVerify = manualVerify)", "def publish(self, file_name, c_id, size, torrent, files): # ver lo del id del cliente\n dht = get_remote_node(self.dht_ip, self.dht_port)\n v = dht.get(get_hash(file_name))\n\n if v == None:\n dht.set(get_hash(file_name), [c_id])\n cantstep = dht.get(get_hash(maxstep))\n print(\"cantstep\", cantstep)\n l = len(dht.get(get_hash(filestep + \"|\" + str(cantstep))))\n if l == lenstep: #create new step\n print(\"full step\")\n dht.set(get_hash(maxstep), cantstep + 1)\n dht.set(get_hash(filestep + \"|\" + str(cantstep + 1)), [file_name])\n else:\n all = dht.get(get_hash(filestep + \"|\" + str(cantstep)))\n all.append(file_name)\n dht.set(get_hash(filestep + \"|\" + str(cantstep)), all)\n k = sizefile + \"|\" + file_name\n dht.set(get_hash(k), size)\n dht.set(get_hash(file_name + \".torrent\"), torrent) #first time to publish this .torrent\n else:\n if not v.__contains__(c_id):\n v.append(c_id)\n dht.set(get_hash(file_name), v)\n\n dht.set(get_hash(myfiles + \"|\" + str(c_id)),files)\n print(\"client \", c_id, \"published file \", file_name)", "def publish():\n pass", "def create_publication(repo):\n return pulpperf.interact.post('/pulp/api/v3/publications/file/file/',\n data={'repository': repo})['task']", "def publish(self, settings, item):\n\n publisher = self.parent\n engine = publisher.engine\n document = item.properties[\"document\"]\n\n path = _document_path(document)\n item.properties[\"upload_path\"] = path\n item\n psdProject = PSDImage.open(path)\n\n #save layers to link and create new task to do so\n for layer in psdProject:\n layer.compose().save(layer.name+'.tiff')\n self.logger.info(\"Saved Layer {layerName}.psd\".format(layerName=layer.name))\n publish = sgtk.util.register_publish(publisher.sgtk,\n item.context,\n os.path.join(os.path.dirname(path),layer.name+'.tiff'),\n layer.name,\n version_number=None,\n published_file_type=\"Rendered Image\")", "async def insert_file(self, file_name: FormalName, desired_copies: int, packets: int, size: int, fetch_prefix: FormalName):\n # send command interest\n file = File()\n file.file_name = file_name\n file.desired_copies = desired_copies\n file.packets = packets \n file.size = size\n fetch_path = FetchPath()\n fetch_path.prefix = fetch_prefix\n cmd = RepoCommand()\n cmd.file = file\n cmd.sequence_number = 0\n cmd.fetch_path = fetch_path\n cmd_bytes = cmd.encode()\n\n # publish msg to repo's insert topic\n await self.pb.wait_for_ready()\n print(Name.to_str(self.repo_prefix + ['insert']))\n is_success = await self.pb.publish(self.repo_prefix + ['insert'], cmd_bytes)\n if is_success:\n logging.info('Published an insert msg and was acknowledged by a subscriber')\n else:\n logging.info('Published an insert msg but was not acknowledged by a subscriber')\n return is_success", "def files_distribute(self):\n self._post('files/distribute')", "def cvmfsPublish(reponame = None):\n if reponame == None:\n reponame = _getRepoName()\n\n rc = subprocess.call([\"cvmfs_server\", \"publish\", \"-f\", reponame])\n if rc != 0:\n raise RuntimeError(\"Could not publish CVMFS 
transaction\")", "def publish_files():\n print(\"Publishing files to the internet...\", end=\"\", flush=True)\n import subprocess\n try:\n subprocess.run(\"./upload.sh\", timeout=120.0)\n print(\"done.\\n\")\n except:\n print(\"failed.\\n\")", "def do_push_file(dbsync, bibkey):\n dbsync.push_file_to_dpt(bibkey)", "def create_distribution(name, base_path, pub):\n return pulpperf.interact.post('/pulp/api/v3/distributions/file/file/',\n data={'name': name, 'base_path': base_path, 'publication': pub})['task']", "def _on_package_request(self, file_name: str, chunk_index: int) -> None:\n message = self.message_factory.make_from_package_request(\n file_name, chunk_index\n )\n if not self.connectivity_service.publish(message):\n self.message_queue.put(message)", "def save_publish():\n import mop\n\n path = cmds.file(query=True, location=True)\n work_dir = os.path.dirname(path)\n publish_dir = os.path.join(work_dir, \"release\")\n\n highest_publish = None\n highest_version = -1\n\n for f in os.listdir(publish_dir):\n ext = os.path.splitext(f)[-1]\n if ext == \".ma\":\n pattern = r\"v(?P<version>\\d{3})\"\n regex = re.compile(pattern)\n match = regex.search(f)\n if match:\n version = int(match.group(\"version\"))\n if version > highest_version:\n highest_version = version\n highest_publish = f\n\n new_path = mop.increment_version(os.path.join(publish_dir, highest_publish))\n cmds.file(rename=new_path)\n cmds.file(save=True, force=True)", "def send_to_restore(file_name, data):\n urlfetch.fetch(url=config.RESTORE_URL + '?name=' + file_name + '&source=db&packet',\n payload=urllib.urlencode({\"data\": services.event.entity_to_string(data)}),\n method=urlfetch.POST)", "def file_message(self, update, context):\n # asigns file_tipe according to the program status\n if self.loading_meals:\n file_type = \"_meals\"\n elif self.load_ingridients:\n file_type = \"_ingridients\"\n else:\n # if the script isn't loading_meals or loading_ingridients\n # do a return so the function stop\n return 0\n # unique file name is created\n file_name = str(update.message.chat.id) + file_type + \".csv\"\n # the file is retrieved from the chat\n file = context.bot.get_file(update.message.document.file_id)\n # the file is downloaded\n file.download(file_name)", "def publish(self, message: str) -> None:\n if __debug__:\n logger.warning(\n \"WARN: Unnecessary call on publish on FileDistroStream\"\n )", "def transfer(file_obj):", "def content_file_name(instance, filename):\r\n return '/'.join([str(instance.app.publisher.id), str(instance.app.id), filename])", "def upload_file(self, file_path, file_name, output_path):", "def test_publish_with_add_first_file_attachment(self):\n draft = self._get_draft()\n draft.target_people.add(\n User.objects.create_user(username='testuser'))\n review_request = draft.review_request\n self.assertEqual(draft.file_attachments_count, 0)\n self.assertEqual(draft.inactive_file_attachments_count, 0)\n self.assertEqual(review_request.file_attachments_count, 0)\n self.assertEqual(review_request.inactive_file_attachments_count, 0)\n\n attachment = self.create_file_attachment(review_request,\n draft=draft,\n caption='',\n draft_caption='Test Caption')\n self.assertEqual(draft.file_attachments_count, 1)\n self.assertEqual(draft.inactive_file_attachments_count, 0)\n self.assertEqual(review_request.file_attachments_count, 0)\n self.assertEqual(review_request.inactive_file_attachments_count, 0)\n\n changes = draft.publish()\n\n attachment = FileAttachment.objects.get(pk=attachment.pk)\n 
self.assertEqual(attachment.caption, 'Test Caption')\n\n fields = changes.fields_changed\n\n self.assertEqual(fields['files'], {\n 'new': [\n (attachment.display_name,\n attachment.get_absolute_url(),\n attachment.pk)\n ],\n 'added': [\n (attachment.display_name,\n attachment.get_absolute_url(),\n attachment.pk)\n ],\n 'old': [],\n 'removed': [],\n })\n self.assertEqual(review_request.file_attachments_count, 1)\n self.assertEqual(review_request.inactive_file_attachments_count, 0)", "def test_file_package_request(self):\n file_name = \"file_name\"\n chunk_index = 0\n\n expected_topic = self.factory.common_topic + WAPMF.FILE_BINARY_REQUEST\n expected_payload = json.dumps(\n {\n \"name\": file_name,\n \"chunkIndex\": chunk_index,\n }\n )\n expected_message = Message(expected_topic, expected_payload)\n serialized_message = self.factory.make_from_package_request(\n file_name, chunk_index\n )\n\n self.assertEqual(expected_message, serialized_message)", "def store(self, filename):", "def sendFile(self, fullfilename):\n raise NotImplementedError(\"Implement this method in child class\")", "def publish(self, id: uplink.Path):\n pass", "def publish(self):\n #vprint(\"PUBLISHING \",self.__dict__)\n \n js = self.compute_json()\n name = self.name\n #topicdir = \"/topicd/\" if constants.publishToS3Dev else \"/topic/\"\n s3path = constants.compositeDir+\"/\"+name+\"/main.json\" #the path where the page will finally end up\n s3.s3SetContents(s3path,contents=js,relativeTo=\"\",contentType=\"application/json\")\n self.genPage()", "def _do_maya_post_publish(self, work_template, progress_cb): \n import maya.cmds as cmds\n \n progress_cb(0, \"Versioning up the scene file\")\n \n # get the current scene path:\n scene_path = os.path.abspath(cmds.file(query=True, sn=True))\n \n # increment version and construct new file name:\n progress_cb(25, \"Finding next version number\")\n fields = work_template.get_fields(scene_path)\n next_version = self._get_next_work_file_version(work_template, fields)\n fields[\"version\"] = next_version \n new_scene_path = work_template.apply_fields(fields)\n \n # log info\n self.parent.log_debug(\"Version up work file %s --> %s...\" % (scene_path, new_scene_path))\n \n # rename and save the file\n progress_cb(50, \"Saving the scene file\")\n cmds.file(rename=new_scene_path)\n cmds.file(save=True)\n \n #updating shotgun task status to cmpt---\n \n progress_cb(100)", "def send_file(cobj, dest, port, fname, hash, handler):\n pass", "def compute_published_path_to_file(self, file_to_publish: str) -> str:\n\n return os.path.join(\n self.publish_dir,\n os.path.basename(file_to_publish),\n )" ]
[ "0.64287066", "0.60361385", "0.5941095", "0.59345895", "0.5917054", "0.57990074", "0.57726157", "0.57401097", "0.57236075", "0.57073116", "0.5628994", "0.55332154", "0.5481817", "0.54700124", "0.54092836", "0.54011065", "0.5339769", "0.53389674", "0.5331978", "0.5296641", "0.529294", "0.52781814", "0.52750504", "0.52576613", "0.52543974", "0.52376753", "0.5219733", "0.52176374", "0.52157617", "0.51972914" ]
0.6248407
1
Command line program to perform a NetInf 'publish' operation using http convergence layer. Uses NIproc global instance of NI operations class
def py_nipubdir():
    # Options parsing and verification stuff
    usage = "%%prog -d <pathname of content directory> -n <FQDN of netinf node> [-a <hash alg>] [-m NN] [-c count]"
    parser = OptionParser(usage)
    parser.add_option("-d", "--dir", dest="dir_name",
                      type="string",
                      help="Pathname for directory to be published.")
    parser.add_option("-a", "--alg", dest="hash_alg", default="sha-256",
                      type="string",
                      help="Hash algorithm to be used for NI URIs. Defaults to sha-256.")
    parser.add_option("-n", "--node", dest="host",
                      type="string",
                      help="The FQDN where I'll send PUBLISH messages.")
    parser.add_option("-m", "--multiprocess", dest="mprocs", default=1,
                      type="int",
                      help="The number of client processes to use in a pool (default 1)")
    parser.add_option("-c", "--count", dest="count", default=0,
                      type="int",
                      help="The number of files to publish (default: all)")

    (options, args) = parser.parse_args()

    # Check command line options:
    # Arguments -h is optional, all others needed
    # Specifying more than one of -w, -p, -j and -v is inappropriate.
    if len(args) != 0:
        parser.error("Unrecognized arguments %s supplied." % str(args))
        sys.exit(-1)
    if options.dir_name == None:
        parser.error("You must supply a directory name with -d")
        sys.exit(-1)
    if options.host == None:
        parser.error("You must supply a host name with -n")
        sys.exit(-1)

    nilog("Starting nipubdir,dir,%s,to,%s,alg,%s,processes,%d,count,%d" %
          (options.dir_name, options.host, options.hash_alg, options.mprocs, options.count))

    # loop over all files below directory and pubone() for each we find
    count, goodlist, badlist = pubdirs(options.dir_name, options.hash_alg,
                                       options.host, options.mprocs, options.count)
    # print goodlist
    # print badlist
    nilog("Finished nipubdir,dir,%s,to,%s,alg,%s,processes,%d,count,%d" %
          (options.dir_name, options.host, options.hash_alg, options.mprocs, count))

    sys.exit(0)
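py_nipubdir() delegates the actual work to pubdirs(), which is not part of this record. A sketch of what such a helper plausibly looks like, fanning pubone() calls out over a multiprocessing pool; this structure is an assumption, not the library's actual implementation.

# Hypothetical reconstruction of pubdirs(); treat as an illustrative sketch.
import os
from multiprocessing import Pool

def _publish_one(args):
    # Module-level wrapper so the argument tuple can be pickled across processes.
    file_name, hash_alg, host = args
    return pubone(file_name, hash_alg, host)

def pubdirs(dir_name, hash_alg, host, mprocs, count):
    # Collect up to `count` files below dir_name (count == 0 means all files).
    names = []
    for root, _dirs, files in os.walk(dir_name):
        for fname in files:
            names.append(os.path.join(root, fname))
            if count and len(names) >= count:
                break
        if count and len(names) >= count:
            break
    # Publish each file in a pool of `mprocs` worker processes.
    pool = Pool(processes=mprocs)
    results = pool.map(_publish_one, [(n, hash_alg, host) for n in names])
    pool.close()
    pool.join()
    goodlist = [uri for uri in results if uri is not None]
    badlist = [n for n, uri in zip(names, results) if uri is None]
    return len(names), goodlist, badlist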
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish():\n pass", "def hydronn():\n from hydronn.bin import extract_data\n from hydronn.bin import extract_retrieval_data\n from hydronn.bin import train\n from hydronn.bin import retrieve\n from hydronn.bin import evaluate\n\n description = (\"HYDRONN: A NRT precipitation retrieval for Brazil.\")\n parser = argparse.ArgumentParser(prog='hydronn', description=description)\n\n subparsers = parser.add_subparsers(help='Sub-commands')\n\n extract_data.add_parser(subparsers)\n extract_retrieval_data.add_parser(subparsers)\n train.add_parser(subparsers)\n retrieve.add_parser(subparsers)\n evaluate.add_parser(subparsers)\n\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n return 1\n\n logging.basicConfig(level=\"INFO\")\n args = parser.parse_args()\n args.func(args)", "def main():\n # kickoff The Norn\n nr = kickoff()\n\n # enable SCP\n c_print(f\"Enabling SCP for NAPALM on all devices\")\n # run The Norn to enable SCP\n nr.run(task=scp_enable)\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # gather switch info\n c_print(\"Gathering device configurations\")\n # run The Norn to get info\n nr.run(task=get_info)\n # print failed hosts\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # render switch configs\n c_print(f\"Rendering IBNS dot1x configurations\")\n # run The Norn to render dot1x config\n nr.run(task=render_configs)\n # print failed hosts\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # apply switch configs\n c_print(f\"Applying IBNS dot1x configuration files to all devices\")\n # prompt to proceed\n proceed()\n # run The Norn to apply config files\n nr.run(task=apply_configs)\n # print failed hosts\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # verify dot1x configs\n c_print(f\"Verifying IBNS dot1x configuration of all devices\")\n # run The Norn to verify dot1x config\n nr.run(task=verify_dot1x, num_workers=1)\n # print failed hosts\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)\n\n # disable SCP\n c_print(f\"Disabling SCP server on all devices\")\n # prompt to proceed\n proceed()\n # run The Norn to disable SCP and save configs\n nr.run(task=scp_disable)\n c_print(f\"Failed hosts: {nr.data.failed_hosts}\")\n print(\"~\" * 80)", "def main():\n\n # Handling arguments\n args = get_args()\n debug = args.debug\n log_file = None\n if args.logfile:\n log_file = args.logfile\n nuage_enterprise = args.nuage_enterprise\n nuage_host = args.nuage_host\n nuage_port = args.nuage_port\n nuage_password = None\n if args.nuage_password:\n nuage_password = args.nuage_password\n nuage_username = args.nuage_username\n# nosslcheck = args.nosslcheck\n verbose = args.verbose\n fip_net = args.fip_net\n uplink_addr = args.uplink_addr\n uplink_mask = args.uplink_mask\n uplink_gw = args.uplink_gw\n uplink_ip = args.uplink_ip\n uplink_mac = args.uplink_mac\n gw_name = args.gw_name\n gw_port = args.gw_port\n gw_vlan = args.gw_vlan\n\n # Logging settings\n if debug:\n log_level = logging.DEBUG\n elif verbose:\n log_level = logging.INFO\n else:\n log_level = logging.WARNING\n\n logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)\n logger = logging.getLogger(__name__)\n\n # Getting user password for Nuage connection\n if nuage_password is None:\n logger.debug('No command line Nuage password received, requesting Nuage password from user')\n nuage_password = getpass.getpass(prompt='Enter password for Nuage 
host %s for user %s: ' % (nuage_host, nuage_username))\n\n try:\n # Connecting to Nuage\n logger.info('Connecting to Nuage server %s:%s with username %s' % (nuage_host, nuage_port, nuage_username))\n nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url=\"https://%s:%s\" % (nuage_host, nuage_port))\n nc.start()\n\n except Exception as e:\n logger.error('Could not connect to Nuage host %s with user %s and specified password' % (nuage_host, nuage_username))\n logger.critical('Caught exception: %s' % str(e))\n return 1\n\n nuage_user = nc.user\n\n\n # Getting the parentID of FIP subnet\n logger.debug('Getting FIP subnet parent ID')\n fip_obj = nuage_user.subnets.get_first(filter=\"address == '{0}'\".format(fip_net))\n \n # Fail if FIP subnet object was not found\n if not fip_obj:\n logger.critical('FIP subnet {0} was not found'.format(fip_net))\n return 1\n\n shared_resource_id = fip_obj.parent_id\n logger.debug('FIP parent ID is: {0}'.format(shared_resource_id))\n\n\n # Locating a gateway port and creating a new VLAN\n logger.debug('Creating a new VLAN on Gateway port')\n new_vlan = vsdk.NUVLAN(value=gw_vlan)\n gw = nuage_user.gateways.get_first(filter=\"name == '{0}'\".format(gw_name))\n\n # Fail if Gateway was not found\n if not gw:\n logger.critical('Gateway {0} was not found'.format(gw_name))\n return 1\n\n port = gw.ports.get_first(filter=\"name == '{0}'\".format(gw_port))\n\n # Fail if Port requirements are not met\n if not port:\n logger.critical('Port {0} was not found on Gateway {1}'.format(gw_port, gw_name))\n return 1\n elif not port.port_type == 'ACCESS':\n logger.critical('Port {0} is not an ACCESS port type'.format(gw_port))\n return 1\n elif not int(gw_vlan) in range(*[int(x) for x in port.vlan_range.split('-')]):\n logger.critical('Vlan {0} is not part of the port vlan range: {1}'.format(gw_vlan, port.vlan_range))\n return 1\n elif port.vlans.get_first(filter=\"value == {0}\".format(gw_vlan)):\n logger.critical('Vlan {0} already exists on port {1}'.format(gw_vlan, gw_port))\n return 1\n\n port.create_child(new_vlan)\n vlan_id = new_vlan.id\n logger.debug('New VLAN ID is: {0}'.format(vlan_id))\n\n\n # Constructing an Uplink Subnet object\n logger.debug('Creating an Uplink Subnet')\n shared_subnet = vsdk.NUSharedNetworkResource(name='uplink subnet {0}'.format(uplink_addr.replace('.','-')), \\\n description='Uplink subnet to Gateway {0}'.format(gw_name.replace('.','-')), \\\n address=uplink_addr, \\\n netmask=uplink_mask, \\\n gateway=uplink_gw, \\\n type=UPLINK_TYPE, \\\n uplink_interface_ip=uplink_ip, \\\n uplink_interface_mac=uplink_mac, \\\n uplink_gw_vlan_attachment_id=vlan_id, \\\n shared_resource_parent_id=shared_resource_id, \\\n uplink_vport_name = 'uplink vport {0} Vlan{1}'.format(gw_port, gw_vlan))\n\n # Creating a subnet on VSD\n nuage_user.create_child(shared_subnet)\n\n logger.info('Uplink Subnet is created')\n return 0", "def main():\n rospy.init_node('image_to_pointcloud')\n rospy.loginfo(\"Starting sonar image to pointcloud node...\")\n converter = ImageConverter()\n\n rate = rospy.Rate(10)\n rospy.sleep(3.0)\n while not rospy.is_shutdown():\n converter.proc_and_pub_pointcloud()\n rate.sleep()", "def main():\n\n obj = PowerStoreNfsExport()\n obj.perform_module_operation()", "def main():\n\n parser = argparse.ArgumentParser(description='Newsgroup post classifier')\n parser.add_argument('--data_dir',\n type=str,\n help=\"Data directory\")\n\n args = parser.parse_args()\n data_dir = args.data_dir\n\n 
example(data_dir=data_dir)", "def _run_neural_network(self):\n\t\tprogram = ['mpiexec','-np',self._np,'python','./scripts/runClosedLoopNn.py',self._eesFreq,self._eesAmp,self._nnStructFile,self._species,self._totSimulationTime,self._perturbationParams]\n\t\tself._neuralNetwork = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)", "def main():\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n prog=\"onnx\",\n description=\"A Command Line Interface for interacting with ONNX models\",\n epilog=\"test\\n\")\n\n parser.add_argument(\"-v\", \"--version\", action=\"store_true\",\n help=\"Print version information and quit\")\n\n # Subcommands\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n\n convert_parser = subparsers.add_parser(\"convert\",\n help=\"Convert a model from an external format to the ONNX format\")\n convert_parser.add_argument(\"-f\", \"--framework\", type=str,\n choices=convert.framework_lkp.keys(),\n help=\"The source model framework\")\n convert_parser.add_argument(\"path\", type=str,\n help=\"The path to the source model\")\n\n args = parser.parse_args()\n if args.version:\n print(__version__)\n return 0\n\n try:\n cmd = cmd_lkp[args.subcommand]\n except KeyError:\n print(\"Subcommand required\")\n return 1\n\n cmd(args)", "def main():\n parser = argparse.ArgumentParser(\n description='Automatically document the API for a ROS node.')\n parser.add_argument(\n 'nodes',\n metavar=\"node\",\n type=str, nargs='*',\n help='The name of the nodes to document. If empty, ' +\n 'all nodes will be documented')\n parser.add_argument(\n '--output-dir',\n type=str,\n default=abspath(curdir),\n help='The directory where documentation should be written')\n parser.add_argument(\n '--proxy-port',\n type=int,\n default=33133,\n help='The port to use for the ROS master proxy server')\n parser.add_argument(\n '--doc-format',\n type=str,\n default=MARKDOWN,\n help=\"The format of the documentation to generate \" +\n \"(%s)\" % \", \".join(SUPPORTED_DOC_FORMATS))\n\n args = parser.parse_args()\n\n # Grab command line arguments\n nodeNames = args.nodes\n outputDir = args.output_dir\n proxyPort = args.proxy_port\n docFormat = args.doc_format.lower()\n\n # Make sure the format is valid\n if docFormat not in SUPPORTED_DOC_FORMATS:\n print \"ERROR: unknown doc-format argument: %s\" % docFormat\n exit(2)\n\n # Ensure that the output directory exists\n if not exists(outputDir):\n print \"ERROR: the output directory does not exist: %s\" % outputDir\n exit(3)\n\n # Make sure the ROS master is running\n try:\n rosgraph.Master('/rostopic').getPid()\n except socket.error:\n print \"ERROR: failed to communicate with the ROS master!\"\n exit(4)\n\n # Create the ROS master proxy node\n masterProxy = RosMasterProxy(nodeNames, port=proxyPort)\n\n try:\n print \"Starting server...\"\n masterProxy.start()\n except (KeyboardInterrupt, SystemExit):\n pass\n\n # Document the information about the node\n print \"Documenting...\"\n masterProxy.document(outputDir, docFormat=docFormat)", "def main():\n # Grab command line args\n args = build_argparser().parse_args()\n #print(args)\n #Connect to the MQTT server\n client = connect_mqtt()\n #Perform inference on the input stream\n infer_on_stream(args, client)", "def pubone(file_name,alg,host):\n\n hash_alg=alg\n scheme=\"ni\"\n rform=\"json\"\n ext=\"{ \\\"meta\\\": { \\\"pubdirs\\\" : \\\"yep\\\" } }\"\n\n # record start time of this\n stime=time.time()\n\n # Create NIdigester for use with form 
encoder and StreamingHTTP\n ni_digester = NIdigester()\n # Install the template URL built from the scheme, the authority and the digest algorithm\n rv = ni_digester.set_url((scheme, host, \"/%s\" % hash_alg))\n if rv != ni_errs.niSUCCESS:\n nilog(\"Cannot construct valid ni URL: %s\" % ni_errs_txt[rv])\n return\n debug(ni_digester.get_url())\n # Open the file if possible\n try:\n f = open(file_name, \"rb\")\n except Exception, e :\n debug(\"Cannot open file %s: Error: %s\" %(file_name, str(e)))\n return\n # Guess the mimetype of the file\n m = magic.Magic(mime=True)\n ctype = m.from_file(file_name)\n debug(\"Content-Type: %s\" % ctype)\n if ctype is None:\n # Guessing didn't work - default\n ctype = \"application/octet-stream\"\n # Set up HTTP form data for publish request\n # Make parameter for file with digester\n octet_param = MultipartParam(\"octets\",\n fileobj=f,\n filetype=ctype,\n filename=file_name,\n digester = ni_digester)\n # Make dictionary that will dynamically retrieve ni URI when it has been made\n uri_dict = { \"generator\": octet_param.get_url,\n \"length\": (len(ni_digester.get_url()) + len(\";\") +\n ni_digester.get_b64_encoded_length())}\n msgid=str(random.randint(1, 2**64)) \n param_list = [octet_param,\n (\"URI\", uri_dict),\n (\"msgid\", msgid),\n (\"ext\", ext),\n (\"fullPut\", \"yes\"),\n (\"rform\", rform)]\n # Construct data generator and header strings\n datagen, headers = multipart_encode(param_list)\n if verbose:\n debug(\"Parameters prepared: %s\"% \"\".join(datagen))\n\n # Set up streaming HTTP mechanism - register handlers with urllib2\n # get out for now, don't do it\n opener = streaminghttp.register_openers()\n # Where to send the publish request.\n http_url = \"http://%s/netinfproto/publish\" % host\n # debug(\"Accessing: %s\" % http_url)\n # Send POST request to destination server\n fsize=os.path.getsize(file_name)\n nilog(\"%s,PUBLISH tx,file,%s,size,%d,to,%s\" % (msgid,file_name,fsize,host))\n try:\n req = urllib2.Request(http_url, datagen, headers)\n except Exception, e:\n nilog(\"%s,PUBLISH tx error\" % msgid);\n if verbose:\n nilog(\"Error: Unable to create request for http URL %s: %s\" %\n (http_url, str(e)))\n f.close()\n return\n # Get HTTP results\n try:\n http_object = urllib2.urlopen(req)\n except Exception, e:\n nilog(\"%s,PUBLISH rx error\" % msgid);\n if verbose:\n nilog(\"Error: Unable to access http URL %s: %s\" % (http_url, str(e)))\n f.close()\n return\n f.close()\n if verbose:\n nilog(\"Digester result: %s\" % octet_param.get_url())\n # Get message headers\n http_info = http_object.info()\n http_result = http_object.getcode()\n if verbose:\n debug(\"HTTP result: %d\" % http_result)\n debug(\"Response info: %s\" % http_info)\n debug(\"Response type: %s\" % http_info.gettype())\n\n # Read results into buffer\n payload = http_object.read()\n http_object.close()\n # debug(payload)\n # Report outcome\n if (http_result != 200):\n if verbose:\n debug(\"Unsuccessful publish request returned HTTP code %d\" %\n http_result) \n nilog(\"%s,PUBLISH rx error bad response status,%d\" % (msgid,http_result));\n return\n # Check content type of returned message matches requested response type\n ct = http_object.headers[\"content-type\"]\n if ct != \"application/json\":\n if verbose:\n debug(\"Error: Expecting JSON coded (application/json) \"\n \"response but received Content-Type: %s\" % ct)\n nilog(\"%s,PUBLISH rx error bad content type,%s\" % (msgid,ct));\n return\n # If output of response is expected, print in the requested format\n if verbose:\n 
nilog( \"Publication of %s successful:\" % target)\n\n # JSON cases\n try:\n json_report = json.loads(payload)\n except Exception, e:\n if verbose:\n nilog(\"Error: Could not decode JSON report '%s': %s\" % (payload,\n str(e)))\n nilog(\"%s, PUBLISH rx error bad json decode\" % msgid);\n return\n\n if verbose: \n print json.dumps(json_report, indent = 4)\n etime=time.time()\n duration=etime-stime\n niuri=json_report[\"ni\"]\n nilog(\"%s,PUBLISH rx fine,ni,%s,size,%d,time,%10.10f\" % (msgid,niuri,fsize,duration*1000))\n\n return niuri", "def main():\n\n ericsson_connect = {\n \"device_type\": \"ericsson_ipos\",\n \"ip\": \"1.1.1.1\",\n \"username\": \"admin\",\n \"password\": \"admin\",\n }\n\n net_connect = ConnectHandler(**ericsson_connect)\n output = net_connect.send_command(\"show ip int brief\")\n print(output)\n\n output_commit = net_connect.commit()\n print(output_commit)", "def main():\n \n \n # Grab command line args\n args = build_argparser().parse_args()\n # Connect to the MQTT server\n client = connect_mqtt()\n # Perform inference on the input stream\n infer_on_stream(args, client)", "def main():\n # Grab command line args\n args = build_argparser().parse_args()\n #print(\"finished argparser\")\n\n # Connect to the MQTT server\n client = connect_mqtt()\n #print(\"finished mqtt connect\")\n\n # Perform inference on the input stream\n infer_on_stream(args, client)", "def main():\n params = demisto.params()\n command = demisto.command()\n args = demisto.args()\n base_url = params.get('url')\n insecure = not params.get('insecure', False)\n proxy = params.get('proxy', False)\n\n try:\n client = Client(\n base_url=base_url,\n verify=insecure,\n proxy=proxy,\n )\n if command == 'test-module':\n client.get_ips(params, 10)\n demisto.results('ok')\n if command == 'nucleon-get-indicators':\n type_ = args.get('type')\n if type_ == 'hash':\n return_results(get_hashes_command(client, args))\n elif type_ == 'url':\n return_results(get_urls_command(client, args))\n else:\n return_results(get_indicators_command(client, params, args))\n elif command == 'fetch-indicators':\n # This is the command that initiates a request to the feed endpoint and create new indicators objects from\n # the data fetched. If the integration instance is configured to fetch indicators, then this is the command\n # that will be executed at the specified feed fetch interval.\n ips, urls, hashes = fetch_indicators_command(client, params)\n for iter_ in batch(ips, batch_size=2000):\n demisto.createIndicators(iter_)\n for iter_ in batch(urls, batch_size=2000):\n demisto.createIndicators(iter_)\n for iter_ in batch(hashes, batch_size=2000):\n demisto.createIndicators(iter_)\n else:\n raise NotImplementedError(f'Command {command} is not implemented.')\n except Exception as e:\n demisto.error(traceback.format_exc()) # Print the traceback\n err_msg: str = str(e)\n if 'Error in API call [401]' in err_msg:\n err_msg = 'Unauthorized. 
Make sure your credentials are correct.'\n return_error(f'Failed to execute {command} command.\\nError:\\n{err_msg}')", "def post(self, request, nnid):\n try:\n\n input_parm = request.data\n max_nnid = NNCommonManager().get_nn_id_max() + 1\n if nnid == \"new\":\n nnid = \"nn\" + str(max_nnid).zfill(8)\n else:\n return_data = NNCommonManager().get_nn_id_info(nnid)\n if return_data != []:\n return Response(json.dumps(nnid+\" Network ID already exists\"))\n input_parm['nn_id'] = nnid\n if input_parm.get('automl_parms') == None:\n input_parm['automl_parms'] = {}\n if input_parm.get('automl_runtime') == None:\n input_parm['automl_runtime'] = {}\n if input_parm.get('automl_stat') == None:\n input_parm['automl_stat'] = {}\n\n input_parm_s = {}\n input_parm_s['id'] = max_nnid\n input_parm_s['nn_id'] = nnid\n return_data = NNCommonManager().insert_nn_info(input_parm, input_parm_s)\n # Data node name\n graph = NNCommonManager.get_nn_node_name(None, return_data)\n\n return_param = {}\n return_param['nn_id'] = return_data\n return_param['graph'] = graph\n\n return Response(json.dumps(return_param))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))\n finally:\n graph = NNCommonManager().get_nn_node_name(nnid)\n for net in graph:\n if net['fields']['graph_node'] in ['netconf_data','eval_data']:\n utils.get_source_path(nnid, None, net['fields']['graph_node_name'])", "def main():\n args, config = parse_args()\n\n \"\"\"\n Log on wandb for track of experiments\n \"\"\"\n wandb.init(project=\"adaptive-finetuning-resnet\", name=f'Inference_{config.VERSION}', config=config)\n\n \"\"\"\n Set config GPUs and torch cuda device\n \"\"\"\n config.GPUS = str(0)\n torch.cuda.set_device(0)\n\n \"\"\"\n Create the model, put it to GPU and then create dataloader\n \"\"\"\n model = eval(config.MODULE)(config=config.NETWORK)\n model = model.cuda()\n\n val_loader = make_dataloader(config, mode='val', distributed=False)\n\n \"\"\"\n Load the model with pretrained weights\n \"\"\"\n assert config.NETWORK.PRETRAINED_MODEL != '', \"For inference, there must be pre-trained weights\"\n\n pretrain_state_dict = torch.load(config.NETWORK.PRETRAINED_MODEL, map_location = lambda storage, loc: storage)['net_state_dict']\n smart_model_load(model, pretrain_state_dict, loading_method=config.NETWORK.PRETRAINED_LOADING_METHOD)\n\n \"\"\"\n Pass the model and val loader for validation\n \"\"\"\n print(\"Inference started!!\")\n val_accuracy = do_validation(config, model, val_loader)\n print(f\"Inference complete!!\\nAccuracy:{val_accuracy}\")\n\n wandb.log({'Accuracy': val_accuracy})", "def main():\r\n mvip, user, user_pass, mvip_node = get_inputs()\r\n payload = build_payload()\r\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\r\n response_json = connect_cluster(headers, url, payload)\r\n paired_vols = get_replication_status(response_json)\r\n payload = get_vol_stats(paired_vols)\r\n response_json = connect_cluster(headers, url, payload)\r\n parse_volume_stats(paired_vols, response_json)", "def _run_ci_publish():\n _run_install(False)\n _run_tests(False)\n _run_publish(True)", "def main():\n # Grab command line args\n args = build_argparser().parse_args()\n # Connect to the MQTT server\n client = connect_mqtt()\n # Perform inference on the input stream\n infer_on_stream(args, client)", "def main():\n # Grab command line args\n args = build_argparser().parse_args()\n # Connect to the MQTT server\n client = connect_mqtt()\n # Perform inference on the 
input stream\n infer_on_stream(args, client)", "def main(args):\n\n\t##############################################################################\n\t######## Pass user command line arguments to setup.py which will #############\n\t############# initialise some parameters for the analysis ###################\n\t##############################################################################\n\tinit_ = setup.initialise_user_input(args)\n\n\t##############################################################################\n\t######## Define system_ which is the object, of class nanoCISC, ##############\n\t######## which contains all relevant information about your nanoparticle ####\n\t##############################################################################\n\tsystem_ = nano_cisc.nanoCISC(init_.nano_particle, init_.anchors, init_.beta, init_.calcrange, \n init_.curves, init_.targetinc, init_.density) \n\t# initialise system_ as nanoCISC class here ^^^\n\n\t# If density is being calculated, define grid from grid class\n\tif args['density']:\n\t\tgrid=grids.grid(system_)\n\n\n\t##############################################################################\n\t################ Process trajectory, frame by frame ##########################\n\t##############################################################################\n\n\tfor ts in init_.u.trajectory: # loop through trajectory frames here \n\t\tprint \"Processing snapshot %d \" % (ts.frame)\n\n\t\t# Array for calculating intrinsic density is initialised to {0}\n\t\tintrinsic_count=np.zeros( ( np.ceil( 3 * system_.calculation_range).astype(np.int) ,len(system_.density) ), dtype = np.float32) \n\n\t\t# Array that stores the instantaneous volume of each spatial interval is initialised to {0}\n\t\tvolume_at_dist=np.zeros( ( np.ceil( 3 * system_.calculation_range).astype(np.int) ,len(system_.density) ), dtype = np.float32) \n\n\t\t# Centre of mass position is updated\n\t\tsystem_.update_com()\n\n\t\t# Vectors describing the anchor points are updated \n\t\tsystem_.update_anchors() \n\n\t\t# Nanoparticle depth values are updated\n\t\tsystem_.update_surface() \t\n\n\t\tif args['XYZsurface']:\n\t\t\tsystem_.write_surface(init_.f_visualise_surface) # write micelle surface to xyz file\n \n \t\tif args['density']: \n \t\t\tgrid.update_volume_estimate(volume_at_dist, system_) # volume estimate is updated for snapshot\n\t\t\tsystem_.calculate_density(intrinsic_count, volume_at_dist) # calculate density here\n\n\t\tsystem_.frames_processed += 1\n\n\t##################################\n\t##### Print results to files #####\n\t##################################\n\tif args['density']:\n\t\tsystem_.print_intrinsic_density(init_.f_intrinsic_density_out)\n\t\tsystem_.print_radial_density()\n\n\n\tprint \"Program finished successfully!!!\\n\"", "def main():\n run_nutanix_vm_creation_module()", "def execute(self, nodenet, nodes, netapi):\n pass # pragma: no cover", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input-ontology',\n default=config_test.config[\"msh_test_onto\"])\n parser.add_argument('-s', '--signature')\n parser.add_argument('-f', '--format-name', default=None)\n parser.add_argument('-o', '--output-file', default=\"ontology/output.owl\")\n parser.add_argument('-d', '--max-depth', default=10)\n parser.add_argument('-l', '--locality', default='top')\n\n args = parser.parse_args()\n\n g = Graph().parse(args.input_ontology, format=args.format_name)\n resource = entity_mapper.match_entity(args.signature, g)\n ontomodule = 
extract_module.extract_module(\n [resource], g, locality=args.locality, max_depth=args.max_depth)\n\n with open(args.output_file, \"w\") as f:\n ontomodule.serialize(f)", "def main():\n print(\"\\n\\n\")\n print(\"*\" * 50)\n print(f\"[LOG] Printing command line arguments [{', '.join(sys.argv)}]\")\n check_file_name()\n print(\"*\" * 50)\n #http_request_pipeline(123,\"HEAD / HTTP/1.0\\r\\nHost: www.google.com\\r\\n\\r\\n\")\n # This argument is optional, defaults to 18888\n proxy_port_number = get_arg(1, 18888)\n entry_point(proxy_port_number)", "def main(args):\n t1 = time.time()\n # Most of the URL fetching libraries will want an explicit protocol.\n # Allow loose commandline args\n base = protocolise(args.domain)\n\n seen = set()\n sitemap = networkx.DiGraph()\n\n #fetch_website(sitemap, seen, base, base)\n fetch_website_gevent(sitemap, set([base]), base, [base])\n\n outfile = output(sitemap, deprotocolise(base))\n\n t2 = time.time()\n print DONE.format(outfile)\n if args.bench:\n bench_report(t1, t2)\n\n return 0", "def publish_updates():\n run_subprocess(['osg-batch-update'])", "def main():\n\tnews = Googlenews()\n\tnews.gnews()" ]
[ "0.5767328", "0.5661973", "0.5631436", "0.55480254", "0.54211825", "0.54131734", "0.5406113", "0.53765", "0.53701484", "0.53538346", "0.5352672", "0.5315707", "0.5301884", "0.5267521", "0.5263778", "0.5224786", "0.52056885", "0.5168929", "0.51480085", "0.5146815", "0.5140904", "0.5140904", "0.5121054", "0.51130325", "0.5103974", "0.5093437", "0.5056653", "0.5055913", "0.50542855", "0.50372607" ]
0.5670017
1
Generate cached and grouped activity items for personal and universal news.
def build_activity(self, user_id):
    activity_ids = self.raw_activity_links_collection.get_activity_ids_for_user(user_id)
    friend_ids = self.friends_collection.getFriends(user_id, limit=None)

    personal_items = self.raw_activity_items_collection.get_activity_items(activity_ids)
    universal_verbs = ['todo', 'follow', 'like', 'comment']
    universal_items = self.raw_activity_items_collection.get_activity_for_users(friend_ids, verbs=universal_verbs)

    def build_activity_keys(items):
        keys = {}
        keys_order = []
        for item in items:
            # Generate unique key
            key = '%s::%s' % (item.verb, item.activity_id)
            # Generate grouped key
            if item.verb == 'todo':
                key = 'todo::%s::%s' % (item.objects.entity_id, item.timestamp.created.isoformat()[:10])
            elif item.verb == 'follow':
                key = 'follow::%s::%s' % (item.objects.user_id, item.timestamp.created.isoformat()[:10])
            elif item.verb == 'like' or item.verb == 'credit' or item.verb.startswith('action_'):
                key = '%s::%s::%s' % (item.verb, item.objects.stamp_id, item.timestamp.created.isoformat()[:10])
            elif item.verb in set(['comment', 'reply', 'mention']):
                if item.objects.comment_id is not None:
                    key = 'comment::%s' % item.objects.comment_id
                else:
                    key = 'mention::%s' % item.objects.stamp_id

            # Apply keys
            if key in keys:
                # Existing item
                if item.subject is not None and item.subject not in keys[key].subjects:
                    keys[key].subjects = list(keys[key].subjects) + [item.subject]
                    if item.benefit is not None:
                        if keys[key].benefit is None:
                            keys[key].benefit = 0
                        keys[key].benefit += 1
                    if item.timestamp.created > keys[key].timestamp.created:
                        keys[key].timestamp.created = item.timestamp.created
                else:
                    logs.warning("Missing subjects! %s" % item)
            else:
                # New item
                ### TODO (PHASE II): Better handling of schema conversion
                activity = Activity()
                activity.benefit = item.benefit
                activity.timestamp = item.timestamp
                if item.subject is not None:
                    activity.subjects = [item.subject]
                activity.verb = item.verb
                objects = ActivityObjectIds()
                if item.objects.user_id is not None:
                    objects.user_ids = [item.objects.user_id]
                if item.objects.stamp_id is not None:
                    objects.stamp_ids = [item.objects.stamp_id]
                if item.objects.entity_id is not None:
                    objects.entity_ids = [item.objects.entity_id]
                if item.objects.comment_id is not None:
                    objects.comment_ids = [item.objects.comment_id]
                activity.objects = objects
                activity.source = item.source
                if item.header is not None:
                    activity.header = item.header
                if item.body is not None:
                    activity.body = item.body
                if item.footer is not None:
                    activity.footer = item.footer
                keys[key] = activity
                keys_order.append(key)

        return keys, keys_order

    personal_keys, personal_keys_order = build_activity_keys(personal_items)
    universal_keys, universal_keys_order = build_activity_keys(universal_items)

    mark = str(int(time.time() * 1000000))

    """
    Generate a "sort" value that will be used in the db to order our grouped,
    cached items. This is composed of a timestamp for versioning concatenated
    with a value for ordering within that timestamp. Queries to the database
    will return data sorted by this value in descending order, and we can
    include a check in retrieval to verify that the timestamp is unique --
    this is a hack to get around the fact that Mongo does not allow for atomic
    insertion and deletion. It's theoretically possible for two sets of data
    to have the same timestamp, so we should add an additional hash to
    decrease the odds of duplication. I'm not terribly concerned about this in
    the short term, though. Also, note that we've effectively capped the
    number of grouped, cached items that we can generate at 10,000.
    """

    i = 9999
    personal_result = []
    for key in personal_keys_order:
        item = personal_keys[key]
        sort = ("%s|%04d" % (mark, i)).zfill(22)
        personal_result.append((item, sort))
        i -= 1

    i = 9999
    universal_result = []
    for key in universal_keys_order:
        if key not in personal_keys:
            item = universal_keys[key]
            sort = ("%s|%04d" % (mark, i)).zfill(22)
            universal_result.append((item, sort))
            i -= 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activities_to_jsonfeed(activities, actor=None, title=None, feed_url=None,\n home_page_url=None):\n try:\n iter(activities)\n except TypeError:\n raise TypeError('activities must be iterable')\n\n if isinstance(activities, (dict, str)):\n raise TypeError('activities may not be a dict or string')\n\n def image_url(obj):\n return util.get_first(obj, 'image', {}).get('url')\n\n def actor_name(obj):\n return obj.get('displayName') or obj.get('username')\n\n if not actor:\n actor = {}\n\n items = []\n for activity in activities:\n obj = as1.get_object(activity) or activity\n if obj.get('objectType') == 'person':\n continue\n author = as1.get_object(obj, 'author')\n content = microformats2.render_content(\n obj, include_location=True, render_attachments=True,\n # Readers often obey CSS white-space: pre strictly and don't even line wrap,\n # so don't use it. https://github.com/snarfed/granary/issues/456\n white_space_pre=False)\n obj_title = obj.get('title') or obj.get('displayName')\n item = {\n 'id': obj.get('id') or obj.get('url'),\n 'url': obj.get('url'),\n 'image': image_url(obj),\n 'title': obj_title if mf2util.is_name_a_title(obj_title, content) else None,\n 'summary': obj.get('summary'),\n 'content_html': content,\n 'date_published': obj.get('published'),\n 'date_modified': obj.get('updated'),\n 'author': {\n 'name': actor_name(author),\n 'url': author.get('url'),\n 'avatar': image_url(author),\n },\n 'attachments': [],\n }\n\n for att in obj.get('attachments', []):\n url = util.get_url(att, 'stream') or util.get_url(att, 'image')\n mime = mimetypes.guess_type(url)[0] if url else None\n if (att.get('objectType') in ATTACHMENT_TYPES or\n mime and mime.split('/')[0] in ATTACHMENT_TYPES):\n item['attachments'].append({\n 'url': url or '',\n 'mime_type': mime,\n 'title': att.get('title'),\n })\n\n if not item['content_html']:\n item['content_text'] = ''\n items.append(item)\n\n return util.trim_nulls({\n 'version': 'https://jsonfeed.org/version/1',\n 'title': title or actor_name(actor) or 'JSON Feed',\n 'feed_url': feed_url,\n 'home_page_url': home_page_url or actor.get('url'),\n 'author': {\n 'name': actor_name(actor),\n 'url': actor.get('url'),\n 'avatar': image_url(actor),\n },\n 'items': items,\n }, ignore='content_text')", "def process_content(self, channel) -> dict:\n\n self.print_if_verbose(f\"Method 'process_content' is working:\")\n\n if self.limit is None or self.limit >= self.news_amount:\n self.limit = self.news_amount\n\n rss_feed = {}\n rss_feed[\"Feed\"] = channel.findtext('title')\n rss_feed[\"Description\"] = channel.findtext('description')\n rss_feed[\"Link\"] = channel.findtext('link')\n rss_feed[\"Language\"] = channel.findtext('language')\n rss_feed[\"News\"] = []\n\n append_news_to_rss_feed = 0\n\n self.print_if_verbose(f\"Adding data to the work dict 'rss_feed'...\")\n\n POSSIBLE_IMAGE_TAGS = (\"content\", \"thumbnail\", \"image\")\n POSSIBLE_IMAGE_ATTR = (\"url\", \"href\")\n\n for item in channel.iterfind(\"item\"):\n child_news = {}\n child_news[\"Title\"] = item.findtext(\"title\")\n child_news[\"Link\"] = item.findtext(\"link\")\n child_news[\"PubDate\"] = self.get_formatted_date(item.findtext(\"pubDate\"))\n child_news[\"Source\"] = item.findtext(\"source\")\n child_news[\"ImageLink\"] = None\n child_news[\"ImageCacheName\"] = None\n\n for tag in POSSIBLE_IMAGE_TAGS:\n for item_field in item:\n if tag in item_field.tag:\n for attr in POSSIBLE_IMAGE_ATTR:\n if attr in item_field.attrib:\n child_news[\"ImageLink\"] = item_field.attrib[attr]\n 
child_news[\"ImageCacheName\"] = \\\n f\"{''.join(char for char in child_news['Link'] if char.isalnum())}.jpg\"\n break\n if child_news[\"ImageLink\"]:\n break\n if child_news[\"ImageLink\"]:\n break\n\n rss_feed[\"News\"].append(child_news)\n\n append_news_to_rss_feed += 1\n if append_news_to_rss_feed == self.limit:\n break\n\n self.print_if_verbose(\n f\"{append_news_to_rss_feed} news were added. \\n\"\n f\"Method 'process_content' is finished. \\n\"\n )\n\n return rss_feed", "def get_activities():\n pass", "def systray_get_activities(self):\n activities = super(Users, self).systray_get_activities()\n for activity in activities:\n if activity.get('model') == 'mailing.mailing':\n activities.remove(activity)\n query = \"\"\"SELECT m.mailing_type, count(*), act.res_model as model, act.res_id,\n CASE\n WHEN %(today)s::date - act.date_deadline::date = 0 Then 'today'\n WHEN %(today)s::date - act.date_deadline::date > 0 Then 'overdue'\n WHEN %(today)s::date - act.date_deadline::date < 0 Then 'planned'\n END AS states\n FROM mail_activity AS act\n JOIN mailing_mailing AS m ON act.res_id = m.id\n WHERE act.res_model = 'mailing.mailing' AND act.user_id = %(user_id)s \n GROUP BY m.mailing_type, states, act.res_model, act.res_id;\n \"\"\"\n self.env.cr.execute(query, {\n 'today': fields.Date.context_today(self),\n 'user_id': self.env.uid,\n })\n activity_data = self.env.cr.dictfetchall()\n \n user_activities = {}\n for act in activity_data:\n if not user_activities.get(act['mailing_type']):\n if act['mailing_type'] == 'sms':\n module = 'mass_mailing_sms'\n name = _('SMS Marketing')\n else:\n module = 'mass_mailing'\n name = _('Email Marketing')\n icon = module and modules.module.get_module_icon(module)\n res_ids = set()\n user_activities[act['mailing_type']] = {\n 'name': name,\n 'model': 'mailing.mailing',\n 'type': 'activity',\n 'icon': icon,\n 'total_count': 0, 'today_count': 0, 'overdue_count': 0, 'planned_count': 0,\n 'res_ids': res_ids,\n }\n user_activities[act['mailing_type']]['res_ids'].add(act['res_id'])\n user_activities[act['mailing_type']]['%s_count' % act['states']] += act['count']\n if act['states'] in ('today', 'overdue'):\n user_activities[act['mailing_type']]['total_count'] += act['count']\n\n for mailing_type in user_activities.keys():\n user_activities[mailing_type].update({\n 'actions': [{'icon': 'fa-clock-o', 'name': 'Summary',}],\n 'domain': json.dumps([['activity_ids.res_id', 'in', list(user_activities[mailing_type]['res_ids'])]])\n })\n activities.extend(list(user_activities.values()))\n break\n\n return activities", "def get_activities(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0):\n raise NotImplementedError()", "def select_news(self):\n data = self.soup.findAll('item')\n for item in data:\n news_data = dict()\n for tag in ['title', 'link']:\n news_data[tag] = item.find(tag).get_text()\n\n news_data['pubDate'] = parse(item.find('pubDate').get_text())\n media = item.find('media:content')\n\n if media:\n news_data['media'] = media.get('url')\n else:\n news_data['media'] = None\n\n yield news_data", "def createFeedItems(self):\r\n for item in self.item_data:\r\n self.initCreateFeedItem(item)\r\n self.createItem(item)", "def run(self,dispatcher,tracker,domain): \n data=newsapi(\"us\")\n leng=len(data)\n for i in range(leng):\t\n gt = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"generic\",\n \"elements\": [\n {\n \"title\": data['articles'][i]['title'],\n 
\"image_url\":data['articles'][i]['urlToImage'],\n \"subtitle\": data['articles'][i]['description'],\n \"buttons\": [\n {\n \"type\": \"web_url\",\n \"url\": data['articles'][i]['url'],\n \"title\": \"Read More\"\n },\n ]\n },\n ]\n }\n }\n } \n dispatcher.utter_custom_json(gt)\n return []", "def run(self,dispatcher,tracker,domain): \n data=newsapi(\"in\")\n leng=len(data)\n for i in range(leng):\t\n gt = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"generic\",\n \"elements\": [\n {\n \"title\": data['articles'][i]['title'],\n \"image_url\":data['articles'][i]['urlToImage'],\n \"subtitle\": data['articles'][i]['description'],\n \"buttons\": [\n {\n \"type\": \"web_url\",\n \"url\": data['articles'][i]['url'],\n \"title\": \"Read More\"\n },\n ]\n },\n ]\n }\n }\n } \n dispatcher.utter_custom_json(gt)\n return []", "def get_listing():\n\n result_items = []\n\n rss_data = urllib.request.urlopen(ActivityURL)\n rss_xml = xml.dom.minidom.parse(rss_data)\n\n channel = rss_xml.getElementsByTagName('channel')[0]\n items = channel.getElementsByTagName('item')\n for item in items:\n # Most of these are hackish, but a result of using the RSS\n # feed instead of something nicer like a JSON API. This\n # listing method is specifically isolated so we can easily\n # swap out the implementation later.\n asset_id = item.getElementsByTagName('guid')[0].childNodes[0].data.split('/')[-1]\n img_url = item.getElementsByTagName('description')[0].childNodes[0].data\n # Get part after start of img src attribute\n split_href = img_url.split('src=\"', 1)[1]\n # Get part before closing quote\n img_url = split_href.split('\"', 1)[0]\n # FIXME\n zip_url = ''\n result_items.append( Asset(asset_id, img_url, zip_url) )\n\n return result_items", "def generate_activity(user):\n data = {}\n random_index = random.randint(0, 5)\n data['uid'] = user[0]\n data['username'] = user[1]\n data['action'] = actions[random_index]\n data['ts'] = datetime.datetime.now().isoformat()\n return json.dumps(data)", "def run(self,dispatcher,tracker,domain): \n data=newsapi(\"au\")\n leng=len(data)\n for i in range(leng):\t\n gt = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"generic\",\n \"elements\": [\n {\n \"title\": data['articles'][i]['title'],\n \"image_url\":data['articles'][i]['urlToImage'],\n \"subtitle\": data['articles'][i]['description'],\n \"buttons\": [\n {\n \"type\": \"web_url\",\n \"url\": data['articles'][i]['url'],\n \"title\": \"Read More\"\n },\n ]\n },\n ]\n }\n }\n } \n dispatcher.utter_custom_json(gt)\n return []", "def get_content_from_cache(self):\n\n rss_feed = []\n news_to_show = 0\n\n try:\n self.print_if_verbose(\n f\"Method 'get_content_from_cache' is working: \\n\"\n f\"Trying to get content from cache...\"\n )\n os.chdir(\"cache\")\n except Exception as error:\n print(f\"{error}: cache does not exists!\")\n return\n\n try:\n os.chdir(\"image_cache\")\n self.full_path_to_image_cache = os.getcwd()\n os.chdir(\"..\")\n except:\n pass\n\n try:\n with open(\"rss_reader_cache.json\", \"r\", encoding=\"utf-8\") as cache_file:\n data_from_cache = json.load(cache_file)\n self.print_if_verbose(f\"Content from cache has been received successfully. \\n\")\n except Exception as error:\n self.print_if_verbose(f\"{error}: cache file does not exist! 
\\n\")\n return\n\n if self.source:\n for feed in data_from_cache:\n if self.source in feed.keys():\n for news in feed[self.source]:\n if news[\"PubDate\"] == str(self.date):\n rss_feed.append(news)\n news_to_show += 1\n if self.limit and news_to_show == self.limit:\n break\n if self.limit and news_to_show == self.limit:\n break\n else:\n for channel in data_from_cache:\n for feed_link in channel:\n for news in channel[feed_link]:\n if news[\"PubDate\"] == str(self.date):\n rss_feed.append(news)\n news_to_show += 1\n if self.limit and news_to_show == self.limit:\n break\n if self.limit and news_to_show == self.limit:\n break\n\n os.chdir(\"..\")\n\n self.news_amount = len(rss_feed)\n\n if self.news_amount == 0:\n print(f\"There is no news in cache for specified date. \\n\")\n else:\n self.print_if_verbose(f\"There is {self.news_amount} news in cache for specified date. \\n\")\n\n self.print_if_verbose(f\"Method 'get_content_from_cache' is finished. \\n\")\n\n return rss_feed", "def all_activity(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"activity\")\n\t\ttmpl = lookup.get_template(\"activity.html\")\n\t\treturn (tmpl.render(activity=activity_all))", "def getItems(self): \n if self.itemCount > 0:\n \n site = getSite()\n \n \n # Make string path relative to the site root\n # E.g. string path \"news\" becomes \"/yoursiteid/news\"\n site_path = site.getPhysicalPath();\n \n path = \"/\".join(site_path) + \"/\" + self.path \n \n types = [self.itemPortalType]\n \n items = []\n \n #if self.itemPortalType2 != None:\n # types.append(self.itemPortalType2) \n \n #print \"Querying by:\" + type + \" \" + path\n content_by_type = self.context.portal_catalog(path={ \"query\": path, \"depth\" :9 }, \n portal_type=self.itemPortalType, \n sort_on=\"created\", \n sort_order=\"reverse\")[0:self.itemCount]\n\n content_by_type = list(content_by_type)\n \n if self.itemPortalType2 != None:\n content_by_type2 = self.context.portal_catalog(path={ \"query\": path, \"depth\" :9 }, \n portal_type=self.itemPortalType2, \n sort_on=\"created\", \n sort_order=\"reverse\")[0:self.itemCount]\n\n content_by_type += list(content_by_type2)\n\n \n items += [ brain.getObject() for brain in content_by_type ]\n else:\n items = []\n \n #if self.title == \"Daily deals\":\n # import pdb ; pdb.set_trace()\n \n # XXX: custom hack for deals\n def is_expired_deal(i):\n \"\"\"\n \"\"\"\n if hasattr(i, \"validUntil\"):\n now = datetime.datetime.utcnow()\n if now > i.validUntil:\n return True\n \n return False\n \n items = [ i for i in items if not is_expired_deal(i) ]\n \n return items", "def news(self):\n\n # Get articles with search term, if available, from each News API source\n news_api_articles = pd.DataFrame()\n\n q = urllib.parse.quote(\" OR \".join(self.search_terms), safe='')\n\n response = requests.get(\"https://newsapi.org/v2/everything?q=\" + q + \"&from=\" + datetime.now().strftime(\n \"%Y-%m-%d\") + \"&sortBy=popularity&pageSize=100&apiKey=\" + self.__news_api_key)\n\n if response.status_code == 200:\n data = json.loads(response.text)\n\n source_articles = []\n\n for article in data['articles']:\n source_articles.append([article['title'],\n article['description'],\n article['url'],\n article['publishedAt']])\n\n source_articles = pd.DataFrame(source_articles, columns=['title', 'description', 'url', 'publishedAt'])\n news_api_articles = pd.concat([news_api_articles, source_articles])\n\n news_api_articles = news_api_articles.reset_index(drop='True')\n\n news_api_articles['publishedAt'] = 
news_api_articles['publishedAt'].apply(pd.to_datetime)\n\n news_api_articles = news_api_articles.fillna(' ')\n\n term_in_title = news_api_articles['title'].apply(self.any_term)\n\n news_api_articles = news_api_articles[term_in_title]\n\n if (len(news_api_articles) > 10):\n news_api_articles = news_api_articles[0:10]\n\n else:\n print(\"News API failed to return any items\")\n\n # Create shortened links using bitly if access token is provided\n if self.__bitly_access_token != '':\n\n bitly_urls = []\n\n for index, article in news_api_articles.iterrows():\n url = article['url']\n bitly_response = requests.get(\"https://api-ssl.bitly.com/v3/shorten\",\n params={'longUrl': url, 'access_token': self.__bitly_access_token})\n\n if bitly_response.status_code == 200:\n data = json.loads(bitly_response.text)\n bitly_urls.append(data['data']['url'])\n\n news_api_articles['url'] = bitly_urls\n\n # Store final list to TwitterBot object\n self.list = news_api_articles\n\n return", "def get_activities_dictionary(self):\r\n activities_dict_list = list()\r\n activities = self.get_specific_node_list('activity')\r\n for activity in activities:\r\n activities_dict = dict()\r\n activity_name = None\r\n category = None\r\n for key, val in activity.attrib.iteritems():\r\n if \"}name\" in key:\r\n activity_name = val.split(\".\")[-1]\r\n break\r\n if activity_name:\r\n intent_filter_node = self.get_specific_node_list('intent-filter', root_node=activity)\r\n if len(intent_filter_node) == 1:\r\n categories_nodes = self.get_specific_node_list('category', root_node=intent_filter_node[0])\r\n category = self.get_category_value(categories_nodes)\r\n else:\r\n category = None\r\n activities_dict[\"name\"] = activity_name\r\n activities_dict[\"category\"] = category\r\n activities_dict_list.append(activities_dict)\r\n return activities_dict_list", "def run(self,dispatcher,tracker,domain): \n data=sourcenews(\"bbc-news\")\n leng=len(data)\n for i in range(leng):\t\n gt = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"generic\",\n \"elements\": [\n {\n \"title\": data['articles'][i]['title'],\n \"image_url\":data['articles'][i]['urlToImage'],\n \"subtitle\": data['articles'][i]['description'],\n \"buttons\": [\n {\n \"type\": \"web_url\",\n \"url\": data['articles'][i]['url'],\n \"title\": \"Read More\"\n },\n ]\n },\n ]\n }\n }\n }\n dispatcher.utter_custom_json(gt) \n return []", "def get_context_data(self, **kwargs):\n context = super(CreatePageView, self).get_context_data(**kwargs)\n activities = sorted(personalize_activities(self.request.user),\n key=lambda k: k['source_name'].lower())\n sources = OrderedDict([\n (activity['source_name'], activity) for activity in activities if\n 'data_source' in activity and activity['data_source']\n ])\n context.update({'sources': sources})\n return context", "def joinData(item_list):\n\n t_1 = datetime.now()\n news_dict = {}\n ln_item_list = len(item_list)\n for i, r in enumerate(item_list):\n str_date = r[\"date\"].strftime(\"%Y-%m\")\n if str_date not in news_dict:\n news_dict[str_date] = \"\"\n news_dict[str_date] += \" %s\" % r[\"text\"]\n print (i * 100.) 
/ ln_item_list, datetime.now() - t_1\n return news_dict", "def create_activity_all(self, f_output='activity_all.txt'):\n list_tuple = []\n epoch = datetime.datetime.utcfromtimestamp(0) \n\n # For each records_*.csv, excluding records_{0,1,2,3,4}.csv\n regex = re.compile('records_.\\.csv')\n for filename in os.listdir(self.dir_name):\n if not re.match(regex, filename):\n if fnmatch.fnmatch(filename, 'records_*.csv'):\n path_to_file = self.dir_name + \"/\" + filename\n ret = subprocess.check_output(['wc', '-l', path_to_file])\n num = int(ret.split(' ')[0])\n # If follower has activity\n if num > 1:\n follower_id = filename.split('_')[1].split('.')[0]\n # Extract id of follower, get the anonymous number\n if follower_id in self.map_userid_number:\n follower_num = self.map_userid_number[follower_id]\n # Parse through file\n f = open(path_to_file,'r')\n # Skip first line\n f.readline()\n for line in f:\n line_split = line.split(',')\n # Extract the time of post, create the pair\n # year-month-day-hour-min-second (UTC - 4)\n date_and_time = line_split[1]\n dt_local = datetime.datetime.strptime(date_and_time, '%Y-%m-%d-%H:%M:%S')\n dt_utc = dt_local + datetime.timedelta(hours=4)\n seconds = (dt_utc - epoch).total_seconds()\n list_tuple.append((seconds,follower_num)) \n # Now append the bot activity\n for bot_id in range(0,5):\n print bot_id\n filename = \"records_%d.csv\" % bot_id\n path_to_file = self.dir_name + \"/\" + filename\n f = open(path_to_file, 'r')\n # Skip first line\n f.readline()\n for line in f:\n line_split = line.split(',')\n # Extract time of post, create the pair\n date_and_time = line_split[1]\n dt_local = datetime.datetime.strptime(date_and_time, '%Y-%m-%d-%H-%M-%S')\n dt_utc = dt_local + datetime.timedelta(hours=4)\n seconds = (dt_utc - epoch).total_seconds()\n list_tuple.append((seconds, bot_id+1))\n\n # Sort all pairs based on time of post\n list_tuple.sort()\n # Write f_output\n f_write = open(f_output, 'w')\n for t in list_tuple:\n f_write.write(\"%d %d\\n\" % (t[0], t[1]))\n f_write.close()", "def construct_personal_group_data(content_list_of_dictionaries, own_id, own_uname, their_uname, own_avurl, their_avurl):\n\tfor dictionary in content_list_of_dictionaries:\n\t\tis_own_blob = True if dictionary['id'] == str(own_id) else False \n\t\twhich_blob = dictionary.get(\"which_blob\",None) # identifies 'nor' (normal), 'res' (response), 'action', 'notif' (notification) blobs\n\t\tif is_own_blob:\n\t\t\tdictionary[\"username\"] = own_uname\n\t\t\tdictionary[\"av_url\"] = own_avurl\n\t\telse:\n\t\t\tdictionary[\"username\"] = their_uname\n\t\t\tdictionary[\"av_url\"] = their_avurl\n\t\tif which_blob == 'res':\n\t\t\tdictionary[\"res_time\"] = float(dictionary[\"res_time\"])\n\t\t\tif is_own_blob:\n\t\t\t\tdictionary[\"t_username\"] = their_uname \n\t\t\t\tdictionary[\"t_av_url\"] = their_avurl\n\t\t\telse:\n\t\t\t\tdictionary[\"t_username\"] = own_uname\n\t\t\t\tdictionary[\"t_av_url\"] = own_avurl\n\t\telif which_blob in ('action','notif'):\n\t\t\tif is_own_blob:\n\t\t\t\tdictionary[\"t_username\"] = their_uname \n\t\t\t\tdictionary[\"t_av_url\"] = their_avurl\n\t\t\telse:\n\t\t\t\tdictionary[\"t_username\"] = own_uname\n\t\t\t\tdictionary[\"t_av_url\"] = own_avurl\n\t\telse:\n\t\t\t\"\"\"\n\t\t\tDegree of completeness (of retrieved metadata):\n\n\t\t\t'0': no metadata retrieved\n\t\t\t'1': just image retrieved\n\t\t\t'2': just title retrieved\n\t\t\t'3': just desc retrieved\n\t\t\t'4': just img and img_dim retrieved\n\t\t\t'5': just desc and img retrieved\n\t\t\t'6': just 
title and img retrieved\n\t\t\t'7': just desc and title retrieved\n\t\t\t'8': just title, img and img_dim retrieved\n\t\t\t'9': just desc, img and img_dim retrieved\n\t\t\t'10': just desc, title and img retrieved\n\t\t\t'11': desc, title, img and img_dim retrieved\n\t\t\t\"\"\"\n\t\t\tnormal_chat = []\n\t\t\tfor i in range(1,int(dictionary[\"idx\"])+1):\n\t\t\t\tidx = str(i)\n\t\t\t\tdoc = 'doc'+idx\n\t\t\t\thas_url_meta = doc in dictionary\n\t\t\t\tif has_url_meta and dictionary['type'+idx] == 'text':\n\t\t\t\t\tmeta_complete = dictionary[doc]\n\t\t\t\t\t# add meta_complete in every 5th index (i.e. tup.5)\n\t\t\t\t\t# add meta_data in this order: url, desc, title, img, img_hw_ratio, 'yt' - youtube (add empty index in case data doesn't exist - useful in personal_group.html)\n\t\t\t\t\tif meta_complete == '1':\n\t\t\t\t\t\t# just image retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'1',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'','',dictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '2':\n\t\t\t\t\t\t# just title retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'2',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'',dictionary['url_title'+idx],'','',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '3':\n\t\t\t\t\t\t# just desc retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'3',\\\n\t\t\t\t\t\t\tdictionary['url'+idx], dictionary['url_desc'+idx],'','','',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '4':\n\t\t\t\t\t\t# img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'4',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'','',dictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '5':\n\t\t\t\t\t\t# desc and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'5',\\\n\t\t\t\t\t\t\tdictionary['url'+idx], dictionary['url_desc'+idx],'',dictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '6':\n\t\t\t\t\t\t# title and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'6',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'',dictionary['url_title'+idx],dictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '7':\n\t\t\t\t\t\t# desc and title\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'7',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],dictionary['url_desc'+idx],dictionary['url_title'+idx],'','',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '8':\n\t\t\t\t\t\t# title, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'8',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],'',dictionary['url_title'+idx],dictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '9':\n\t\t\t\t\t\t# desc, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], 
float(dictionary['time'+idx]),'9',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],dictionary['url_desc'+idx],'',dictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '10':\n\t\t\t\t\t\t# desc, title and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'10',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],dictionary['url_desc'+idx],dictionary['url_title'+idx],dictionary['url_img'+idx],'',\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '11':\n\t\t\t\t\t\t# desc, title, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx]),'11',\\\n\t\t\t\t\t\t\tdictionary['url'+idx],dictionary['url_desc'+idx],dictionary['url_title'+idx],dictionary['url_img'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telse:\n\t\t\t\t\t\t# no meaningful metadata\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx])))\n\t\t\t\telif has_url_meta and dictionary['type'+idx] == 'img':\n\t\t\t\t\tmeta_complete = dictionary[doc]\n\t\t\t\t\t# add meta_complete in each 11th index (i.e. tup.11)\n\t\t\t\t\t# add meta_data in this order: url, desc, title, img, img_hw_ratio, 'yt' - youtube (add empty index in case data doesn't exist - useful in personal_group.html)\n\t\t\t\t\tif meta_complete == '1':\n\t\t\t\t\t\t# just image retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'1',dictionary['url'+idx],'','',dictionary['url_img'+idx],'',\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '2':\n\t\t\t\t\t\t# just title retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'2',dictionary['url'+idx],'',dictionary['url_title'+idx],'','',\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '3':\n\t\t\t\t\t\t# just desc retrieved\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'3',dictionary['url'+idx],dictionary['url_desc'+idx],'','','',\\\n\t\t\t\t\t\t\tdictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '4':\n\t\t\t\t\t\t# img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), 
\\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'4',dictionary['url'+idx],'','',dictionary['url_img'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '5':\n\t\t\t\t\t\t# desc and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'5',dictionary['url'+idx],dictionary['url_desc'+idx],'',\\\n\t\t\t\t\t\t\tdictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '6':\n\t\t\t\t\t\t# title and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'6',dictionary['url'+idx],'',dictionary['url_title'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '7':\n\t\t\t\t\t\t# desc and title\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'7',dictionary['url'+idx],dictionary['url_desc'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_title'+idx],'','',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '8':\n\t\t\t\t\t\t# title, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'8',dictionary['url'+idx],'',dictionary['url_title'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '9':\n\t\t\t\t\t\t# desc, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'9',dictionary['url'+idx],dictionary['url_desc'+idx],'',\\\n\t\t\t\t\t\t\tdictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '10':\n\t\t\t\t\t\t# desc, title and img\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), 
\\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'10',dictionary['url'+idx],dictionary['url_desc'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_title'+idx],dictionary['url_img'+idx],'',dictionary['yt'+idx]))\n\t\t\t\t\telif meta_complete == '11':\n\t\t\t\t\t\t# desc, title, img and img_dim\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],'11',dictionary['url'+idx],dictionary['url_desc'+idx],\\\n\t\t\t\t\t\t\tdictionary['url_title'+idx],dictionary['url_img'+idx],dictionary['url_hw_ratio'+idx],dictionary['yt'+idx]))\n\t\t\t\t\telse:\n\t\t\t\t\t\t# no meaningful metadata\n\t\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx]))\n\t\t\t\telif dictionary['type'+idx] == 'text':\n\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'text', dictionary['text'+idx], float(dictionary['time'+idx])))\n\t\t\t\telif dictionary['type'+idx] == 'img':\n\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'img', dictionary['img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx]))\n\t\t\t\telif dictionary['type'+idx] == 'shared_img':\n\t\t\t\t\tnormal_chat.append((dictionary['status'+idx], idx, 'shared_img', dictionary['shared_img'+idx], float(dictionary['time'+idx]), \\\n\t\t\t\t\t\tdictionary['img_s_caption'+idx],dictionary['img_caption'+idx],dictionary['hidden'+idx],dictionary['img_width'+idx],\\\n\t\t\t\t\t\tdictionary['img_hw_ratio'+idx],dictionary['img_id'+idx],dictionary['owner_uname'+idx].decode('utf-8')))\n\t\t\t\telse:\n\t\t\t\t\t# append nothing - this case shouldn't arise\n\t\t\t\t\tpass\n\t\t\tdictionary[\"iterator\"] = normal_chat\n\treturn content_list_of_dictionaries", "def get_list_news_files(self):\n # list_news_files = []\n for site in self.sites:\n for news_type in self.news_types:\n\n # accessing files through directories\n site_folder = join(self.dirname, site)\n news_path = join(site_folder, news_type)\n\n # only obtaining the news articles at this time\n exclude = [\"tweets\", \"retweets\", \"user_profile\", \"user_timeline_tweets\", \"user_followers\",\n \"user_following\"]\n\n # iterating through directories only focusing on ones containing the news content\n for root, dirs, files in walk(news_path, topdown=True):\n dirs[:] = [d for d in dirs if d not in exclude]\n\n # collecting all articles\n for f in files:\n if f.endswith(\".json\") and len(dirs) == 0:\n yield join(root, f)\n # list_news_files.append(join(root, f))\n # print(len(list_news_files))\n # return list_news_files\n\n def get_list_twitter_files(self):\n \"\"\"Return files path iterator of news\"\"\"\n list_twitter_files = []\n for site in self.sites:\n for news_type in self.news_types:\n\n # accessing files through 
directories\n site_folder = join(self.dirname, site)\n news_path = join(site_folder, news_type)\n\n # only obtaining the tweets/retweets at this time\n exclude = [\"news\", \"user_profile\", \"user_timeline_tweets\", \"user_followers\",\n \"user_following\"]\n\n # iterating through directories only focusing on ones containing the news content\n for root, dirs, files in walk(news_path, topdown=True):\n dirs[:] = [d for d in dirs if d not in exclude]\n\n # collecting all articles\n for f in files:\n if f.endswith(\".json\") and len(dirs) == 0:\n yield join(root, f)\n list_twitter_files.append(join(root, f))\n print(len(list_twitter_files))\n # return list_news_files", "def run(self,dispatcher,tracker,domain): \n data=sourcenews(\"abc-news\")\n leng=len(data)\n for i in range(leng):\t\n gt = {\n \"attachment\": {\n \"type\": \"template\",\n \"payload\": {\n \"template_type\": \"generic\",\n \"elements\": [\n {\n \"title\": data['articles'][i]['title'],\n \"image_url\":data['articles'][i]['urlToImage'],\n \"subtitle\": data['articles'][i]['description'],\n \"buttons\": [\n {\n \"type\": \"web_url\",\n \"url\": data['articles'][i]['url'],\n \"title\": \"Read More\"\n },\n ]\n },\n ]\n }\n }\n }\n dispatcher.utter_custom_json(gt) \n return []", "def article_cleanser(dirty_content):\n global id_\n\n articles_dict = {}\n source = dirty_content.find(\"title\").get_text()\n data_tuple_list = []\n\n for item in dirty_content.find_all(\"item\"):\n article_link = item.link.get_text()\n\n jj = item.pubDate.get_text().split(' ')\n article_pubdate = datetime(int(jj[3]), int(strptime(jj[2], '%b').tm_mon), int(jj[1]), int(jj[4][:2]),\n int(jj[4][3:5]),\n int(jj[4][6:])).isoformat(' ')\n\n if source == \"Sudans Post\":\n \"\"\" If the feed is for Sudans Post then it should scrap accordingly\"\"\"\n article_number = item.guid.get_text()\n guid_list = re.findall(\"(?<=p=)[0-9]{5}\", article_number)\n article_guid = int(guid_list[0] + '000')\n article_description = item.description.get_text().replace('[&#8230;]', '....')\n article_description = article_description.replace('&#8217;', \"'\")\n article_title = item.title.get_text().replace(u'\\xa0', ' ')\n article_text = item.encoded.get_text() # gives a not-navigable string\n\n \"\"\"\n So to make the article_content navigable I write it into a html file\n and retrieving it again below\n \"\"\"\n with open(\"article_content.html\", 'w') as fp:\n fp.write(article_text)\n\n with open(\"article_content.html\", \"r\") as fp:\n article_soup = BeautifulSoup(fp, \"lxml\")\n\n article_content = \"\"\n for c in article_soup.find_all('p'):\n article_content += \"\\n\" + c.get_text()\n\n image_link = article_soup.find('a')[\"href\"]\n\n categories = \"\"\n for cat in item.find_all(\"category\"):\n categories += f\"/{cat.get_text().lower()}\"\n\n elif source == \"Radio Tamazuj - Latest news\":\n \"\"\"If the feed is for Radio tamazuj, scrap accordingly \"\"\"\n\n article_title = item.title.get_text()\n article_description = item.description.get_text()\n article_guid = item.guid.get_text()[:8]\n categories = \"Not available\"\n\n def get_content_from_link(link):\n \"\"\" \n Gets the article from the link, because radio tamazuj doesn't \n post the article content on the rss feed\n \"\"\"\n\n page = requests.get(link)\n soup = BeautifulSoup(page.text, \"lxml\")\n return soup\n\n soup = get_content_from_link(article_link)\n image_link = \"https://radiotamazuj.org\" + soup.find(\"img\")[\"src\"]\n\n body = soup.select(\".body-text\")\n content = \"\"\n for i in body:\n content += 
i.get_text()\n article_content = content.replace(u'\\xa0', ' ')\n\n articles_dict[id_] = {\n \"id\": id_,\n \"source\": source,\n \"Title\": article_title,\n \"Link\": article_link,\n \"PubDate\": article_pubdate,\n \"guid\": article_guid,\n \"Description\": article_description,\n \"Content\": article_content,\n \"categories\": categories,\n \"image_link\": image_link,\n }\n data_tuple = (\n id_,\n source,\n article_title,\n article_link,\n article_pubdate,\n article_guid,\n article_description,\n article_content,\n categories,\n image_link,\n )\n id_ += 1 # counts number of articles\n data_tuple_list.append(data_tuple)\n return data_tuple_list", "def generatePosts(self,**kwargs):\n oldestTimeSoFar = None\n while True:\n if oldestTimeSoFar is None:\n items = self.getPosts(**kwargs)\n else:\n items = self.getPosts(before_time=oldestTimeSoFar,**kwargs)\n if not items:\n return\n for item in items:\n yield item\n oldestTimeSoFar = item['published_at']\n time.sleep(0.5)", "def generate_feed(results, generator):\n\n for result in results:\n content = FeedContentWrapper(result)\n\n content.add_premium_logo_to_image_url()\n feed_item = generator.add_entry(order='append')\n feed_item.id(content.id)\n feed_item.author(author=content.author)\n feed_item.link(href='%s%s' % (WELT_URL, content.web_url))\n feed_item.catalogue.availability_date(content.publication_date)\n feed_item.title(content.seo_title)\n feed_item.description(content.intro)\n feed_item.content(content.premium_paragraph)\n feed_item.catalogue.id(content.id)\n feed_item.catalogue.brand('WELT Plus')\n feed_item.catalogue.condition('new')\n feed_item.catalogue.google_product_category('Media > Magazines & Newspapers')\n feed_item.catalogue.product_type(content.category)\n feed_item.catalogue.image_link(content.add_premium_logo_to_image_url())\n feed_item.catalogue.additional_image_link(content.add_premium_logo_to_image_url(default_image=False))\n feed_item.catalogue.custom_label_0(content.topic)\n feed_item.catalogue.custom_label_1(content.headline)\n feed_item.catalogue.custom_label_2(str(content.reading_time))\n feed_item.catalogue.custom_label_3(content.age)\n feed_item.catalogue.custom_label_4(content.tags)", "def get_activities_response(self, user_id=None, group_id=None, app_id=None,\n activity_id=None, start_index=0, count=0,\n etag=None, min_id=None, cache=None,\n fetch_replies=False, fetch_likes=False,\n fetch_shares=False, fetch_events=False):\n if activity_id:\n # Sometimes Facebook requires post ids in USERID_POSTID format; sometimes\n # it doesn't accept that format. 
I can't tell which is which yet, so try\n # them all.\n ids_to_try = [activity_id]\n if '_' in activity_id:\n user_id_prefix, activity_id = activity_id.split('_', 1)\n ids_to_try.insert(0, activity_id)\n if user_id:\n ids_to_try.append('%s_%s' % (user_id, activity_id))\n\n for id in ids_to_try:\n try:\n posts = [json.loads(self.urlopen(API_OBJECT_URL % id).read())]\n break\n except urllib2.URLError, e:\n logging.warning(\"Couldn't fetch object %s: %s\", id, e)\n else:\n posts = []\n\n if posts == [False]: # FB returns false for \"not found\"\n posts = []\n\n else:\n url = API_SELF_POSTS_URL if group_id == source.SELF else API_HOME_URL\n url = url % (user_id if user_id else 'me', start_index)\n if count:\n url = util.add_query_params(url, {'limit': count})\n headers = {'If-None-Match': etag} if etag else {}\n try:\n resp = self.urlopen(url, headers=headers)\n etag = resp.info().get('ETag')\n posts = json.loads(resp.read()).get('data', [])\n except urllib2.HTTPError, e:\n if e.code == 304: # Not Modified, from a matching ETag\n posts = []\n else:\n raise\n\n activities = [self.post_to_activity(p) for p in posts]\n response = self._make_activities_base_response(activities)\n response['etag'] = etag\n return response", "def construct_strava_activity_data(activity):\n # if the timestamp has been saved then use this over converting the other one\n # issues with server tz so better to use the timestamp at the point the activity record was created\n if activity.iso_timestamp:\n local_time = activity.iso_timestamp\n else:\n local_time = activity.local_timestamp.isoformat()\n\n data = {'name': activity.title,\n 'type': STRAVA_ACTIVITIES_LOOKUP[activity.type],\n 'start_date_local': local_time,\n 'elapsed_time': activity.duration * 60, # need to convert to seconds, stored in db as minutes\n 'description': activity.description}\n\n if activity.distance is not None and activity.distance > 0:\n data['distance'] = activity.distance * 1000 # Strava API requires distance in m, stored in db as km\n\n return data", "def get_user_activity(my_id, user_ids, page):\n activities = Activity.objects.select_related('sender',\n 'sender__userprofile') \\\n .prefetch_related('target_object',\n 'target_object__user',\n 'target_object__item',\n 'target_object__item__movie') \\\n .filter(sender__in=user_ids)\n bookmarks = get_bookmarked_items(my_id)\n\n paginator = Paginator(activities, 20)\n \n try:\n next_page = paginator.page(page).next_page_number()\n paginator.page(next_page)\n except (EmptyPage, InvalidPage):\n next_page = ''\n\n response = [{\n 'object_id': activity.object_id,\n 'sender_avatar': get_thumbnail(\n activity.sender.userprofile.avatar, '100x100', crop='center').url,\n 'rating': activity.target_object.rating,\n 'target_url': reverse('movie-profile',\n args=[activity.target_object.item.movie.url]),\n 'target_image': get_thumbnail(\n activity.target_object.item.movie.image, 'x285').url,\n 'target_title': activity.target_object.item.movie.title,\n 'sender_id': activity.sender_id,\n 'sender_url': reverse('user-profile', args=[activity.sender_id]),\n 'sender_full_name': activity.sender.get_full_name(),\n 'verb': activity.verb,\n 'target_user_id': activity.target_object.user_id,\n 'target_user_url': reverse('user-profile',\n args=[activity.target_object.user_id]),\n 'target_user_full_name': activity.target_object.user \\\n .get_full_name(),\n 'target_user_first_name': activity.target_object.user \\\n .first_name.lower(),\n 'text': linebreaks(escape(activity.target_object.text)),\n 'time_since': 
timesince(activity.created_at),\n 'item_id': activity.target_object.item_id,\n 'bookmarked': (True if activity.target_object.item_id in \n bookmarks else False),\n 'next': next_page \n } for activity in paginator.page(page)]\n\n return simplejson.dumps(response)" ]
[ "0.58782476", "0.58245516", "0.5714196", "0.54559016", "0.5373944", "0.5279887", "0.52462864", "0.52327186", "0.52325124", "0.5232471", "0.5232227", "0.5223817", "0.5216105", "0.51968014", "0.51924586", "0.519236", "0.51744807", "0.51639336", "0.51562124", "0.5153761", "0.50996965", "0.50981486", "0.5094384", "0.506582", "0.50469714", "0.5044881", "0.50307304", "0.50138366", "0.50126916", "0.49805313" ]
0.6624148
0
This initializes the C fitting library.
def initializeC(self, image):
    super(CPupilFit, self).initializeC(image)
    self.mfit = self.clib.pfitInitialize(self.pupil_fn.getCPointer(),
                                         self.rqe,
                                         self.scmos_cal,
                                         self.default_tol,
                                         self.scmos_cal.shape[1],
                                         self.scmos_cal.shape[0])
    self.clib.pfitSetZRange(self.mfit,
                            self.pupil_fn.getZMin(),
                            self.pupil_fn.getZMax())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__ (self) :\n self.loadCSPAD2x2CalibParsDefault()", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def initialize(self, cwrap):\n pass", "def boost_initialization():\n global Lib_c \n Lib_c = ctypes.CDLL('./integral_function.so')\n Lib_c.set.restype = None\n Lib_c.set.argtypes = (ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)\n Lib_c.set_target.restype = None\n Lib_c.set_target.argtypes = (ctypes.c_int,)\n Lib_c.function.restype = ctypes.c_double\n Lib_c.function.argtypes = (ctypes.c_int,ctypes.c_double)", "def initialize(self):\n self.initilize_multiply_array() # m\n self.initialize_cameras()\n self.initialize_electronics()\n self.logger.info('Starting free runs and continuous reads')\n self.camera_microscope.start_free_run()\n self.camera_microscope.continuous_reads()\n self.camera_fiber.start_free_run()\n self.camera_fiber.continuous_reads()\n self.servo_off()\n\n time.sleep(1) #m Without the sleep below initialize_multiply_array does not work", "def init():", "def setup(self):\n # define misfit function and adjoint source generator\n self.misfit = getattr(misfit, PAR.MISFIT)\n self.adjoint = getattr(adjoint, PAR.MISFIT)\n\n # define seismic data reader and writer\n self.reader = getattr(readers, PAR.READER)\n self.writer = getattr(writers, PAR.WRITER)\n\n # prepare channels list\n self.channels = []\n for char in PAR.CHANNELS:\n self.channels += [char]", "def init_ca(self):\n self._init_dir()\n self._init_serial()\n self._init_keys()", "def initialize():\n environment = Environment()\n environment.setup()", "def init():\n pass", "def init():\n safe_call(backend.get().af_init())", "def initialize(self,*args,**kwargs):\n self.__instrumentID = c_uint32(0) \n self.__numInstruments = c_uint32()\n self.__nbrOfChannels = c_uint32()\n self.__nbrADCBits = c_uint32()\n self.__temperature = c_int32()\n self.__time_us = c_double()\n\n self.loadDLLs(**kwargs) # Load the different DLLs or DLL based modules\n self.reinit() # init or reinit the board\n self.createDictAndGlobals() # create dictionaries and global variables\n self.nbrOfChannels=int(self.__nbrOfChannels.value) # duplicate self.nbrOfChannels in a Python type variable \n self.getInitialConfig()", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def _real_initialize(self):\n pass", "def init():\n rino.initialize.initialize()", "def init():\n ########################\n # OPTIONS\n ########################\n # Debugging tools\n global TIMER # displays time of every major step\n TIMER = True\n global MONITOR # displays monitoring infos\n MONITOR = False\n \n global directories\n directories = {'1Face': 'data/1Face/',\n '2Faces': 'data/2Faces/',\n '3Faces': 'data/3Faces/',\n 'test': 'data/test/'}\n \n # Opt. swicthes\n global maxfinder # to find the max dim. 
amongst the pictures\n maxfinder = False\n global ML_mode\n ML_mode = {'CNN_Train': False,\n 'CNN_Pred' : True,\n 'Sampler': True}\n \n # Global variables\n global num_pics\n num_pics = {'1Face': 0,\n '2Faces': 0,\n '3Faces': 0}\n global labels\n labels = {'1Face': 0,\n '2Faces': 1,\n '3Faces': 2}\n global num_data\n num_data = 0\n global splitsize # Fraction of data to build the training set\n splitsize = 0.7 \n global maxheight # Resize the pictures to a power of 2 for CNN (2^8 here)\n maxheight = 128\n global maxwidth\n maxwidth = 128\n global TreshEdge # Number of consecutive black pixels to define an edge\n TreshEdge = 2\n global TreshFace # Number of white pixels to define a face (or large edge)\n TreshFace = maxheight/16", "def _initlib():\n global _libhfof\n\n if _libhfof is not None:\n return _libhfof\n\n suffix = sysconfig.get_config_var('EXT_SUFFIX')\n \n name = path.join(path.dirname(path.abspath(__file__)), '../build/libhfof'+suffix)\n if not path.exists(name):\n raise Exception('Library '+str(name)+' does not exist. Maybe you forgot to make it?')\n\n print('Loading libhfof - C functions for FoF calculations', name)\n _libhfof = ctypes.cdll.LoadLibrary(name)\n\n # morton indexing\n # void get_morton_idx(const double *pos, const int num_pos, const double inv_cell_width, int64_t *restrict out)\n func = _libhfof.get_morton_idx\n func.restype = None\n func.argtypes = [ndpointer(ctypes.c_double), ctypes.c_int, ctypes.c_double, ndpointer(int64)]\n \n # minimum and maximum per cell\n # void get_min_max(const double *pos, const uint32_t num_pos, double *restrict out)\n func = _libhfof.get_min_max\n func.restype = None\n func.argtypes = [ndpointer(ctypes.c_double), ctypes.c_uint32, ndpointer(ctypes.c_double)]\n # void get_min_max_2d(const double *pos, const uint32_t num_pos, double *restrict out)\n func = _libhfof.get_min_max_2d\n func.restype = None\n func.argtypes = [ndpointer(ctypes.c_double), ctypes.c_uint32, ndpointer(ctypes.c_double)]\n \n # Find the cell for each point\n # void find_lattice(const double *pos, const uint32_t num_pos, \n # const double inv_cell_width, const int N, const int M, int64_t *out)\n func = _libhfof.find_lattice\n func.restype = None\n func.argtypes = [ndpointer(ctypes.c_double), ctypes.c_uint32, ctypes.c_double, \n ctypes.c_int, ctypes.c_int64, ndpointer(int64)]\n\n # Find the block+cell for each point\n # void blocks_cells(const double min_x, const double min_y, const double min_z, \n #\t\t const double *pos, const uint32_t num_pos, \n #\t\t const double inv_cell_width, const int Py, const int64_t Px, \n #\t\t int64_t *out)\n func = _libhfof.blocks_cells\n func.restype = None\n func.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_double,\n ndpointer(ctypes.c_double), ctypes.c_uint32, ctypes.c_double, \n ctypes.c_int, ctypes.c_int64, ndpointer(int64)]\n # void blocks_cells_2d(const double min_x, const double min_y, \n #\t\t const double *restrict pos, const uint32_t N, \n #\t\t const double inv_cell_width, const int64_t P, \n #\t\t int64_t *restrict out)\n func = _libhfof.blocks_cells_2d\n func.restype = None\n func.argtypes = [ctypes.c_double, ctypes.c_double, \n ndpointer(ctypes.c_double), ctypes.c_uint32, ctypes.c_double, \n ctypes.c_int64, ndpointer(int64)]\n\n # Friends of Friends linking periodic (on 4x4x4 cells)\n # see src/fof64.c\n func = _libhfof.fof64\n func.restype = ctypes.c_int\n func.argtypes = [ctypes.c_uint32,ctypes.c_int,ctypes.c_int64,ctypes.c_uint32,ctypes.c_double, \n ndpointer(float64), ndpointer(int64),ndpointer(int64), 
ndpointer(int64), \n ndpointer(int32), ctypes.c_double]\n\n # Friends-of-friends in 2d, using implementation with 8x8 (=64) cells\n # see src/fof64_2d\n func = _libhfof.fof64_2d\n func.restype = ctypes.c_int\n func.argtypes = [ctypes.c_uint32,ctypes.c_int,ctypes.c_uint32,ctypes.c_double, \n ndpointer(float64), ndpointer(int64),ndpointer(int64), ndpointer(int64), \n ndpointer(int32), ctypes.c_double]\n \n # Friends of Friends periodic linking\n # see src/fof.c\n func = _libhfof.fof_periodic\n func.restype = ctypes.c_int\n func.argtypes = [ctypes.c_uint32,ctypes.c_int,ctypes.c_int64,ctypes.c_uint32,ctypes.c_double,\n ndpointer(float64), ndpointer(int64),ndpointer(int64), ndpointer(int64), ndpointer(int32)]\n\n # Periodic image insertion\n # int pad_box(const double inv_boxsize, const double r_pad, const uint32_t num_pos, \n # const double *restrict pos, double *restrict periodic_pos)\n # \t int64_t *restrict pad_idx, const int max_images)\n\n func = _libhfof.pad_box\n func.restype = ctypes.c_int\n func.argtypes = [ctypes.c_double, ctypes.c_double,ctypes.c_uint32,\n ndpointer(float64), ndpointer(float64), ndpointer(int64), ctypes.c_int]\n # int pad_square(const double inv_width, const double r_pad, const uint32_t num_pos, \n #\t const double *restrict pos, double *restrict pad_pos, \n #\t int64_t *restrict pad_idx, const int max_images)\n func = _libhfof.pad_square\n func.restype = ctypes.c_int\n func.argtypes = [ctypes.c_double, ctypes.c_double,ctypes.c_uint32,\n ndpointer(float64), ndpointer(float64), ndpointer(int64), ctypes.c_int] \n \n return _libhfof", "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def setup_lib(CLIB):\n # {{ SETUP_LIB }}", "def initialize(self):\n self.conv1.reset_parameters()\n self.conv2.reset_parameters()", "def gff_init():\n pass", "def init():\n return _libsbml.FbcExtension_init()", "def initialise(self):\n # Can take quite a lot of time due to the homing\n print(\"Initialising spectrograph.\")\n err = self._dll.ShamrockInitialize()\n self.status(\"Initialisation\", err)", "def autonomousInit(self):\n self.globalInit()\n self.autonomous.start()", "def setup(self):\n self.bqSession.update_mex('Initializing...')\n self.mex_parameter_parser(self.bqSession.mex.xmltree)\n self.output_file = None", "def initialize(self):\n self.lib.Initialize()\n\n self.triggers = {'Internal': 0, 'External': 1, 'External Start': 6,\n 'External Exposure': 7, 'External FVB EM': 9,\n 'Software Trigger': 10,\n 'External Charge Shifting': 12}\n self.savetypes = {'Signed16bits': 1, 'Signed32bits': 2, 'Float': 3}\n\n # Initial values\n\n self.readout_packing_state = False\n self.readout_packing = self.readout_packing_state\n\n self.readout_mode_mode = 'Image'\n self.readout_mode = self.readout_mode_mode\n\n self.photon_counting_mode_state = False\n self.photon_counting_mode = self.photon_counting_mode_state\n\n self.frame_transfer_mode_state = False\n self.frame_transfer_mode = self.frame_transfer_mode_state\n\n self.fan_mode_index = 'onfull'\n self.fan_mode = self.fan_mode_index\n\n self.EM_gain_mode_index = 'RealGain'\n self.EM_gain_mode = self.EM_gain_mode_index\n\n self.cooled_on_shutdown_value = False\n self.cooled_on_shutdown = self.cooled_on_shutdown_value\n\n self.baseline_offset_value = 100\n self.baseline_offset = 
self.baseline_offset_value\n\n self.adv_trigger_mode_state = True\n self.adv_trigger_mode = self.adv_trigger_mode_state\n\n self.acq_mode = 'Single Scan'\n self.acquisition_mode = self.acq_mode\n\n self.amp_typ = 0\n\n self.horiz_shift_speed_index = 0\n self.horiz_shift_speed = self.horiz_shift_speed_index\n\n self.vert_shift_speed_index = 0\n self.vert_shift_speed = self.vert_shift_speed_index\n\n self.preamp_index = 0\n self.preamp = self.preamp_index\n\n self.temperature_sp = 0 * degC\n self.temperature_setpoint = self.temperature_sp\n\n self.auxout = np.zeros(4, dtype=bool)\n for i in np.arange(1, 5):\n self.out_aux_port[i] = False\n\n self.trigger_mode_index = 'Internal'\n self.trigger_mode = self.trigger_mode_index", "def initialize(self):\n if not self._ready:\n self._real_initialize()\n self._ready = True" ]
[ "0.6538869", "0.640215", "0.62104684", "0.6187296", "0.61846364", "0.6099485", "0.602922", "0.60163647", "0.5997682", "0.5991749", "0.597411", "0.5933944", "0.5894047", "0.58402103", "0.57940716", "0.57654905", "0.57517904", "0.5741623", "0.57360727", "0.57360727", "0.57360727", "0.5730894", "0.5686298", "0.56818545", "0.5676695", "0.56761557", "0.5658619", "0.56555104", "0.5639244", "0.56321806" ]
0.64429295
1
Pass new peaks to the C library.
def newPeaks(self, peaks, peaks_type):
    c_peaks = self.formatPeaks(peaks, peaks_type)
    self.clib.pfitNewPeaks(self.mfit,
                           c_peaks,
                           ctypes.c_char_p(peaks_type.encode()),
                           c_peaks.shape[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def changePeaks(self):\n # Change the number of peaks\n if self.minpeaks is not None and self.maxpeaks is not None:\n npeaks = len(self.peaks_function)\n u = self.random.random()\n r = self.maxpeaks - self.minpeaks\n if u < 0.5:\n # Remove n peaks or less depending on the minimum number of peaks\n u = self.random.random()\n n = min(npeaks - self.minpeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n idx = self.random.randrange(len(self.peaks_function))\n self.peaks_function.pop(idx)\n self.peaks_position.pop(idx)\n self.peaks_height.pop(idx)\n self.peaks_width.pop(idx)\n self.last_change_vector.pop(idx)\n else:\n # Add n peaks or less depending on the maximum number of peaks\n u = self.random.random()\n n = min(self.maxpeaks - npeaks, int(round(r * u * self.number_severity)))\n for i in range(n):\n self.peaks_function.append(self.random.choice(self.pfunc_pool))\n self.peaks_position.append([self.random.uniform(self.min_coord, self.max_coord) for _ in range(self.dim)])\n self.peaks_height.append(self.random.uniform(self.min_height, self.max_height))\n self.peaks_width.append(self.random.uniform(self.min_width, self.max_width))\n self.last_change_vector.append([self.random.random() - 0.5 for _ in range(self.dim)])\n\n for i in range(len(self.peaks_function)):\n # Change peak position\n shift = [self.random.random() - 0.5 for _ in range(len(self.peaks_position[i]))]\n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n \n shift = [shift_length * (1.0 - self.lambda_) * s \\\n + self.lambda_ * c for s, c in zip(shift, self.last_change_vector[i])]\n \n shift_length = sum(s**2 for s in shift)\n shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0\n\n shift = [s*shift_length for s in shift]\n \n new_position = []\n final_shift = []\n for pp, s in zip(self.peaks_position[i], shift):\n new_coord = pp + s\n if new_coord < self.min_coord:\n new_position.append(2.0 * self.min_coord - pp - s)\n final_shift.append(-1.0 * s)\n elif new_coord > self.max_coord:\n new_position.append(2.0 * self.max_coord - pp - s)\n final_shift.append(-1.0 * s)\n else:\n new_position.append(new_coord)\n final_shift.append(s)\n\n self.peaks_position[i] = new_position\n self.last_change_vector[i] = final_shift\n\n # Change peak height\n change = self.random.gauss(0, 1) * self.height_severity\n new_value = change + self.peaks_height[i]\n if new_value < self.min_height:\n self.peaks_height[i] = 2.0 * self.min_height - self.peaks_height[i] - change\n elif new_value > self.max_height:\n self.peaks_height[i] = 2.0 * self.max_height - self.peaks_height[i] - change\n else:\n self.peaks_height[i] = new_value\n\n # Change peak width\n change = self.random.gauss(0, 1) * self.width_severity\n new_value = change + self.peaks_width[i]\n if new_value < self.min_width:\n self.peaks_width[i] = 2.0 * self.min_width - self.peaks_width[i] - change\n elif new_value > self.max_width:\n self.peaks_width[i] = 2.0 * self.max_width - self.peaks_width[i] - change\n else:\n self.peaks_width[i] = new_value\n\n self._optimum = None", "def __init__(self, peaks, pki, parent):\n self.filters = list(peaks.keys())\n self.deblendedPeaks = peaks\n self.parent = parent\n for pki, peak in self.deblendedPeaks.items():\n peak.multiColorPeak = self\n\n # Fields common to the peak in all bands that will be set by the deblender\n # In the future this is likely to contain information about the probability of the peak\n # being a point 
source, color-information about templateFootprints, etc.\n self.pki = pki\n self.skip = False\n self.deblendedAsPsf = False\n self.x = self.deblendedPeaks[self.filters[0]].peak.getFx()\n self.y = self.deblendedPeaks[self.filters[0]].peak.getFy()", "def peakmem_reference(self, *args):\n pass", "def call_peaks(interval, gene_length, bam_file=None, max_gap=25,\n fdr_alpha=0.05, user_threshold=None, binom_alpha=0.05, method=\"binomial\",\n min_reads=3, poisson_cutoff=0.05,\n plotit=False, w_cutoff=10, windowsize=1000,\n SloP=False, max_width=None, min_width=None,\n algorithm=\"spline\", reverse_strand=False, exons=None):\n ###########################################################################\n # print(\"starting call_peaks on gene_no:\", gene_no, \"interval:\", interval)\n # genecallpeaksloggingperiode = 100\n # should_log_gene_call_peaks_this_time = (gene_no % genecallpeaksloggingperiode == 0)\n ###########################################################################\n # if should_log_gene_call_peaks_this_time:\n # logging.info(\" starting call_peaks on gene_no {}\".format(gene_no))\n ###########################################################################\n\n if plotit:\n plt.rcParams['interactive'] = True\n pass\n\n bam_fileobj = pysam.Samfile(bam_file, 'rb')\n\n # fixes non-standard chrom file names (without the chr)\n if not interval.chrom.startswith(\"chr\") and not interval.chrom.startswith(\"ENST\") and not interval.chrom.startswith(\"ERCC\") and not interval.chrom.startswith(\n \"phiX\"):\n interval.chrom = \"chr\" + interval.chrom\n\n # fetch reads in the genomic region\n subset_reads = list(bam_fileobj.fetch(reference=str(interval.chrom), start=interval.start, end=interval.stop))\n strand = str(interval.strand)\n if reverse_strand:\n if strand == \"+\":\n strand = \"-\"\n elif strand == \"-\":\n strand = \"+\"\n\n # convert pysam to a wiggle vector, junction, positional count(coverage), read lengths, all_reads, location\n (wiggle, jxns, pos_counts,\n lengths, allreads, read_locations) = readsToWiggle_pysam(subset_reads, interval.start,\n interval.stop, strand, \"start\", False)\n\n nreads_in_gene = sum(pos_counts)\n gene_length = int(gene_length)\n lengths = [gene_length - 1 if read >= gene_length else read for read in lengths]\n\n # pre-mRNA Threshold\n if user_threshold is None:\n if method == \"binomial\": # Uses Binomial Distribution to get cutoff if specified by user\n # print(len(lengths), gene_length, binom_alpha)\n premRNA_threshold = get_FDR_cutoff_binom(lengths, gene_length, binom_alpha)\n # print(premRNA_threshold)\n elif method == \"random\":\n premRNA_threshold = get_FDR_cutoff_mean(readlengths=lengths,\n genelength=gene_length,\n alpha=fdr_alpha)\n else:\n raise ValueError(\"Method %s does not exist\" % (method))\n else:\n logging.info(\"using user threshold\")\n premRNA_threshold = user_threshold\n\n # mRNA Threshold\n exons = pybedtools.BedTool(exons)\n exons = exons.filter(lambda x: x.name == interval.attrs['gene_id']).saveas()\n\n total_exonic_reads = []\n total_exonic_length = 0\n htseq_exons = HTSeq.GenomicArrayOfSets(chroms=\"auto\", stranded=False)\n\n for exon, exon_interval in zip(exons, bed_to_genomic_interval(exons)):\n exon.stop += 1\n exonic_reads = get_reads_in_interval_pysam(exon, interval.start, read_locations)\n\n exon_read_lengths = read_lengths_from_pysam(exonic_reads)\n exon_read_lengths = [exon_interval.length - 1 if read > exon_interval.length else read for read in\n exon_read_lengths]\n total_exonic_reads += exon_read_lengths\n 
total_exonic_length += exon_interval.length\n htseq_exons[exon_interval] += 'exon'\n\n mRNA_threshold = get_FDR_cutoff_binom(total_exonic_reads, total_exonic_length, binom_alpha)\n if not isinstance(premRNA_threshold, int):\n raise TypeError\n\n # these are what is built in this dict, complicated enough that it might\n # be worth turning into an object\n peak_dict = {}\n peak_dict['clusters'] = []\n peak_dict['sections'] = {}\n peak_dict['nreads'] = int(nreads_in_gene)\n peak_dict['threshold'] = premRNA_threshold\n peak_dict['loc'] = interval\n\n peak_number = 0\n\n sections = find_sections(wiggle, max_gap) # return list of base with contiguous read > 0 (gap allowed)\n if plotit:\n plot_sections(wiggle, sections, premRNA_threshold)\n\n # for each section, call peaks\n for sect in sections:\n\n sectstart, sectstop = sect\n sect_length = sectstop - sectstart + 1\n data = wiggle[sectstart:(sectstop + 1)]\n\n # make interval for teh section\n cur_interval = HTSeq.GenomicInterval(str(interval.chrom),\n sectstart + interval.start,\n sectstop + interval.start + 1,\n strand)\n\n # Logic to use variable thresholds for exons or introns, still superseded by superLocal logic\n overlaps_exon = len(reduce(set.union, (val for iv, val in htseq_exons[cur_interval].steps()))) > 0\n gene_threshold = mRNA_threshold if overlaps_exon else premRNA_threshold\n\n # maybe make a function that takes a genomic interval and converts it into a pybedtools interval\n bed_format = [interval.chrom,sectstart + interval.start,sectstop + interval.start + 1,interval.name,interval.score,strand]\n bed_format = list(map(str, bed_format))\n cur_pybedtools_interval = pybedtools.create_interval_from_list(bed_format)\n\n Nreads = count_reads_in_interval_pysam(cur_pybedtools_interval, interval.start, read_locations)\n\n cts = pos_counts[sectstart:(sectstop + 1)]\n xvals = arange(len(data))\n peak_dict['sections'][sect] = {}\n peak_dict['sections'][sect]['nreads'] = int(Nreads)\n\n # makes sure there are enough reads\n if Nreads < min_reads:\n logging.info(\"\"\"%d is not enough reads, skipping section: %s\"\"\" % (Nreads, sect))\n peak_dict['sections'][sect]['tried'] = False\n continue\n else:\n logging.info(\"\"\"Analyzing section %s with %d reads\"\"\" % (sect, Nreads))\n pass\n\n if user_threshold is None:\n if SloP:\n # super local p-value: section +/- 500 b.p.'; instead of using whole gene's length and read, use this extended region\n half_width = 500\n section_start = max(0, sectstart + interval.start - half_width) # aim at -500 offset from section start\n section_stop = sectstop + interval.start + 1 + half_width # aim at _500 from section stop\n expanded_sect_length = section_stop - section_start\n\n bed_format = [interval.chrom, section_start,section_stop,interval.name,interval.score,strand]\n bed_format = list(map(str, bed_format))\n cur_pybedtools_interval = pybedtools.create_interval_from_list(bed_format)\n\n expanded_Nreads = get_reads_in_interval_pysam(cur_pybedtools_interval, interval.start, read_locations)\n sect_read_lengths = read_lengths_from_pysam(expanded_Nreads)\n sect_read_lengths = [sect_length - 1 if read > sect_length else read for read in sect_read_lengths]\n peak_dict['sections'][sect]['expanded_Nreads'] = len(expanded_Nreads)\n\n if method == \"binomial\": # Uses Binomial Distribution to get cutoff if specified by user\n slop_threshold = get_FDR_cutoff_binom(readlengths=sect_read_lengths,\n genelength=expanded_sect_length,\n alpha=binom_alpha)\n elif method == \"random\":\n # use the minimum FDR cutoff 
between superlocal and gene-wide calculations\n slop_threshold = get_FDR_cutoff_mean(readlengths=sect_read_lengths,\n genelength=expanded_sect_length,\n alpha=fdr_alpha)\n else:\n raise ValueError(\"Method %s does not exist\" % (method))\n threshold = max(gene_threshold, slop_threshold)\n\n logging.info(\"Using super-local threshold %d\" % (threshold))\n\n\n else:\n # if not use super local threshold (+/- 500 bp), use mRNA_threshold for exon; premRNA_threshold if section does not overlap with exon\n threshold = gene_threshold\n else:\n threshold = user_threshold\n\n # saves threshold for each individual section\n peak_dict['sections'][sect]['threshold'] = threshold\n peak_dict['sections'][sect]['nreads'] = int(Nreads)\n peak_dict['sections'][sect]['tried'] = True\n peak_dict['sections'][sect]['nPeaks'] = 0\n\n if max(data) < threshold:\n logging.info(\"data does not excede threshold, stopping\")\n continue\n\n if algorithm == \"spline\":\n data = list(map(float, data))\n # Magic number for initial smoothing, but it works\n initial_smoothing_value = ((sectstop - sectstart + 1) ** (1 / 3)) + 10\n\n peak_dict['sections'][sect]['smoothing_factor'] = initial_smoothing_value\n\n logging.info(\"initial smoothing value: %.2f\" % initial_smoothing_value)\n fitter = SmoothingSpline(xvals, data, smoothing_factor=initial_smoothing_value,\n lossFunction=\"get_turn_penalized_residuals\",\n threshold=threshold,\n num_reads=Nreads)\n\n elif algorithm == \"gaussian\":\n cts = list(map(float, cts))\n fitter = GaussMix(xvals, cts)\n\n elif algorithm == \"classic\":\n data = list(map(float, data))\n fitter = Classic(xvals, data, max_width, min_width, max_gap)\n\n try:\n peak_definitions = fitter.peaks()\n logging.info(\"optimized smoothing value: %.2f\" % fitter.smoothing_factor)\n peak_dict['sections'][sect]['final_smoothing_factor'] = fitter.smoothing_factor\n if peak_definitions is None:\n numpeaks = 0\n else:\n numpeaks = len(peak_definitions)\n logging.info(\"I identified %d potential peaks\" % (numpeaks))\n\n except Exception as error:\n logging.error(\"peak finding failed:, %s, %s\" % (interval.name, error))\n raise error\n\n # subsections that are above threshold\n # peak center is actually the location where we think binding should\n # occur, not the average of start and stop\n\n # Need to get all ranges, count number of reads in each range and compute from there\n for peak_start, peak_stop, peak_center in peak_definitions:\n genomic_start = interval.start + sectstart + peak_start\n genomic_stop = interval.start + sectstart + peak_stop\n\n # save to bedtool\n bed_format = [interval.chrom,genomic_start,genomic_stop,interval.name,interval.score,strand]\n bed_format = list(map(str, bed_format)) # create_interval_only_take_str\n cur_pybedtools_interval = pybedtools.create_interval_from_list(bed_format)\n\n number_reads_in_peak = count_reads_in_interval_pysam(cur_pybedtools_interval, interval.start,\n read_locations)\n\n peak_length = genomic_stop - genomic_start + 1\n\n logging.info(\"\"\"Peak %d (%d - %d) has %d\n reads\"\"\" % (peak_number, peak_start,\n (peak_stop + 1), number_reads_in_peak))\n\n # highest point in start stop\n genomic_center = interval.start + sectstart + peak_center\n\n # makes it thicker so we can see on the browser\n # error checking logic to keep bed files from breaking\n thick_start = max(genomic_center - 2, genomic_start)\n thick_stop = min(genomic_center + 2, genomic_stop)\n\n # super local logic\n area_start = max(0, (peak_center + sectstart) - windowsize)\n area_stop = 
min((peak_center + sectstart) + windowsize, len(wiggle))\n\n bed_format = [interval.chrom,interval.start + area_start,interval.start + area_stop,interval.name,interval.score,strand]\n bed_format = list(map(str, bed_format))\n cur_pybedtools_interval = pybedtools.create_interval_from_list(bed_format)\n\n number_reads_in_area = count_reads_in_interval_pysam(cur_pybedtools_interval, interval.start,\n read_locations)\n area_length = area_stop - area_start + 1\n\n peak_dict['clusters'].append(Peak(chrom=interval.chrom,\n genomic_start=genomic_start,\n genomic_stop=genomic_stop,\n gene_name=interval.attrs['gene_id'],\n strand=interval.strand,\n thick_start=thick_start,\n thick_stop=thick_stop,\n peak_number=peak_number,\n number_reads_in_peak=number_reads_in_peak,\n size=peak_length,\n p=0,\n effective_length=int(interval.attrs['effective_length']),\n peak_length=peak_length,\n area_reads=number_reads_in_area,\n area_size=area_length,\n nreads_in_gene=nreads_in_gene,\n # nreads_in_input=input_number_reads_in_peak,\n ))\n\n peak_number += 1\n peak_dict['sections'][sect]['nPeaks'] += 1\n\n peak_dict['Nclusters'] = peak_number\n if plotit:\n import sys\n plt.show()\n v = sys.stdin.read(1)\n ###################################################\n # print(\"returning gene_no:\", gene_no, \"peak_dict:\", peak_dict)\n ####################################################\n\n return peak_dict", "def __thresholdInput(self,samples):\n absSamples = np.abs(samples) # 1 ms\n thresh = self.peakThresholdScale*np.mean(absSamples) # 0.2 ms\n i = np.where(absSamples>thresh)[0] # 1e-5 s\n samples[i] = thresh * (samples[i]/absSamples[i]) # 8e-5 s\n # Do it again in case the spikes were really loud\n absSamples[i] = np.abs(samples[i])\n thresh = self.peakThresholdScale*np.mean(absSamples)\n i = np.where(absSamples>thresh)[0]\n self.clippedPeakIPure = i # All peaks that are clipped at first round are clipped again. 
Requires that the peaks in first round are not set to 0\n samples[i] = thresh * (samples[i]/absSamples[i])\n # Mark peaks close to each other\n if len(self.clippedPeakIPure)>0:\n # t = time.time()\n # Mark peaks close to each other as continuous\n diffPeaks = np.diff(self.clippedPeakIPure)\n gapsAll = np.where(diffPeaks>1)[0]\n self.peakMinGap = 100\n gaps = np.where(diffPeaks[gapsAll] < self.peakMinGap)[0] # find gaps smaller than 100\n gapsLen = diffPeaks[gapsAll[gaps]] # length of the gaps\n gapsIdx = gapsAll[gaps] # Index of all gaps\n\n\n # fill the gaps smaller than self.peakMinGap\n pp = np.zeros(self.Nfft,dtype=np.int8)\n pp[self.clippedPeakIPure] = 1\n for i in range(len(gapsLen)):\n pp[self.clippedPeakIPure[gapsIdx[i]]:self.clippedPeakIPure[gapsIdx[i]]+gapsLen[i]] = 1\n\n self.clippedPeakI = np.where(pp==1)[0]\n else:\n self.clippedPeakI = self.clippedPeakIPure.copy()\n if log.level == logging.DEBUG:\n log.debug('clipped peaks ' + str(len(self.clippedPeakIPure)))", "def fitPeaks(self, new_peaks, peaks_type):\n # Check if we need to do anything.\n if (new_peaks[\"x\"].size > 0):\n\n # Update status of current peaks (if any) that are near\n # to the new peaks that are being added.\n #\n if (self.mfitter.getNFit() > 0):\n c_x = self.mfitter.getPeakProperty(\"x\")\n c_y = self.mfitter.getPeakProperty(\"y\")\n status = self.mfitter.getPeakProperty(\"status\")\n new_status = iaUtilsC.runningIfHasNeighbors(status,\n c_x,\n c_y,\n new_peaks[\"x\"],\n new_peaks[\"y\"],\n self.neighborhood)\n self.mfitter.setPeakStatus(new_status)\n \n # Add new peaks.\n self.mfitter.newPeaks(new_peaks, peaks_type)\n\n # Iterate fitting and remove any error peaks.\n #\n # The assumption is that because error peaks are longer in the\n # fit image we don't have to do additional iterations on the\n # remaining peaks after the error peaks have been removed.\n #\n if not self.no_fitting:\n self.mfitter.doFit()\n self.mfitter.removeErrorPeaks()\n\n # Remove peaks that are too close to each other and/or that\n # have a low significance score.\n #\n status = self.mfitter.getPeakProperty(\"status\")\n\n # Identify peaks that are to close based on the somewhat\n # arbitrary criteria of being within 1 sigma.\n #\n # markDimmerPeaks() will update the status array, in particular\n # it will mark the dimmer of two peaks that are too close as ERROR.\n #\n px = self.mfitter.getPeakProperty(\"x\")\n py = self.mfitter.getPeakProperty(\"y\")\n n_proximity = iaUtilsC.markDimmerPeaks(px,\n py,\n self.mfitter.getPeakProperty(\"height\"),\n status,\n self.sigma,\n self.neighborhood)\n\n # Identify peaks that have a low significance score.\n #\n # markLowSignificancePeaks() will update the status array, in particular\n # it will mark low significance peaks as ERROR.\n #\n n_significance = iaUtilsC.markLowSignificancePeaks(px,\n py,\n self.mfitter.getPeakProperty(\"significance\"),\n status,\n self.minimum_significance,\n self.neighborhood)\n\n # This does the actual peak removal. 
We update the peak status in\n # mfitter, then tell mfitter to remove all the ERROR peaks.\n #\n if ((n_proximity + n_significance) > 0):\n self.mfitter.setPeakStatus(status)\n self.mfitter.removeErrorPeaks()\n self.mfitter.incProximityCounter(n_proximity)\n self.mfitter.incSignificanceCounter(n_significance)\n\n # If we have unconverged peaks, iterate some more.\n if (self.mfitter.getUnconverged() > 0) and (not self.no_fitting):\n self.mfitter.doFit()\n self.mfitter.removeErrorPeaks()\n\n # Return the current fit image.\n return self.mfitter.getFitImage()", "def peaks(self, threshold, plotit):\n\n raise (\"Error abstract class, peaks not implemented\")", "def findpeaks(project_name, treatment_id, control_id, index_file_parameters, tool_parameters_dict, temp_dir, macs_cnv_region_identifiers, output_dir):\n treatment_bamfile=getcodetofilename(index_file_parameters,treatment_id)\n control_bamfile=getcodetofilename(index_file_parameters,control_id)\n \n cmd_dict=genPeakToolRunCommands(project_name,treatment_id,treatment_bamfile,control_bamfile, tool_parameters_dict, temp_dir )\n MACSpeakfile='%s/MACS/%s_peaks.bed'%(temp_dir,treatment_id)\n HMCanpeakfile='%s/HMCan/%s_regions.bed'%(temp_dir,treatment_id)\n \n if not os.path.exists(MACSpeakfile): \n flog.write('%s: Running %s\\n'%(time.asctime(),cmd_dict['MACS']))\n os.system(cmd_dict['MACS'])\n else:\n flog.write('%s: No need to run %s\\nMACS peaks already there\\n'%(time.asctime(),cmd_dict['MACS']))\n \n if not os.path.exists(HMCanpeakfile): \n flog.write('%s: Running %s\\n'%(time.asctime(),cmd_dict['HMCan'])) \n os.system(cmd_dict['HMCan'])\n else:\n flog.write('%s: No need to run %s\\nHMCan peaks already there'%(time.asctime(),cmd_dict['HMCan'])) \n \n min_size,min_coverage_gain_over_average,window_size=macs_cnv_region_identifiers\n \n MACSpeaklist=[]\n for lntxt in open(MACSpeakfile):\n ln=lntxt.rstrip('\\n').split('\\t')\n MACSpeaklist.append([ln[0],int(ln[1]),int(ln[2])]) \n flog.write('%s: Info: number of MACS peaks %d\\n'%(time.asctime(),len(MACSpeaklist)))\n missedoutregionslist=getmissedoutregions(MACSpeakfile,treatment_bamfile, min_size, min_coverage_gain_over_average,window_size)\n \n \n HMCanpeaklist=[]\n for lntxt in open(HMCanpeakfile):\n ln=lntxt.rstrip('\\n').split('\\t')\n HMCanpeaklist.append([ln[0],int(ln[1]),int(ln[2])])\n flog.write('%s: Info: number of HMCan peaks %d\\n'%(time.asctime(),len(HMCanpeaklist)))\n \n HMCanadditions=common.interval_join(HMCanpeaklist, missedoutregionslist,3)\n flog.write('%s: Info: number of HMCan added peaks %d\\n'%(time.asctime(),len(HMCanadditions)))\n \n all_peaklist=[]\n for peak in MACSpeaklist:\n all_peaklist.append(peak+['MACS'])\n for peak in HMCanadditions:\n all_peaklist.append(peak+['HMCan']) \n all_peaklist.sort()\n \n outcsv='%s/peaks/%s__%s__peaks.bed'%(output_dir,project_name,treatment_id)\n outjson='%s/peaks/%s__%s__peaks.json'%(output_dir,project_name,treatment_id)\n \n fout=open(outcsv,'w')\n jsondict={}\n \n for peak in all_peaklist:\n fout.write('%s\\t%d\\t%d\\t%s\\n'%tuple(peak))\n jsondict['%s:%d-%d'%tuple(peak[0:3])]={}\n jsondict['%s:%d-%d'%tuple(peak[0:3])]['called_by']=peak[3]\n \n fout.close()\n json.dump(jsondict, open(outjson,'w'),indent=4,sort_keys=True)", "def addPeakResonances(peaks):\n \n contribs = []\n for peak in peaks:\n for peakDim in peak.peakDims:\n if len(peakDim.peakDimContribs) < 1:\n contrib = assignResToDim(peakDim)\n contribs.append(contrib)\n \n resonances = [c.resonance for c in contribs]\n \n return resonances", "def 
test_find_peaks_withnoise(self):\n sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]\n num_points = 500\n test_data, act_locs = _gen_gaussians_even(sigmas, num_points)\n widths = np.arange(0.1, max(sigmas))\n noise_amp = 0.07\n np.random.seed(18181911)\n test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)\n found_locs = find_peaks_cwt(test_data, widths, min_length=15,\n gap_thresh=1, min_snr=noise_amp / 5)\n\n np.testing.assert_equal(len(found_locs), len(act_locs), 'Different number' +\n 'of peaks found than expected')\n diffs = np.abs(found_locs - act_locs)\n max_diffs = np.array(sigmas) / 5\n np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed' +\n 'by more than %s' % (max_diffs))", "def FindPeaks_graph(self):\n import string\n \n maxima = self['FP_LOC'].copy()\n maxima = num.where(maxima)\n maxima = (maxima[1],maxima[0])\n detectimg = self['FP_DETECT'].copy()\n \n id = self._getGraphId()\n root = 'FindPeaks_%s' % (id,)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n\n doStamp(detectimg,pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n Painted.load()\n Painted.DrawCross(maxima,length=7,color='green')\n \n strpeaks = string.strip('%i'% (self['M_NPEAKS']))\n text = 'NP=%s' % strpeaks \n \n # Painted.Graffiti(text,commtextpos)\n \n Painted.save(jpgname)\n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n self['figures']['FindPeaks'] = epsname\n self['figcomms']['FindPeaks'] = text", "def test_find_peaks_nopeak(self):\n noise_amp = 1.0\n num_points = 100\n np.random.seed(181819141)\n test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp)\n widths = np.arange(10, 50)\n found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)\n np.testing.assert_equal(len(found_locs), 0)", "def _ecg_findpeaks_promac(\n signal,\n sampling_rate=1000,\n show=False,\n promac_methods=[\n \"neurokit\",\n \"gamboa\",\n \"ssf\",\n \"zong\",\n \"engzee\",\n \"elgendi\",\n \"kalidas\",\n \"martinez\",\n \"rodrigues\",\n ],\n threshold=0.33,\n gaussian_sd=100,\n **kwargs,\n):\n x = np.zeros(len(signal))\n promac_methods = [method.lower() for method in promac_methods] # remove capitalised letters\n error_list = [] # Stores the failed methods\n\n for method in promac_methods:\n try:\n func = _ecg_findpeaks_findmethod(method)\n x = _ecg_findpeaks_promac_addconvolve(\n signal, sampling_rate, x, func, gaussian_sd=gaussian_sd, **kwargs\n )\n except ValueError:\n error_list.append(f\"Method '{method}' is not valid.\")\n except Exception as error:\n error_list.append(f\"{method} error: {error}\")\n\n # Rescale\n x = x / np.max(x)\n convoluted = x.copy()\n\n # Remove below threshold\n x[x < threshold] = 0\n # Find peaks\n peaks = signal_findpeaks(x, height_min=threshold)[\"Peaks\"]\n\n if show is True:\n signal_plot(pd.DataFrame({\"ECG\": signal, \"Convoluted\": convoluted}), standardize=True)\n [\n plt.axvline(x=peak, color=\"red\", linestyle=\"--\") for peak in peaks\n ] # pylint: disable=W0106\n\n # I am not sure if mandatory print the best option\n if error_list: # empty?\n print(error_list)\n\n return peaks", "def peak_enhance(signal, peaks, window: int = 0.08, fs: int = processing.FS):\n window = int(fs * window)\n if not window % 2 == 0:\n window += 1\n enhanced_peaks = np.zeros(len(peaks), dtype=int)\n signal = np.abs(signal)\n for i, peak in enumerate(peaks):\n if peak < window // 2:\n enhanced_peaks[i] = np.argmax(signal[0:peak + window // 2 + 1])\n elif peak + window 
// 2 + 1 > signal.shape[0]:\n enhanced_peaks[i] = np.argmax(signal[peak - window // 2:]) + peak - window // 2\n else:\n # Because of one-side lag -> window: p - w * 0.25% : p + w * 75%\n enhanced_peaks[i] = np.argmax(signal[peak - window // 4:peak + 3 * window // 4]) + peak - window // 4\n\n return enhanced_peaks", "def propagatePeakAssignments(peaks, refPeak=None, cleanNonRef=False,\n tolerances=None, warnUnalias=False):\n\n if refPeak:\n peaksIn = [refPeak, ]\n else:\n peaksIn = peaks\n \n if not tolerances:\n tolerances = []\n \n dimResonances = {}\n resonanceDims = {}\n for peak in peaksIn:\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDim = peakDim.dataDim\n expDimRef = dataDim.expDim.findFirstExpDimRef()\n \n if not expDimRef:\n continue\n \n key = expDimRef.isotopeCodes\n if dimResonances.get(key) is None:\n dimResonances[key] = []\n \n if peakDim.peakDimContribs:\n # could be in different spectra\n \n for contrib in peakDim.peakDimContribs:\n resonance = contrib.resonance\n \n dimResonances[key].append(resonance)\n if resonanceDims.get(resonance) is None:\n resonanceDims[resonance] = []\n \n if i not in resonanceDims[resonance]:\n resonanceDims[resonance].append(i)\n\n if refPeak and cleanNonRef:\n for peak in peaks:\n if peak is refPeak:\n continue\n \n for peakDim in peak.peakDims:\n clearPeakDim(peakDim)\n\n shiftRanges = {}\n for peak in peaks:\n if peak is refPeak:\n continue\n\n for i, peakDim in enumerate(peak.sortedPeakDims()):\n dataDimRef = peakDim.dataDimRef\n \n if dataDimRef:\n dataDim = dataDimRef.dataDim\n \n if dataDim not in shiftRanges:\n shiftMin, shiftMax = getDataDimFullShiftRange(dataDim)\n shiftRanges[dataDim] = (shiftMin, shiftMax)\n else:\n shiftMin, shiftMax = shiftRanges[dataDim]\n \n if i < len(tolerances):\n tolerance = tolerances[i]\n else:\n tolerance = getAnalysisDataDim(dataDim).assignTolerance\n \n key = dataDimRef.expDimRef.isotopeCodes\n pValue = peakDim.realValue\n\n extantResonances = []\n for contrib in peakDim.peakDimContribs:\n if contrib.peakDimComponent:\n continue\n extantResonances.append(contrib.resonance)\n \n assignResonances = []\n closeResonances = []\n for resonance in dimResonances[key]:\n if resonance not in extantResonances:\n shiftList = peak.peakList.dataSource.experiment.shiftList\n shift = resonance.findFirstShift(parentList=shiftList)\n \n if shift:\n # Could result in unaliasing the peak\n\n sValue = shift.value\n # Only assign if within known bounds\n if not (shiftMin < sValue < shiftMax): # Inside, not on edge\n continue\n \n assignResonances.append(resonance)\n \n if abs(sValue-pValue) <= tolerance:\n closeResonances.append(resonance)\n \n elif i in resonanceDims.get(resonance, []):\n # No shift so only propagate across the same dim numbers\n assignResonances.append(resonance)\n \n # Can't have both aliased and unaliased resonances: go for the\n # unaliased/close ppm ones in preference \n \n if closeResonances:\n for resonance in closeResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=False)\n \n elif not extantResonances:\n # Don't risk aliasing changes if already assigned\n # warn for aliasing changes\n for resonance in assignResonances:\n assignResToDim(peakDim, resonance, tolerance=tolerance,\n doWarning=warnUnalias)", "def get_peaks(\n self,\n new_sample: np.ndarray,\n threshold: float,\n min_peaks_interval=None,\n ) -> tuple:\n tic = time.time()\n nb_peaks = []\n if len(new_sample.shape) == 1:\n new_sample = np.expand_dims(new_sample, 0)\n sample_proc = 
np.copy(new_sample)\n if not self._is_one:\n self._is_one = [False] * new_sample.shape[0]\n\n for i in range(new_sample.shape[0]):\n for j in range(new_sample.shape[1]):\n if new_sample[i, j] < threshold:\n sample_proc[i, j] = 0\n self._is_one[i] = False\n elif new_sample[i, j] >= threshold:\n if not self._is_one[i]:\n sample_proc[i, j] = 1\n self._is_one[i] = True\n else:\n sample_proc[i, j] = 0\n\n if len(self.raw_data_buffer) == 0:\n self.raw_data_buffer = new_sample\n self.processed_data_buffer = sample_proc\n nb_peaks = None\n\n elif self.raw_data_buffer.shape[1] < self.processing_window:\n self.raw_data_buffer = np.append(self.raw_data_buffer, new_sample, axis=1)\n self.processed_data_buffer = np.append(self.processed_data_buffer, sample_proc, axis=1)\n nb_peaks = None\n\n else:\n self.raw_data_buffer = np.append(self.raw_data_buffer[:, new_sample.shape[1] :], new_sample, axis=1)\n self.processed_data_buffer = np.append(\n self.processed_data_buffer[:, new_sample.shape[1] :], sample_proc, axis=1\n )\n\n if min_peaks_interval:\n self.processed_data_buffer = RealTimeProcessing._check_and_adjust_interval(\n self.processed_data_buffer, min_peaks_interval\n )\n if isinstance(nb_peaks, list):\n for i in range(self.processed_data_buffer.shape[0]):\n nb_peaks.append(np.count_nonzero(self.processed_data_buffer[i, :]))\n self.process_time.append(time.time() - tic)\n return nb_peaks, self.processed_data_buffer", "def clean(data, N_peaks, f_interval=None, f_resolution=None, sampling=None, w_column=None):\n print('-------------------------- clean')\n \n # Avoid overwritting data:\n data0 = data.copy()\n\n # Standard frequency resolution:\n T = data0[-1,0]-data[0,0]\n if f_resolution==None:\n f_resolution = 1/T\n \n # Avoid 0 as input as not peaks are found:\n if f_interval[0]==0:\n f_interval = [f_resolution, f_interval[1]]\n \n # Constants:\n SAMPLING = 1\n f_RES = 0.1*f_resolution # Standard frequency resolution\n picon = 2*np.pi*data0[:,0] # Optimization constant\n f_peaks = np.zeros(N_peaks)\n A_peaks = np.zeros(N_peaks)\n \n for i in range(N_peaks):\n k = i+1\n print '%s. Peak' %k\n\n # 1. Iteration - start finding largest peak:\n Pf_power, _, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1]) # Smaller f_int (Tuple instead of array for optimization)\n\n # Testing that the frequency resolution > sigma_f to continue:\n A_peak = P[j]\n A_av = np.mean(np.sqrt(P))\n sigma_a = 0.8*A_av\n sigma_phi = sigma_a/A_peak\n sigma_f = np.sqrt(3)*sigma_phi/(np.pi*T)\n if f_RES>sigma_f: \n \n # 2. Iteration: uses now f_res and so on..\n Pf_power, _, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1])\n \n # 3. 
Iteration: last\n Pf_power, P_comp, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n fpicon = picon*f[j] # Optimization constant\n alpha = P_comp[:,0]; beta = P_comp[:,1]\n alpha0 = alpha[j]*np.sin(fpicon)\n beta0 = beta[j]* np.cos(fpicon)\n data0[:,1] = data0[:,1] - alpha0 - beta0\n f_peaks[i] = f[j]\n A_peaks[i] = np.sqrt(P[j])\n\n # Output:\n St_clean = data0\n print f_peaks, A_peaks\n return St_clean, f_peaks, A_peaks", "def gen_peaks_both():\n\n low_peaks = gen_peak_def(1)\n high_peaks = gen_peak_def_high()\n\n while True:\n\n peaks = [next(low_peaks), next(high_peaks)]\n\n yield peaks", "def extendPeaks(srcPeaks, propThresh=30):\n #octave propagation of the reference peaks\n tempPeaks = [i+1200 for i in srcPeaks[\"peaks\"][0]]\n tempPeaks.extend([i-1200 for i in srcPeaks[\"peaks\"][0]])\n extendedPeaks = []\n extendedPeaks.extend(srcPeaks[\"peaks\"][0])\n for i in tempPeaks:\n #if a peak exists around, don't add this new one.\n nearestInd = findNearestIndex(srcPeaks[\"peaks\"][0], i)\n diff = abs(srcPeaks[\"peaks\"][0][nearestInd] - i)\n diff = np.mod(diff, 1200)\n if diff > propThresh:\n extendedPeaks.append(i)\n return extendedPeaks", "def peaks(n, binCenters, method=\"JI\", window=100, peakAmpThresh=0.00005, valleyThresh=0.00003):\n data = zip(binCenters, n)\n binCenters = np.array(binCenters)\n firstCenter = (min(binCenters)+1.5*window)/window*window\n lastCenter = (max(binCenters)-window)/window*window\n if firstCenter < -1200: firstCenter = -1200\n if lastCenter > 3600: lastCenter = 3600\n\n\n if method == \"slope\" or method == \"hybrid\":\n peaks = {}\n peakInfo = peaksBySlope(n, binCenters, lookahead=20, delta=valleyThresh, averageHist=True)\n\n #find correspondences between peaks and valleys, and set valleys are left and right Indices\n #see the other method(s) for clarity!\n\n peakData = peakInfo[\"peaks\"]\n valleyData = peakInfo[\"valleys\"]\n\n #print len(peakData[0]), len(peakData[1])\n for i in xrange(len(peakData[0])):\n nearestIndex = findNearestIndex(valleyData[0], peakData[0][i])\n if valleyData[0][nearestIndex] < peakData[0][i]:\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][nearestIndex+1:]) == 0):\n rightIndex = findNearestIndex(binCenters, peakData[0][i]+window/2.0)\n else:\n offset = nearestIndex+1\n nearestIndex = offset+findNearestIndex(valleyData[0][offset:], peakData[0][i])\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n else:\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][:nearestIndex]) == 0):\n leftIndex = findNearestIndex(binCenters, peakData[0][i]-window/2.0)\n else:\n nearestIndex = findNearestIndex(valleyData[0][:nearestIndex], peakData[0][i])\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n\n pos = findNearestIndex(binCenters, peakData[0][i])\n #print binCenters[pos], peakData[1][i], binCenters[leftIndex], binCenters[rightIndex]\n peaks[pos] = [peakData[1][i], leftIndex, rightIndex]\n\n if method == \"hybrid\": slopePeaks = peaks\n \n if method == \"JI\" or method == \"ET\" or method == \"hybrid\":\n peaks = {}\n #Obtain max value per interval\n if method == \"JI\" or method == \"hybrid\":\n firstCenter = nearestJI(firstCenter)\n lastCenter = nearestJI(lastCenter)\n\n interval = firstCenter\n prevInterval = firstCenter-window\n #NOTE: All *intervals are in cents. 
*indices are of binCenters/n\n while interval < lastCenter:\n if method == \"ET\":\n leftIndex = findNearestIndex(binCenters, interval-window/2)\n rightIndex = findNearestIndex(binCenters, interval+window/2)\n interval += window\n elif method == \"JI\" or method == \"hybrid\":\n leftIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n prevInterval = interval\n interval = nextJI(interval)\n rightIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n peakPos = np.argmax(n[leftIndex:rightIndex])\n peakAmp = n[leftIndex+peakPos]\n peaks[leftIndex+peakPos] = [peakAmp, leftIndex, rightIndex]\n \n #print binCenters[leftIndex], binCenters[rightIndex], binCenters[leftIndex+peakPos], peakAmp\n #NOTE: All the indices (left/rightIndex, peakPos) are to be changed to represent respective cent \n #value corresponding to the bin. Right now, they are indices of respective binCenters in the array.\n \n if method == \"hybrid\":\n #Mix peaks from slope method and JI method.\n p1 = slopePeaks.keys()\n p2 = peaks.keys()\n allPeaks = {} #overwriting peaks dict\n for p in p1:\n nearIndex = findNearestIndex(p2, p)\n if abs(p-p2[nearIndex]) < window/2.0: p2.pop(nearIndex)\n \n for p in p1: allPeaks[p] = slopePeaks[p]\n for p in p2: allPeaks[p] = peaks[p]\n peaks = allPeaks\n\n #Filter the peaks and retain eligible peaks, also get their valley points.\n\n # ----> peakAmpThresh <---- : remove the peaks which are below that\n\n for pos in peaks.keys():\n #pos is an index in binCenters/n. DOES NOT refer to a cent value.\n if peaks[pos][0] < peakAmpThresh:\n #print \"peakAmp: \", binCenters[pos]\n peaks.pop(pos)\n\n #Check if either left or right valley is deeper than ----> valleyThresh <----.\n valleys = {}\n for pos in peaks.keys():\n leftLobe = n[peaks[pos][1]:pos]\n rightLobe = n[pos:peaks[pos][2]]\n #Sanity check: Is it a genuine peak? 
Size of distributions on either side of the peak should be comparable.\n if len(leftLobe) == 0 or len(rightLobe) == 0:\n continue\n if 1.0*len(leftLobe)/len(rightLobe) < 0.15 or 1.0*len(leftLobe)/len(rightLobe) > 6.67:\n #print \"size: \", binCenters[pos]\n #peaks.pop(pos)\n continue\n\n leftValleyPos = np.argmin(leftLobe)\n rightValleyPos = np.argmin(rightLobe)\n if (abs(leftLobe[leftValleyPos]-n[pos]) < valleyThresh and abs(rightLobe[rightValleyPos]-n[pos]) < valleyThresh):\n #print \"valley: \", binCenters[pos]\n peaks.pop(pos)\n else:\n valleys[peaks[pos][1]+leftValleyPos] = leftLobe[leftValleyPos]\n valleys[pos+rightValleyPos] = rightLobe[rightValleyPos]\n \n if len(peaks) > 0:\n temp1 = np.array(peaks.values())\n temp1 = temp1[:, 0]\n\n return {'peaks':[binCenters[peaks.keys()], temp1], 'valleys':[binCenters[valleys.keys()], valleys.values()]}\n else:\n return {'peaks':[[], []], 'valleys':[[], []]}", "def reduce_peaks(self,peaks,odf_min):\n if len(peaks)==0:\n return -1 \n if odf_min<self.iso_thr*peaks[0]:\n #remove small peaks\n ismallp=np.where(peaks<self.peak_thr*peaks[0])\n if len(ismallp[0])>0:\n l=ismallp[0][0]\n else:\n l=len(peaks)\n else:\n return -1\n return l", "def unique_peaks(self):\n return(None)", "def __init__ ( self ) :\n\n self.m_src = self.configSrc ('source', ':Cspad.')\n self.m_key_in = self.configStr ('key_in', 'peaks_nda')\n self.m_print_bits = self.configInt ('print_bits', 1)\n\n self.counter = 0\n self.count_msg = 0\n\n if self.m_print_bits & 1 : self.print_input_pars()\n\n self.list_of_dtypes = [\n psana.ndarray_float32_2,\n psana.ndarray_float64_2\n ]", "def peaks(self, **kwargs):\n peaks, properties = self.peak_indices(**kwargs)\n return self.xs[peaks], properties", "def init(self, target):\n # Finds positive and negative peaks\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n positive_peak_ixs, peak_props = find_peaks(np.clip(target, 0, None), width=0, prominence=0, height=0)\n negative_peak_ixs, dip_props = find_peaks(np.clip(-target, 0, None), width=0, prominence=0, height=0)\n\n # Indexes for minimum and maximum center frequency\n min_fc_ix = np.argmin(np.abs(self.f - self.min_fc))\n max_fc_ix = np.argmin(np.abs(self.f - self.max_fc))\n\n # All peak indexes together\n peak_ixs = np.concatenate([positive_peak_ixs, negative_peak_ixs])\n # Exclude peak indexes which are outside of minimum and maximum center frequency\n mask = np.logical_and(peak_ixs >= min_fc_ix, peak_ixs <= max_fc_ix)\n\n if (len(positive_peak_ixs) == 0 and len(negative_peak_ixs) == 0) or np.sum(mask) == 0:\n # No peaks found\n params = []\n if self.optimize_fc:\n self.fc = self.f[(min_fc_ix + max_fc_ix) // 2]\n params.append(np.log10(self.fc))\n if self.optimize_q:\n self.q = np.sqrt(2)\n params.append(self.q)\n if self.optimize_gain:\n self.gain = 0.0\n params.append(self.gain)\n return params\n\n peak_ixs = peak_ixs[mask]\n # Properties of included peaks together\n widths = np.concatenate([peak_props['widths'], dip_props['widths']])[mask]\n heights = np.concatenate([peak_props['peak_heights'], dip_props['peak_heights']])[mask]\n # Find the biggest peak, by height AND width\n sizes = widths * heights # Size of each peak for ranking\n ixs_ix = np.argmax(sizes) # Index to indexes array which point to the biggest peak\n ix = peak_ixs[ixs_ix] # Index to f and target\n\n params = []\n if self.optimize_fc:\n self.fc = np.clip(self.f[ix], self.min_fc, self.max_fc)\n params.append(np.log10(self.fc)) # Convert to logarithmic scale for optimizer\n if self.optimize_q:\n 
width = widths[ixs_ix]\n # Find bandwidth which matches the peak width\n f_step = np.log2(self.f[1] / self.f[0])\n bw = np.log2((2 ** f_step) ** width)\n # Calculate quality with bandwidth\n self.q = np.sqrt(2 ** bw) / (2 ** bw - 1)\n self.q = np.clip(self.q, self.min_q, self.max_q)\n params.append(self.q)\n if self.optimize_gain:\n # Target value at center frequency\n self.gain = heights[ixs_ix] if target[ix] > 0 else -heights[ixs_ix]\n self.gain = np.clip(self.gain, self.min_gain, self.max_gain)\n params.append(self.gain)\n return params", "def peaks(x, *par, noiseLevel=0.):\n x = np.array(x)\n n = len(x)\n noise = np.random.rand(n)\n noise = noise * noiseLevel\n noise += noiseLevel*noiseLevel\n #print(f'peaks.par: {par}')\n peaks = func_sum_of_peaks(x, *par)\n return peaks + noise", "def test_find_peaks_exact(self):\n sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]\n num_points = 500\n test_data, act_locs = _gen_gaussians_even(sigmas, num_points)\n widths = np.arange(0.1, max(sigmas))\n found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,\n min_length=None)\n np.testing.assert_array_equal(found_locs, act_locs,\n \"Found maximum locations did not equal those expected\")", "def find_peaks(self, t_measure):\n self._check_time(t_measure)\n #widths = np.arange(2,7) # range of widths to check by find_peaks_cwt\n #peak_nodes = find_peaks_cwt(self.get_velocities(t_measure), widths, min_snr=2.0,noise_perc=30.0)\n peak_beads = peakutils.peak.indexes(self.get_velocities(t_measure), thres=0.75, min_dist=7)\n return peak_beads", "def event( self, evt, env ) :\n\n # Should work for both pyana and pytonic-psana (as compatability method):\n\n #print '\\nex_peaks_nda: evt.keys():', evt.keys()\n\n self.arr = None\n\n if env.fwkName() == \"psana\":\n for dtype in self.list_of_dtypes :\n self.arr = evt.get(dtype, self.m_src, self.m_key_in)\n if self.arr is not None:\n break\n \n else : \n msg = __name__ + ': WARNING!!! THIS MODULE DOES NOT HAVE IMPLEMENTATION FOR PYANA'\n print(msg)\n return\n\n self.counter +=1 \n\n if self.arr is None :\n self.count_msg +=1\n if self.count_msg <20 :\n #if self.m_print_bits & 32 :\n msg = __name__ + ': WARNING! peaks array object %s is not found in evt' % self.m_key_in\n #logging.info( msg )\n print(msg)\n return\n\n\n self.print_nda()", "def draw_peaks(self, x, peaks, spectral_centroid):\n \n y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5\n y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5\n \n line_color = self.color_lookup[int(spectral_centroid*255.0)]\n \n if self.previous_y != None:\n self.draw.line([self.previous_x, self.previous_y, x, y1, x, y2], line_color)\n else:\n self.draw.line([x, y1, x, y2], line_color)\n \n self.previous_x, self.previous_y = x, y2\n \n self.draw_anti_aliased_pixels(x, y1, y2, line_color)" ]
[ "0.64907897", "0.6027173", "0.57900417", "0.57882077", "0.5668051", "0.56633043", "0.5647525", "0.5629544", "0.562596", "0.5559616", "0.5544985", "0.5536392", "0.5512018", "0.5455464", "0.5427178", "0.54028904", "0.5350657", "0.5269623", "0.52637213", "0.525326", "0.52431154", "0.5239455", "0.5222689", "0.5211853", "0.51847005", "0.51796734", "0.51737833", "0.5143052", "0.51367617", "0.5121297" ]
0.70837945
0
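An editorial aside, not a dataset field: in each row, document_score is the retrieval score of the positive document and negative_scores are the scores of the 30 mined negatives, so document_rank looks like the number of negatives that outscore the positive (0 here, since 0.70837945 beats the best negative, 0.64907897). A minimal sketch of that reading, with the helper name rank_of_positive being my own invention rather than anything defined by the dataset:

def rank_of_positive(document_score, negative_scores):
    """Count how many negatives score at least as high as the positive."""
    return sum(float(s) >= document_score for s in negative_scores)

# First three negative scores copied from the row above, truncated for brevity.
assert rank_of_positive(0.70837945, ["0.64907897", "0.6027173", "0.57900417"]) == 0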
Test that addEventListener gets flagged appropriately.
def test_addEventListener(): err = _do_test_raw(""" x.addEventListener("click", function() {}, true); x.addEventListener("click", function() {}, true, false); """) assert not err.failed() assert not err.notices err = _do_test_raw(""" x.addEventListener("click", function() {}, true, true); """) assert not err.failed() assert err.notices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_subscribe_one_listener(self):\n def listener():\n pass\n EVENT_MANAGER.subscribe('test_listener', listener)\n self.assertIn(listener, EVENT_MANAGER._listeners['test_listener'])", "def test_mouseevents():\n\n err = _do_test_raw(\"window.addEventListener('mousemove', func);\")\n assert err.warnings", "def test_add_listener(self):\n self.wrapper.add_listener(self._assert_in_reactor_thread)\n event = object()\n internal_listener, = self.client.listeners\n internal_listener(event)\n self.assertIdentical(self.received_event, event)", "def test_unsupported_event(event_manager: EventManager, subscriber: Mock) -> None:\n event_manager.handler(GLOBAL_SCENE_CHANGE)\n subscriber.assert_not_called()", "def test_subscribe_any_listener(self):\n with self.assertRaises(AssertionError):\n EVENT_MANAGER.subscribe('test_any_listener')", "def test_dispatch(self):\r\n self.hit = False\r\n\r\n def handler(event):\r\n self.hit = True\r\n\r\n self.events.register(handler, TestEvent)\r\n \r\n self.events.dispatch(TestEvent())\r\n\r\n self.assertTrue(self.hit)", "def test_gameHandleEvents(self):\n # this kinda gonna be reiterating the other tests??\n # the tests of all the individual methods below make this test work\n pass", "def test_listen_for_registers_listener(self):\n bus = event_bus._event_bus\n\n def event_listener(_):\n pass\n\n with event_bus.listen_for(Event, event_listener):\n self.assertEqual(len(bus._registration_id_map), 1)", "def test_unsubscribe_one_listener(self):\n def listener():\n pass\n\n EVENT_MANAGER.subscribe('test_listener', listener)\n EVENT_MANAGER.unsubscribe(listener)\n self.assertNotIn(listener, EVENT_MANAGER._listeners['test_listener'])", "def test_listeners(self):\n logcat = logcat_thread.LogcatThread(\n log_stream=self.fake_log_stream,\n log_parsing_config=_log_parsing_config())\n\n # Set up a listener that modifies an arbitrary state.\n some_state = False\n\n def my_handler(event: Pattern[str], match: Match[str]):\n del event, match\n nonlocal some_state\n some_state = True\n\n # Create a desired event and hook up the listener.\n my_event = re.compile('Hello world')\n listener = logcat_thread.EventListener(my_event, my_handler)\n logcat.add_event_listener(listener)\n self.fake_log_stream.logs.send_value('Hi there!') # This should not match.\n self.assertFalse(some_state)\n self.fake_log_stream.logs.send_value(make_stdout('Hello world'))\n logcat.wait(event=my_event, timeout_sec=1.0)\n self.assertTrue(some_state)\n\n # Waiting for any events should also trigger the listener.\n some_state = False\n self.fake_log_stream.logs.send_value(make_stdout('Hello world'))\n logcat.wait(event=None, timeout_sec=1.0)\n self.assertTrue(some_state)\n\n # After removing the listener, it should not be called anymore.\n some_state = False\n logcat.remove_event_listener(listener)\n self.fake_log_stream.logs.send_value(make_stdout('Hello world'))\n logcat.wait(event=my_event, timeout_sec=1.0)\n self.assertFalse(some_state)", "def test_unsubscribe_any_listener(self):\n with self.assertRaises(AssertionError):\n EVENT_MANAGER.unsubscribe()", "def on_event(self, evt):\n\t\treturn False", "def test_listen_for_unregisters_listener(self):\n bus = event_bus._event_bus\n\n def event_listener(_):\n pass\n\n with event_bus.listen_for(Event, event_listener):\n pass\n\n self.assertEqual(len(bus._registration_id_map), 0)", "def test_subscribe_many_listeners(self):\n def listener():\n pass\n\n def listener1():\n pass\n\n def listener2():\n pass\n\n EVENT_MANAGER.subscribe('test_listeners', listener, 
listener1, listener2)\n\n self.assertIn(listener, EVENT_MANAGER._listeners['test_listeners'])\n self.assertIn(listener1, EVENT_MANAGER._listeners['test_listeners'])\n self.assertIn(listener2, EVENT_MANAGER._listeners['test_listeners'])", "def check_event_status(self):\n pass", "def handle(self, event):\n try:\n for event_listeners in self.listeners[event.type]:\n if event_listeners:\n for listener in event_listeners:\n if listener(event) is False:\n return False\n except KeyError:\n logger.insane('No listeners defined for event \"%s\"', hr_event_type(event.type))\n pass\n\n return True", "def test_check_event(self):\n field = Field()\n\n # out of borders\n self.assertEqual(field.check_event([-1, 1]), Field.Event.OBSTACLE_HIT)\n self.assertEqual(field.check_event([40, 1]), Field.Event.OBSTACLE_HIT)\n self.assertEqual(field.check_event([1, -1]), Field.Event.OBSTACLE_HIT)\n self.assertEqual(field.check_event([1, 40]), Field.Event.OBSTACLE_HIT)\n\n obj_pos = [1, 1]\n field.obstacles.append(obj_pos)\n self.assertEqual(field.check_event(obj_pos), Field.Event.OBSTACLE_HIT)\n field.obstacles.pop()\n\n field.poison.append(obj_pos)\n self.assertEqual(field.check_event(obj_pos), Field.Event.POISON)\n\n field.apple = obj_pos\n self.assertEqual(field.check_event(obj_pos), Field.Event.FOOD)", "def event(self, QEvent): # real signature unknown; restored from __doc__\n return False", "def event(self, QEvent): # real signature unknown; restored from __doc__\n return False", "def event(self, QEvent): # real signature unknown; restored from __doc__\n return False", "def event(self, QEvent): # real signature unknown; restored from __doc__\n return False", "def event(self, QEvent): # real signature unknown; restored from __doc__\n return False", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit() \n if event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)", "def check_events(self):\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.ai_game.quit()\r\n elif event.type == pg.KEYDOWN:\r\n self._check_keydown_events(event)\r\n elif event.type == pg.KEYUP:\r\n self._check_keyup_events(event)\r\n elif event.type == pg.MOUSEBUTTONDOWN:\r\n mouse_pos = pg.mouse.get_pos()\r\n self._check_button(mouse_pos)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)", "def _check_events(self):\n\t\t# Watch for keyboard and mouse events.\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tself._check_keydown_events(event)\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tself._check_keyup_events(event)\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tmouse_pos = pygame.mouse.get_pos()\n\t\t\t\tself._check_play_button(mouse_pos)", "async def test_event_handler_asserts_if_called_off_event_loop(self):\n event_loop = asyncio.get_running_loop()\n session = _create_test_session(event_loop)\n\n # Pretend we're calling this function from a thread with another event_loop.\n with patch(\n \"streamlit.runtime.app_session.asyncio.get_running_loop\",\n return_value=MagicMock(),\n ):\n with self.assertRaises(AssertionError):\n session._handle_scriptrunner_event_on_event_loop(\n sender=MagicMock(), event=ScriptRunnerEvent.SCRIPT_STARTED\n )", "def _check_events(self):\t\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == 
pygame.QUIT:\n\t\t\t\tsys.exit()\n\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tself._check_keydown_events(event)\n\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tself._check_keyup_events(event)\n\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tself._check_retry_button(pygame.mouse.get_pos())", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_event(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_event(event)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)" ]
[ "0.63600886", "0.63525814", "0.629856", "0.6232452", "0.60886395", "0.58877194", "0.5872726", "0.57898706", "0.5751316", "0.5695141", "0.56895745", "0.5664929", "0.56027156", "0.5536601", "0.54882324", "0.54466474", "0.5415127", "0.5400491", "0.5400491", "0.5400491", "0.5400491", "0.5400491", "0.5397462", "0.5392612", "0.53884614", "0.53781265", "0.5372238", "0.5361473", "0.53469825", "0.5312426" ]
0.85027266
0
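To make the pair above concrete: the test accepts any addEventListener call whose fourth argument (wantsUntrusted, in Gecko's API) is absent or false, and expects a notice only when it is literally true. The toy checker below is a regex sketch of my own, not the validator's actual implementation; it splits arguments on commas, so it only handles simple callsites like the ones in the test (an inline function expression would break it):

import re

CALL_RE = re.compile(r"addEventListener\s*\(([^)]*)\)")

def flags_add_event_listener(js_source):
    """Return True when a call passes a literal true as the 4th argument."""
    for args in CALL_RE.findall(js_source):
        parts = [a.strip() for a in args.split(",")]
        if len(parts) >= 4 and parts[3] == "true":
            return True
    return False

assert not flags_add_event_listener('x.addEventListener("click", f, true);')
assert not flags_add_event_listener('x.addEventListener("click", f, true, false);')
assert flags_add_event_listener('x.addEventListener("click", f, true, true);')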
Tests that createElement calls are filtered properly
def test_createElement(): assert not _do_test_raw(""" var x = "foo"; x.createElement(); x.createElement("foo"); """).failed() assert _do_test_raw(""" var x = "foo"; x.createElement("script"); """).failed() assert _do_test_raw(""" var x = "foo"; x.createElement(bar); """).failed()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_render_element2():\n elem = hr.Element()\n elem.append(\"this is some text\")\n elem.append(\"and this is some more text\")\n\n # This uses the render_results utility above\n file_contents = render_result(elem).strip()\n\n # making sure the content got in there.\n assert \"this is some text\" in file_contents\n assert \"and this is some more text\" in file_contents\n\n # make sure it's in the right order\n assert file_contents.index(\"this is\") < file_contents.index(\"and this\")\n\n # making sure the opening and closing tags are right.\n assert file_contents.startswith(\"<html>\")\n assert file_contents.endswith(\"</html>\")", "def test_render_element():\n elem = hr.Element(\"this is some text\")\n elem.append(\"and this is some more text\")\n\n # This uses the render_results utility above\n file_contents = render_result(elem).strip()\n\n # making sure the content got in there.\n assert \"this is some text\" in file_contents\n assert \"and this is some more text\" in file_contents\n\n # make sure it's in the right order\n assert file_contents.index(\"this is\") < file_contents.index(\"and this\")\n\n # making sure the opening and closing tags are right.\n assert file_contents.startswith(\"<html>\")\n assert file_contents.endswith(\"</html>\")", "def test_sub_element():\n page = hr.Html()\n page.append(\"some plain text.\")\n page.append(hr.P(\"A simple paragraph of text\"))\n page.append(\"Some more plain text.\")\n\n file_contents = render_result(page)\n print(file_contents) # so we can see it if the test fails\n\n # note: The previous tests should make sure that the tags are getting\n # properly rendered, so we don't need to test that here.\n assert \"some plain text\" in file_contents\n assert \"A simple paragraph of text\" in file_contents\n assert \"Some more plain text.\" in file_contents\n assert \"some plain text\" in file_contents\n # but make sure the embedded element's tags get rendered!\n assert \"<p>\" in file_contents\n assert \"</p>\" in file_contents", "def test_generate_single_element_get(self):\n pass", "def test_createElementNS():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS();\n x.createElementNS(\"foo\");\n x.createElementNS(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", \"script\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", bar);\n \"\"\").failed()\n\n # Test for https://github.com/mozilla/amo-validator/issues/368\n assert not _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = \"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'panelview')\n \"\"\").failed()\n\n # Creating a <script> element raises a warning of course.\n assert _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = \"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'script')\n \"\"\").failed()", "def test_filter_content(self):\n bs = self.get_soup(baseUrl + 'food/filter/')\n self.assertOneExists(bs, \"#page_filter\")", "def test_get_tag_fail(self):\n self.assertRaises(AttributeError, get_tag, None, \"h1\")\n self.assertRaises(\n AttributeError, get_tag, \"<h1>This is not a XML tag object</h1>\", \"h1\"\n )", "def test_container_no_assets(self):\n context = {}\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def 
createElement(tagName):\n print(\"Warning: createElement is deprecated in favor of createComponent\")\n return createComponent(tagName)", "def test_liechtensteinsettlements_get(self):\n pass", "def test_01_Tags(self):\n # print(PrettyFormatAny.form(self.m_xml, 'A1-01-A - Tags'))\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)", "def test_get_elements(self):\n\n xml_file = get_test_config('test_files/subscriptions.xml')\n self.assertEqual(xml_utilities.get_elements('foo', xml_file),\n [],\n 'Got invalid elements')\n subscriptions = xml_utilities.get_elements('subscription', xml_file)\n self.assertEqual(len(subscriptions),\n 2,\n 'Got wrong number of elements')\n tag_names = [x.tagName for x in subscriptions]\n self.assertEqual(['subscription', 'subscription'],\n tag_names,\n 'Got wrong elements')", "def test_init(self):\n for tag in self.tags:\n for value in self.values:\n this_tag = tag(value)\n self.assertEqual(value, this_tag.value)\n self.assertEqual([], this_tag.body)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def createElementFromState(state):", "def test_can_filter_tags(self):\n text = '<b><i>Example</i></b><!-- comment -->'\n filter = Bleach(tags=['b'], strip=True)\n filtered = filter.filter(text)\n expected = '<b>Example</b>'\n self.assertEquals(expected, filtered)", "def test_can_filter_attributes(self):\n text = '<b><a href=\"\" target=\"_blank\">Example</a></b>'\n filter = Bleach(\n tags=['a'],\n attributes=dict(a=['href', 'title'])\n )\n filtered = filter.filter(text)\n expected = '<a href=\"\">Example</a>'\n self.assertEquals(expected, filtered)", "def test_02_Tags(self):\n # print(PrettyFormatAny.form(self.m_xml, 'Xml'))\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n self.assertEqual(self.m_xml.computer_div.tag, 'ComputerDivision')\n self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')\n self.assertEqual(self.m_xml.lighting_sect.tag, 'LightingSection')\n self.assertEqual(self.m_xml.button_sect.tag, 'ButtonSection')\n self.assertEqual(self.m_xml.button.tag, 'Button')\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection')\n self.assertEqual(self.m_xml.controller.tag, 'Controller')\n self.assertEqual(self.m_xml.light_sect.tag, 'LightSection')\n self.assertEqual(self.m_xml.light.tag, 'Light')", "def test_dummy6(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n xp = xpb.bar | xp\n exp = '/bar'\n self.assertEqual(xp.tostring(), exp)", "def test_allowed_tags_unescaped(self):\n summary = ('<p>This is the <a href=\"#\">first paragraph</a><br><br/></p>'\n '<ul><li>List item</ul>'\n '<ol><li>List item</li></ol>'\n )\n story = create_story(title=\"Test Story\", summary=summary,\n call_to_action=\"\", status='draft')\n story.save()\n story = Story.objects.get(story_id=story.story_id)\n self.assertIn(\"<p>\", story.summary)\n self.assertIn(\"<a\", story.summary)\n self.assertIn(\"<br>\", story.summary)\n self.assertIn(\"<ul>\", story.summary)\n self.assertIn(\"<ol>\", story.summary)\n self.assertIn(\"<li>\", story.summary)\n self.assertIn(\"<p>\", story.summary)", "def test_render_html_no_layout(self):\n assets = Asset.objects.select_subclasses()\n # Create a section without specifying layout \n section = create_section(title=\"Test Section1\", story=self.story)\n # Associate assets with the section without specifying a container\n SectionAsset.objects.create(section=section, asset=assets[0])\n SectionAsset.objects.create(section=section, asset=assets[1])\n html = 
section.render_html()\n self.assertIn(assets[0].title, html)\n self.assertIn(assets[1].title, html)", "def testGetTagPathsAndObjectIDsWithoutData(self):\n self.assertEqual([], list(getTagPathsAndObjectIDs([])))", "def test_simple_attributes(self) -> None:\n\n class TestClass:\n def __init__(self, name, child=None) -> None:\n self.child = child\n self.bar = name\n\n t1 = TestClass('t1', TestClass('t1child'))\n t2 = TestClass('t2', TestClass('t2child'))\n t3 = TestClass('t3')\n\n nl = NodeList([t1, t2, t3])\n assert nl.bar == ['t1', 't2', 't3'], nl.bar\n assert nl[0:2].child.bar == ['t1child', 't2child'], \\\n nl[0:2].child.bar", "def test_is_pointer_tag(self):\r\n\r\n yes = [\"\"\"<html url_name=\"blah\"/>\"\"\",\r\n \"\"\"<html url_name=\"blah\"></html>\"\"\",\r\n \"\"\"<html url_name=\"blah\"> </html>\"\"\",\r\n \"\"\"<problem url_name=\"blah\"/>\"\"\",\r\n \"\"\"<course org=\"HogwartsX\" course=\"Mathemagics\" url_name=\"3.14159\"/>\"\"\"]\r\n\r\n no = [\"\"\"<html url_name=\"blah\" also=\"this\"/>\"\"\",\r\n \"\"\"<html url_name=\"blah\">some text</html>\"\"\",\r\n \"\"\"<problem url_name=\"blah\"><sub>tree</sub></problem>\"\"\",\r\n \"\"\"<course org=\"HogwartsX\" course=\"Mathemagics\" url_name=\"3.14159\">\r\n <chapter>3</chapter>\r\n </course>\r\n \"\"\"]\r\n\r\n for xml_str in yes:\r\n print(\"should be True for {0}\".format(xml_str))\r\n self.assertTrue(is_pointer_tag(etree.fromstring(xml_str)))\r\n\r\n for xml_str in no:\r\n print(\"should be False for {0}\".format(xml_str))\r\n self.assertFalse(is_pointer_tag(etree.fromstring(xml_str)))", "def test_html(self):\n tags = (('<input', 3),\n ('<span', 1),\n ('<button', 1))\n\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)", "def test_append():\n elem = hr.Element(\"this is some text\")\n elem.append(\"some more text\")", "def test_extended_init():\n elem = hr.Element(\"this is some text\", id=\"spam\", style=\"eggs\")\n\n assert get_opening_line(elem) == '<html id=\"spam\" style=\"eggs\">'", "def test_create(self):\n filter = Bleach()\n self.assertIsInstance(filter, Bleach)", "def test_iterChildNodesByTagName(self):\n _node = MagicMock()\n _node.childNodes = self._createNodeList([\n (1, 'abba'),\n (2, 'trara'),\n (4, 'child'),\n (3, 'child'),\n (4, 'child')\n ])\n _test_object = Node(_node)\n values = list(_test_object.iterChildNodesByTagName('child'))\n self.assertListEqual(\n values, [_node.childNodes[2], _node.childNodes[4]])", "def test_is_an_element_name():\n for el in roentgen.elements['name']:\n assert(is_an_element(el))" ]
[ "0.6099641", "0.5910537", "0.56316715", "0.5482185", "0.54426223", "0.54182744", "0.5415149", "0.5406062", "0.53996783", "0.53621614", "0.5355254", "0.5328316", "0.5324107", "0.5315543", "0.5293697", "0.5273912", "0.5255659", "0.5250865", "0.5217955", "0.51548404", "0.5153654", "0.51344913", "0.51099545", "0.51098967", "0.50883096", "0.5085064", "0.50816464", "0.5074231", "0.5066936", "0.50652236" ]
0.7561235
0
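Reading the row above the same way: createElement is flagged when the tag argument is the string literal "script", or when it is not a string literal at all (a bare variable cannot be verified statically). A self-contained sketch of that rule, with the function name createElement_is_risky assumed for illustration; the argument is the raw source text of the call's first argument:

def createElement_is_risky(tag_argument):
    """Flag literal 'script' tags and any non-literal tag expression."""
    is_literal = tag_argument.startswith(('"', "'"))
    if not is_literal:
        return True  # dynamic tag names cannot be checked statically
    return tag_argument.strip("\"'").lower() == "script"

assert not createElement_is_risky('"foo"')
assert createElement_is_risky('"script"')
assert createElement_is_risky('bar')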
Tests that createElementNS calls are filtered properly
def test_createElementNS(): assert not _do_test_raw(""" var x = "foo"; x.createElementNS(); x.createElementNS("foo"); x.createElementNS("foo", "bar"); """).failed() assert _do_test_raw(""" var x = "foo"; x.createElementNS("foo", "script"); """).failed() assert _do_test_raw(""" var x = "foo"; x.createElementNS("foo", bar); """).failed() # Test for https://github.com/mozilla/amo-validator/issues/368 assert not _do_test_raw(""" var x = "foo", nsXUL = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"; x.createElementNS(nsXUL, 'panelview') """).failed() # Creating a <script> element raises a warning of course. assert _do_test_raw(""" var x = "foo", nsXUL = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"; x.createElementNS(nsXUL, 'script') """).failed()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_createElement():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElement();\n x.createElement(\"foo\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElement(\"script\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElement(bar);\n \"\"\").failed()", "def test_create_image_stream_tag_for_all_namespaces(self):\n pass", "def testXmlns(self):\n def setXML():\n self.node.xmlns = 'banana'\n\n self.assertRaises(\n AttributeError,\n setXML\n )\n\n self.assertEqual(\n \"urn:ASC:CDL:v1.01\",\n self.node.xmlns\n )", "def test_create_namespaced_image_stream_tag(self):\n pass", "def test_create_net_namespace(self):\n pass", "def test_namespaceFound(self):\n xp = XPathQuery(\"/foo[@xmlns='testns']/bar\")\n self.assertEqual(xp.matches(self.e), 1)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def test_namespaceNotFound(self):\n xp = XPathQuery(\"/foo[@xmlns='badns']/bar2\")\n self.assertEqual(xp.matches(self.e), 0)", "def test_create_image_stream_for_all_namespaces(self):\n pass", "def addElement(node, name, content=None, attributes={}):\n if node.nodeType == node.DOCUMENT_NODE:\n doc = node\n else:\n doc = node.ownerDocument\n\n newElement = doc.createElementNS(None, name)\n if content:\n newElement.appendChild(doc.createTextNode(content))\n for attrName, attrValue in attributes.items():\n newElement.setAttributeNS(None, attrName, attrValue)\n node.appendChild(newElement)\n return newElement", "def test_dummy6(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n xp = xpb.bar | xp\n exp = '/bar'\n self.assertEqual(xp.tostring(), exp)", "def test_create_namespaced_processed_template(self):\n pass", "def test_create_template_for_all_namespaces(self):\n pass", "def test_node_name(self):\n xmlns = {\n \"a\": \"_a\",\n \"g\": \"_g\"\n }\n self.assertEqual(\n utils._node_name(\"a\", \"g\", xmlns),\n (False, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"a@a\", \"g\", xmlns),\n (False, \"a\", \"{_a}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@@a\", \"g\", xmlns),\n (True, \"g\", \"{_g}a\")\n )\n self.assertEqual(\n utils._node_name(\"_@a@a\", \"g\", xmlns),\n (True, \"a\", \"{_a}a\")\n )\n # something not equal to _\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"1@a@a\", \"g\", xmlns),\n # too many @\n with self.assertRaises(cfy_exc.NonRecoverableError):\n utils._node_name(\"@@a@a\", \"g\", xmlns),", "def makeelement(self, _tag, attrib=None, nsmap=None, **_extra): # real signature unknown; restored from __doc__\n pass", "def test_xml_to_dict_net_namespace(self):\n xml = \"\"\"\n <a\n xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"\n >\n <b xmlns=\"something\">b</b>\n <!-- Comment, ignore it -->\n </a>\n \"\"\"\n xmlns = {\n \"_\": utils.NETCONF_NAMESPACE\n }\n result = utils.generate_dict_node(etree.XML(xml), xmlns)\n # check dict\n self.assertEqual(\n {'a': {'_something@b': 'b'}},\n result\n )\n # check xmlns\n self.assertEqual(\n {\n '_': utils.NETCONF_NAMESPACE,\n '_something': 'something'\n }, xmlns\n )", "def test_create_namespaced_image_stream(self):\n pass", "def test_replace_namespaced_image_stream_tag(self):\n pass", "def test_get_elements(self):\n\n xml_file = get_test_config('test_files/subscriptions.xml')\n self.assertEqual(xml_utilities.get_elements('foo', xml_file),\n [],\n 'Got invalid elements')\n subscriptions = xml_utilities.get_elements('subscription', xml_file)\n 
self.assertEqual(len(subscriptions),\n 2,\n 'Got wrong number of elements')\n tag_names = [x.tagName for x in subscriptions]\n self.assertEqual(['subscription', 'subscription'],\n tag_names,\n 'Got wrong elements')", "def test_namespace_resource_creation_rpc(self, ns_resource_factory):\n # Create the namespace resource and verify health\n ns_resource_factory()", "def test_list_image_stream_tag_for_all_namespaces(self):\n pass", "def test_patch_namespaced_image_stream_tag(self):\n pass", "def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, 'PyHouse')\n self.assertEqual(self.m_xml.button_sect.tag, 'ButtonSection')\n self.assertEqual(self.m_xml.button.tag, 'Button')", "def test_ooo_ns(self):\n calcdoc = OpenDocumentSpreadsheet()\n table = odf.table.Table(name=\"Costs\")\n forms = odf.office.Forms()\n form = odf.form.Form(\n controlimplementation=\"ooo:com.sun.star.form.component.Form\")\n lb = odf.form.Listbox(\n controlimplementation=\"ooo:com.sun.star.form.component.ListBox\", dropdown=\"true\", id=\"control1\")\n form.addElement(lb)\n forms.addElement(form)\n table.addElement(forms)\n\n # One empty line\n tr = odf.table.TableRow()\n table.addElement(tr)\n\n tr = odf.table.TableRow()\n # One empty cell\n cell = odf.table.TableCell()\n tr.addElement(cell)\n\n cell = odf.table.TableCell()\n\n draw = odf.draw.Control(\n control=\"control1\", height=\"0.1126in\", width=\"0.798in\",\n x=\"0.0303in\", y=\"0.0205in\", endcelladdress=\"Costs.B2\",\n endx=\"0.8283in\", endy=\"0.1331in\")\n\n cell.addElement(draw)\n tr.addElement(cell)\n table.addElement(tr)\n\n calcdoc.spreadsheet.addElement(table)\n result = calcdoc.contentxml() # contentxml() is supposed to yeld a bytes\n self.assertNotEqual(-1, result.find(b'''xmlns:ooo=\"http://openoffice.org/2004/office\"'''))", "def test_create_namespaced_template(self):\n pass", "def test_create_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'add_new_child', return_value='abc')\n\n result_xml = str(root.create_node_with_children(\n 'options', test1=zapi_fakes.FAKE_XML_STR,\n test2=zapi_fakes.FAKE_XML_STR))\n\n # No ordering is guaranteed for elements in this XML.\n self.assertTrue(result_xml.startswith(\"<options>\"), result_xml)\n self.assertIn(\"<test1>abc</test1>\", result_xml)\n self.assertIn(\"<test2>abc</test2>\", result_xml)\n self.assertTrue(result_xml.rstrip().endswith(\"</options>\"), result_xml)", "def test_01_Tags(self):\n # print(PrettyFormatAny.form(self.m_xml, 'A1-01-A - Tags'))\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)", "def test_get_free_ns(self):\n xmlns = {\"a\": \"b\"}\n self.assertEqual(utils._get_free_ns(xmlns, \"abrac:adabra\"),\n \"_abrac_adabra\")\n self.assertEqual(xmlns, {\"a\": \"b\",\n \"_abrac_adabra\": \"abrac:adabra\"})\n # duplicate\n self.assertEqual(utils._get_free_ns(xmlns, \"abrac/adabra\"),\n \"__abrac_adabra_\")\n self.assertEqual(xmlns, {\"a\": \"b\",\n \"_abrac_adabra\": \"abrac:adabra\",\n \"__abrac_adabra_\": \"abrac/adabra\"})", "def createElement(tagName):\n print(\"Warning: createElement is deprecated in favor of createComponent\")\n return createComponent(tagName)", "def test_02_Tags(self):\n # print(PrettyFormatAny.form(self.m_xml, 'Xml'))\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n self.assertEqual(self.m_xml.computer_div.tag, 'ComputerDivision')\n self.assertEqual(self.m_xml.house_div.tag, 'HouseDivision')\n self.assertEqual(self.m_xml.lighting_sect.tag, 'LightingSection')\n self.assertEqual(self.m_xml.button_sect.tag, 
'ButtonSection')\n self.assertEqual(self.m_xml.button.tag, 'Button')\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection')\n self.assertEqual(self.m_xml.controller.tag, 'Controller')\n self.assertEqual(self.m_xml.light_sect.tag, 'LightSection')\n self.assertEqual(self.m_xml.light.tag, 'Light')" ]
[ "0.6553644", "0.5767339", "0.56110704", "0.5496338", "0.5460686", "0.5308755", "0.52212656", "0.5208479", "0.5199475", "0.5189086", "0.5153285", "0.51497877", "0.5131236", "0.51135963", "0.51049906", "0.50972146", "0.5070891", "0.50700235", "0.50475746", "0.5033134", "0.5026621", "0.50246024", "0.50063896", "0.49867648", "0.49818945", "0.4942801", "0.49343318", "0.49251723", "0.4895427", "0.48851162" ]
0.7780457
0
Tests that warnings on SQL methods are emitted properly
def test_sql_methods(): err = _do_test_raw(""" x.executeSimpleSQL("foo " + y); """) assert err.warnings[0]['id'][-1] == 'executeSimpleSQL_dynamic' err = _do_test_raw(""" x.createStatement("foo " + y); """) assert err.warnings[0]['id'][-1] == 'executeSimpleSQL_dynamic' err = _do_test_raw(""" x.createAsyncStatement("foo " + y); """) assert err.warnings[0]['id'][-1] == 'executeSimpleSQL_dynamic'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warning(self, *args, **kwargs):", "def warn():\n pass", "def test_query_wrapper_operational_error(self):\n\n _session = self.sessionmaker()\n\n _session.begin()\n self.addCleanup(_session.rollback)\n q = _session.query(self.Foo).filter(\n self.Foo.counter == sqla.func.imfake(123))\n matched = self.assertRaises(sqla.exc.OperationalError, q.all)\n self.assertIn(\"no such function\", str(matched))", "def sql_exceptwrapper(method, integrity, *args, **kwargs):\n try:\n result = method(*args, **kwargs)\n return result\n except sqlite3.IntegrityError if bool(integrity) else exceptions.DummyException:\n dummy.UselessStdout.write(\"ASDASDASD\") # DummyException never going to happen\n except sqlite3.Error as sqerror:\n print(sqerror)", "def has_warnings(self) -> bool:", "def test_unsupported_sql(self):\n user = getuser()\n impala_client = self.create_impala_client()\n error_msg = \"UnsupportedFeatureException: {0} is not supported by Sentry.\"\n statements = [(\"grant select on database functional to user foo\",\n error_msg.format(\"GRANT <privilege> TO USER\")),\n (\"grant select on database functional to group foo\",\n error_msg.format(\"GRANT <privilege> TO GROUP\")),\n (\"revoke select on database functional from user foo\",\n error_msg.format(\"REVOKE <privilege> FROM USER\")),\n (\"revoke select on database functional from group foo\",\n error_msg.format(\"REVOKE <privilege> FROM GROUP\")),\n (\"show grant group foo\", error_msg.format(\"SHOW GRANT GROUP\"))]\n for statement in statements:\n result = self.execute_query_expect_failure(impala_client, statement[0], user=user)\n assert statement[1] in str(result)", "def warning(self, *args, **kwargs): # real signature unknown\n pass", "def test_disallowed_queries():\n strings = [\"select * from test times 10\",\n \"select * from test save clusters with threshold .5 as test.csv\",\n \"select * from test given a=5\",\n \"select * from test with confidence .4\",\n \"select a conf .4 from test\",\n \"select a conf .4, b from test\",\n \"simulate a conf .4 from test times 10\",\n \"simulate a conf .4, b from test times 10\",\n \"infer * from test times 10\",\n \"infer typicality from test\",\n \"infer * from test with confidence 1.5\",\n \"simulate typicality from test\",\n \"infer * from test save clusters with threshold .5 as test.csv\",\n \"infer * from test given a=5\",\n \"simulate * from test where a < 4\",\n \"simulate * from test save clusters with threshold .5 as test.csv\",\n \"simulate * from test with confidence .4\",\n \"simulate * from test with 4 samples\",\n \"simulate * from test\",\n \"estimate columns from test with confidence .4\",\n \"estimate columns from test given a=4\",\n \"estimate columns from test times 10\",\n \"summarize estimate columns from test\",\n \"plot estimate columns from test\",\n \"estimate columns from test save clusters with threshold .5 as test.csv\",\n \"estimate pairwise correlation from test where a = b\",\n \"estimate pairwise correlation from test times 10\",\n \"estimate pairwise correlation from test given a = 5\",\n \"estimate pairwise correlation from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\",\n \"estimate pairwise row similarity from test times 10\",\n \"estimate pairwise row similarity from test given a = 5\",\n \"estimate pairwise row similarity from test with confidence .2\",\n \"estimate pairwise row similarity from test where a = b\"\n ]\n\n for query_string in strings:\n ast = bql_statement.parseString(query_string,parseAll=True)\n with 
pytest.raises(AssertionError):\n parser.parse_single_statement(ast)", "def warnings(self) -> List[Error]:", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def test_instances(self):\n\n @deprecate(bar=\"use baz instead\")\n def foo(bar=None, baz=None):\n pass\n\n @deprecate(baz=\"use bar instead\")\n def food(bar=None, baz=None):\n pass\n\n with warnings.catch_warnings(record=True) as w:\n foo(bar=True)\n food(baz=True)\n self.assertEqual(len(w), 2, \"Not all warnings preserved.\")", "def test_table_false_positives(self):\n pass", "def test_using_nonexistant_column_names_in_query_args_raises_error(self):\r\n with self.assertRaises(AttributeError):\r\n TestModel.objects(TestModel.nonsense == 5)", "def warning(self, warning):\n pass", "def test_no_implicit_returning_clause(self):\n\n MockTable = self.classes.MockTable\n ins = MockTable.__table__.insert().values(test=5).compile()\n expected = str(ins)\n assert expected == 'INSERT INTO test_schema.mocktable (test) VALUES (:test)'", "def test_warnings():\n tree = parse(dedent(\"\"\"\\\n import warnings\n\n warnings.warn(\"Hello World!\")\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, is_(empty()))", "def test_unsupported_syntax(self):\n\n self.assertRaises(\n (TypeError, ValueError), self.table.where, 'c_bool[0]'\n )\n self.assertRaises(TypeError, self.table.where, 'c_bool()')\n self.assertRaises(NameError, self.table.where, 'c_bool.__init__')", "def warning(self, msg, *args, **kwargs):\n pass", "def test_runtime_warnings(self):\n\t\tprint(\"test runtime warning: sqrt(-1)=\", np.sqrt(-1.0))\n\t\tassert(True)", "def test_dbs_func_docstrings(self):\n for func in self.dbs_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))", "def test_dbs_func_docstrings(self):\n for func in self.dbs_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))", "def test_order_warnings(self):\n\n o1 = OrderTest.create_order_1()\n\n # make sure there are no warnings now, one clean order\n self.assertEqual(len(o1.warnings()), 0)\n\n o2 = OrderTest.create_order_2(o1.inmate)\n\n # make sure there is a prior-order warning\n self.assertTrue(\"Patron received an order less than 3 months ago\" in o2.warnings())\n\n # make sure there's a prior-book warning\n self.assertTrue(True in [\"Patron already received\" in warning for warning in o2.warnings()])\n self.assertFalse(True in [\"blah blah blah this isn't a warning\" in warning for warning in o2.warnings()])\n\n # make sure we haven't triggered the same-book warning\n self.assertFalse(True in [\"Two books in this\" in warning for warning in o2.warnings()])\n\n # Add another book\n b3 = models.Book()\n b3.order = o2\n b3.title = \"dictionary\"\n b3.full_clean()\n b3.save()\n\n # ...and test if it triggers the same-book warning\n self.assertTrue(True in [\"Two books in this\" in warning for warning in o2.warnings()])", "def test_syntax_error(self):\n\n self.assertRaises(SyntaxError, self.table.where, 'foo bar')" ]
[ "0.63846934", "0.6327814", "0.6292967", "0.6227003", "0.61164504", "0.606927", "0.60324985", "0.6029675", "0.60110146", "0.6006412", "0.6006412", "0.6006412", "0.6006412", "0.6006412", "0.6006412", "0.6006412", "0.6006412", "0.5999971", "0.5977941", "0.5971273", "0.59604573", "0.594531", "0.5904013", "0.5863978", "0.58559674", "0.5817908", "0.5780606", "0.5780606", "0.57628703", "0.5759734" ]
0.73811924
0
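The row above pairs the `executeSimpleSQL_dynamic` warning with three ways of building SQL from concatenated strings. As a minimal sketch of why string-built SQL draws that warning, here is the same hazard and its fix in Python's standard-library `sqlite3`, used as a stand-in for Mozilla's `mozIStorage` API (the table and variable names are illustrative, not from the dataset):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT)")
user_input = "alice' OR '1'='1"

# Dangerous: the statement is assembled at runtime from untrusted text,
# the same pattern that `"foo " + y` triggers in the flagged calls above.
unsafe_sql = "SELECT name FROM users WHERE name = '" + user_input + "'"

# Safe: a bound parameter keeps the data out of the SQL text entirely.
rows = conn.execute(
    "SELECT name FROM users WHERE name = ?", (user_input,)
).fetchall()
assert rows == []
```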
Tests that setAttribute calls are blocked successfully
def test_setAttribute(): assert not _do_test_raw(""" var x = "foo"; x.setAttribute(); x.setAttribute("foo"); x.setAttribute("foo", "bar"); """).failed() assert _do_test_raw(""" var x = "foo"; x.setAttribute("onfoo", "bar"); """).failed()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_attribute():\n elem = hr.Element(\"this is some text\", id=\"spam\", style=\"eggs\")\n elem.set_attributes(holy=\"grail\", answer=42)\n\n assert (\n get_opening_line(elem)\n == '<html id=\"spam\" style=\"eggs\" holy=\"grail\" answer=\"42\">'\n )", "def testSetAttributeAction(self):\n\t action = SetAttributeAction('x', 'y', ('key',), 'z')\n\t self.failUnless(action.field == 'y')\n\t self.failUnless(action.value == 'z')", "def test_set_attribute_override():\n elem = hr.Element(\n \"this is some text\",\n style=\"cheese\",\n answer=1,\n clas=\"spam\", # cspell:disable-line\n )\n elem.set_attributes(holy=\"grail\", answer=42, _clas=\"eggs\") # cspell:disable-line\n\n opening_tag = get_opening_line(elem)\n assert 'style=\"cheese\"' in opening_tag\n assert 'answer=\"42\"' in opening_tag\n assert 'class=\"eggs\"' in opening_tag\n assert 'holy=\"grail\"' in opening_tag", "def _setAttributes(self, primaryAttr, attrs):\n return False", "def setAttributeValue(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def _setAttr(self, attrName, value):\n\n if (value not in (None, \"\")):\n setattr(self, attrName, value)", "def test_set_attributes_error(self):\n r = Resources()\n attr_lst = [\"num_wires\", \"num_gates\", \"depth\", \"shots\", \"gate_types\"]\n\n for attr_name in attr_lst:\n with pytest.raises(FrozenInstanceError, match=\"cannot assign to field\"):\n setattr(r, attr_name, 1)", "def __setattr__(self, name, value):\n raise AttributeError(\"You cannot modify attributes on a %s\" % self.__class__.__name__)", "def test_update_attribute_data(self):\n pass", "def __setattr__(self, attr: str, _value: t.Any) -> t.NoReturn:\n raise AttributeError(attr)", "def test_set_invalid_attribute(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(AttributeError):\n md.invalid_attribute = \"value\"", "def __setattr__(self, name, value):\n if name == 'ALLOW_CHANGE':\n raise AttributeError(\"attribute name 'ALLOW_CHANGE' has been occupied, please use another name\")\n if getattr(self, 'ALLOW_CHANGE', None):\n self.__dict__[name] = value\n else:\n raise AttributeReadOnlyError(self, name)", "def test_dom_mutation_fail():\n\n assert not _do_test_raw(\"foo.DOMAttr = bar;\").failed()\n assert _do_test_raw(\"foo.DOMAttrModified = bar;\").failed()", "def _set_attributes(self):", "def test_attribute_not_found(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `non_existing_attribute` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute\",\n \"value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def __setattr__(self, name: str, val: Any) -> None:\n if name == \"_unready_attributes\":\n pass\n elif hasattr(self, \"_unready_attributes\") and name in self._unready_attributes:\n self._unready_attributes.remove(name)\n super().__setattr__(name, val)", "def test_container_attribute_async(self):\n global GLOB_SIGNAL\n global GLOB_RC\n\n self.add_pool()\n self.add_container(self.pool)\n self.container.open()\n self.daos_cmd = DaosCommand(self.bin)\n\n expected_for_param = []\n name = self.params.get(\"name\", '/run/attrtests/name_handles/*/')\n expected_for_param.append(name[1])\n value = self.params.get(\"value\", '/run/attrtests/value_handles/*/')\n expected_for_param.append(value[1])\n\n # Convert any test yaml string to bytes\n if isinstance(name[0], str):\n name[0] = name[0].encode(\"utf-8\")\n if 
isinstance(value[0], str):\n value[0] = value[0].encode(\"utf-8\")\n\n attr_dict = {name[0]: value[0]}\n expected_result = 'PASS'\n for result in expected_for_param:\n if result == 'FAIL':\n expected_result = 'FAIL'\n break\n try:\n GLOB_SIGNAL = threading.Event()\n self.container.container.set_attr(data=attr_dict, cb_func=cb_func)\n GLOB_SIGNAL.wait()\n if GLOB_RC != 0 and expected_result in ['PASS']:\n self.fail(\"RC not as expected after set_attr First {0}\"\n .format(GLOB_RC))\n\n # Workaround\n # Due to DAOS-7093 skip the usage of pydaos cont list attr\n # GLOB_SIGNAL = threading.Event()\n #\n # size, buf = self.container.container.list_attr(cb_func=cb_func)\n #\n data = self.daos_cmd.container_list_attrs(\n pool=self.pool.uuid,\n cont=self.container.uuid)\n\n # GLOB_SIGNAL.wait()\n # if GLOB_RC != 0 and expected_result in ['PASS']:\n # self.fail(\"RC not as expected after list_attr First {0}\"\n # .format(GLOB_RC))\n\n if expected_result in ['PASS']:\n # Workaround: async mode is not used for list_attr\n self.verify_list_attr(attr_dict, data['response'])\n\n # Request something that doesn't exist\n if name[0] is not None and b\"Negative\" in name[0]:\n name[0] = b\"rubbish\"\n\n GLOB_SIGNAL = threading.Event()\n self.container.container.get_attr([name[0]],\n cb_func=cb_func)\n GLOB_SIGNAL.wait()\n\n if GLOB_RC != 0 and expected_result in ['PASS']:\n self.fail(\"RC not as expected after get_attr {0}\"\n .format(GLOB_RC))\n\n # not verifying the get_attr since its not available asynchronously\n # Therefore we want to avoid passing negative test\n # e.g. rubbish getting assigned.\n\n if value[0] is not None:\n if GLOB_RC == 0 and expected_result in ['FAIL']:\n if name[0] != b\"rubbish\":\n self.fail(\"Test was expected to fail but it passed.\\n\")\n\n except DaosApiError as excep:\n print(excep)\n print(traceback.format_exc())\n if expected_result == 'PASS':\n self.fail(\"Test was expected to pass but it failed.\\n\")", "def SetAttributes(self, attr):\r\n \r\n if self._ownsAttr:\r\n del self._attr\r\n \r\n self._attr = attr\r\n self._ownsAttr = False", "def AssignAttributes(self, attr):\r\n \r\n self.SetAttributes(attr)\r\n self._ownsAttr = True", "def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def set_attribute(self, attribute, value) -> None:\n logging.info(f\"setting element attribute. 
{self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.setAttribute(\"{attribute}\", \"{value}\");\n \"\"\"\n self._execute_javascript(js)", "def testAttributeAssignmentIsIntercepted(self):\n\t \n\t c = Controller()\n\t x = c.mock(KlassBeingMocked)\n\t x.w = 5\n\t c.replay()\n\t try:\n\t x.w\n\t self.fail()\n\t except PlaybackFailure, e:\n\t pass", "def test_register_existing_attr(self):\n pass", "def testClassNotMutable(self):\n self.assertRaises(AttributeError,\n setattr,\n Color,\n 'something_new',\n 10)", "def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)", "def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))", "def test_set_attributes(test_common_dao):\n _session = test_common_dao.RAMSTK_SESSION(\n bind=test_common_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKSiteInfo).first()\n\n _error_code, _msg = DUT.set_attributes(ATTRIBUTES)\n\n assert _error_code == 0\n assert _msg == (\"RAMSTK SUCCESS: Updating RAMSTKSiteInfo attributes.\")", "def test_attribute_order(self):\n element = Element(\"div\")\n element.set_attribute(\"def\", \"\")\n element.set_attribute(\"abc\", \"\")\n element.set_attribute(\"ghi\", \"\")\n assert_equal(\n [b'<div abc=\"\" def=\"\" ghi=\"\">', b\"</div>\"], list(iter(element))\n )", "def set_attr_values(self):\n ats = self.attributes # convenient short name\n for aid in ats:\n value = ats[aid]['nv'] if 'nv' in ats[aid] else (\n ats[aid]['value'] if 'value' in ats[aid] else None)\n if value is not None:\n# self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)\n #- self.file.h5save_attribute(self.full_path, aid, value)\n #- self.file.h5commands.append(\"set attribute(%s:%s)-%s\" % (self.full_path,\n #- aid, value))", "def test_bad_attribute_access(self):\n test = self.test\n\n self.assertRaises(AttributeError, test.__getattr__, 'poop')\n # test.poop = 'foo' should set a new object attr 'poop'\n self.assertRaises(KeyError, test.__getitem__, 'poop')\n self.assertRaises(KeyError, test.__setitem__, 'poop', 'foo')" ]
[ "0.6553509", "0.6533299", "0.6466073", "0.62271565", "0.60845727", "0.600334", "0.5973352", "0.59136784", "0.5868815", "0.5804294", "0.58023375", "0.57952565", "0.57601655", "0.5696862", "0.56905687", "0.5690205", "0.5658569", "0.5632546", "0.563241", "0.56156796", "0.5583333", "0.5558767", "0.55506825", "0.5540719", "0.5485932", "0.54736394", "0.54554904", "0.5455279", "0.5447012", "0.5441001" ]
0.7886286
0
Test that insertAdjacentHTML works the same as innerHTML.
def test_insertAdjacentHTML(): assert not _do_test_raw(""" var x = foo(); x.insertAdjacentHTML("foo bar", "<div></div>"); """).failed() assert _do_test_raw(""" var x = foo(); x.insertAdjacentHTML("foo bar", "<div onclick=\\"foo\\"></div>"); """).failed() # Test without declaration assert _do_test_raw(""" x.insertAdjacentHTML("foo bar", "<div onclick=\\"foo\\"></div>"); """).failed() assert _do_test_raw(""" var x = foo(); x.insertAdjacentHTML("foo bar", "x" + y); """).failed()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_createElement():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElement();\n x.createElement(\"foo\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElement(\"script\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElement(bar);\n \"\"\").failed()", "def DocumentElementInsertAfter(self):\n raise NotImplementedError()", "def test_insert_html_amp(parsed_html):\n parsed_amp = utils.insert_html_amp(parsed_html)\n assert parsed_amp.find(\"html\", amp=\"\") is not None", "def assertHtmlEqual(self, actual, expected):\r\n return self._assertHtmlEqual(\r\n fragment_fromstring(actual, create_parent='div'),\r\n fragment_fromstring(expected, create_parent='div')\r\n )", "def DocumentElementInsertBefore(self):\n raise NotImplementedError()", "def _assertHtmlEqual(self, actual, expected):\r\n self.assertEqual(actual.tag, expected.tag)\r\n self.assertEqual(actual.attrib, expected.attrib)\r\n self.assertEqual(actual.text, expected.text)\r\n self.assertEqual(actual.tail, expected.tail)\r\n self.assertEqual(len(actual), len(expected))\r\n for actual_child, expected_child in zip(actual, expected):\r\n self._assertHtmlEqual(actual_child, expected_child)", "def test_insert_end_for_one_element_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n test_linkedlist.insert_end('B')\n assert test_linkedlist.head.data == 'A'", "def insertChildBefore(new_elem, elem):\n parent = DOM.getParent(elem)\n id = DOM.getChildIndex(parent, elem)\n DOM.insertChild(parent, new_elem, id)", "def test_linked_list_insert_exists():\n assert LinkedList.insert", "def test_insert_end_for_empty_list(test_linkedlist):\n test_linkedlist.insert_end('A')\n assert test_linkedlist.head.data == 'A'", "def test_insert_no_parent(tree):\n with pytest.raises(ValueError):\n assert tree.insert(1)", "def test_insert_node_singleton_content_2():\n first = 0\n second = first\n chain = N.Node(second)\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def test_insertion_for_each_element_input_list(empty_list):\n a = [5, 6, 7, 8]\n empty_list.insert(a)\n assert len(empty_list) == len(a)", "def add_extra_html(instance, placeholder, rendered_content, original_context):\n html_before = getattr(placeholder, '_extra_html_before', '')\n html_after = getattr(placeholder, '_extra_html_after', '')\n if not html_before and not html_after:\n return rendered_content\n\n template_data = ['{{rendered_content|safe}}']\n context = Context({'rendered_content': rendered_content})\n\n if html_before:\n template_data.insert(0, '{{html_before|safe}}')\n context.update({'html_before': html_before})\n del placeholder._extra_html_before\n if html_after:\n template_data.append('{{html_after|safe}}')\n context.update({'html_after': html_after})\n del placeholder._extra_html_after\n\n return Template(''.join(template_data)).render(context)", "def test_insert_node_singleton_content_1():\n first = 0\n second = 1\n chain = N.Node(second)\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton 
chain\"", "def test_insert_end(self):\n sll = SinglyLinkedList()\n a = Node('a')\n b = Node('b')\n c = Node('c')\n sll.insert_beg(a)\n sll.insert_end(b)\n sll.insert_beg(c)\n actual = [i.data for i in sll][-1]\n expected = 'b'\n assert(actual==expected)", "def test_insertion2(engine_contents, engine_locations):\n file_name = 'Triangle.java.xml'\n new_contents = copy.deepcopy(engine_contents)\n new_locations = copy.deepcopy(engine_locations)\n target1 = (file_name, '_inter_block', 10)\n target2 = (file_name, 'expr_stmt', 1)\n target3 = (file_name, 'expr_stmt', 0)\n assert XmlEngine.do_insert(engine_contents, engine_locations, new_contents, new_locations, target1, target2)\n assert XmlEngine.do_insert(engine_contents, engine_locations, new_contents, new_locations, target1, target3)\n dump = XmlEngine.dump(engine_contents[file_name])\n new_dump = XmlEngine.dump(new_contents[file_name])\n expected = \"\"\"--- \n+++ \n@@ -5,6 +5,10 @@\n }\n \n public static TriangleType classifyTriangle(int a, int b, int c) {\n+\n+ delay();\n+\n+ a = b;\n \n delay();\n \n\"\"\"\n assert_diff(dump, new_dump, expected)", "def test_insert_node_singleton_content_3():\n first = 0\n second = 1\n chain = N.Node(first)\n node = N.Node(second)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def test_insert_if_node_value_exist(balanced_3_nodes):\n with pytest.raises(ValueError):\n balanced_3_nodes.insert(10)", "def test_insert_will_not_duplicate_value(bst_balanced):\n bst_balanced.insert(6)\n assert bst_balanced.size() == 6", "def test_insert_node_multiple_content_2():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(third))\n node = N.Node(second)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at mid)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at middle)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at middle)\"", "def test_binarytree_insert_exists(empty_list):\n assert empty_list.insert(42)", "def test_binarytree_insert_error_expected():\n input = [42, 13]\n f = BinaryTree(input)\n with pytest.raises(ValueError) as err:\n f.insert(13)\n expected = 'Neither < or > for 13, 13'\n assert 'ValueError' in str(err.type)\n assert expected == str(err.value)", "def test_createElementNS():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS();\n x.createElementNS(\"foo\");\n x.createElementNS(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", \"script\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", bar);\n \"\"\").failed()\n\n # Test for https://github.com/mozilla/amo-validator/issues/368\n assert not _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = \"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'panelview')\n \"\"\").failed()\n\n # Creating a <script> element raises a warning of course.\n assert _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = 
\"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'script')\n \"\"\").failed()", "def innerHTML(element):\n return element.encode_contents()", "def test_insert_node_multiple_content_1():\n first = 0\n second = 1\n third = 3\n chain = N.Node(second, N.Node(third))\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at start)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"", "def _insert_between(self, e, predecessor, successor):\n node = super()._insert_between(e, predecessor, successor)\n return self._make_position(node)", "def test_insert_no_value(tree):\n with pytest.raises(TypeError):\n assert tree.insert()", "def testInsert(self):\n\n for i in xrange(randint(50,150)):\n self.s.insert(i, None)", "def test_insertion_for_each_element_in_iterable_tuple(empty_list):\n b = (1, 2, 3)\n bb = LinkedList([])\n bb.insert(b)\n assert len(bb) == 3" ]
[ "0.6206021", "0.54315954", "0.5343544", "0.5333806", "0.5289093", "0.52715856", "0.52019536", "0.51820296", "0.5170138", "0.51649594", "0.51338845", "0.5128217", "0.51144373", "0.50881356", "0.506577", "0.5063727", "0.50439453", "0.50368094", "0.50320894", "0.49574444", "0.4952644", "0.4937416", "0.49199316", "0.49194598", "0.491538", "0.49024013", "0.4895906", "0.48952588", "0.48899063", "0.48893455" ]
0.84246767
0
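The `setAttribute` and `insertAdjacentHTML` rows above both warn when a string argument smuggles in an inline `on*` event handler: one checks the attribute name, the other the markup. A minimal sketch of those two checks, assuming a simple regex and prefix test are enough; the real validator also warns on dynamically concatenated markup such as `"x" + y`, which no string pattern can catch:

```python
import re

ON_HANDLER = re.compile(r"\bon\w+\s*=", re.IGNORECASE)

def fragment_is_flagged(markup: str) -> bool:
    """True when markup carries an inline on* event handler."""
    return bool(ON_HANDLER.search(markup))

def attribute_name_is_flagged(name: str) -> bool:
    """True for event-handler attribute names such as onclick or onfoo."""
    return name.lower().startswith("on")

assert not fragment_is_flagged("<div></div>")
assert fragment_is_flagged('<div onclick="foo"></div>')
assert attribute_name_is_flagged("onfoo")   # mirrors setAttribute("onfoo", "bar")
assert not attribute_name_is_flagged("href")
```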
Test that `nsIFile.launch()` is flagged.
def test_nsIFile_launch(): assert _do_test_raw('foo.launch()').failed()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fail_launch_file(self):\n args = self.args.copy()\n # Pass a string instead of a list\n args[\"traj_file\"] = \"nofile.xtc\"\n with pytest.raises(FileNotFoundError) as err:\n UI.launch(**args)\n assert \"nofile.xtc does not exist.\" in str(err.value)", "def test_openDialog_flag_var(self):\n self.run_script(\"\"\"\n foo.openDialog(bar)\n \"\"\")\n self.assert_notices()", "def test_openDialog_pass(self):\n self.run_script(\"\"\"\n foo.openDialog(\"foo\")\n foo.openDialog(\"chrome://foo/bar\")\n \"\"\")\n self.assert_silent()", "def test_py_script_file_attribute_interactively(self):\n src = \"True\\n\"\n self.mktmp(src)\n\n out, err = tt.ipexec(\n self.fname,\n options=[\"-i\"],\n commands=['\"__file__\" in globals()', \"print(123)\", \"exit()\"],\n )\n assert \"False\" in out, f\"Subprocess stderr:\\n{err}\\n-----\"", "def test_BLINK_LAUNCH_PROCESS(self):\n self.verify_references_to_prerequisites(processes.BLINK_LAUNCH_PROCESS)", "def start_test(url):\n \n Debug.user(' ################# start Test ######################')\n App.open('firefox --private-window '+url)\n wait(\"1501595436606.png\", 10)\n\n click(\"1501595453560.png\")\n\n if exists():\n \n click()\n else:\n click()\n \n\n\n if exists(\"1499781534684.png\"):\n click(\"1499781552298.png\")\n type('root')\n click(\"1499781563870.png\")\n else:\n pass\n click(\"1499781591282.png\")", "def check_running_from_automator():\n if (\n platform.system() == \"Darwin\"\n and os.environ.get(\"XPC_SERVICE_NAME\") == \"com.apple.automator.xpc.runner\"\n ):\n return True\n return False", "def test_file(self):\n a = False\n if \"show()\" in open('attempt.py').read():\n a = True\n self.assertEquals(a,True)", "def test_startProcessAlreadyStarted(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIdentical(None, self.pm.startProcess(\"foo\"))", "def test_window_loaded(self):", "def test_open():\n z = XPIManager(get_path('xpi/install_rdf_only.xpi'))\n assert z is not None", "def testStageOpens(self):\n self.assertTrue(self._stage)", "def test_launch(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.launch(TOOLNAME,username,userpass)", "def test_launch_minimal(self, capsys):\n UI.launch(**self.args)\n captured = capsys.readouterr().out\n assert \"Results written to OP_buildH.out\" in captured", "def test_conditions(self):\n Utils.start_home(self.serial)\n AppUtils.kill_app(self.serial, self.package)\n AppUtils.open_app(self.device, self.serial, self.app)\n Utils.wait_short()", "def get_prog_runatstart(self):\n #en = self._get_prop(\"runAtStartup\")\n #return bool( en == \"true\" )\n return bool(self._mydict['runAtStartup'] == \"true\")", "def test_launch_composition(self):\n pass", "def handleInstallerinterrruptions(window,texttosearch,fileexist):\n pass", "def test_InvalideFileExtension_NoRedirectionAfterSubmint(self):\n\n self.open(config.url)\n self.choose_file(config.file_upload, config.wrong_file_format_filepath)\n self.click(config.submit_file)\n self.assert_element_present('#file_response')", "def LaunchAndWait(cmd):\n call(cmd)", "def test_missing_file():\n passed = False\n try:\n x = XPIManager('foo.bar')\n except:\n passed = True\n assert passed", "def isstarted():", "def is_running(program):\n return program in get_running()", "def test_executable():\n arg1 = get_args(driver=\"chrome\", executable=\"/tmp\")\n assert arg1[\"executable_path\"] == \"/tmp\"", "def 
OnSim42RunCmdFile(self, event):\n path = self.PromptPathOpenCmd()\n if not path: return\n self.RunCmdFile(path)", "def test_startProcess(self):\r\n self.pm.addProcess(\"foo\", [\"foo\"])\r\n self.pm.startProcess(\"foo\")\r\n self.assertIsInstance(self.pm.protocols[\"foo\"], LoggingProtocol)\r\n self.assertIn(\"foo\", self.pm.timeStarted.keys())", "def opened(self, *args) -> \"bool\":\n return _ida_fpro.qfile_t_opened(self, *args)", "def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)", "def check_file_flag(file):\n return process_file_flag(file, None)", "def waitUntilSubprocessLaunched(self):\n\n def hasLaunched():\n return self._has_launched\n\n with self._has_launched_cv:\n self._has_launched_cv.wait_for(hasLaunched)\n assert self._has_launched" ]
[ "0.64032733", "0.6122187", "0.5883255", "0.5880068", "0.5665483", "0.5528563", "0.552782", "0.55099374", "0.5467473", "0.5463001", "0.544874", "0.5420529", "0.5306385", "0.5299579", "0.52987474", "0.52926964", "0.52705574", "0.52683437", "0.52574074", "0.5248173", "0.52363783", "0.5235729", "0.52211386", "0.5211538", "0.5205534", "0.5190713", "0.51884913", "0.51795006", "0.51779604", "0.51775897" ]
0.82881325
0
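`_do_test_raw` in the row above flags any `.launch()` call on an `nsIFile`-like object. The validator itself analyzes JavaScript, so purely as an illustration of flagging a method call by name, the sketch below does the equivalent on a Python AST:

```python
import ast

def flag_launch_calls(source: str) -> list:
    """Return line numbers of any `.launch()` method call in `source`."""
    flagged = []
    for node in ast.walk(ast.parse(source)):
        if (isinstance(node, ast.Call)
                and isinstance(node.func, ast.Attribute)
                and node.func.attr == "launch"):
            flagged.append(node.lineno)
    return flagged

assert flag_launch_calls("foo.launch()") == [1]
assert flag_launch_calls("foo.open()") == []
```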
Test that `.openDialog("")` doesn't throw an error for chrome/local URIs.
def test_openDialog_pass(self): self.run_script(""" foo.openDialog("foo") foo.openDialog("chrome://foo/bar") """) self.assert_silent()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_openDialog(self):\n\n def test_uri(self, uri):\n self.setUp()\n self.setup_err()\n self.run_script('foo.openDialog(\"%s\")' % uri)\n self.assert_failed(with_warnings=True)\n\n uris = ['http://foo/bar/',\n 'https://foo/bar/',\n 'ftp://foo/bar/',\n 'data:asdf']\n for uri in uris:\n yield test_uri, self, uri", "def test_openWindowWithWrongSettingsFile(self):\n self.createWrongSettingsFile()\n return self.assertRaises(SettingsCorrupted, ConfigurationWindow)", "def test_openDialog_flag_var(self):\n self.run_script(\"\"\"\n foo.openDialog(bar)\n \"\"\")\n self.assert_notices()", "def test_running_main_error_in_parsing(exopy_qtbot):\n def check_dialog(qtbot, dial):\n assert 'cmd' in dial.text\n\n with pytest.raises(SystemExit):\n with handle_dialog(exopy_qtbot, 'reject', check_dialog):\n main(['dummy'])", "def test_nsIFile_launch():\n\n assert _do_test_raw('foo.launch()').failed()", "def error_open_mess(url: str) -> None:\n meta = MainData()\n print(('{0}Can not open URL: {1} {2}{3}').format(meta.clrs['red'],\n meta.clrs['lblue'],\n url,\n meta.clrs['reset']))", "def test_open_mainpage(open_browser, url_param):\n open_browser.get(url_param)\n assert open_browser.current_url == url_param\n open_browser.close()", "def test_fail_launch_file(self):\n args = self.args.copy()\n # Pass a string instead of a list\n args[\"traj_file\"] = \"nofile.xtc\"\n with pytest.raises(FileNotFoundError) as err:\n UI.launch(**args)\n assert \"nofile.xtc does not exist.\" in str(err.value)", "def test_open_browser_linux_no_xdg(self):\n from streamlit import env_util\n\n env_util.IS_LINUX_OR_BSD = True\n\n with patch(\"streamlit.env_util.is_executable_in_path\", return_value=False):\n with patch(\"webbrowser.open\") as webbrowser_open:\n with patch(\"subprocess.Popen\") as subprocess_popen:\n util.open_browser(\"http://some-url\")\n self.assertEqual(True, webbrowser_open.called)\n self.assertEqual(False, subprocess_popen.called)", "def test_open(self):\n page, resources = self.ghost.open(base_url)\n self.assertEqual(page.url, base_url)\n \n self.ghost.click(\"#run\")", "def error(message, title=None):\n return dialog(\"error\", message=message, title=title)", "def test_failToOpenLocalFile(self):\n fp = FilePath(self.mktemp()).child(\"child-with-no-existing-parent\")\n\n self.assertRaises(IOError, self.makeConnectedDccFileReceive, fp.path)", "def simple_test_open_url(url):\n try:\n return requests.get(url, headers={\"User-Agent\": random.choice(useragents.useragents())}).status_code\n except Exception as _:\n return False", "def test_launch_tool_invalid_path_4(self):\n\n home_dir = os.path.join('/','bad_home','bad_hubname','fake_user')\n\n parameters_text = 'file(datafile1):%s/file_does_not_exist' % (home_dir)\n\n try:\n sessnum = launch_tool(self.https_authority,self.reguser,\n self.regpass,self.browser,self.catalog,self.utils,\n TOOL_NAME,TOOL_REVISION,parameters_text)\n\n self.close_sessions.append(sessnum)\n\n assert False, \"while passing tool parameters, cms failed to\" \\\n + \" catch invalid path: %s\" % (repr(parameters_text))\n\n except BadParameterError as e:\n pass", "def test_request_fetch_bogus_url():\n with pytest.raises(SystemExit):\n request.fetch(\"lsdfjlsdjf\")", "def _unable_open_option(self):\n self.tap_on_open_option()\n\n unable_msg = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_h1, \"Unable to open msg\")\n self.UTILS.test.test(unable_msg.text == _(\"Unable to open\"), \"Unable to open msg\")", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n 
assert ui_utils.is_error_403()", "def test_nothing(self):\n with self.assertRaises(URLParameterError):\n create_url()", "def open_url(self, url):\n try:\n if url != \"\":\n self.driver.maximize_window()\n self.driver.get(url)\n print(url + \" : url is opened\")\n else:\n print(\"Please enter valid url\")\n except Exception as e:\n print(str(e))", "def assert_forbidden(self, url):\n with pytest.raises(zope.testbrowser.browser.HTTPError) as err:\n self.open(url)\n assert 'HTTP Error 403: Forbidden' == str(err.value), \\\n 'Raised \"{}\" instead of HTTP-403 Forbidden'.format(err.value)", "def browser_open(url):\n FNULL = open(os.devnull, 'w')\n subprocess.Popen([udata.browser, url], stdout=FNULL, stderr=subprocess.STDOUT )", "def unable_to_open_message(filepath: Path, not_ok: Exception) -> None:\n if \"PYTEST_CURRENT_TEST\" in os.environ:\n print('DBG> Running inside a pytest -> not showing error message.')\n return\n info = QMessageBox()\n info.setIcon(QMessageBox.Information)\n print('Output from gemmi:', not_ok)\n try:\n line = str(not_ok)[4:].split(':')[1]\n except IndexError:\n line = None\n info.setText('This cif file is not readable! ')\n if line:\n try:\n int(line)\n info.setInformativeText(f'\\nPlease check line {line} in\\n{filepath.name}')\n except ValueError:\n info.setInformativeText(f'\"{filepath.name}\"\\n{not_ok}')\n else:\n info.setInformativeText(f'\"{filepath.name}\"\\n{not_ok}')\n info.show()\n info.exec()", "def open_link(self):\n try:\n # webbrowser.open(self.url) # if you are on Windows OS\n webbrowser.get('safari').open_new_tab(self.url) # if you are on Mac OS\n except(AttributeError):\n self.ids.label.text = self.error_msg", "def test_invalid_username():\n expect_error(edit, InputError, \"aaa\", 1, True, None, None)", "def open_uri(uri):\n\tclose_fds = True\n\tstartupinfo = None\n\tproc_args = []\n\tif sys.platform.startswith('win'):\n\t\tproc_args.append(which('cmd.exe'))\n\t\tproc_args.append('/c')\n\t\tproc_args.append('start')\n\t\turi = uri.replace('&', '^&')\n\t\tclose_fds = False\n\t\tstartupinfo = subprocess.STARTUPINFO()\n\t\tstartupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n\t\tstartupinfo.wShowWindow = subprocess.SW_HIDE\n\telif which('gvfs-open'):\n\t\tproc_args.append(which('gvfs-open'))\n\telif which('xdg-open'):\n\t\tproc_args.append(which('xdg-open'))\n\telse:\n\t\traise RuntimeError('could not find suitable application to open uri')\n\tproc_args.append(uri)\n\tproc_h = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=close_fds, startupinfo=startupinfo)\n\treturn proc_h.wait() == 0", "def test_NoFileUploaded_NoRedirectionAfterSubmint(self):\n\n self.open(config.url)\n self.click(config.submit_file)\n self.assert_element_present('#send-title')", "def test_running_main_error_in_loading(exopy_qtbot, monkeypatch):\n import exopy.__main__ as em\n\n def false_iter(arg):\n\n class FalseEntryPoint(EntryPoint):\n def load(self, *args, **kwargs):\n raise Exception(\"Can't load entry point\")\n\n return [FalseEntryPoint('dummy', 'dummy')]\n\n monkeypatch.setattr(em, 'iter_entry_points', false_iter)\n\n def check_dialog(qtbot, dial):\n assert 'extension' in dial.text\n\n with pytest.raises(SystemExit):\n with handle_dialog(exopy_qtbot, 'reject', check_dialog):\n main([])", "def test_running_main_error_in_app_startup(exopy_qtbot, monkeypatch):\n from exopy.app.app_plugin import AppPlugin\n\n def false_run_startup(self, args):\n raise Exception('Fail to run start up')\n\n 
monkeypatch.setattr(AppPlugin, 'run_app_startup', false_run_startup)\n\n def check_dialog(qtbot, dial):\n assert 'starting' in dial.text\n\n with pytest.raises(SystemExit):\n with handle_dialog(exopy_qtbot, 'reject', check_dialog):\n main([])", "def test_client_id_scheme() -> None:\n assert indieauth._parse_client_id(\"http://ex.com/\")\n assert indieauth._parse_client_id(\"https://ex.com/\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"ftp://ex.com\")", "def test_invalid_request_url(self):\r\n self.launch_uri = self.uri + 'wrong_lti_endpoint'\r\n response = requests.post(self.launch_uri, data=self.payload)\r\n self.assertIn('Invalid request URL', response.content)" ]
[ "0.6870198", "0.61324924", "0.6001495", "0.5948701", "0.59436876", "0.58810073", "0.56357914", "0.56282413", "0.5534864", "0.5530544", "0.55118865", "0.5507113", "0.5468689", "0.545093", "0.53994143", "0.5359619", "0.5342453", "0.5341253", "0.5336293", "0.5319729", "0.52928525", "0.52611613", "0.52568454", "0.5250543", "0.5241267", "0.5235155", "0.52345175", "0.522278", "0.521565", "0.52118194" ]
0.7835892
0
Test that `.openDialog(bar)` doesn't throw an error where `bar` is a dirty object.
def test_openDialog_flag_var(self): self.run_script(""" foo.openDialog(bar) """) self.assert_notices()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_openDialog_pass(self):\n self.run_script(\"\"\"\n foo.openDialog(\"foo\")\n foo.openDialog(\"chrome://foo/bar\")\n \"\"\")\n self.assert_silent()", "def test_openWindowWithWrongSettingsFile(self):\n self.createWrongSettingsFile()\n return self.assertRaises(SettingsCorrupted, ConfigurationWindow)", "def test_open_project_fails(self):\n with patch('cauldron.steptest.support.open_project') as open_project:\n open_project.side_effect = RuntimeError('FAKE')\n with self.assertRaises(AssertionError):\n self.open_project()", "def test_initializing_repository_without_git_repo_does_not_raise_error(\n tmp_path: str,\n) -> None:\n with does_not_raise():\n repo = Repository(str(tmp_path))\n assert repo.git_wrapper is None", "def test_work_without_activity(human):\n with pytest.raises(AttributeError):\n human.work()", "def test_cancel_operation(self):\n con = sqlite.connect(\":memory:\")\n def progress():\n return 1\n con.set_progress_handler(progress, 1)\n curs = con.cursor()\n self.assertRaises(\n sqlite.OperationalError,\n curs.execute,\n \"create table bar (a, b)\")", "def test_running_main_error_in_parsing(exopy_qtbot):\n def check_dialog(qtbot, dial):\n assert 'cmd' in dial.text\n\n with pytest.raises(SystemExit):\n with handle_dialog(exopy_qtbot, 'reject', check_dialog):\n main(['dummy'])", "def test_invalid_instantiation(invalid_instance):\n with pytest.raises(ValueError):\n invalid_instance()", "def test_repo_validation(self):\n repo = gnome.gh.repo_from_callback(MockCallback())\n repo._repo.create_milestone = MagicMock()\n gh_milestone = MockMilestoneFoo()\n try:\n mlstn = gnome.gh.Milestone(repo, gh_milestone)\n was_error = False\n except:\n was_error = True\n self.assertFalse(was_error)", "def test_foo(self):\n foo = Foo(value=1)\n with self.assertRaises(TypeError):\n foo.foo()", "def test_worktree_tempfile_failure(\n repository: Repository, monkeypatch: pytest.MonkeyPatch\n) -> None:\n import tempfile\n\n def raise_() -> None:\n raise Exception(\"boom\")\n\n monkeypatch.setattr(tempfile, \"TemporaryDirectory\", raise_)\n branch = repository.heads.create(\"branch\")\n\n with pytest.raises(Exception, match=\"boom\"):\n with repository.worktree(branch, checkout=False):\n pass # pragma: no cover", "def test_error_bubbles_up(self):\n\n def fail(box):\n box.fail(ValueError(\"oh dear\"))\n\n self.assertThat(\n lambda: sync_perform(func_dispatcher, Effect(fail)),\n raises(ValueError(\"oh dear\")),\n )", "def test_open_no_setup(restaurant_only, hall_only, kitchen_only, delivery_only):\n # Here checks not all variants, cause restaurant_only is not isolated\n # object. 
They were previously check and working alongside\n # but affects result if together.\n\n # no setups\n with pytest.raises(CustomWarning):\n restaurant_only.open()\n assert restaurant_only.is_working is False, \"You need to setup Kitchen, Delivery and Hall\"\n\n # only kitchen\n with pytest.raises(CustomWarning):\n restaurant_only.set_kitchen(kitchen_only)\n restaurant_only.open()\n assert restaurant_only.is_working is False, \"You need to setup Kitchen, Delivery and Hall\"\n\n # only delivery and kitchen\n with pytest.raises(CustomWarning):\n restaurant_only.set_delivery(delivery_only)\n restaurant_only.set_kitchen(kitchen_only)\n restaurant_only.open()\n assert restaurant_only.is_working is False, \"You need to setup Kitchen, Delivery and Hall\"", "def test_context_immutable():\n with pytest.raises(ImmutableStateError):\n Context().abc = 1", "def test_check_required_fail():\n settings = SettingsModel()\n\n with pytest.raises(InvalidSettings):\n settings.check()", "def test_errors_on_bad_argument(self):\n self.assertRaises(Exception, Scope, 'foo')\n self.assertRaises(Exception, Scope, 1)\n self.assertRaises(Exception, Scope, [])\n self.assertRaises(Exception, Scope, tuple())", "def test_running_main_error_in_parser_modifying(exopy_qtbot, monkeypatch):\n import exopy.__main__ as em\n\n def false_iter(arg):\n\n class FalseEntryPoint(EntryPoint):\n def load(self, *args, **kwargs):\n\n def false_modifier(parser):\n raise Exception('Failed to add stupid argument to parser')\n\n return (false_modifier, 1)\n\n return [FalseEntryPoint('dummy', 'dummy')]\n\n monkeypatch.setattr(em, 'iter_entry_points', false_iter)\n\n def check_dialog(qtbot, dial):\n assert 'modifying' in dial.text\n\n with pytest.raises(SystemExit):\n with handle_dialog(exopy_qtbot, 'reject', check_dialog):\n main([])", "def test_path_nonexistent(self):\n self.command.package = self.input_ovf\n self.command.file_path = \"foobar\"\n self.assertRaises(InvalidInputError, self.command.run)", "def test_bad_menu():\n with pt.raises(Exception):\n sc.import_menu('test.txt')", "def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))", "def test_test_directory_no_workspace(self):\n self.logger.info(\"STEP: Enter a test directory without a workspace.\")\n self.workspace = Workspace(Mock())\n self.logger.info(\"STEP: Verify that an exception was raised.\")\n with self.assertRaises(Exception):\n with self.workspace.test_directory(\"dir1\"):\n pass", "def test_Input_Invalid_Data(self):\n height = StringVar(self.root, 0)\n width = StringVar(self.root, -45)\n mines = StringVar(self.root, 3)\n with self.assertRaises(Exception) as context:\n self.menu.createGameWindow('Custom', height, width, mines)\n self.assertTrue('Invalid data' in str(context.exception))", "def test_wrong_mode(self):\n self.assertRaises(ComponentErrorsEx, self.dp.setRewindingMode, 'FOO')", "def test_call(self):\r\n p = OtuPicker({})\r\n self.assertRaises(NotImplementedError, p, '/path/to/seqs')", "def soft_assert_no_modals_present(modal_obj, soft_assert):\n assert issubclass(modal_obj.__class__, object_modal.BaseObjectModal), (\n \"Object should be derived from BaseObjectModal.\")\n tabs = browsers.get_browser().windows()\n soft_assert.expect(\n len(tabs) == 2, \"Only 2 window tabs should be opened but it is found \"\n \"{} tab(s).\".format(len(tabs)))\n for tab_num, tab in enumerate(tabs, start=1):\n tab.use()\n soft_assert.expect(not modal_obj.is_present,\n \"There should be no modal windows in browser \"\n \"tab number 
{}.\".format(tab_num))", "def test_runSignaled(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c',\n 'import sys; print \"hi\"; sys.stdout.flush(); '\n 'import os; os.kill(os.getpid(), 9)'])\n self.assertEquals(exc.exitSignal, 9)\n self.assertEquals(exc.exitStatus, None)\n self.assertEquals(exc.output, \"hi\\n\")", "def test_openDialog(self):\n\n def test_uri(self, uri):\n self.setUp()\n self.setup_err()\n self.run_script('foo.openDialog(\"%s\")' % uri)\n self.assert_failed(with_warnings=True)\n\n uris = ['http://foo/bar/',\n 'https://foo/bar/',\n 'ftp://foo/bar/',\n 'data:asdf']\n for uri in uris:\n yield test_uri, self, uri", "def test_basic_invalid_bill():\n b = toy_bill()\n b.identifier = None\n with pytest.raises(ValueError):\n b.validate()", "def test_worktree_prunes_worktree_on_failure(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\")\n\n with pytest.raises(Exception, match=\"Boom\"):\n with repository.worktree(branch) as worktree:\n raise Exception(\"Boom\")\n\n privatedir = repository.path / \".git\" / \"worktrees\" / worktree.path.name\n assert not privatedir.exists()", "def testBadFileSubmit(self, mock_open):\n mock_open.side_effect = IOError\n\n self.assertRaises(\n auacm.exceptions.InvalidSubmission,\n auacm.submit.submit, ['problem 1', 'notafile.cpp'])" ]
[ "0.664462", "0.6096759", "0.5865762", "0.56994104", "0.5670863", "0.5574597", "0.5482802", "0.54300237", "0.5427492", "0.53748256", "0.5308122", "0.52538544", "0.5234245", "0.52235", "0.5222695", "0.52182746", "0.52126163", "0.5170296", "0.516994", "0.51174134", "0.5110923", "0.51042396", "0.5097018", "0.5092945", "0.5092363", "0.5086963", "0.5082291", "0.5077408", "0.5071176", "0.5069975" ]
0.63953465
1
Test that `.openDialog("")` throws an error when the URI is a non-chrome, non-relative URL.
def test_openDialog(self): def test_uri(self, uri): self.setUp() self.setup_err() self.run_script('foo.openDialog("%s")' % uri) self.assert_failed(with_warnings=True) uris = ['http://foo/bar/', 'https://foo/bar/', 'ftp://foo/bar/', 'data:asdf'] for uri in uris: yield test_uri, self, uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_openDialog_pass(self):\n self.run_script(\"\"\"\n foo.openDialog(\"foo\")\n foo.openDialog(\"chrome://foo/bar\")\n \"\"\")\n self.assert_silent()", "def error_open_mess(url: str) -> None:\n meta = MainData()\n print(('{0}Can not open URL: {1} {2}{3}').format(meta.clrs['red'],\n meta.clrs['lblue'],\n url,\n meta.clrs['reset']))", "def test_openWindowWithWrongSettingsFile(self):\n self.createWrongSettingsFile()\n return self.assertRaises(SettingsCorrupted, ConfigurationWindow)", "def test_openDialog_flag_var(self):\n self.run_script(\"\"\"\n foo.openDialog(bar)\n \"\"\")\n self.assert_notices()", "def test_nsIFile_launch():\n\n assert _do_test_raw('foo.launch()').failed()", "def _unable_open_option(self):\n self.tap_on_open_option()\n\n unable_msg = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_h1, \"Unable to open msg\")\n self.UTILS.test.test(unable_msg.text == _(\"Unable to open\"), \"Unable to open msg\")", "def open_url(self, url):\n try:\n if url != \"\":\n self.driver.maximize_window()\n self.driver.get(url)\n print(url + \" : url is opened\")\n else:\n print(\"Please enter valid url\")\n except Exception as e:\n print(str(e))", "def open_link(self):\n try:\n # webbrowser.open(self.url) # if you are on Windows OS\n webbrowser.get('safari').open_new_tab(self.url) # if you are on Mac OS\n except(AttributeError):\n self.ids.label.text = self.error_msg", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_open_mainpage(open_browser, url_param):\n open_browser.get(url_param)\n assert open_browser.current_url == url_param\n open_browser.close()", "def test_running_main_error_in_parsing(exopy_qtbot):\n def check_dialog(qtbot, dial):\n assert 'cmd' in dial.text\n\n with pytest.raises(SystemExit):\n with handle_dialog(exopy_qtbot, 'reject', check_dialog):\n main(['dummy'])", "def assert_forbidden(self, url):\n with pytest.raises(zope.testbrowser.browser.HTTPError) as err:\n self.open(url)\n assert 'HTTP Error 403: Forbidden' == str(err.value), \\\n 'Raised \"{}\" instead of HTTP-403 Forbidden'.format(err.value)", "def test_validate_url_non_google_doc():\n url_not_a_google_doc = 'https://not-a-google-doc.com'\n assert validate_url(url_not_a_google_doc) is False", "def test__canonizeURL(self):\n self.run_script_for_compat('alert(e._canonizeURL(foo));')\n self.assert_silent()\n self.assert_compat_error()", "def error(message, title=None):\n return dialog(\"error\", message=message, title=title)", "def test_fail_launch_file(self):\n args = self.args.copy()\n # Pass a string instead of a list\n args[\"traj_file\"] = \"nofile.xtc\"\n with pytest.raises(FileNotFoundError) as err:\n UI.launch(**args)\n assert \"nofile.xtc does not exist.\" in str(err.value)", "def unable_to_open_message(filepath: Path, not_ok: Exception) -> None:\n if \"PYTEST_CURRENT_TEST\" in os.environ:\n print('DBG> Running inside a pytest -> not showing error message.')\n return\n info = QMessageBox()\n info.setIcon(QMessageBox.Information)\n print('Output from gemmi:', not_ok)\n try:\n line = str(not_ok)[4:].split(':')[1]\n except IndexError:\n line = None\n info.setText('This cif file is not readable! 
')\n if line:\n try:\n int(line)\n info.setInformativeText(f'\\nPlease check line {line} in\\n{filepath.name}')\n except ValueError:\n info.setInformativeText(f'\"{filepath.name}\"\\n{not_ok}')\n else:\n info.setInformativeText(f'\"{filepath.name}\"\\n{not_ok}')\n info.show()\n info.exec()", "def open(url):\r\n webbrowser.open(url)", "def test_validate_url_valid():\n url = 'https://docs.google.com/spreadsheets/d/AbCde1'\n\n assert validate_url(url) is True", "def test_open(self):\n page, resources = self.ghost.open(base_url)\n self.assertEqual(page.url, base_url)\n \n self.ghost.click(\"#run\")", "def test_validate_url_invalid_d_value():\n url_invalid_d_value = 'https://docs.google.com/spreadsheets/abc/AbCde1'\n assert validate_url(url_invalid_d_value) is False", "def simple_test_open_url(url):\n try:\n return requests.get(url, headers={\"User-Agent\": random.choice(useragents.useragents())}).status_code\n except Exception as _:\n return False", "def test_launch_tool_invalid_path_4(self):\n\n home_dir = os.path.join('/','bad_home','bad_hubname','fake_user')\n\n parameters_text = 'file(datafile1):%s/file_does_not_exist' % (home_dir)\n\n try:\n sessnum = launch_tool(self.https_authority,self.reguser,\n self.regpass,self.browser,self.catalog,self.utils,\n TOOL_NAME,TOOL_REVISION,parameters_text)\n\n self.close_sessions.append(sessnum)\n\n assert False, \"while passing tool parameters, cms failed to\" \\\n + \" catch invalid path: %s\" % (repr(parameters_text))\n\n except BadParameterError as e:\n pass", "def test_unfetchable_url(self):\r\n url = u'file://test.html'\r\n read = readable.ReadUrl.parse(url)\r\n self.assertEqual(read.status, 901)", "def browser_open(url):\n FNULL = open(os.devnull, 'w')\n subprocess.Popen([udata.browser, url], stdout=FNULL, stderr=subprocess.STDOUT )", "def test_NoFileUploaded_NoRedirectionAfterSubmint(self):\n\n self.open(config.url)\n self.click(config.submit_file)\n self.assert_element_present('#send-title')", "def test_request_fetch_bogus_url():\n with pytest.raises(SystemExit):\n request.fetch(\"lsdfjlsdjf\")", "def test_username_not_exist(self):\n\n url_extend = 'user_auth/login/'\n # get the first input button under the first form in login page.\n username = 'usersomerandomeuser'\n password = 'user'\n login_button = login(self.browser, self.url + url_extend, username, password)\n try:\n login_button.click()\n except:\n raise Exception(\"Login Error!\")\n\n ## check the current url\n assert self.browser.current_url == self.url + url_extend", "def test_open_browser_linux_no_xdg(self):\n from streamlit import env_util\n\n env_util.IS_LINUX_OR_BSD = True\n\n with patch(\"streamlit.env_util.is_executable_in_path\", return_value=False):\n with patch(\"webbrowser.open\") as webbrowser_open:\n with patch(\"subprocess.Popen\") as subprocess_popen:\n util.open_browser(\"http://some-url\")\n self.assertEqual(True, webbrowser_open.called)\n self.assertEqual(False, subprocess_popen.called)", "def test_failToOpenLocalFile(self):\n fp = FilePath(self.mktemp()).child(\"child-with-no-existing-parent\")\n\n self.assertRaises(IOError, self.makeConnectedDccFileReceive, fp.path)" ]
[ "0.76250166", "0.63414615", "0.60822976", "0.5849271", "0.5837435", "0.5826389", "0.57790226", "0.57463026", "0.5706456", "0.5685249", "0.5682213", "0.56506664", "0.56181157", "0.5609045", "0.5588032", "0.55650854", "0.5546954", "0.55328673", "0.5530209", "0.55300903", "0.5511032", "0.5505552", "0.5503703", "0.5470498", "0.5465367", "0.5461519", "0.54481775", "0.54370975", "0.54364514", "0.5434695" ]
0.6708838
1
select name from mydb.item_item where ingredients not like '%multimedia%' and ingredients not like '%provision%'
def test_excludeIngredientQuery(self) -> None:
    ingredient0 = 'multimedia'
    ingredient1 = 'provision'
    result = self.entries.exclude(Q(ingredients__icontains=ingredient0) | Q(ingredients__icontains=ingredient1))
    self.assertEqual(988, len(result))
    queries = (Q(ingredients__icontains=ingredient0), Q(ingredients__icontains=ingredient1))
    result = self.entries.exclude(functools.reduce(operator.or_, queries))
    self.assertEqual(988, len(result))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(self):\n return super(BeefRecipes, self).execute().where(lower(col('ingredients')).like(\"%beef%\"))", "def test_search_by_bad_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['asdfadsfa'])\n self.assertEqual(recipe_id, None)", "def test_includeIngredientQuery(self) -> None:\n ingredient0 = 'multimedia'\n ingredient1 = 'provision'\n result = self.entries.filter(Q(ingredients__icontains=ingredient0) & Q(ingredients__icontains=ingredient1))\n self.assertEqual(1, len(result))", "def filter_non_ingredient(ingredient_list):\n stop_words = set(stopwords.words('english'))\n \n filtered_list = []\n add_list = 0 #a dummy variable to add a text to filtered list\n for phrases in set(ingredient_list): #run through only one item in set (removes duplicates)\n\n for word in phrases:\n if word in stop_words:\n phrases.replace(word,'')\n\n #if one of the word in a phrase is ingredient, counts in to list\n for word in word_tokenize(phrases): #phrases can be phrase (run through phrases)\n \n is_ingredient = is_it_ingredient(word) #returns true if a word is ingridient\n \n if is_ingredient == True:\n add_list = 1\n else:\n add_list = 0\n\n ##if one of the word in a phrase is ingredient, counts in to list\n if add_list == 1 :\n\n filtered_list.append(phrases.capitalize())\n add_list = 0 \n\n return filtered_list", "def test_search_by_no_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients([])\n self.assertEqual(recipe_id, None)", "def get_beer_ingredients(beer):\n beer_ingredients = []\n for ing in beer['ingredients']:\n for item in beer['ingredients'][ing]:\n if 'name' in item:\n if item['name'] not in beer_ingredients:\n beer_ingredients.append(item['name'])\n\n return beer_ingredients", "def negation(document):\n negation=[]\n for item in document:\n if '\\'t' in item:\n negation.append('not')\n negation.append(item)\n else:\n negation.append(item)\n return negation", "def filter_one_v_all(description):\n brain_parts = [\"forebrain\", \"midbrain\", \"hindbrain\"]\n for part in brain_parts:\n if part in description:\n return True\n return False", "def cookbook_search(search_term):\n return db.boxcar_cookbooks.find({'name': {'$regex':'^'+search_term}})", "def test_search_by_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['butter', 'sugar', 'eggs'])\n self.assertGreater(recipe_id, 0)", "def short(products, field):\n\n li = []\n for p in products:\n if not p[field].replace(\" \", \"_\") in li:\n li.append(p[field].replace(\" \", \"_\"))\n return li", "def test_query_with_no_matches_returns_nothing(test_store):\n items = list(test_store.get_by(name=\"Sugar\"))\n\n assert len(items) == 0", "def exclude_other_class_items(_items, class_name):\n\n class_skills = class_skill_names(class_name)\n other_skill_names = list(set(all_class_skill_names()) - set(class_skills)) + class_attributes(Classes.DEMON_HUNTER)\n\n def match_invert_skills(item):\n \"\"\" filter items based on if they match a class skill \"\"\"\n text = item.text\n\n if any([skill in text for skill in other_skill_names]):\n if any([skill in text for skill in class_skills]): # double check\n print('found a wizard skill', [skill for skill in class_skills if skill in text])\n print(item)\n return True\n return False\n return True\n\n return list(filter(match_invert_skills, _items))\n\n # def match_invert_skills(_item):\n # \"\"\" filter items based on if they match a class skill \"\"\"\n # text = _item.text\n #\n # if any([skill in text for skill in other_skill_names]):\n #\n 
# if any([skill in text for skill in class_skills]): # double check\n # print('found aa wizard skill', [skill for skill in class_skills if skill in text])\n # print(_item)\n # return True\n # return False\n #\n # print('lolll')\n # return True\n #\n # print(other_skill_names)\n # to_return = []\n # for item in _items:\n # if match_invert_skills(item):\n # to_return.append(item)\n #\n #\n # return to_return", "def test_one_word_with_one_not(self):\n words = \"Python\"\n none = \"Junior\"\n self.q.construct_query(all_words=words, none=none)\n self.assertEqual(self.q.query, \"Python+-Junior\")", "def remove_colors(ingredient):\n colors = [\"yellow\", \"purple\", \"green\", \"black\",\n \"purple\", \"white\", \"red\"]\n no_colors = [gram for gram in ingredient.split(\" \") if gram not in colors]\n colorless_string = \" \".join(no_colors)\n return colorless_string", "def search_brands_by_name(mystr):\n brands = Brand.query.filter(Brand.name.like('%'+mystr+'%')).all()\n return brands", "def test_search_recipes_by_ingredients(self):\n pass", "def search_recipe(ingredients):\n\n params = '+'.join(ingredients.split())\n url_search = SEARCH_URL.format(params)\n response = req.get(url_search)\n\n return response.content", "def search_brands_by_name(mystr):\n\n results = db.session.query(Brand).filter(Brand.name.like(mystr)).all()\n\n return results", "def search_brands_by_name(mystr):\n \n return Brand.query.filter(Brand.name.like('%' + mystr + '%')).all()", "def shop_items(request):\n\n items = Item.objects.all()\n\n query = None\n\n \"\"\" Used Code Institute Search logic from Tutorial \"\"\"\n if 'query' in request.GET:\n query = request.GET['query']\n if not query:\n messages.error(request, \"Please enter your search\")\n return redirect(reverse('items'))\n \n queries = Q(name__icontains=query) | Q(item_description__icontains=query)\n items = items.filter(queries)\n\n context = {\n 'items': items,\n 'search_term': query,\n }\n\n return render(request, 'items/items.html', context)", "def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)", "def filter_pro_matches(resp):\n\n return [x for x in resp if x[\"dire_name\"] and x[\"radiant_name\"]]", "def clean_names_list(names):\n pure_names = []\n nan = re.compile('nan', re.IGNORECASE)\n title = re.compile('surname', re.IGNORECASE)\n for name in names:\n if nan.search(name):\n continue\n elif title.search(name):\n continue\n else:\n pure_names.append(name)\n return pure_names", "def test_legal_names(self):\n products = generate_products()\n\n for product in products:\n names = product.name.split(\" \")\n self.assertIn(names[0], ADJECTIVES)\n self.assertIn(names[1], NOUNS)", "def listsearch(query, item):\n fh = ''\n if not isinstance(item, six.string_types):\n fh = item[1]\n item = item[0]\n\n return bool(re.search(query, item) or\n re.search(query, fh))", "def test_negation():\n char1 = Character(court=['winter'])\n char2 = Character()\n char3 = Character(court=['summer'])\n res = npc.commands.find_characters([\"court~:winter\"], [char1, char2, char3])\n assert char1 not in res\n assert char2 in res\n assert char3 in res", "def get_ingred_exclusions(user_id):\n\n exclusions = ExcludedIngredient.query.filter_by(user_id=user_id).all()\n if exclusions:\n exclusion_list = 
[]\n for exclusion in exclusions:\n exclusion_list.append(exclusion.ingred_name)\n\n return exclusion_list\n else:\n return None", "def is_it_ingredient(word):\n reject_synsets = ['meal.n.01', 'meal.n.02', 'dish.n.02', 'vitamin.n.01']\n reject_synsets = set(wordnet.synset(w) for w in reject_synsets)\n accept_synsets = ['food.n.01', 'food.n.02']\n accept_synsets = set(wordnet.synset(w) for w in accept_synsets)\n for word_synset in wordnet.synsets(word, wordnet.NOUN):\n all_synsets = set(word_synset.closure(lambda s: s.hypernyms()))\n all_synsets.add(word_synset)\n for synset in reject_synsets:\n if synset in all_synsets:\n return False\n for synset in accept_synsets:\n if synset in all_synsets:\n return True", "def _badnames():\n\n with sqlite3.connect(DB) as db:\n cursor = db.cursor()\n cursor.execute(\"SELECT eid, fullname from players ORDER BY eid\")\n rows = cursor.fetchall()\n # list to put all entries in.\n outlist = []\n # now check each name.\n if len(rows) == 0:\n return None\n else:\n for row in rows: # fullname = row[1]\n splitname = row[1].split() # splits on the space.\n if len(splitname) != 2: # if the name is not 2. append to list.\n outlist.append(\"{0} - {1}\".format(row[0], row[1]))\n # return what we have.\n return outlist" ]
[ "0.620481", "0.6070535", "0.5824319", "0.56567574", "0.5471123", "0.540554", "0.54015297", "0.53638273", "0.5333347", "0.53282493", "0.5299963", "0.5287664", "0.52692205", "0.5224078", "0.52074933", "0.5206623", "0.52025026", "0.5187782", "0.5176583", "0.5147688", "0.51246583", "0.50674075", "0.5066795", "0.5063075", "0.5023341", "0.50187427", "0.5015921", "0.49804744", "0.49631062", "0.49547613" ]
0.6608633
0
select name from mydb.item_item where ingredients like '%multimedia%' and ingredients like '%provision%'
def test_includeIngredientQuery(self) -> None:
    ingredient0 = 'multimedia'
    ingredient1 = 'provision'
    result = self.entries.filter(Q(ingredients__icontains=ingredient0) & Q(ingredients__icontains=ingredient1))
    self.assertEqual(1, len(result))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(self):\n return super(BeefRecipes, self).execute().where(lower(col('ingredients')).like(\"%beef%\"))", "def search_recipe(ingredients):\n\n params = '+'.join(ingredients.split())\n url_search = SEARCH_URL.format(params)\n response = req.get(url_search)\n\n return response.content", "def search_brands_by_name(mystr):\n\n results = db.session.query(Brand).filter(Brand.name.like(mystr)).all()\n\n return results", "def search_brands_by_name(mystr):\n brands = Brand.query.filter(Brand.name.like('%'+mystr+'%')).all()\n return brands", "def search_brands_by_name(mystr):\n \n return Brand.query.filter(Brand.name.like('%' + mystr + '%')).all()", "def cookbook_search(search_term):\n return db.boxcar_cookbooks.find({'name': {'$regex':'^'+search_term}})", "def search_products(name='', price='', stock='', description=''):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n if description:\n cursor.execute(\n \"\"\"\n SELECT id_product, product_name, product_price, in_stock, description\n FROM Products\n WHERE product_name=? OR product_price=? OR in_stock=? OR description=?\n \"\"\",\n (name, price, stock, description,))\n else:\n cursor.execute(\n \"\"\"\n SELECT id_product, product_name, product_price, in_stock, description\n FROM Products\n WHERE product_name=? OR product_price=? OR in_stock=?\n \"\"\",\n (name, price, stock,))\n return cursor.fetchall()", "def search():\n query = request.args['query']\n # find instances of the entered word in title, tags or ingredients\n results = mongo.db.places.find({\n '$or': [\n {'name': {'$regex': query, '$options': 'i'}},\n {'tags': {'$regex': query, '$options': 'i'}},\n {'city': {'$regex': query, '$options': 'i'}},\n ]\n })\n return render_template('search.html', query=query, results=results)", "def test_search_recipes_by_ingredients(self):\n pass", "def search_general(abe, q):\n def process(row):\n (name, code3) = row\n return { 'name': name + ' (' + code3 + ')',\n 'uri': 'chain/' + str(name) }\n ret = map(process, abe.store.selectall(\"\"\"\n SELECT chain_name, chain_code3\n FROM chain\n WHERE UPPER(chain_name) LIKE '%' || ? || '%'\n OR UPPER(chain_code3) LIKE '%' || ? 
|| '%'\n \"\"\", (q.upper(), q.upper())))\n return ret", "def shop_items(request):\n\n items = Item.objects.all()\n\n query = None\n\n \"\"\" Used Code Institute Search logic from Tutorial \"\"\"\n if 'query' in request.GET:\n query = request.GET['query']\n if not query:\n messages.error(request, \"Please enter your search\")\n return redirect(reverse('items'))\n \n queries = Q(name__icontains=query) | Q(item_description__icontains=query)\n items = items.filter(queries)\n\n context = {\n 'items': items,\n 'search_term': query,\n }\n\n return render(request, 'items/items.html', context)", "def test_search_by_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['butter', 'sugar', 'eggs'])\n self.assertGreater(recipe_id, 0)", "def search():\n query = request.form.get(\"query\")\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)", "def get_items_by_name(request, name):\n try:\n items = Items.objects.filter(titulo__icontains=name)\n except Items.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = ItemsSerializer(items, many=True)\n return Response(serializer.data)", "def search_substitute(product):\r\n cursor.execute('USE openfoodfacts;')\r\n # Make a string with the categories used in the query\r\n search = product.category\r\n # Other variable\r\n product_name = product.name\r\n product_score = product.nutri_score\r\n\r\n cursor.execute(\"\"\"SELECT Food.id, Food.name, categories_id, nutri_score, url, stores \\\r\n FROM Food \\\r\n INNER JOIN Categories ON Food.categories_id = Categories.name\\\r\n WHERE categories_id LIKE %s AND Food.name NOT LIKE %s \\\r\n AND Food.nutri_score <= %s \"\"\", (search, product_name, product_score))\r\n substitute = cursor.fetchone()\r\n try:\r\n return cl.Food(substitute)\r\n except TypeError:\r\n print(\"Désolé, il n'y a pas de substitut pour ce product...\")", "def listsearch(query, item):\n fh = ''\n if not isinstance(item, six.string_types):\n fh = item[1]\n item = item[0]\n\n return bool(re.search(query, item) or\n re.search(query, fh))", "def similar(text, database):\n # TODO\n pass", "def query(self,prefix):\n result =[ [ ] , [ ] ]\n\n p1='^'+prefix+r'[a-zA-Z]*$'\n pattern = re.compile(p1) \n for word in self.wordList:\n if pattern.match(word):\n result[0].append(word)\n result[1].append(self.embDic[word])\n return result", "def find_tags_by_prefix(prefix):\n\twith postgres, postgres.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n\t\tcur.execute(\"select * from mustard.tags where english_name ilike %s order by english_name\", (prefix + \"%\",))\n\t\treturn cur.fetchall()", "def ingredient_db():\n # type: () -> List[Text]\n return [\"abricot\",\n \"banane\",\n \"cassis\",\n \"cerise\",\n \"citron\",\n \"clémentine\",\n \"coing\",\n \"fraise\",\n \"framboise\",\n \"groseille\",\n \"mirabelle\",\n \"mûre\",\n \"myrtille\",\n \"nectarine\",\n \"orange\",\n \"pamplemousse\",\n \"pomelo\",\n \"pêche\",\n \"poire\",\n \"pomme\",\n \"prune\",\n \"pruneau\",\n \"raisin\",\n \"rhubarbe\",\n \"ananas\",\n \"figue\",\n \"fruit de la passion\",\n \"goyave\",\n \"grenade\",\n \"kaki\",\n \"kiwi\",\n \"kumquat\",\n \"litchi\",\n \"mangue\",\n \"melon\",\n \"papaye\",\n \"pastèque\",\n \"vanille\",\n \"amande\",\n \"datte\",\n \"noisette\",\n \"artichaut\",\n \"aubergine\",\n \"asperge\",\n \"avocat\",\n \"betterave\",\n \"blette\",\n \"brocoli\",\n \"banane plantain\",\n \"carotte\",\n \"cardon\",\n \"céleri rave\",\n 
\"céleri branche\",\n \"champignon\",\n \"champignon de paris\",\n \"chou blanc\",\n \"chou rouge\",\n \"chou de bruxelles\",\n \"chou-fleur\",\n \"citrouille\",\n \"concombre\",\n \"courge\",\n \"courgette\",\n \"crosne\",\n \"echalote\",\n \"epinard\",\n \"endive\",\n \"fenouil\",\n \"haricot vert\",\n \"haricot\",\n \"navet\",\n \"oignon\",\n \"oseille\",\n \"panais\",\n \"pâtisson\",\n \"petit pois\",\n \"poireau\",\n \"poivron\",\n \"potiron\",\n \"radis rouge\",\n \"rutabaga\",\n \"navet\",\n \"salade \",\n \"salsifis\",\n \"tomate\",\n \"topinambour\",\n \"maïs\"]", "def search():\n query = request.form.get(\"query\", None)\n recipes = mongo.db.recipes.find({\"$text\": {\"$search\": query}})\n return render_template(\"recipes/list.html\", recipes=recipes)", "def search_db():\n print \"asdfasdfsda\"\n \"\"\"\n term = request.form[\"term\"]\n q1 = g.db.query(Referee).filter(Referee.f_name.like('%{0}%'.format(term)))\n q2 = g.db.query(Referee).filter(Referee.l_name.like('%{0}%'.format(term)))\n\n results = q1.union(q2).all()\n return render_template('search_results.html', results=results)\n \"\"\"", "def search():\n query = request.form.get(\"query\")\n # pylint: disable=redefined-outer-name\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)", "def search_products(phrase):\n sv = (SearchVector('name', weight='A') +\n SearchVector('description', weight='B'))\n rank = SearchRank(sv, SearchQuery(phrase))\n return Product.objects.annotate(rank=rank).filter(\n rank__gte=0.2).order_by('-rank')", "def getIngredients():\n ingredients = ['Whiskey', 'Tequila', 'Vodka', 'Blue Curacao', 'Orange Juice',\n 'Pineapple Juice', 'Cranberry Juice', 'Sour Mix']\n return ingredients", "def search_by_substring(db, table, column, substring):\n\n condition = column + \" LIKE \\'%\" + substring + \"%\\'\"\n result = select_columns(db, table, \"*\", condition=condition)\n\n return result", "def search(query_string):", "def autocomplete():\n value = str(request.args.get('q'))\n result = s.query(Genes).filter(Genes.name.like(\"%\" + value + \"%\")).all()\n data = [i.name for i in result]\n return jsonify(matching_results=data)", "def search_shopping_list_by_item_name_keyword(title):\n shopping_list_ids = []\n item_ids = []\n item_list = db.session.query(Item).filter(Item.title.like(('%' + title + '%'))).all()\n for item in item_list:\n item_ids.append(item.id)\n\n shopping_list_items = db.session.query(ShoppingListItems).filter(ShoppingListItems.item_id.in_(item_ids)).all()\n for shopping_list_item in shopping_list_items:\n shopping_list_ids.append(shopping_list_item.shopping_list_id)\n\n all_shopping_lists = db.session.query(ShoppingList).filter(ShoppingList.id.in_(shopping_list_ids)).all()\n return create_shopping_list_output(all_shopping_lists)", "def search_for_meme(self, search):\n cursor = self.conn.cursor()\n cursor.execute(f\"select * from memes where lower(meme_name) like ?\", (f'%{search}%', ))\n results = cursor.fetchall()\n cursor.close()\n return results" ]
[ "0.6647757", "0.6191605", "0.59739184", "0.59232444", "0.5843114", "0.5770647", "0.57508814", "0.57090604", "0.56950915", "0.56343377", "0.55926657", "0.5586077", "0.555635", "0.55286276", "0.5510413", "0.55007166", "0.5458444", "0.5436161", "0.5435352", "0.5406655", "0.5402049", "0.53797895", "0.5371757", "0.5339518", "0.53363633", "0.53300804", "0.5293815", "0.5287651", "0.5286931", "0.5279974" ]
0.63518983
1
Processes the configuration for a group.
def _process_group(self, **config_kwargs) -> RobotGroupConfig:
    return RobotGroupConfig(self.sim_scene, **config_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_group_from_config(self):\n\n group_file_name = \"cicada/config/group.yaml\"\n if os.path.isfile(group_file_name):\n self.group_data = dict()\n with open(group_file_name, 'r') as stream:\n self.group_data = yaml.safe_load(stream)\n self.all_groups = deepcopy(self.group_data)\n if self.group_data:\n keys_to_del = []\n for key, value in self.group_data.items():\n missing_file = False\n for file in value:\n if file not in self.nwb_path_list.values():\n missing_file = True\n if missing_file:\n keys_to_del.append(key)\n for key in keys_to_del:\n self.group_data.pop(key)\n self.grouped_labels = []\n if self.group_data:\n self.grouped = True\n for value in self.group_data.values():\n nwb_file_list = []\n for file in value:\n io = NWBHDF5IO(file, 'r')\n nwb_file = io.read()\n self.data_dict[nwb_file.identifier] = nwb_file\n nwb_file_list.append(nwb_file.identifier)\n self.grouped_labels.append(nwb_file_list)\n self.showGroupMenu.setEnabled(True)\n self.addGroupDataMenu.setEnabled(True)\n self.populate_menu()\n else:\n self.showGroupMenu.setEnabled(False)\n self.addGroupDataMenu.setEnabled(False)\n self.showGroupMenu.clear()\n self.addGroupDataMenu.clear()", "def parseConf(confData):\n\tgroupNames = confData.keys()\n\tfor group in groupNames:\n\t\tlogger.info(\"processing {0}\".format(confData[group][\"group_name\"]))\n\t\tlogger.info(\"source: {0}\".format(confData[group][\"source\"]))\n\t\tsourcePath = confData[group][\"source\"]\n\t\tlogger.info(\"destination: {0}\".format(confData[group][\"destination\"]))\n\t\tdestinationPath = confData[group][\"destination\"]\n\t\tfor f in confData[group][\"file_names\"]:\n\t\t\tlogger.info(\"file name: {0}\".format(f))\n\t\t\tfileSource = \"{0}/{1}\".format(sourcePath, f)\n\t\t\tfileDestination = \"{0}/{1}\".format(destinationPath, f)\n\t\t\tmoveFile(fileSource, fileDestination)\n\treturn True", "def _build_config_group(self, config_name: str):\n # TODO: consider adding calibration sub-groups\n # create configuration group\n gname = config_name\n self.create_group(gname)\n\n # -- set attributes for configuration group ----\n brd_slot_num = [\n 3,\n ]\n brd_types = [\n 4,\n ]\n brd_config_indices = [\n 0,\n ]\n brd_address = [\n self.slot_info[brd_slot_num[0]][2],\n ]\n for field in (\"SIS 3305\", \"SIS 3302\"):\n config_index = 0\n brd_bool_arr = np.any(self._active_brdch[field], axis=1)\n brd_index = np.where(brd_bool_arr)[0]\n for brd in brd_index:\n # determine slot number\n slot = self.get_slot(brd + 1, field)\n if slot is None:\n warn(f\"Got no slot number for board number {brd+1}\")\n continue\n\n # update lists\n brd_slot_num.append(slot)\n brd_types.append(3 if field == \"SIS 3305\" else 2)\n brd_config_indices.append(config_index)\n brd_address.append(self.slot_info[slot][2])\n\n # increment config index\n config_index += 1\n\n # update attributes\n self[gname].attrs.update(\n {\n \"SIS crate base addresses\": np.array(brd_address, dtype=np.uint32),\n \"SIS crate board types\": np.array(brd_types, dtype=np.uint32),\n \"SIS crate config indices\": np.array(brd_config_indices, dtype=np.uint32),\n \"SIS crate max average shots\": np.int32(1),\n \"SIS crate slot numbers\": np.array(brd_slot_num, dtype=np.uint32),\n }\n )\n\n # -- Create and Populate Configuration Sub-Groups ----\n for slot, index in zip(brd_slot_num, brd_config_indices):\n adc = self.slot_info[slot][1]\n if adc == \"SIS 3820\":\n self._build_config_sis3820_subgroup(config_name, slot, index)\n elif adc == \"SIS 3302\":\n self._build_config_sis3302_subgroup(config_name, slot, 
index)\n elif adc == \"SIS 3305\":\n self._build_config_sis3305_subgroup(config_name, slot, index)", "def _update(self):\n # clear group before rebuild\n self.clear()\n\n # build configuration groups\n self._config_names = []\n for i in range(self._n_configs):\n config_name = f\"config{i+1:02}\"\n self._config_names.append(config_name)\n self._build_config_group(config_name)\n\n # reset active configuration if necessary\n if not all(cname in self._config_names for cname in self._active_config):\n self._active_config = (self._config_names[0],)\n\n # build datasets\n self._build_datasets()", "def config(self, **kw):\n group = kw.pop('group', None)\n for k, v in kw.items():\n CONF.set_override(k, v, group)", "def config(self, **kw):\n group = kw.pop('group', None)\n for k, v in kw.iteritems():\n CONF.set_override(k, v, group)", "def __init__(self, conf, group):\n self._conf = conf\n self._group = group", "def _handleAnswerFileParams(answerFile):\n try:\n logging.debug(\"Starting to handle config file\")\n\n # Read answer file\n fconf = ConfigParser.ConfigParser()\n fconf.read(answerFile)\n\n # Iterate all the groups and check the pre/post conditions\n for group in controller.getAllGroups():\n # Get all params per group\n\n # Handle pre conditions for group\n preConditionValue = True\n if group.getKey(\"PRE_CONDITION\"):\n preConditionValue = _handleGroupCondition(fconf, group.getKey(\"PRE_CONDITION\"), preConditionValue)\n\n # Handle pre condition match with case insensitive values\n logging.info(\"Comparing pre- conditions, value: '%s', and match: '%s'\" % (preConditionValue, group.getKey(\"PRE_CONDITION_MATCH\")))\n if utils.compareStrIgnoreCase(preConditionValue, group.getKey(\"PRE_CONDITION_MATCH\")):\n for param in group.getAllParams():\n _loadParamFromFile(fconf, \"general\", param.getKey(\"CONF_NAME\"))\n\n # Handle post conditions for group only if pre condition passed\n postConditionValue = True\n if group.getKey(\"POST_CONDITION\"):\n postConditionValue = _handleGroupCondition(fconf, group.getKey(\"POST_CONDITION\"), postConditionValue)\n\n # Handle post condition match for group\n if not utils.compareStrIgnoreCase(postConditionValue, group.getKey(\"POST_CONDITION_MATCH\")):\n logging.error(\"The group condition (%s) returned: %s, which differs from the excpeted output: %s\"%\\\n (group.getKey(\"GROUP_NAME\"), postConditionValue, group.getKey(\"POST_CONDITION_MATCH\")))\n raise ValueError(output_messages.ERR_EXP_GROUP_VALIDATION_ANS_FILE%\\\n (group.getKey(\"GROUP_NAME\"), postConditionValue, group.getKey(\"POST_CONDITION_MATCH\")))\n else:\n logging.debug(\"condition (%s) passed\" % group.getKey(\"POST_CONDITION\"))\n else:\n logging.debug(\"no post condition check for group %s\" % group.getKey(\"GROUP_NAME\"))\n else:\n logging.debug(\"skipping params group %s since value of group validation is %s\" % (group.getKey(\"GROUP_NAME\"), preConditionValue))\n\n except Exception as e:\n logging.error(traceback.format_exc())\n raise Exception(output_messages.ERR_EXP_HANDLE_ANSWER_FILE%(e))", "def process_config(self):\n driver_options = self.config['service']['options']\n process_config = {\n 'assembler_config': {\n 'driver_options': driver_options,\n 'teststep_config': self.teststep_config,\n 'testcase_config': self.config['reader_settings']['test_case']['keys'],\n },\n 'assembly_config': self.config['assembly_settings'],\n }\n return process_config", "def setGroup(self, group):\n\t\tself.config.GROUP = group", "async def updateGroupConfiguration(self, config_type=None, group_slug=None, 
body=\"\"):\n payload = {}\n \n if config_type:\n payload[\"config_type\"] = config_type\n \n if group_slug:\n payload[\"group_slug\"] = group_slug\n \n\n # Parameter validation\n schema = CatalogValidator.updateGroupConfiguration()\n schema.dump(schema.load(payload))\n \n # Body validation\n from .models import AppConfigurationDetail\n schema = AppConfigurationDetail()\n schema.dump(schema.load(body))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v2.0/company/{self._conf.companyId}/application/{self.applicationId}/product-configuration/{config_type}/groups/{group_slug}\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"config_type\",\"description\":\"A `config_type` is a unique identifier for a particular group configuration type.\",\"schema\":{\"type\":\"string\",\"enum\":[\"comparisons_groups\",\"details_groups\",\"seller_groups\"]},\"required\":true},{\"in\":\"path\",\"name\":\"group_slug\",\"description\":\"A `group_slug` is a unique identifier of a particular configuration.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"config_type\",\"description\":\"A `config_type` is a unique identifier for a particular group configuration type.\",\"schema\":{\"type\":\"string\",\"enum\":[\"comparisons_groups\",\"details_groups\",\"seller_groups\"]},\"required\":true},{\"in\":\"path\",\"name\":\"group_slug\",\"description\":\"A `group_slug` is a unique identifier of a particular configuration.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", config_type=config_type, group_slug=group_slug)\n query_string = await create_query_string(config_type=config_type, group_slug=group_slug)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"PUT\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"put\", await create_url_without_domain(f\"/service/platform/catalog/v2.0/company/{self._conf.companyId}/application/{self.applicationId}/product-configuration/{config_type}/groups/{group_slug}\", config_type=config_type, group_slug=group_slug), query_string, headers, body, exclude_headers=exclude_headers), data=body)", "def process(self):\n self._processed = True\n # We need to load up previous section_maps info\n with open(os.path.join(self.home, 'section_maps'), 'rb') as _file:\n section_maps = pickle.load(_file)\n\n # This will ensure that sections persist with the same -a, -b nomenclature over time\n self.groups.section_maps = 
section_maps\n self.groups.period_info = {}\n\n super().process()", "def fromFile(filename, config=None, **kwargs):\n\n # overwrite existing values?\n overwrite = kwargs.pop('overwrite', False)\n\n # Config files can have grouped arguments\n # the variable to store in groups\n groups_name = kwargs.pop('groups_name', 'groups')\n # the name of the grouping key=value pair\n group_on = kwargs.pop('group_on', None)\n # the target group to extract\n primary_group = kwargs.pop('group', None)\n\n # If no config object was passed in, create one\n if config is not None:\n self = config\n else:\n self = Config(**kwargs)\n self._filename = filename\n self._path = os.path.abspath(os.path.dirname(filename))\n\n self[group_on] = primary_group\n\n # current group\n group = self # start with the base config object as the group\n group_name = None\n groups = {}\n self[groups_name] = groups\n\n if filename is not None:\n file = open(filename, 'r')\n for line in file:\n line = line.strip()\n # skip comments\n if line == '' or line[0] in ('#', '%') or line[:2] in ('//',):\n continue\n key, value = line.split('=', 1)\n key = key.strip()\n value = value.strip()\n\n # using eval() is inherently insecure, but allows for nice options\n # for setting options in the config file\n\n # first we attempt to evaluate the value without using the\n # config object as the locals\n no_locals_val = None\n try:\n no_locals_val = eval(value)\n except:\n pass\n\n # now we evaluate the value with the config object as the locals\n locals_val = None\n try:\n locals_val = eval(value, {}, self.__dict__)\n except:\n locals_val = value\n\n # if the key equals the group tag, start a new grouping\n if key == group_on:\n group_name = locals_val\n if group is not None:\n self[locals_val] = group\n group = Config(**kwargs)\n groups[locals_val] = group\n\n # start at the next line now that we have a group object\n continue\n\n if type(locals_val) is str:\n # try string replacement using the config object as the dict\n try:\n locals_val = locals_val % self\n except KeyError:\n pass\n try:\n locals_val = locals_val % group\n except KeyError:\n pass\n\n # if their string representations are not equal then the config\n # object, used as locals, was actually need to evaluate the value\n # so store the original string, it will be needed to reconstruct things\n if str(no_locals_val) != str(locals_val):\n group.__orig[key] = value\n\n if overwrite:\n group[key] = locals_val\n else:\n cur_val = group.get(key, None)\n group[key] = locals_val if cur_val is None else cur_val\n\n # if the current group is the target/primary group the add the\n # key=value directly to the config\n if group_name == primary_group:\n if overwrite:\n self[key] = locals_val\n else:\n cur_val = self.get(key, None)\n self[key] = locals_val if cur_val is None else cur_val\n\n file.close()\n\n # if there is only one group, extract it outwards to the top level\n # if len(groups) == 1:\n # self.__dict__[group_on] = groups.iterkeys().next()\n return self", "def parse_config(self):\n # TODO: parse config file\n pass", "def group_info(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return 1\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n if group[\"name\"] == args.group:\n print(json.dumps(group, indent=4))\n return 0\n\n print(\"No group matching {} 
found\".format(args.group))\n return 1", "def _initGroups(self):\n defaults = self._getGroupDefaults()\n ddict = self._getDefaultGroupDict(defaults)\n\n for group in self._config.sections():\n ddict[\"_name\"] = group\n container = self.getGroupContainer(**ddict)\n self._passConfig(container, group)\n self.groups.append(container)\n\n if not self.groups:\n self.groups.append(self.getGroupContainer(**defaults._dict_))", "async def createGroupConfiguration(self, config_type=None, body=\"\"):\n payload = {}\n \n if config_type:\n payload[\"config_type\"] = config_type\n \n\n # Parameter validation\n schema = CatalogValidator.createGroupConfiguration()\n schema.dump(schema.load(payload))\n \n # Body validation\n from .models import AppConfigurationDetail\n schema = AppConfigurationDetail()\n schema.dump(schema.load(body))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v2.0/company/{self._conf.companyId}/application/{self.applicationId}/product-configuration/{config_type}/groups\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"config_type\",\"description\":\"A `config_type` is a unique identifier for a particular group configuration type.\",\"schema\":{\"type\":\"string\",\"enum\":[\"comparisons_groups\",\"details_groups\",\"seller_groups\"]},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"config_type\",\"description\":\"A `config_type` is a unique identifier for a particular group configuration type.\",\"schema\":{\"type\":\"string\",\"enum\":[\"comparisons_groups\",\"details_groups\",\"seller_groups\"]},\"required\":true}]}\"\"\", config_type=config_type)\n query_string = await create_query_string(config_type=config_type)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"POST\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"post\", await create_url_without_domain(f\"/service/platform/catalog/v2.0/company/{self._conf.companyId}/application/{self.applicationId}/product-configuration/{config_type}/groups\", config_type=config_type), query_string, headers, body, exclude_headers=exclude_headers), data=body)", "def define_group_properties(self):\n\n # PropertyGroup\n self.propertygroup['debug']['x86'] = get_propertygroup(\n 'debug', 'x86', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['debug']['x64'] = get_propertygroup(\n 'debug', 'x64', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['release']['x86'] = get_propertygroup(\n 'release', 'x86', ' 
and @Label=\"Configuration\"'\n )\n self.propertygroup['release']['x64'] = get_propertygroup(\n 'release', 'x64', ' and @Label=\"Configuration\"'\n )\n\n # ItemDefinitionGroup\n self.definitiongroups['debug']['x86'] = get_definitiongroup('debug', 'x86')\n self.definitiongroups['debug']['x64'] = get_definitiongroup('debug', 'x64')\n self.definitiongroups['release']['x86'] = get_definitiongroup('release', 'x86')\n self.definitiongroups['release']['x64'] = get_definitiongroup('release', 'x64')", "def process_group_upload(self, configlist):\n switches = [str(t[0]) for t in self.get_switches()]\n for swconfig in configlist: # for each\n dpid = list(swconfig.keys())[0]\n\n if dpid not in switches:\n break\n\n for flow in swconfig[dpid]:\n flow['dpid'] = dpid\n flow['operation'] = 'add'\n result = self.process_group_message(flow)\n print(result)\n return 'Groups added successfully!'", "def parse(options):\n global p_entering_group_block, p_exiting_group_block, p_group_next, p_group_name, p_group_set\n \n in_group_block = False\n \n group_list = []\n group_elem = {}\n \n order_keys = []\n \n with open(options.input_file, mode=fd_read_options) as fd_input:\n for line in fd_input:\n line = line.strip()\n \n # We match a group block\n if p_entering_group_block.search(line):\n in_group_block = True\n \n # We are in a group block\n if in_group_block:\n if p_group_name.search(line):\n group_name = p_group_name.search(line).group('group_name')\n group_elem['name'] = group_name\n if not('name' in order_keys): order_keys.append('name')\n \n # We match a setting\n if p_group_set.search(line):\n group_key = p_group_set.search(line).group('group_key')\n if not(group_key in order_keys): order_keys.append(group_key)\n \n group_value = p_group_set.search(line).group('group_value').strip()\n group_value = re.sub('[\"]', '', group_value)\n \n group_elem[group_key] = group_value\n \n # We are done with the current group id\n if p_group_next.search(line):\n group_list.append(group_elem)\n group_elem = {}\n \n # We are exiting the group block\n if p_exiting_group_block.search(line):\n in_group_block = False\n \n return (group_list, order_keys)", "def process_config(self, do_usage=True):\r\n if self.configfile:\r\n self.process_config_file(do_usage)\r\n\r\n # Copy config options to attributes of self. 
This only fills\r\n # in options that aren't already set from the command line.\r\n for name, confname in self.names_list:\r\n if confname:\r\n parts = confname.split(\".\")\r\n obj = self.configroot\r\n for part in parts:\r\n if obj is None:\r\n break\r\n # Here AttributeError is not a user error!\r\n obj = getattr(obj, part)\r\n self._set(name, obj, 0)\r\n\r\n # Process defaults\r\n for name, value in self.default_map.items():\r\n if getattr(self, name) is None:\r\n setattr(self, name, value)\r\n\r\n # Process required options\r\n for name, message in self.required_map.items():\r\n if getattr(self, name) is None:\r\n self.usage(message)", "def do_config(self, validated_message):\n raise NotImplementedError() # implement in child", "def return_site_group_configuration(self, group_id, testcase=None):\n\n self.log.debug(\"Returning current configuration for site group %s ...\" % group_id)\n result = {'successful': False, 'settings':[]}\n\n try:\n # query server for site group settings\n response = self.query_page('Site Groups')['query response']\n\n # find settings for given site group\n siteGroup = None\n for entry in response:\n if str(entry['id']) == str(group_id):\n self.log.trace(\"Site Group %s found.\" % group_id)\n siteGroup = entry\n if siteGroup is not None:\n result['settings'] = translate_dict_to_list_parameters(self.log, siteGroup, None,\n SITEGROUPCON_FIELDS)['parameters']\n self.log.trace(\"Returned current configuration for site group %s.\" % group_id)\n else:\n self.log.error(\"Site Group %s not found.\" % group_id)\n\n result['successful'] = True\n except BaseException, e:\n self.handle_exception(e, operation=\"return current configuration for site group %s\"\n % group_id)\n\n # return\n if testcase is not None: testcase.processing = result['successful']\n return result", "def _process_group(self):\n if not isinstance(self.transform, GroupTransformModel):\n return\n\n self._process_name()\n\n if self.transformed_item['type'] == 'Campaign':\n self._process_metadata_datetime('firstSeen', self.transform.first_seen)\n\n if self.transformed_item['type'] == 'Document':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('malware', self.transform.malware)\n self._process_metadata('password', self.transform.password)\n\n if self.transformed_item['type'] == 'Email':\n self._process_metadata('body', self.transform.body)\n self._process_metadata('from', self.transform.from_addr)\n self._process_metadata('header', self.transform.header)\n self._process_metadata('subject', self.transform.subject)\n self._process_metadata('to', self.transform.to_addr)\n\n if self.transformed_item['type'] in ('Event', 'Incident'):\n self._process_metadata_datetime('eventDate', self.transform.event_date)\n self._process_metadata('status', self.transform.status)\n\n if self.transformed_item['type'] == 'Report':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata_datetime('publishDate', self.transform.publish_date)\n\n # Handle sig specific fields here\n if self.transformed_item['type'] == 'Signature':\n self._process_metadata('fileName', self.transform.file_name)\n self._process_metadata('fileType', self.transform.file_type)\n self._process_metadata('fileText', self.transform.file_text)", "def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = 
open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)", "def process_config(self, filename):\n \n self.log_message(\"processing config file: \"+filename)\n parser = SafeConfigParser()\n parser.optionxform = str\n parser.read(filename)\n self.source_files[filename] = parser\n \n sections = parser.sections()\n for section in sections:\n \n options = parser.options(section)\n params = {}\n non_std = {}\n for option in options:\n ## any option that ends with the word \"password\" will be encrypted and will automatically be decrypted upon\n ## processing \n if option in self.standard_options:\n params[option] = self.get_value(option, parser.get(section, option))\n else:\n non_std[option] = self.get_value(option, parser.get(section, option))\n\n params['non_std'] = non_std\n params['source_file'] = filename\n params['name']=section\n params['run_date']=self.run_date\n c_entry = ConfigEntry(params)\n if c_entry.ready: \n entry_num = c_entry.get_entry_type()\n self.entries[self.entry_types[entry_num]].append(c_entry)\n self.entry_dict[section] = {'source':filename,'entry':c_entry}\n self.log_message(\"Loaded Config Entry: \"+section)\n else:\n self.log_message(\"Failed to load config entry: \"+section)\n\n return self.entries", "def configure_site_group(self, entry_id, settings=[], testcase=None):\n\n self.log.debug(\"Configuring Site Group '%s' ...\" % entry_id)\n result = {'successful': False, 'verified': False, 'site group id': None,\n 'site group name': None}\n\n try:\n # define default data packet to send to server\n data = {\n SITEGROUPCON_FIELDS['id']: '',\n SITEGROUPCON_FIELDS['site group name']: '',\n SITEGROUPCON_FIELDS['storage location']: self.storage_loc,\n SITEGROUPCON_FIELDS['elrt']: '365',\n SITEGROUPCON_FIELDS['tsd']: '15',\n SITEGROUPCON_FIELDS['ltsd']: '365',\n SITEGROUPCON_FIELDS['dgp']: '7',\n }\n\n # update data packet with current site group settings (if group ID given as entry ID)\n name = None\n try:\n data[SITEGROUPCON_FIELDS['id']] = int(entry_id)\n self.log.trace(\"Editing existing site.\")\n\n currentSettings = self.return_site_group_configuration(entry_id)['settings']\n for setting in currentSettings:\n data[setting[0]] = setting[1]\n\n # determine group name\n if setting[0].lower() == SITEGROUPCON_FIELDS['site group name'].lower():\n name = setting[1]\n\n\n except BaseException:\n 
data[SITEGROUPCON_FIELDS['site group name']] = entry_id\n name = entry_id\n self.log.trace(\"Adding new site group.\")\n\n # update data packet with given settings\n for setting in settings:\n data[SITEGROUPCON_FIELDS[setting[0]]] = setting[1]\n\n # if group name, update\n if setting[0].lower() == 'site group name':\n name = setting[1]\n\n # post request to server\n url = self.server_url + SITEGROUPCON_MODIFY_PATH\n successful = self.post_http_request(url, data)['successful']\n\n # verify site group\n if successful and name is not None:\n results = self.verify_site_group(name, settings)\n result['verified'] = results['verified']\n result['site group id'] = results['site group id']\n result['site group name'] = name\n self.log.trace(\"Verified site group\")\n else:\n self.log.error(\"Failed to verify site group.\")\n result['verified'] = False\n\n result['successful'] = True\n except BaseException, e:\n self.handle_exception(e, operation=\"configure site group\")\n\n # return\n if testcase is not None:\n testcase.site_group_id = result['site group id']\n testcase.site_group_name = result['site group name']\n testcase.processing = result['successful']\n return result", "def gcam_parse(cfgfile_name):\n\n ## initialize the structures that will receive the data we are\n ## parsing from the file\n capability_table = {}\n module_list = []\n\n ## cfgfile_name is a filename\n with open(cfgfile_name,\"r\") as cfgfile: \n section = None\n module = None\n sectnpat = re.compile(r'\\[(.+)\\]')\n keyvalpat = re.compile(r'(.+)=(.+)')\n\n for line in cfgfile:\n line = line.lstrip() # remove leading whitespace\n\n ## check for comments and blank lines. A line is a comment if\n ## the first non-whitespace character is a '#'\n if(line == \"\" or line[0] == '#'):\n continue\n\n ## check for section header. Section headers appear in square brackets: [gcam_module]\n sectnmatch = sectnpat.match(line)\n if sectnmatch:\n section = sectnmatch.group(1)\n print \"parser starting section: %s\" % section\n \n if not section.lower()==\"global\":\n ## Section header starts a new module\n ## create the new module: the section name is the module class\n ## TODO: is the input from the config file trusted enough to do it this way?\n modcreate = \"%s(capability_table)\" % section\n print \"modcreate statement: %s\\n\" % modcreate\n module = eval(modcreate)\n else:\n ## This is kind of a wart because I want to call\n ## the section \"global\", but I don't want to have\n ## a module called \"global\".\n module = GlobalParamsModule(capability_table)\n \n module_list.append(module) \n continue # nothing further to do for a section header line\n\n ## If we get this far, we have a nonblank line that is not a\n ## comment or a section header. 
We had better be in a section\n ## by now, or the config is malformed.\n if section==None:\n raise RuntimeError(\"Malformed config file: doesn't open with a section header.\")\n\n kvmatch = keyvalpat.match(line)\n if not kvmatch:\n raise RuntimeError(\"Malformed line in config file:\\n%s\"%line)\n\n key = kvmatch.group(1).lstrip().rstrip()\n val = kvmatch.group(2).lstrip().rstrip()\n\n print \"parser got key= %s\\tval= %s\" % (key, val)\n\n module.addparam(key, val)\n\n ## end of loop over config file lines\n ## end of with block: config file will be closed\n \n ## close out the parameter processing for all modules in the list\n for module in module_list:\n module.finalize_parsing()\n\n return (module_list, capability_table)", "def config_collection(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tself.log('In config_collection',level=logging.DEBUG)\n\t\tcfg = self.cfg\n\t\tfor module_id in self.module_ids():\n\t\t\t# Default to None so we can interpret as ifneeded\n\t\t\tself.get_config(module_id, 'shutit.core.module.build', None, boolean=True, forcenone=True)\n\t\t\tself.get_config(module_id, 'shutit.core.module.remove', False, boolean=True)\n\t\t\tself.get_config(module_id, 'shutit.core.module.tag', False, boolean=True)\n\t\t\t# Default to allow any image\n\t\t\tself.get_config(module_id, 'shutit.core.module.allowed_images', [\".*\"])\n\t\t\tmodule = self.shutit_map[module_id]\n\t\t\tcfg_file = os.path.dirname(get_module_file(self,module)) + '/configs/build.cnf'\n\t\t\tif os.path.isfile(cfg_file):\n\t\t\t\t# use self.get_config, forcing the passed-in default\n\t\t\t\tconfig_parser = ConfigParser.ConfigParser()\n\t\t\t\tconfig_parser.read(cfg_file)\n\t\t\t\tfor section in config_parser.sections():\n\t\t\t\t\tif section == module_id:\n\t\t\t\t\t\tfor option in config_parser.options(section):\n\t\t\t\t\t\t\tif option == 'shutit.core.module.allowed_images':\n\t\t\t\t\t\t\t\toverride = False\n\t\t\t\t\t\t\t\tfor mod, opt, val in self.build['config_overrides']:\n\t\t\t\t\t\t\t\t\tval = val # pylint\n\t\t\t\t\t\t\t\t\t# skip overrides\n\t\t\t\t\t\t\t\t\tif mod == module_id and opt == option:\n\t\t\t\t\t\t\t\t\t\toverride = True\n\t\t\t\t\t\t\t\tif override:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tvalue = config_parser.get(section,option)\n\t\t\t\t\t\t\t\tif option == 'shutit.core.module.allowed_images':\n\t\t\t\t\t\t\t\t\tvalue = json.loads(value)\n\t\t\t\t\t\t\t\tself.get_config(module_id, option, value, forceask=True)\n\t\t\t# ifneeded will (by default) only take effect if 'build' is not\n\t\t\t# specified. It can, however, be forced to a value, but this\n\t\t\t# should be unusual.\n\t\t\tif cfg[module_id]['shutit.core.module.build'] is None:\n\t\t\t\tself.get_config(module_id, 'shutit.core.module.build_ifneeded', True, boolean=True)\n\t\t\t\tcfg[module_id]['shutit.core.module.build'] = False\n\t\t\telse:\n\t\t\t\tself.get_config(module_id, 'shutit.core.module.build_ifneeded', False, boolean=True)", "def get_config(group):\n config = toml.load('./config.toml')\n return config[group]" ]
[ "0.63921624", "0.6276154", "0.5917572", "0.58930755", "0.5878203", "0.5875567", "0.5822448", "0.57812375", "0.57412446", "0.57304585", "0.5726534", "0.57034045", "0.5700766", "0.56856185", "0.5675106", "0.56413394", "0.56358737", "0.5601757", "0.55887634", "0.55571437", "0.5549367", "0.5546626", "0.554342", "0.55340356", "0.55151105", "0.5481036", "0.54538804", "0.54406774", "0.54171646", "0.53655356" ]
0.7236225
0
Returns the initial states for the given groups.
def get_initial_state(
        self,
        groups: Union[str, Sequence[str]],
) -> Union[RobotState, Sequence[RobotState]]:
    if isinstance(groups, str):
        configs = [self.get_config(groups)]
    else:
        configs = [self.get_config(name) for name in groups]
    states = []
    for config in configs:
        state = RobotState()
        # Return a blank state if this is a hardware-only group.
        if config.qpos_indices is None:
            states.append(state)
            continue

        state.qpos = self.sim_scene.init_qpos[config.qpos_indices].copy()
        state.qvel = self.sim_scene.init_qvel[config.qvel_indices].copy()
        states.append(state)
    if isinstance(groups, str):
        return states[0]
    return states
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_initial_states(self):\n raise NotImplementedError()", "def initial_states(self):\n return self._initial_states", "def initial_states(self):\n return list(self.iter_initial_states())", "def states_initial(self):\n return self.states(\"Initial = YES\")", "def _get_group_states(\n self, configs: Sequence[RobotGroupConfig]) -> Sequence[RobotState]:\n states = []\n for config in configs:\n state = RobotState()\n # Return a blank state if this is a hardware-only group.\n if config.qpos_indices is None:\n states.append(state)\n continue\n\n state.qpos = self.sim_scene.data.qpos[config.qpos_indices]\n state.qvel = self.sim_scene.data.qvel[config.qvel_indices]\n # qacc has the same dimensionality as qvel.\n state.qacc = self.sim_scene.data.qacc[config.qvel_indices]\n\n # Add observation noise to the state.\n self._apply_observation_noise(state, config)\n\n states.append(state)\n return states", "def iter_initial_states(self):\n from six.moves import filter\n return filter(lambda s:s.is_initial, self.iter_states())", "def initial_state():\n\treturn [[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY]]", "def get_initial_states(self):\n return product(*[phi.automaton().states.initial for phi in self])", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, 
EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]" ]
[ "0.6517598", "0.63916826", "0.61768365", "0.61699396", "0.6043793", "0.59128195", "0.5887704", "0.58768594", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155", "0.58085155" ]
0.7757164
0
Returns the states for the given group configurations.
def _get_group_states(
        self, configs: Sequence[RobotGroupConfig]) -> Sequence[RobotState]:
    states = []
    for config in configs:
        state = RobotState()
        # Return a blank state if this is a hardware-only group.
        if config.qpos_indices is None:
            states.append(state)
            continue

        state.qpos = self.sim_scene.data.qpos[config.qpos_indices]
        state.qvel = self.sim_scene.data.qvel[config.qvel_indices]
        # qacc has the same dimensionality as qvel.
        state.qacc = self.sim_scene.data.qacc[config.qvel_indices]

        # Add observation noise to the state.
        self._apply_observation_noise(state, config)

        states.append(state)
    return states
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_states(self):\n raise NotImplementedError()", "def get_states():\n try:\n ''' Returns a list of states in list named result '''\n data = State.select()\n return ListStyle.list(data, request), 200\n except Exception as e:\n abort(500)", "def get_all_states(self):\n return self._states", "def _get_check_groups(self, group=None):\n groups = [g for g in self.config_dict]\n if group:\n if group in groups:\n check_groups = [group]\n else:\n check_groups = []\n else:\n check_groups = groups\n return check_groups", "def get_list_of_states(self):\n return self.states", "def get_initial_state(\n self,\n groups: Union[str, Sequence[str]],\n ) -> Union[RobotState, Sequence[RobotState]]:\n if isinstance(groups, str):\n configs = [self.get_config(groups)]\n else:\n configs = [self.get_config(name) for name in groups]\n states = []\n for config in configs:\n state = RobotState()\n # Return a blank state if this is a hardware-only group.\n if config.qpos_indices is None:\n states.append(state)\n continue\n\n state.qpos = self.sim_scene.init_qpos[config.qpos_indices].copy()\n state.qvel = self.sim_scene.init_qvel[config.qvel_indices].copy()\n states.append(state)\n if isinstance(groups, str):\n return states[0]\n return states", "def dense_state(self):\n return {name: self.group[name].state for name in self.group.keys()}", "def get_state_group_info(program):\n\n state_group_info = OrderedDict()\n for i, j in findall(\n r'state_and_packet.state_group_(\\d+)_state_(\\d+)', program):\n indices = state_group_info.get(i, OrderedSet())\n indices.add(j)\n state_group_info[i] = indices\n\n return state_group_info", "def all_states(self):\n return self._states", "def get_state(self, variables_for_state_group):\n state = None\n for variable in variables_for_state_group:\n assert variable.startswith(self.prefix + '.')\n data_portion = variable[len(self.prefix) + 1:]\n\n not_set = False\n if data_portion.startswith('NOT.'):\n data_portion = data_portion[len('NOT.'):]\n not_set = True\n\n assert data_portion in self.states\n\n if not_set:\n continue\n\n if state is None:\n state = data_portion\n else:\n assert False, (state, data_portion)\n\n if state is None:\n state = self.default\n\n return state", "def get_states(self):\n states = {}\n if hasattr(self, 'random_mask_state'):\n states['random_mask_state'] = self.random_mask_state.get_state()\n if hasattr(self, 'deformrandomstate'):\n states['deformrandomstate'] = self.deformrandomstate.get_state()\n states['randomstate'] = self.randomstate.get_state()\n return states", "def get_states(self):\n\n try:\n response = requests.get(self.ROOT_URL + self.ALL_STATES_ENDPOINT)\n response = response.json()\n except ValueError:\n raise OpenSkyApiException(self.PARSE_ERROR)\n except RequestException:\n raise OpenSkyApiException(self.REQUEST_ERROR)\n return self.parse_response(response)", "def get_sink_states(self):\n state1 = State(4, 2)\n return [state1]", "def states():\n states = storage.all(State).values()\n return render_template('9-states.html', states=states)", "def states_list():\n return render_template('7-states_list.html',\n states=storage.all(State).values())", "def get_active_states(self):\n raise NotImplementedError()", "def states_list(self, states):\n self.log('List of states: [{}]'.format(\n ' | '.join([(lambda x: x[1:])(s) for s in\n states.keys()])))\n return", "def get_states(self, structure):\n dir = IO(dir=os.path.join(self.loc, structure))\n return dir.get_values(\n structure, \"geo.log\", self.geovalid, self.finalE)", "def 
list_all_scaling_groups(self, request, paginate):\n\n def format_list(results):\n group_states, actives = results\n groups = [{\n 'id': state.group_id,\n 'links': get_autoscale_links(state.tenant_id, state.group_id),\n 'state': format_state_dict(state, active)\n } for state, active in zip(group_states, actives)]\n return {\n \"groups\": groups,\n \"groups_links\": get_groups_links(\n groups, self.tenant_id, None, **paginate)\n }\n\n def fetch_active_caches(group_states):\n if not tenant_is_enabled(self.tenant_id, config_value):\n return group_states, [None] * len(group_states)\n d = gatherResults(\n [get_active_cache(\n self.store.reactor, self.store.connection, self.tenant_id,\n state.group_id)\n for state in group_states])\n return d.addCallback(lambda cache: (group_states, cache))\n\n deferred = self.store.list_scaling_group_states(\n self.log, self.tenant_id, **paginate)\n deferred.addCallback(fetch_active_caches)\n deferred.addCallback(format_list)\n deferred.addCallback(json.dumps)\n return deferred", "def test_groups_group_id_state_get(self):\n pass", "def states(self):\n knownstates = set(self.keys())\n for possiblestates in self.values():\n for i in possiblestates:\n knownstates.add(i)\n return list(knownstates)", "def get_case_list_by_group(config):\n # Identity = namedtuple('Identity', ['service', 'id'])\n groups = config.get('groups')\n full_case_lists = {}\n for group_name, group in groups.items():\n cases = group['cases']\n if group.get('dependencies'):\n for dep in group.get('dependencies'):\n dependencies_tests = groups.get(dep).get('cases')\n cases += dependencies_tests\n full_case_lists[group_name] = cases\n return full_case_lists", "def get_states(self, sim_outputs: List = None):\n \n # Ensure model has been initialized at least once\n self._model_has_been_initialized(\"get_states\")\n\n if sim_outputs is None:\n sim_outputs = self.sim_outputs\n elif not len(sim_outputs) > 0:\n sim_outputs = self.sim_outputs\n\n states_dict = self._get_variables(sim_outputs)\n \n # Check if more than one index has been found\n if not len(states_dict.keys()) > 0:\n print(\"[get_states] No valid state names have been provided. No states are returned.\")\n return {}\n\n return states_dict", "def states_list():\n states_dict = storage.all(State)\n states = [v for v in states_dict.values()]\n return render_template('7-states_list.html', states=states)", "def States(self) -> List[Callable]:\r\n\t\treturn self.__STATES__", "def states(self):\n from geoid.core import names\n from geoid.censusnames import geo_names, stusab\n\n states = {}\n\n for state_no, stusab in stusab.items():\n states[stusab] = {\n 'name': geo_names[(state_no,0)],\n 'stusab': stusab,\n 'number' : state_no\n }\n\n states['US'] = {\n 'name': 'United States',\n 'stusab': 'US',\n 'number' : 0\n }\n\n return states", "def all_states(self) -> Tuple[State, ...]:\n return self.influence_graph.all_states()", "def groups(self):\n return self.get_data(\"groups\")", "def states_list():\n state_dict = storage.all('State').values()\n return render_template('7-states_list.html', state_dict=state_dict)", "def states():\n all_states = storage.all(State)\n return render_template('9-states.html', States=all_states, ID=None,\n Stateobj=None)" ]
[ "0.5906376", "0.587641", "0.58613956", "0.5801155", "0.57883763", "0.57819426", "0.5745295", "0.5735361", "0.5593753", "0.5529911", "0.55296177", "0.54799014", "0.5479514", "0.54587895", "0.5449307", "0.5433292", "0.5427357", "0.5345912", "0.5342106", "0.5340869", "0.5320486", "0.5296267", "0.5291941", "0.52904725", "0.5274526", "0.527334", "0.52728814", "0.5262083", "0.524495", "0.5225711" ]
0.7487228
0
Applies observation noise to the given state.
def _apply_observation_noise(self, state: RobotState,
                             config: RobotGroupConfig):
    if config.sim_observation_noise is None or self.random_state is None:
        return

    # Define the noise calculation.
    def noise(value_range: np.ndarray):
        amplitude = config.sim_observation_noise * np.ptp(value_range, axis=1)
        return amplitude * self.random_state.uniform(
            low=-0.5, high=0.5, size=value_range.shape[0])

    if config.qpos_range is not None:
        state.qpos += noise(config.qpos_range)
    if config.qvel_range is not None:
        state.qvel += noise(config.qvel_range)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_noise(self):\n self.noise = torch.normal(0.5, .2, self.state.shape).double()\n self.noise *= torch.sqrt(2 *\n self.vars['T']*torch.tensor(self.vars['dt']))", "def apply_noise(self, input):\n mask = np.random.binomial(1, 1-self.noise_prob, len(input)) \n noisy_input = mask * input\n \n return noisy_input", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.image.shape)\n self.image += self.noise\n return", "def noiseReduction(self):\n pass", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.im.shape)\n self.im += self.noise\n return", "def _sample_noise(self) -> np.ndarray:\n return np.random.randn(self.actor_action_size)", "def _sample_new_noise(self, *, tf_sess=None):\n if self.framework == \"tf\":\n tf_sess.run(self.tf_sample_new_noise_op)\n elif self.framework == \"tf2\":\n self._tf_sample_new_noise_op()\n else:\n for i in range(len(self.noise)):\n self.noise[i] = torch.normal(\n mean=torch.zeros(self.noise[i].size()), std=self.stddev\n ).to(self.device)", "def add_noise(self, noise):\n if noise > 0.0:\n for key in self.counts:\n self.counts[key] *= 1.0 + noise * np.random.random_sample()", "def state_transition(self):\n self.x = np.dot(self.F, self.x)\n self.x_noisy = noise_fun(np.dot(self.F, self.x_noisy), self.Q)\n return self.x", "def observation_from_state(self, state):\n state_index = self.latent_variable_markov_chain.index_dict[state]\n return np.random.choice(self.observation_states,\n p=self.emission_probabilities[state_index, :])", "def noise(self, freq: int, /) -> None:", "def noise(self, noise):\n\n self._noise = noise", "def make_noisy_images(image):\r\n return apply_poisson_noise(image, random_state=12345)", "def noise(self, stddev):\n #add noise to weights\n pass", "def observation_func(\n self,\n state: np.ndarray,\n observation_noise: Optional[np.ndarray] = None,\n control_vect: Optional[np.ndarray] = None\n ) -> np.ndarray:\n pass", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.random.rand(*x.shape) \n self.state = x + dx\n return self.state", "def __call__(self, input: torch.Tensor) -> torch.Tensor:\n # Get noise\n noise = self.mean + torch.randn_like(input) * self.std\n # Apply nose to image\n input = input + noise\n return input", "def _add_noise(X, n_noise, random_state=None):\n np.random.seed(random_state)\n noise = np.random.randn(X.shape[0], n_noise)\n return np.hstack((X, noise))", "def ternary_noise(N_stimuli, Nx, Ny):\n return np.random.randint(-1, 2, size=(N_stimuli, Nx, Ny))", "def _addNoise(self):\n self.dispNoise = self.dispRaw.copy()\n self.dispNoise[:, 0] += self.sigmaEast * numpy.random.randn(self.numStations)\n self.dispNoise[:, 1] += self.sigmaNorth * numpy.random.randn(self.numStations)\n self.dispNoise[:, 2] += self.sigmaUp * numpy.random.randn(self.numStations)\n return", "def gen_noise(sample_size, latent):\r\n\treturn Variable(torch.randn(sample_size, latent))", "def action_with_noise(self, observation):\n if self.replay_buffer.size > self.warmup_size:\n action = self.action(observation)\n else:\n action = self.random_action(observation)\n noise = np.clip(np.random.randn(self.action_dim) * self.sigma,\n -self.noise_cap, self.noise_cap)\n action_with_noise = action + noise\n return (np.clip(action_with_noise, self.action_low, self.action_high),\n action, noise)", "def sample(self):\n x = 
self.state\n# dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state", "def reset_noise(self):\n self.advantage_hidden_layer.reset_noise()\n self.advantage_layer.reset_noise()\n self.value_hidden_layer.reset_noise()\n self.value_layer.reset_noise()", "def generate_noise_vector(self, ):\n self.noise.resize_(\n self.batch_size, int(self.opt.nz), 1, 1).normal_(0, 1)\n self.noisev = Variable(self.noise) # TODO: Add volatile=True???", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array(\n [random.random() for i in range(len(x))]\n )\n self.state = x + dx\n return self.state", "def add_noise(image):\n image += 10e-10 * np.random.randn(image.shape[0], image.shape[1], 1)\n \n return image", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n # dx = self.theta * (self.mu - x) + self.sigma * np.random.standard_normal(self.size)\n self.state = x + dx\n return self.state", "def sample(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])\n self.state = x + dx\n return self.state" ]
[ "0.7100496", "0.6829454", "0.6615999", "0.65508354", "0.6535193", "0.65046763", "0.6397168", "0.6329152", "0.6270984", "0.62694496", "0.6267069", "0.6266234", "0.62525165", "0.6244945", "0.6227903", "0.620435", "0.6183446", "0.6166714", "0.61635095", "0.616012", "0.6097126", "0.6081401", "0.605562", "0.6052497", "0.60498816", "0.60459226", "0.6030385", "0.6028759", "0.6027353", "0.6025186" ]
0.79560375
0
Denormalizes the given action.
def _denormalize_action(self, action: np.ndarray,
                        config: RobotGroupConfig) -> np.ndarray:
    if config.denormalize_center.shape != action.shape:
        raise ValueError(
            'Action shape ({}) does not match actuator shape: ({})'.format(
                action.shape, config.denormalize_center.shape))
    assert config.denormalize_range is not None
    action = np.clip(action, -1.0, 1.0)
    return config.denormalize_center + (action * config.denormalize_range)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reverse_action(self, action):\n low = self.action_space.low\n high = self.action_space.high\n\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n\n action = (action - reloc_factor) / scale_factor\n action = np.clip(action, -1.0, 1.0)\n\n return action", "def _preprocess_action(self, action: np.ndarray) -> np.ndarray:\n if self.use_raw_actions:\n return action\n else:\n return super()._preprocess_action(action)", "def transform_action(self, action):\n action = np.clip(action, self.input_min, self.input_max)\n transformed_action = (action - self.action_input_transform) * self.action_scale + self.action_output_transform\n\n return transformed_action", "def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))", "def rescale_action(self, action: np.ndarray) -> np.ndarray:\n action_rescaled = (\n action * (self.action_max - self.action_min) / 2.0\n + (self.action_max + self.action_min) / 2.0\n )\n return action_rescaled", "def _dediscretize_action(self, action):\n\n if self.discrete_input:\n\n discrete_m1 = action[0]\n discrete_m2 = action[1]\n\n\n m1 = 145 + discrete_m1 * 99/(self.num_div_action - 1)\n m2 = 145 + discrete_m2 * 99/(self.num_div_action - 1)\n\n else:\n if self.differential_car:\n # actions fron 0 to 24\n discrete_m1 = action//5\n discrete_m2 = action % 5\n\n m1 = 145 + discrete_m1 * 99/(self.num_div_action - 1)\n m2 = 145 + discrete_m2 * 99/(self.num_div_action - 1)\n\n else:\n discrete_m1 = action // 5\n discrete_m2 = action % 5\n\n # the traction engine of the ackerman car starts\n # working with pwm=180\n\n m1 = 180 + discrete_m1 * 74 / (self.num_div_action - 1)\n\n # it is the servo and goes from 0 to 255\n m2 = discrete_m2 * 255 / (self.num_div_action - 1)\n\n return m1, m2", "def parseAction(self, action):\n action = self.AGENT_TYPES[action]\n\n\n full_action = {}\n full_action[\"action\"] = action\n if action == \"eli-kw\":\n keywords = self.dataset.getSuggestedKeywords()\n full_action[\"keywords\"] = keywords[:self.N]\n elif action == \"info\" or action == \"info-all\":\n full_action[\"function\"] = self.current_function\n\n elif action == \"sugg\" or action == \"sugg-info-all\":\n top_hit = self.dataset.getTopHits(1)\n if not top_hit:\n full_action[\"action\"] = \"eli-query\"\n else:\n functions = self.dataset.getTopHits(1, self.result_index)\n if functions:\n full_action[\"function\"] = functions[0]\n else:\n full_action[\"function\"] = \"\"\n\n self.result_index += 1\n\n elif action == \"sugg-all\":\n full_action[\"list\"] = self.dataset.getTopHits(self.K, self.result_index)\n\n elif action == \"change-page\":\n self.result_index += self.K\n full_action[\"list\"] = self.dataset.getTopHits(self.K, self.result_index)\n return full_action", "def _decode_action(self, action_id):\n raise NotImplementedError", "def scale_action(self, action: np.ndarray) -> np.ndarray:\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0", "def denormalize(self, x):\n raise NotImplementedError", "def action(self, action):\n low = self.action_space.low\n high = self.action_space.high\n\n scale_factor = (high - low) / 2\n reloc_factor = high - scale_factor\n\n action = action * scale_factor + reloc_factor\n action = np.clip(action, low, high)\n\n return action", "def convert_to_low_level_action(self, i_state, action):\n pass", "def _action(self, action, osd, info=None, 
**kwargs):\n body = {action: info}\n self.run_hooks('modify_body_for_action', body, **kwargs)\n url = '/osds/%s/action' % base.getid(osd)\n return self.api.client.post(url, body=body)", "def _get_action(verb):\n aux_verbs = \"\"\n for child in verb.children:\n if child.dep_ == \"aux\" or child.dep_ == \"neg\":\n aux_verbs += str(child)\n return SpacyEventExtractor._remove_extra_whitespaces(str(aux_verbs) + ' ' + str(verb))", "def _formulate_action(Action, **kwargs):\n\n return Action(**kwargs)", "def _fill_action_info(action):\n def _is_ascii(s):\n return all(ord(c) < 128 for c in s)\n\n if not _is_ascii(action.obj_desc_str):\n tf.logging.info('Found an unconvertable unicode %s', action.obj_desc_str)\n return\n\n if not (isinstance(action.verb_str, str) and isinstance(\n action.obj_desc_str, str) and isinstance(action.input_content_str, str)):\n return\n action.regularize_strs()\n input_str_pos_padding = [\n config.LABEL_DEFAULT_VALUE_INT, config.LABEL_DEFAULT_VALUE_INT\n ]\n\n input_prep_word = _get_input_prep_word()\n swipe_prep_word = _get_swipe_prep_word()\n\n if action.action_rule == common.ActionRules.NO_VERB_RULE:\n action.instruction_str = action.obj_desc_str\n action.verb_str_pos = [0, 0]\n action.obj_str_pos = [0, _count_chars(action.obj_desc_str)]\n action.input_str_pos = input_str_pos_padding\n return\n\n if action.action_type in [common.ActionTypes.CLICK]:\n action.instruction_str = '%s %s' % (action.verb_str, action.obj_desc_str)\n action.verb_str_pos = [0, _count_chars(action.verb_str)]\n action.obj_str_pos = [\n _count_chars(action.verb_str) + 1,\n _count_chars(action.instruction_str)\n ]\n action.input_str_pos = input_str_pos_padding\n\n elif action.action_type in [common.ActionTypes.INPUT]:\n # There is no space between 4th and 5th string because the 2nd string,\n # article word, is optional.\n action.instruction_str = '%s %s %s %s' % (\n action.verb_str, action.input_content_str, input_prep_word,\n action.obj_desc_str)\n action.verb_str_pos = [0, _count_chars(action.verb_str)]\n action.input_str_pos = [\n _count_chars(action.verb_str) + 1,\n _count_chars('%s %s' % (action.verb_str, action.input_content_str))\n ]\n action.obj_str_pos = [\n _count_chars(\n '%s %s %s' %\n (action.verb_str, action.input_content_str, input_prep_word)) + 1,\n _count_chars(action.instruction_str)\n ]\n # All the rests are swipe actions\n else:\n action.instruction_str = '%s %s %s' % (action.verb_str, swipe_prep_word,\n action.obj_desc_str)\n action.verb_str_pos = [0, _count_chars(action.verb_str)]\n action.input_str_pos = input_str_pos_padding\n action.obj_str_pos = [\n _count_chars('%s %s' % (action.verb_str, swipe_prep_word)) + 1,\n _count_chars(action.instruction_str)\n ]", "def to_action(self, node):\n\n name = node.children[0].name\n parameter_map = {}\n\n if \":parameters\" in node:\n params = PDDL_Utils.read_type(node[\":parameters\"])\n parameter_map = {p[0]: p[1] for p in params} # map of variable-names to types\n else:\n params = []\n\n\n assert \":derive-condition\" in node, \"Error: You must include the :derive-condition value for every action.\"\n\n dcond_ind = [n.name for n in node.children].index(':derive-condition')\n dcond = self.to_formula(node.children[dcond_ind+1], parameter_map)\n\n\n if \":precondition\" in node:\n assert len(node[\":precondition\"].children) == 1,\\\n \"precondition should have one top-level child\"\n precond = self.to_formula(node[\":precondition\"].children[0], parameter_map)\n else:\n precond = None\n\n if \":observe\" in node:\n assert 
len(node[\":observe\"].children) == 1,\\\n \"observe should have one top-level child\"\n observe = self.to_predicate(node[\":observe\"].children[0], map=parameter_map)\n else:\n observe = None\n\n if \":effect\" in node:\n assert len(node[\":effect\"].children) == 1,\\\n \"effect should have one top-level child\"\n effect = self.to_formula(node[\":effect\"].children[0], parameter_map)\n else:\n effect = None\n\n return Action(name, params, precond, observe, effect, dcond)", "def external_action_to_action(self, agent, external_action):\n return external_action", "def action(self, action):\n action = (action + 1) / 2 # [-1, 1] => [0, 1]\n action *= (self.action_space.high - self.action_space.low)\n action += self.action_space.low\n return action", "def as_action_view(cls, action):\n method = getattr(cls, action)\n mapping = dict(method.mapping)\n initkwargs = method.kwargs\n # If the method is defined on a mixin class, a single schema instance will end up\n # being shared between the subclasses, which causes problems with schema generation.\n # We work around this by making a copy of it.\n schema = initkwargs.get('schema')\n if schema is not None:\n initkwargs['schema'] = copy(schema)\n return cls.as_view(mapping, **initkwargs)", "def compute_command(self, action: ActT) -> np.ndarray:\n # pylint: disable=unused-argument\n\n # Check if the action is out-of-bounds, in debug mode only\n if self.debug and not self._contains_action():\n LOGGER.warning(\"The action is out-of-bounds.\")\n\n assert isinstance(action, np.ndarray)\n return action", "def transfer_actions(action, act_space):\n #print(action)\n action_spaces = []\n res = []\n for act in act_space.spaces:\n if act_space[act].__class__.__name__ == 'Discrete':\n action_spaces.append(act_space[act].n)\n res.append(action[act])\n elif act_space[act].__class__.__name__ == 'Enum':\n action_spaces.append(len(act_space[act].values))\n res.append(action[act])\n elif act == 'camera':\n res.append(camera_transform(action[act][0]))\n res.append(camera_transform(action[act][1]))\n action_spaces.append(36)\n action_spaces.append(36)\n\n return res", "def action(self, args):\n mean, stddev = args\n dist = tfp.distributions.Normal(loc=mean, scale=stddev)\n action = dist.sample(1)\n action = K.clip(action,\n self.env.action_space.low[0],\n self.env.action_space.high[0])\n return action", "def _action(self, action, consistencygroup, info=None, **kwargs):\n body = {action: info}\n self.run_hooks('modify_body_for_action', body, **kwargs)\n url = '/consistencygroups/%s/action' % base.getid(consistencygroup)\n resp, body = self.api.client.post(url, body=body)\n return common_base.TupleWithMeta((resp, body), resp)", "def decode(action):\n\n from_rank, from_file, move_type = np.unravel_index(action, (8, 8, 73))\n\n is_underpromotion = (\n _TYPE_OFFSET <= move_type\n and move_type < _TYPE_OFFSET + _NUM_TYPES\n )\n\n if not is_underpromotion:\n return None\n\n underpromotion_type = move_type - _TYPE_OFFSET\n\n direction_idx, promotion_idx = np.unravel_index(\n indices=underpromotion_type,\n shape=(3,3)\n )\n\n direction = _DIRECTIONS[direction_idx]\n promotion = _PROMOTIONS[promotion_idx]\n\n to_rank = from_rank + 1\n to_file = from_file + direction\n\n move = utils.pack(from_rank, from_file, to_rank, to_file)\n move.promotion = promotion\n\n return move", "def action(self, action):\n allowed_values = [\"DELETE\", \"NONE\"]\n if action not in allowed_values:\n raise ValueError(\n \"Invalid value for `action` ({0}), must be one of {1}\"\n .format(action, 
allowed_values)\n )\n\n self._action = action", "def scale(self, state, action):\n control_action = action[..., : self._true_dim_action[0]]\n scale = super().scale(state, control_action)\n\n return scale", "def _action_to_one_hot(self, action):\n zeros = torch.zeros(\n action.size()[0], self.action_dim, dtype=torch.float32,\n device=action.device)\n return zeros.scatter_(1, action.unsqueeze(1), 1)", "def _action_to_one_hot(self, action):\n zeros = torch.zeros(\n action.size()[0], self.action_dim, dtype=torch.float32,\n device=action.device)\n return zeros.scatter_(1, action.unsqueeze(1), 1)", "def strip_action(self) -> str:\n return self.strip_action_str(self.content)" ]
[ "0.59685934", "0.5911025", "0.5838376", "0.57880056", "0.5760996", "0.5746722", "0.5595164", "0.5518302", "0.5501854", "0.5424955", "0.53894466", "0.5361105", "0.5326777", "0.5308367", "0.52543795", "0.50769705", "0.503363", "0.5022165", "0.5016263", "0.50093585", "0.50074387", "0.4999803", "0.498712", "0.4983206", "0.4982905", "0.4958982", "0.49431118", "0.4934482", "0.4934482", "0.4907637" ]
0.77185875
0
Validates the POST request as a whole, not just a single field. During reads the validator is effectively skipped. During a POST, each orderline's units are validated: if an orderline requests more units than the product has available, the order is not accepted.
def validate(self, attrs):
    exception_body = []
    for orderline in attrs.get('orderlines', []):
        product = orderline['product']
        # If orderline has less units than available, all good.
        if orderline['units'] <= product.units:
            continue
        # else error is accumulated
        if product.units > 0:
            exception_body.append(
                {product.name: 'Only {0} units available.'.format(str(product.units))})
        else:
            exception_body.append({product.name: 'Out of stock'})
    # If any orderline has problem, reject order.
    if exception_body:
        raise exceptions.PermissionDenied({'errors': exception_body})
    return attrs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self):\n cleaned_data = super().clean()\n variant = cleaned_data.get('variant')\n quantity = cleaned_data.get('quantity')\n if variant and quantity is not None:\n try:\n variant.check_quantity(quantity)\n except InsufficientStock as e:\n error = forms.ValidationError(\n pgettext_lazy(\n 'Add item form error',\n 'Could not add item. '\n 'Only %(remaining)d remaining in stock.' %\n {'remaining': e.item.quantity_available}))\n self.add_error('quantity', error)\n return cleaned_data", "def validate(self, attrs):\n if 'filled_out' in attrs and not attrs['filled_out']:\n raise ValidationError(\"filled_out cannot be set to false\")\n\n if 'agreed_to_terms_of_service' in attrs and not attrs['agreed_to_terms_of_service']:\n raise ValidationError(\"agreed_to_terms_of_service cannot be set to false\")\n\n # Postal code is only required in United States and Canada\n country = attrs.get(\"country\", \"\")\n postal_code = attrs.get(\"postal_code\", \"\")\n if country in (\"US\", \"CA\") and not postal_code:\n raise ValidationError(\"postal_code may not be blank\")\n\n return super().validate(attrs)", "def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n ot_linea_form = OT_LineaFormSet(self.request.POST)\n if (form.is_valid() and ot_linea_form.is_valid()):\n return self.form_valid(form, ot_linea_form)\n else:\n return self.form_invalid(form, ot_linea_form)", "def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n ot_linea_form = OT_LineaFormSet(self.request.POST)\n if (form.is_valid() and ot_linea_form.is_valid()):\n return self.form_valid(form, ot_linea_form)\n else:\n return self.form_invalid(form, ot_linea_form)", "def verify_post_data ( ):\n # check every field is present\n try:\n request.json[ 'source_lang' ]\n request.json[ 'target_lang' ]\n request.json[ 'text' ]\n\n TranslatorApp.verify_rpc_value ( request.json )\n\n except KeyError: # All the values are not present\n # 400 Bad Request\n abort ( 400, \"All mandatory fields are not provided\" )\n except ValueError as err:\n # 422 Unprocessable Entity\n abort ( 422, \"Unprocessable value: {0}\".format ( err.args ) )\n except BadRequest:\n # 400 Bad Request\n abort ( 400, \"Provided values are having malformed syntax\" )", "def test_error_data_order(client):\n data = dict(product_name=\"Latte\")\n response = client.post(\"/api/order\", headers=HEADERS, json=data)\n assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY", "def _validate_post(self, value, name, result):\n return result", "def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n factura_form = Factura_LineaFormSet(self.request.POST)\n remito_form = Remito_LineaFormSet(self.request.POST)\n ot_linea_form = OT_LineaFormSet(self.request.POST)\n if (form.is_valid() and factura_form.is_valid()\n and ot_linea_form.is_valid() and remito_form.is_valid()):\n return self.form_valid(form, factura_form, remito_form, ot_linea_form)\n else:\n return self.form_invalid(form, factura_form, remito_form, ot_linea_form)", "def test_POST_receipt(self):\n\t\t# list should have no receipts at first\n\t\tself.POST_list()\n\t\tlist_data = self.GET_data('/api/list/' + self.list_id)\n\t\tself.assertTrue(('receipts' not in list_data) or not len(list_data['receipts']))\n\n\t\t# after post receipt and its _id should be in 
list.receipts\n\t\tself.POST_receipt()\n\t\tlist_data = self.GET_data('/api/list/' + self.list_id)\n\t\tself.assertTrue('receipts' in list_data)\n\t\tself.assertEqual([self.receipt_id], list_data['receipts'])\n\n\t\t# post another receipt and receipts should have length of 2\n\t\tself.POST_receipt()\n\t\tlist_data = self.GET_data('/api/list/' + self.list_id)\n\t\tself.assertEqual(2, len(list_data['receipts']))\n\t\tself.assertTrue(self.receipt_id in list_data['receipts'])", "def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n tarea_linea_form = Tarea_LineaFormSet(self.request.POST)\n if (form.is_valid() and tarea_linea_form.is_valid()):\n return self.form_valid(form, tarea_linea_form)\n else:\n return self.form_invalid(form, tarea_linea_form)", "def test_make_order_with_quantity_invalid(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={\n 'item_name': 'Watermelon', 'item_price': 50, 'quantity': -3\n }, headers={'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. Price and quantity must be ints >= 1')", "def test_invalid_request_type(self, order_placed):\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n\n data[\"req_transaction_type\"] = \"payment\",\n\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')", "def clean(self):\n\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock') \\\n and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B': # buy order\n open_orders = Order.objects.filter(trader=t,\n order='B', completed=False)\n open_order_value = float(sum([o.volume * o.price for o in open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(cleaned_data['price'])\n\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n\n elif cleaned_data['order'] == 'S': # sell order!\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'],\n completed=False).values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data", "def validate(self, request):\n values = {\n 'robot_match_comments':request.POST['robot_match_comments'],\n 'did_foul':'did_foul' in request.POST,\n 'did_technical_foul':'did_technical_foul' in request.POST,\n 'foul_description':request.POST['foul_description'],\n 'did_shoot':'did_shoot' in request.POST,\n 'auto_1':request.POST['auto_1'],\n 'auto_2':request.POST['auto_2'],\n 'auto_3':request.POST['auto_3'],\n 'auto_miss':request.POST['auto_miss'],\n 'teleop_1':request.POST['teleop_1'],\n 'teleop_2':request.POST['teleop_2'],\n 'teleop_3':request.POST['teleop_3'],\n 'teleop_5':request.POST['teleop_5'],\n 'teleop_miss':request.POST['teleop_miss'],\n 'shooting_description':request.POST['shooting_description'],\n 
'did_climb':'did_climb' in request.POST,\n 'climb_start':request.POST['climb_start'],\n 'climb_finish':request.POST['climb_finish'],\n 'level_reached':request.POST.get('level_reached'),\n 'frisbees_dumped':request.POST['frisbees_dumped'],\n 'climbing_description':request.POST['climbing_description'],\n 'did_human_load':'did_human_load' in request.POST,\n 'did_ground_load':'did_ground_load' in request.POST,\n 'auto_frisbees_ground_loaded':\\\n request.POST['auto_frisbees_ground_loaded'],\n 'loading_description':request.POST['loading_description'],\n }\n if ((values['did_foul'] or values['did_technical_foul']) and\n not values['foul_description']):\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'Please enter a description of the foul(s) the robot committed',\n new_values\n )\n if values['did_shoot']:\n try:\n values['auto_1'] = int(values['auto_1'])\n values['auto_2'] = int(values['auto_2'])\n values['auto_3'] = int(values['auto_3'])\n values['auto_miss'] = int(values['auto_miss'])\n values['teleop_1'] = int(values['teleop_1'])\n values['teleop_2'] = int(values['teleop_2'])\n values['teleop_3'] = int(values['teleop_3'])\n values['teleop_5'] = int(values['teleop_5'])\n values['teleop_miss'] = int(values['teleop_miss'])\n except ValueError:\n raise ValidationError(\n 'You must enter a number for all of the shooting numbers',\n self.__dict__.copy().update(values)\n )\n if values['did_climb']:\n try:\n values['climb_start'] = int(values['climb_start'])\n values['climb_finish'] = int(values['climb_finish'])\n try:\n values['level_reached'] = int(values['level_reached'])\n except TypeError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'You must select a level the robot climbed too',\n new_values\n )\n values['frisbees_dumped'] = int(values['frisbees_dumped'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All climbing related numbers must be numbers',\n new_values\n )\n if values['did_ground_load']:\n try:\n values['auto_frisbees_ground_loaded'] = int(\n values['auto_frisbees_ground_loaded'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All numbers of frisbees ground loaded must be numbers',\n new_values\n )\n return values", "def validate(self, data):\n # calling subserializer validate method (fields, and presets)\n data = super(FormidableSerializer, self).validate(data)\n # we check every field define in presets are define inside the form.\n if 'fields' in data and 'presets' in data:\n data = self.check_presets_cohesion(data)\n return data", "def test_make_order_with_some_data_missing(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={'item_name': 'Watermelon'}, headers={\n 'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. 
Missing required param')", "def __call__(self, data):\n if sum(item_data['amount'] for item_data in data) < self.order.total_cost:\n raise ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: self.message,\n })", "def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n factura_form = Factura_LineaFormSet(self.request.POST)\n ot_linea_form = OT_LineaFormSet(self.request.POST)\n if (form.is_valid() and factura_form.is_valid()\n and ot_linea_form.is_valid()):\n return self.form_valid(form, factura_form, ot_linea_form)\n else:\n return self.form_invalid(form, factura_form, ot_linea_form)", "def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n instrumento_linea_form = Instrumento_LineaFormSet(self.request.POST)\n if (form.is_valid() and instrumento_linea_form.is_valid()):\n return self.form_valid(form, instrumento_linea_form)\n else:\n return self.form_invalid(form, instrumento_linea_form)", "def post(self, request, *args, **kwargs):\n self.object = RUT.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n ot_linea_form = OT_LineaFormSet(self.request.POST, instance=self.object)\n if (form.is_valid() and ot_linea_form.is_valid()):\n return self.form_valid(form, ot_linea_form)\n else:\n return self.form_invalid(form, ot_linea_form)", "def validate(self):\n\n for entry in self.body:\n e_type = list(entry.keys())[0]\n if e_type not in self.type_checks:\n raise exceptions.BadInputError(f\"invalid input type {e_type}\")\n\n body = entry[e_type]\n self.type_checks[e_type](body)", "def test_make_order_with_non_json_data(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), data='item_name=Guacamole&item_price=200')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. Request data must be in json format')", "def test_make_order_without_any_request_data(self):\n response = self.api_test_client.post('{}/orders'.format(\n self.BASE_URL), json={}, headers={\n 'Content-Type': 'application/json'})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. 
Missing required param')", "def post(self, request, *args, **kwargs):\n self.object = OT.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n factura_form = Factura_LineaFormSet(self.request.POST, instance=self.object)\n remito_form = Remito_LineaFormSet(self.request.POST, instance=self.object)\n ot_linea_form = OT_LineaFormSet(self.request.POST, instance=self.object)\n if (form.is_valid() and factura_form.is_valid()\n and ot_linea_form.is_valid() and remito_form.is_valid()):\n return self.form_valid(form, factura_form, remito_form, ot_linea_form)\n else:\n return self.form_invalid(form, factura_form, remito_form, ot_linea_form)", "def validate(self):\r\n # Check KeyError\r\n try:\r\n self.fields[\"product_name_fr\"]\r\n self.fields[\"generic_name\"]\r\n self.fields[\"url\"]\r\n self.fields[\"nutrition_grade_fr\"]\r\n self.fields[\"categories\"]\r\n self.fields[\"stores\"]\r\n self.fields[\"brands\"]\r\n except KeyError:\r\n return False\r\n\r\n # Check empty field and lenght of generic_name\r\n for key, value in self.fields.items():\r\n if value == '':\r\n return False\r\n break\r\n if key == \"generic_name\":\r\n if len(value) > 255:\r\n return False\r\n\r\n try:\r\n self.categories = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"categories\"], 100)\r\n self.stores = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"stores\"], 45)\r\n self.brands = ProductFromApiToDatabase.clean_tag(\r\n self.fields[\"brands\"], 45)\r\n self.category_index = self.categories.index(self.category)\r\n except KeyError:\r\n return False\r\n except ValueError:\r\n return False\r\n except AttributeError:\r\n self.errors += 1\r\n print(self.errors)\r\n return False", "def validate_emprestimo_post_body(request_body: dict):\n required_fields = [\n 'valor_nominal',\n 'taxa_juros',\n 'banco',\n 'nome_cliente'\n ]\n request_fields = request_body.keys()\n\n for current_required_field in required_fields:\n if current_required_field not in request_fields:\n raise MissingRequiredFields(code=400)\n\n if not isinstance(request_body.get('taxa_juros'), float):\n raise InvalidFieldType(code=400)\n\n if not isinstance(request_body.get('valor_nominal'), float):\n raise InvalidFieldType(code=400)\n\n if not isinstance(request_body.get('banco'), str):\n raise InvalidFieldType(code=400)\n\n if not isinstance(request_body.get('nome_cliente'), str):\n raise InvalidFieldType(code=400)\n\n if request_body.get('valor_nominal') <= 0 or request_body.get('taxa_juros') <= 0:\n raise InvalidFieldValue(code=400)\n\n return", "def validate(self, value):\n\n current_values = dict(self.queryset.values_list('id', 'quantity'))\n for product_id in self.product_fields.keys():\n self.product_fields[product_id]['quantity'] = current_values[product_id]\n\n errors = []\n for (product_id, product_data), chosen_value in zip(self.product_fields.items(), value):\n name = product_data['name']\n int_chosen_val = int(chosen_value)\n if product_data['quantity'] == 0:\n errors.append(\n ValidationError(self.error_messages['out_of_stock'].format(name))\n )\n continue\n if int_chosen_val <= 0:\n errors.append(\n ValidationError(self.error_messages['incorrect_quantity'].format(name))\n )\n continue\n\n if product_data['quantity'] < int_chosen_val:\n errors.append(\n ValidationError(self.error_messages['less_quantity'].format(product_data['quantity'], name))\n )\n continue\n\n if len(errors) > 0:\n raise ValidationError(errors)", "def run_validation(self, data=empty):\n\n # no idea why there is no such 
built in feature in DRF\n if data is not empty:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(f) for f in unknown]\n raise ValidationError({api_settings.NON_FIELD_ERRORS_KEY: errors})\n return super().run_validation(data)", "def clean(self):\n cleaned_data = super(AuthorizenetSurveyPurchaseForm, self).clean()\n if cleaned_data.get(\"purchase_code\"):\n return cleaned_data\n\n for f in [\"card_number\", \"card_expiry\", \"card_ccv\"]:\n if not cleaned_data.get(f):\n self.add_error(f, \"Required for card payments\")\n return cleaned_data", "def test_custom_required(self):\n for data in ({}, {'payment_amount': ''}):\n form = DonationAmountForm(data=data)\n self.assertFalse(form.is_valid())\n errors = form.errors.as_data()\n self.assertTrue('payment_amount' in errors)\n self.assertEqual('required', errors['payment_amount'][0].code)" ]
[ "0.5745162", "0.552804", "0.55063504", "0.55063504", "0.5504187", "0.54778343", "0.54469776", "0.5445624", "0.5444358", "0.54384", "0.54326487", "0.5432112", "0.5422848", "0.54169387", "0.5380987", "0.53740376", "0.5372109", "0.5350199", "0.5332469", "0.5322742", "0.5322059", "0.53145486", "0.53097117", "0.530445", "0.53043765", "0.52981484", "0.52824223", "0.5280457", "0.5274435", "0.5274373" ]
0.64973056
0
The actual update of an order to confirmed/cancelled/delivered status happens here. Only two transitions are valid: 1. From accepted to confirmed. 2. From confirmed to cancelled/delivered.
def update(self, instance, validated_data):
    # If an order is cancelled or delivered, it cannot be modified.
    if instance.status == CANCELLED or instance.status == DELIVERED:
        raise exceptions.PermissionDenied('This order cannot be modified.')
    # If an order is already confirmed but UI/agent sends another confirmation request by mistake,
    # we deny it as each confirmation is a big operation that includes generating invoices/ledger entries.
    if instance.status == validated_data['status'] == CONFIRMED:
        raise exceptions.PermissionDenied('This order is already confirmed.')
    if instance.status == ACCEPTED and validated_data['status'] == CONFIRMED:
        # 1. Transition: accepted -> confirmed
        instance.status = validated_data.get('status')
    elif instance.status == CONFIRMED and validated_data['status'] in [CANCELLED, DELIVERED]:
        # 2. Transition: confirmed -> cancelled/delivered and return
        instance.status = validated_data.get('status')
        instance.save(update_fields=['status'])
        return instance
    else:
        # In case of any invalid transition, reject it.
        raise exceptions.PermissionDenied('There seems to be some discrepancy. Please contact your agent.')
    # Get exclusive lock on all relevant data rows
    orderlines = instance.orderlines.select_for_update().select_related('product').all()
    # Do order and product update in a single transaction
    with transaction.atomic():
        # Validate that order can be approved.
        self._validate_units_and_balance_in_orderlines(orderlines, instance.user)
        for orderline in orderlines:
            # Decrement product stock count by orderline(buying) requirement
            product = orderline.product
            product.units = F('units') - orderline.units
            product.save(update_fields=['units'])
            # Lock current standing price into the orderline, calculate sub total and lock it.
            product_price = product.price
            orderline.confirmed_price = product_price
            orderline.locked = CONFIRMED
            orderline.sub_total = product_price * F('units')
            orderline.save(update_fields=['confirmed_price', 'locked', 'sub_total'])
        # Mark order as confirmed.
        instance.save(update_fields=['status'])
    return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_order(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 201)\n self.assertTrue(\n response_as_json(response)['order']['status_updated_on'])\n self.assertEqual(\n response_as_json(response)['order']['order_status'], 'accepted')", "def test_order_update_status_function(self):\n order = OrderInfo.objects.create(user=self.create_test_user())\n self.assertIsInstance(order.ordered, datetime)\n self.assertIsNone(order.cooked)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsInstance(order.delivered, datetime)", "def determine_order_status_change(order, decision):\n if order.status == Order.FAILED and decision == CYBERSOURCE_DECISION_CANCEL:\n # This is a duplicate message, ignore since it's already handled\n return None\n\n if order.status != Order.CREATED:\n raise EcommerceException(f\"{order} is expected to have status 'created'\")\n\n if decision != CYBERSOURCE_DECISION_ACCEPT:\n log.warning(\n \"Order fulfillment failed: received a decision that wasn't ACCEPT for order %s\",\n order,\n )\n return Order.FAILED\n\n return Order.FULFILLED", "def test_manager_change_order_status(self):\n self.client.force_authenticate(self.user)\n cancel = \"CA\"\n url = reverse('order-set_status', args=[self.order.id])\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_status = Order.objects.values(\"status\").get(pk=self.order.id)\n self.assertEqual(new_status[\"status\"], cancel)\n\n with self.subTest('customer can not change order status'):\n self.user.role = get_user_model().CUSTOMER\n self.client.force_authenticate(self.user)\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def on_update(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()", "async def _check_order_update(self, *args, **kwargs):\n order_nos = list(self._orders.keys())\n if not order_nos:\n return\n for order_no in order_nos:\n success, error = await self._rest_api.get_order_status(order_no)\n if error:\n return\n await self._update_order(success[\"data\"][0])", "async def update_order_status():\n symbol = App.config[\"symbol\"]\n\n # Get currently active order and id (if any)\n order = App.order\n order_id = order.get(\"orderId\", 0) if order else 0\n if not order_id:\n log.error(f\"Wrong state or use: check order status cannot find the order id.\")\n return None\n\n # -----\n # Retrieve order from the server\n try:\n new_order = App.client.get_order(symbol=symbol, orderId=order_id)\n except Exception as e:\n log.error(f\"Binance exception in 'get_order' {e}\")\n return\n\n # Impose and overwrite the new order information\n if new_order:\n order.update(new_order)\n else:\n return None\n\n # Now order[\"status\"] contains the latest status of the order\n return order[\"status\"]", "def update_order(self, order):\n order.order_id = self.order_id\n order.average_price = self.avg_execution_price\n order.symbol = self.symbol\n order.side = self.side\n order.type = self.order_type\n order.amount = self.original_amount\n order.price = self.price\n order.filled = 
self.executed_amount\n order.remaining = self.remaining_amount\n if self.is_cancelled:\n order.status = exchanges.Order.Status.CANCELLED\n elif self.is_live:\n order.status = exchanges.Order.Status.OPEN\n else:\n order.status = exchanges.Order.Status.CLOSED\n return order", "def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n return # active buy/sell order submitted/accepted - do nothing\n\n # check if order has been completed (could reject if not enough cash)\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log(f'BUY EXECUTED, {order.executed.price:.2f}')\n elif order.issell():\n self.log(f'SELL EXECUTED, {order.executed.price:.2f}')\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\n self.log('Order Canceled/Margin/Rejected')\n\n self.bar_executed = len(self)\n\n self.order = None # reset orders", "def order_update_status():\n result = order_obj.order_update_status(request.forms) \n return result", "def test_admin_change_order_status(self):\n # Test unregistered id\n # Correct format but not there\n response = self.client.put(\n 'api/v1/parcels/35420', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n data, {'message': 'No Parcel delivery order with that id'})\n # Test invalid format id\n response = self.client.put(\n 'api/v1/parcels/35uh420', headers=self.admin_token_dict) # Incorrect id format\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data, {'message': 'Wrong id format'})", "def warehouse_officer_confirm_qty(self):\n if (\n self.approve_request_ids is None\n or self.approve_request_ids is False\n ):\n raise UserError(\"No line(s) defined!\")\n self._compute_confirm()\n for line in self.approve_request_ids:\n line._compute_state()\n if any(line.state != \"available\" for line in self.approve_request_ids):\n raise Warning(\n \"Please procure the items that are short in stock or process pending purchase agreements and try again!\"\n )\n else:\n self.state = 'transfer'", "def order_success(self, request):\n order = self.order_from_request(request)\n\n if not order:\n return self.order_new(request)\n\n if not order.balance_remaining:\n self.set_order_on_request(request, order=None)\n\n\n order_data = OrderData.objects.get(order=order)\n o_data = simplejson.loads(order_data.data)\n\n paymentData = {}\n paymentData['delivery_address2'] = o_data['delivery_address2']\n paymentData['billing_address2'] = o_data['billing_address2']\n paymentData['delivery_date'] = o_data['delivery_date']\n paymentData['delivery_state'] = o_data['delivery_state']\n paymentData['billing_state'] = o_data['billing_state']\n paymentData['salutation'] = o_data['salutation']\n paymentData['contact_number'] = o_data['billing_contact_number']\n\n #try:\n oPayment = OrderPayment.objects.get(order=order)\n oPayment.payment_method = o_data['order_payment_method']\n oPayment.data = simplejson.dumps(paymentData)\n oPayment.save()\n #except:\n # pass\n\n \"\"\"\n order update note\n \"\"\"\n notes = o_data['order_notes']\n order.notes = notes\n order.save()\n\n # st_save_helper(request, order)\n\n \"\"\"\n sbid = None\n\n if 'customer_styleboard' in request.session:\n sbid = request.session.get('customer_styleboard').id\n\n if 'personalize_id' in request.session:\n print \"There's a personalize_id\"\n \"\"\"\n\n current_user = User.objects.get(id=int(request.user.id))\n\n if 'ipn_emailed' in o_data and 
o_data['ipn_emailed']:\n\n pass\n \n else:\n\n emailed = send_email_order(order, current_user, notes, paymentData['contact_number'], self)\n\n logr.info('emailed order confirmation to : %s from order success' % current_user.email)\n\n\n order_data.delete() # not needed after saving to order payment\\\n \n clear_styleboard_session(request)\n\n try:\n del request.session['customer_styleboard']\n del request.session['personalize_id']\n except:\n pass\n\n return self.render(request, 'plata/shop_order_success.html',\n self.get_context(request, {\n 'order': order,\n 'progress': 'success',\n }))", "def update_or_create_delivery(self, orderitem_data):", "def _order_cannot_be_updated_if_not_pending(order_status):\n pecan.abort(400, u._(\"Only PENDING orders can be updated. Order is in the\"\n \"{0} state.\").format(order_status))", "def update_with_order_update(self, order_update: OrderUpdate) -> bool:\n if (order_update.client_order_id != self.client_order_id\n and order_update.exchange_order_id != self.exchange_order_id):\n return False\n\n prev_data = (self.exchange_order_id, self.current_state)\n\n if self.exchange_order_id is None and order_update.exchange_order_id is not None:\n self.update_exchange_order_id(order_update.exchange_order_id)\n\n self.current_state = order_update.new_state\n self.check_processed_by_exchange_condition()\n\n updated: bool = prev_data != (self.exchange_order_id, self.current_state)\n\n if updated:\n self.last_update_timestamp = order_update.update_timestamp\n\n return updated", "def test_set_assign_complete(self):\n order_2 = StockOrderWrapper(self.order_2)\n order_3 = StockOrderWrapper(self.order_3)\n order_5 = StockOrderWrapper(self.order_5)\n order_7 = StockOrderWrapper(self.order_7)\n\n order_2.set_assign_complete()\n order_3.set_assign_complete()\n order_5.set_assign_complete()\n order_7.set_assign_complete()\n\n # =================================================================\n # test: order_wrapper status is updated\n # =================================================================\n\n self.assertEquals(order_2.order_status, PROCESSED)\n self.assertEquals(order_3.order_status, PROCESSED)\n self.assertEquals(order_5.order_status, PROCESSED)\n self.assertEquals(order_7.order_status, PROCESSED)\n\n # =================================================================\n # test: order is saved\n # =================================================================\n\n real_order_2 = StockOrder.objects.get(order_id=1)\n real_order_3 = StockOrder.objects.get(order_id=2)\n real_order_5 = StockOrder.objects.get(order_id=3)\n real_order_7 = StockOrder.objects.get(order_id=4)\n\n # =================================================================\n # test: order_result is updated\n # =================================================================\n\n self.assertEquals(real_order_2.order_result, ASSIGNED_COMPLETE)\n self.assertEquals(real_order_3.order_result, ASSIGNED_COMPLETE)\n self.assertEquals(real_order_5.order_result, ASSIGNED_COMPLETE)\n self.assertEquals(real_order_7.order_result, ASSIGNED_COMPLETE)\n\n # =================================================================\n # test: order_status is updated\n # =================================================================\n\n self.assertEquals(real_order_2.order_status, PROCESSED)\n self.assertEquals(real_order_3.order_status, PROCESSED)\n self.assertEquals(real_order_5.order_status, PROCESSED)\n self.assertEquals(real_order_7.order_status, PROCESSED)", "def mark_refunded(self):\n order = self.clone()\n 
order.status = Order.STATUS_REFUNDED\n order.save()\n return order", "def converge_orders(self, buy_orders, sell_orders, order_status):\n\n tickLog = self.exchange.get_instrument()['tickLog']\n to_amend = []\n to_create = []\n to_cancel = []\n buys_matched = 0\n sells_matched = 0\n existing_orders = self.exchange.get_orders()\n\n # Check all existing orders and match them up with what we want to place.\n # If there's an open one, we might be able to amend it to fit what we want.\n for order in existing_orders:\n if order['ordType'] != 'Limit':\n continue\n try:\n if (order['side'] == 'Buy' and (order_status == 0 or order_status == 4 or order_status == 3 or order_status == 1 or order_status == 7)):\n desired_order = buy_orders[buys_matched]\n buys_matched += 1\n elif (order['side'] == 'Sell' and (order_status == 0 or order_status == 2 or order_status == 1 or order_status == 3 or order_status == 8)):\n desired_order = sell_orders[sells_matched]\n sells_matched += 1\n elif (order['price'] == buy_orders[buys_matched]['price'] and order_status == 6):\n to_cancel.append(order)\n buys_matched += 1\n continue\n elif (order['price'] == sell_orders[sells_matched]['price'] and order_status == 6):\n to_cancel.append(order)\n sells_matched += 1\n continue\n else:\n continue\n\n # Found an existing order. Do we need to amend it?\n if desired_order['orderQty'] != order['leavesQty'] or (\n # If price has changed, and the change is more than our RELIST_INTERVAL, amend.\n desired_order['price'] != order['price'] and\n abs((desired_order['price'] / order['price']) - 1) > 0):\n to_amend.append({'orderID': order['orderID'], 'orderQty': order['cumQty'] + desired_order['orderQty'],\n 'price': desired_order['price'], 'side': order['side']})\n # Found an stop existing order. Do we need to amend it?\n\n except IndexError:\n # Will throw if there isn't a desired order to match. 
In that case, cancel it.\n if ((order_status == 2 and order['side'] == 'Sell') or (order_status == 1 and self.running_qty > 0) or (order_status == 4 and order['side'] == 'Buy') or (order_status == 3 and self.running_qty < 0) or (order_status == 7 and order['side'] == 'Buy') or (order_status == 8 and order['side'] == 'Sell')):\n to_cancel.append(order)\n\n if (order_status == 0 or order_status == 4 or order_status == 3 or order_status == 1 or order_status == 5 or order_status == 7):\n while buys_matched < len(buy_orders):\n to_create.append(buy_orders[buys_matched])\n buys_matched += 1\n if (order_status == 0 or order_status == 2 or order_status == 1 or order_status == 3 or order_status == 5 or order_status == 8):\n while sells_matched < len(sell_orders):\n to_create.append(sell_orders[sells_matched])\n sells_matched += 1\n\n if len(to_amend) > 0:\n for amended_order in reversed(to_amend):\n reference_order = [o for o in existing_orders if o['orderID'] == amended_order['orderID']][0]\n logger.info(\"Amending %4s: %d @ %.*f to %d @ %.*f (%+.*f)\" % (\n amended_order['side'],\n reference_order['leavesQty'], tickLog, reference_order['price'],\n (amended_order['orderQty'] - reference_order['cumQty']), tickLog, amended_order['price'],\n tickLog, (amended_order['price'] - reference_order['price'])\n ))\n # This can fail if an order has closed in the time we were processing.\n # The API will send us `invalid ordStatus`, which means that the order's status (Filled/Canceled)\n # made it not amendable.\n # If that happens, we need to catch it and re-tick.\n try:\n self.exchange.amend_bulk_orders(to_amend)\n except requests.exceptions.HTTPError as e:\n errorObj = e.response.json()\n if errorObj['error']['message'] == 'Invalid ordStatus':\n logger.warn(\"Amending failed. Waiting for order data to converge and retrying.\")\n sleep(0.5)\n return self.place_orders()\n else:\n logger.error(\"Unknown error on amend: %s. Exiting\" % errorObj)\n sys.exit(1)\n\n if len(to_create) > 0:\n logger.info(\"Creating %d orders:\" % (len(to_create)))\n for order in reversed(to_create):\n logger.info(\"%4s %d @ %.*f\" % (order['side'], order['orderQty'], tickLog, order['price']))\n self.exchange.create_bulk_orders(to_create)\n\n # Could happen if we exceed a delta limit\n if len(to_cancel) > 0:\n logger.info(\"Canceling %d orders:\" % (len(to_cancel)))\n for order in reversed(to_cancel):\n logger.info(\"%4s %d @ %.*f\" % (order['side'], order['leavesQty'], tickLog, order['price']))\n self.exchange.cancel_bulk_orders(to_cancel)", "def _handle_orders(self, response):\n response_type = response['type']\n state_updated = False\n if response_type == \"subscription_ack\":\n # Insure the subscription details are expected. 
Don't do anything.\n account_id = response['accountId']\n # TODO: should we do anything with the subscription id?\n # subscription_id = response['subscriptionId']\n symbol_filter = response['symbolFilter']\n api_session_filter = response['apiSessionFilter']\n event_type_filter = response['eventTypeFilter']\n if len(symbol_filter) or len(event_type_filter):\n raise Exception(\"No symbol or event type were specified, but \"\n \"filters were registered.\")\n if len(api_session_filter) != 1:\n raise Exception(\"1 session filter should have been registered.\"\n f\"{len(api_session_filter)} were registered.\")\n accepted_key = api_session_filter[0]\n if accepted_key != self._api_credentials\\\n .api_key:\n raise Exception(\"The whitelisted api session key does not \"\n \"match our session key.\")\n elif response_type == \"initial\":\n # Create a new order record for the initial response.\n order_response = OrderResponse.from_json_dict(response)\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n existing_order = self.exchange_state.order(new_order.order_id)\n if existing_order:\n raise Exception(\"An initial response was received for an \"\n \"existing order (id: {new_order.order_id}).\")\n self.exchange_state.set_order(new_order.order_id, new_order)\n state_updated = True\n elif response_type == \"accepted\":\n # Create a new order. Mark the corresponding action as successful.\n order_response = OrderResponse.from_json_dict(response)\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n self.exchange_state.set_order(new_order.order_id, new_order)\n found_action = False\n for a in self._create_actions:\n if id(a) == int(order_response.client_order_id):\n if a.order is not None:\n raise Exception(\"An order accept message was received, \"\n \"but its corresponding action already \"\n \"has an order (id:{a.order.order_id}).\")\n a.order = new_order\n # I don't know if we need this status.\n a.status = exchanges.Action.Status.SUCCESS\n found_action = True\n break\n if not found_action:\n raise Exception(\"Received an order accept message, but no \"\n \"matching order action was found.\")\n state_updated = True\n elif response_type == \"rejected\":\n order_response = OrderResponse.from_json_dict(response)\n log.warning(f\"An order was rejected. Reason: \" + response['reason'])\n new_order = exchanges.Order()\n order_response.update_order(new_order)\n self.exchange_state.set_order(new_order.order_id, new_order)\n found_action = False\n for a in self._create_actions:\n if id(a) == int(order_response.client_order_id):\n if a.order is not None:\n raise Exception(\"An order reject message was received, \"\n \"but its corresponding action already \"\n \"has an order (id:{a.order.order_id}).\")\n a.order = new_order\n a.status = exchanges.Action.Status.FAILED\n found_action = True\n break\n if not found_action:\n raise Exception(\"Received an order reject message, but no \"\n \"matching order action was found.\")\n state_updated = True\n elif response_type == \"booked\":\n # I don't think we need to act on this.\n log.info(\"Order booked. 
Order id:{response['order_id']}.\")\n elif response_type == \"fill\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n if not order:\n raise Exception(\"Received a fill response for an unknown order \"\n f\"(id:{order_response.order_id}).\")\n log.info(\"Order fill response received for order id: \"\n f\"{order_response.order_id}.\")\n order_response.update_order(order)\n state_updated = True\n # TODO: we could add some checks here to see if our fee calculation\n # is correct.\n elif response_type == \"cancelled\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n reason = response.get('reason', 'No reason provided.')\n # Unused:\n # cancel_command_id = response.get('cancel_command_id', None)\n if not order:\n raise Exception(\"Received a cancelled response for an unknown \"\n f\"order (id:{order_response.order_id}). Reason:\"\n f\"{reason}\")\n log.info(\"Order fill response received for order id: \"\n f\"{order_response.order_id}. Reason: {reason}\")\n cancel_action = self._cancel_actions.get(order_response.order_id,\n None)\n if not cancel_action:\n raise Exception(\"Received a cancel response but can't find a \"\n \"matching cancel action.\")\n cancel_action.status = exchanges.Action.Status.SUCCESS\n state_updated = True\n elif response_type == \"cancel_rejected\":\n order_response = OrderResponse.from_json_dict(response)\n reason = response.get('reason', 'No reason provided.')\n log.warning(\"Failed to cancel order (id: \"\n f\"{order_response.order_id}). Reason: {reason}\")\n cancel_action = self._cancel_actions.get(order_response.order_id,\n None)\n if not cancel_action:\n raise Exception(\"Received a cancel rejected response but can't \"\n \"find a matching cancel action.\")\n cancel_action.status = exchanges.Action.Status.FAILED\n state_updated = True\n elif response_type == \"closed\":\n order_response = OrderResponse.from_json_dict(response)\n order = self.exchange_state.order(order_response.order_id)\n if not order:\n raise Exception(\"Received a close response for an unknown order\"\n f\" (id:{order_response.order_id}).\")\n log.info(\"Order close response received for order id: \"\n f\"{order_response.order_id}.\")\n order_response.update_order(order)\n state_updated = True\n else:\n raise Exception(f\"Unexpected response type: {response_type}.\")\n return state_updated", "def SendOrderConfirmation(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _update_time_delivered(self, time_delivered):\n # Update db record's time_delivered field\n update = {'time_delivered': time_delivered}\n datebase.update_transaction_record(filter=self.filter, update=update)\n \n # Update db record's estimated_time field\n datebase.update_transaction_record(filter=self.filter, {estimated_time:'0'})\n \n # Update db record's transaction status to delivered\n self._update_transaction_status(transaction_status='delivered')\n \t\t self.transaction_info.update(delivery_status='delivered')\n \n # Update object\n \t\tself.transaction_info.update(time_delivered=time_delivered)\n self.transaction_info.update(estimated_time=0)\n self.transaction_info(transaction_status='delivered')\n\n \tdef _update_transaction_status(self, transaction_status, photo=None):\n \"\"\"\n Update record's transaction_status and send sms msg to update 
seeker\n \"\"\"\n # Send text message when status changes \n self.send_text(message_type=transaction_status)\n\n # Update db record's transaction status\n update = {'transaction_status': transaction_status}\n datebase.update_transaction_record(filter=self.filter, update=update)\n\n # Update object\n self.transaction_info.update('transaction_seeker': transaction_status)\n\n # If delivered ... TODO: do we actually want to remove from db? \n \t\t# if transaction_status == 'delivered':\n # datebase.delete_transaction_record()\n # return 1 \n # arguments against: we wont be able to access delivered photo if we want to do that", "def set_order_valid():\n data = select_data_source()\n order_id = data['id']\n valid = data['valid']\n user = data['user']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if order exists.\n \n cmd = 'select * from orders where id==\"{0}\"'.format(order_id)\n order_info = db.execute(cmd).fetchall()\n if len(order_info) != 1 :\n return validate_not_exist_return\n \n ### Check if this order belongs to the user.\n \n cmd = 'select * from orders where id==\"{0}\" AND owner==\"{1}\"'.format(order_id, user)\n user_info = db.execute(cmd).fetchall()\n if len(user_info) != 1 :\n return validate_invalid_user_return\n \n ### Check if order is not valid recently.\n \n cmd = 'select * from orders where id==\"{0}\" AND passed!=0'.format(order_id)\n order_valid = db.execute(cmd).fetchall()\n if len(order_valid) != 0 :\n return validate_is_valid_return\n \n ### Check if there is an order already valid at the same time.\n cmd = 'select time from orders where id==\"{0}\"'.format(order_id)\n order_time = db.execute(cmd).fetchall()[0][0]\n cmd = 'select * from orders where time==\"{0}\" AND passed!=0'.format(order_time)\n conflict = db.execute(cmd).fetchall()\n if len(conflict) != 0 :\n return validate_conflict_return\n \n if str.lower(valid) == 'true' :\n ### Set order valid.\n cmd = 'update orders set passed=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('set order {0} to valid.'.format(order_id))\n return validate_complete_return\n elif str.lower(valid) == 'false' :\n ### Remove the order entry.\n cmd = 'delete from orders where id=\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('deny the order {0} and remove it from databse.'.format(order_id))\n # TODO: email something to announce...\n return validate_reject_return\n else:\n return validate_valid_parameter_error_return", "def test_acknowledge_orders(self):\n pass", "async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"executionReport\": # Order update.\n if msg[\"s\"] != self._raw_symbol:\n return\n order_no = \"{}_{}\".format(msg[\"i\"], msg[\"c\"])\n if msg[\"X\"] == \"NEW\":\n status = ORDER_STATUS_SUBMITTED\n elif msg[\"X\"] == \"PARTIALLY_FILLED\":\n status = ORDER_STATUS_PARTIAL_FILLED\n elif msg[\"X\"] == \"FILLED\":\n status = ORDER_STATUS_FILLED\n elif msg[\"X\"] == \"CANCELED\":\n status = ORDER_STATUS_CANCELED\n elif msg[\"X\"] == \"REJECTED\":\n status = ORDER_STATUS_FAILED\n elif msg[\"X\"] == \"EXPIRED\":\n status = ORDER_STATUS_FAILED\n else:\n logger.warn(\"unknown status:\", msg, caller=self)\n return\n order = self._orders.get(order_no)\n if not order:\n info = {\n \"platform\": self._platform,\n \"account\": self._account,\n \"strategy\": self._strategy,\n \"order_no\": order_no,\n \"action\": msg[\"S\"],\n \"order_type\": msg[\"o\"],\n 
\"symbol\": self._symbol,\n \"price\": msg[\"p\"],\n \"quantity\": msg[\"q\"],\n \"ctime\": msg[\"O\"]\n }\n order = Order(**info)\n self._orders[order_no] = order\n order.remain = float(msg[\"q\"]) - float(msg[\"z\"])\n order.status = status\n order.utime = msg[\"T\"]\n if self._order_update_callback:\n SingleTask.run(self._order_update_callback, copy.copy(order))", "def fulfill_order(request_data):\n # First, save this information in a receipt\n receipt = Receipt.objects.create(data=request_data)\n\n # Link the order with the receipt if we can parse it\n reference_number = request_data[\"req_reference_number\"]\n req_bill_to_email = request_data.get(\"req_bill_to_email\")\n order = Order.objects.get_by_reference_number(reference_number)\n receipt.order = order\n receipt.save()\n\n new_order_status = determine_order_status_change(order, request_data[\"decision\"])\n if new_order_status is None:\n # This is a duplicate message, ignore since it's already handled\n return\n\n order.status = new_order_status\n order.save()\n sync_hubspot_deal(order)\n\n if order.status == Order.FULFILLED:\n complete_order(order)\n if settings.ENABLE_ORDER_RECEIPTS:\n send_ecommerce_order_receipt(\n order=order, cyber_source_provided_email=req_bill_to_email\n )\n\n # Save to log everything to an audit table including enrollments created in complete_order\n order.save_and_log(None)", "def test_change_order_status_when_order_does_not_exist(self):\n response = self.api_test_client.put('{}/orders/1000'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Order with id 1000 not found')", "def on_update_after_submit(self):\n if self.get('update_request') and not self.is_pending_approval():\n if self.is_revert:\n self.set_as_reverted()\n else:\n self.set_as_success()", "def _onSuccess(self, controller):\r\n if controller.order.paid_in_full:\r\n controller.cart.empty()\r\n for item in controller.order.orderitem_set.all():\r\n if item.product.is_subscription:\r\n item.completed = True\r\n item.save()\r\n try:\r\n curr_status = controller.order.orderstatus_set.latest() \r\n except OrderStatus.DoesNotExist:\r\n curr_status = None\r\n \r\n if (curr_status is None) or (curr_status.notes and curr_status.status == \"New\"):\r\n controller.order.add_status(status='New', notes = \"Order successfully submitted\")\r\n else:\r\n # otherwise just update and save\r\n if not curr_status.notes:\r\n curr_status.notes = _(\"Order successfully submitted\")\r\n curr_status.save() \r\n\r\n #Redirect to the success page\r\n url = controller.lookup_url('satchmo_checkout-success')\r\n return HttpResponseRedirect(url) \r\n\r\n else:\r\n log.debug('Order #%i not paid in full, sending to pay rest of balance', controller.order.id)\r\n #url = controller.order.get_balance_remaining_url()\r\n url = reverse('satchmo_balance_remaining')\r\n return HttpResponseRedirect(url)", "async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"ORDER_TRADE_UPDATE\": # Order update.\n self._update_order(msg[\"o\"])" ]
[ "0.68926567", "0.6892195", "0.66556543", "0.6604199", "0.64407927", "0.6413585", "0.6380169", "0.6379697", "0.6355502", "0.63130534", "0.63076985", "0.6285451", "0.6273759", "0.613097", "0.6118931", "0.6096293", "0.6087185", "0.60793054", "0.6068147", "0.6027666", "0.6010716", "0.5994995", "0.598443", "0.5963867", "0.59536463", "0.5946517", "0.593697", "0.59339434", "0.58925515", "0.5890675" ]
0.76192147
0
Create an lr scheduler based on config. Note that lr_scheduler must accept an optimizer that has been restored.
def build(optimizer_config, optimizer, total_step):
    optimizer_type = optimizer_config.WhichOneof('optimizer')
    if optimizer_type == 'rms_prop_optimizer':
        config = optimizer_config.rms_prop_optimizer
        lr_scheduler = _create_learning_rate_scheduler(
            config.learning_rate, optimizer, total_step=total_step)
    if optimizer_type == 'momentum_optimizer':
        config = optimizer_config.momentum_optimizer
        lr_scheduler = _create_learning_rate_scheduler(
            config.learning_rate, optimizer, total_step=total_step)
    if optimizer_type == 'adam_optimizer':
        config = optimizer_config.adam_optimizer
        lr_scheduler = _create_learning_rate_scheduler(
            config.learning_rate, optimizer, total_step=total_step)
    return lr_scheduler
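A minimal, self-contained sketch of the same dispatch pattern, with a plain dict standing in for the protobuf config and torch's StepLR standing in for the project's _create_learning_rate_scheduler; all names in the sketch are illustrative assumptions, not the real API:

import torch

def build_sketch(optimizer_config, optimizer, total_step):
    # The dict key plays the role of WhichOneof('optimizer').
    optimizer_type = next(iter(optimizer_config))
    if optimizer_type not in ('rms_prop_optimizer', 'momentum_optimizer',
                              'adam_optimizer'):
        raise ValueError('Optimizer %s not supported.' % optimizer_type)
    # Every branch above delegates to the same scheduler factory, so a single
    # stand-in scheduler illustrates the return value.
    return torch.optim.lr_scheduler.StepLR(optimizer, step_size=total_step // 10)

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=0.01)
scheduler = build_sketch({'adam_optimizer': {}}, opt, total_step=1000)
scheduler.step()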
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_lr_scheduler(\n cfg: CfgNode, optimizer: torch.optim.Optimizer\n) -> torch.optim.lr_scheduler._LRScheduler:\n name = cfg.SOLVER.LR_SCHEDULER_NAME\n if name == \"WarmupMultiStepLR\":\n return WarmupMultiStepLR(\n optimizer,\n cfg.SOLVER.STEPS,\n cfg.SOLVER.GAMMA,\n warmup_factor=cfg.SOLVER.WARMUP_FACTOR,\n warmup_iters=cfg.SOLVER.WARMUP_ITERS,\n warmup_method=cfg.SOLVER.WARMUP_METHOD,\n )\n elif name == \"WarmupCosineLR\":\n return WarmupCosineLR(\n optimizer,\n cfg.SOLVER.MAX_ITER,\n warmup_factor=cfg.SOLVER.WARMUP_FACTOR,\n warmup_iters=cfg.SOLVER.WARMUP_ITERS,\n warmup_method=cfg.SOLVER.WARMUP_METHOD,\n )\n else:\n raise ValueError(\"Unknown LR scheduler: {}\".format(name))", "def create_lr_scheduler(\n self, optimizer: torch.optim.Optimizer # type: ignore\n ) -> Optional[LRScheduler]:\n pass", "def scheduler_creator(optimizer, config):\n return torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)", "def _create_learning_rate_scheduler(learning_rate_config, optimizer, total_step):\n lr_scheduler = None\n learning_rate_type = learning_rate_config.WhichOneof('learning_rate')\n if learning_rate_type == 'multi_phase':\n config = learning_rate_config.multi_phase\n lr_phases = []\n mom_phases = []\n for phase_cfg in config.phases:\n lr_phases.append((phase_cfg.start, phase_cfg.lambda_func))\n mom_phases.append((phase_cfg.start, phase_cfg.momentum_lambda_func))\n lr_scheduler = lsf.LRSchedulerStep(\n optimizer,total_step, lr_phases, mom_phases)\n\n if learning_rate_type == 'one_cycle':\n config = learning_rate_config.one_cycle\n lr_scheduler = lsf.OneCycle(\n optimizer, total_step, config.lr_max, list(config.moms), config.div_factor, config.pct_start)\n\n if lr_scheduler is None:\n raise ValueError('Learning_rate %s not supported.' % learning_rate_type)\n\n return lr_scheduler", "def _create_learning_rate_scheduler(learning_rate_config, optimizer, last_step=-1):\n lr_scheduler = None\n learning_rate_type = learning_rate_config.name \n if learning_rate_type == 'constant_learning_rate': \n\n lr_scheduler = learning_schedules.Constant(\n optimizer, last_step=last_step)\n\n if learning_rate_type == 'exponential_decay_learning_rate':\n config = learning_rate_config \n lr_scheduler = learning_schedules.ExponentialDecay(\n optimizer, config.decay_steps, \n config.decay_factor, config.staircase, last_step=last_step)\n\n if learning_rate_type == 'manual_step_learning_rate':\n config = learning_rate_config\n if not config.schedule:\n raise ValueError('Empty learning rate schedule.')\n learning_rate_step_boundaries = [x.step for x in config.schedule]\n learning_rate_sequence = [config.initial_learning_rate]\n learning_rate_sequence += [x.learning_rate for x in config.schedule]\n lr_scheduler = learning_schedules.ManualStepping(\n optimizer, learning_rate_step_boundaries, learning_rate_sequence, \n last_step=last_step)\n\n if learning_rate_type == 'cosine_decay_learning_rate':\n config = learning_rate_config.cosine_decay_learning_rate\n lr_scheduler = learning_schedules.CosineDecayWithWarmup(\n optimizer, config.total_steps, \n config.warmup_learning_rate, config.warmup_steps, \n last_step=last_step)\n\n if lr_scheduler is None:\n raise ValueError('Learning_rate %s not supported.' % learning_rate_type)\n\n return lr_scheduler", "def build_lr_scheduler(optimizer, lr_scheduler='single_step', stepsize=1, gamma=0.1, max_epoch=1,\n frozen=20, warmup=10, warmup_factor_base=0.1):\n if lr_scheduler not in AVAI_SCH:\n raise ValueError('Unsupported scheduler: {}. 
Must be one of {}'.format(lr_scheduler, AVAI_SCH))\n\n if lr_scheduler == 'single_step':\n if isinstance(stepsize, list):\n stepsize = stepsize[-1]\n\n if not isinstance(stepsize, int):\n raise TypeError(\n 'For single_step lr_scheduler, stepsize must '\n 'be an integer, but got {}'.format(type(stepsize))\n )\n\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=stepsize, gamma=gamma\n )\n\n elif lr_scheduler == 'multi_step':\n if not isinstance(stepsize, list):\n raise TypeError(\n 'For multi_step lr_scheduler, stepsize must '\n 'be a list, but got {}'.format(type(stepsize))\n )\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, milestones=stepsize, gamma=gamma\n )\n\n elif lr_scheduler == 'cosine':\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, float(max_epoch)\n )\n elif lr_scheduler == 'multi_step_warmup':\n if not isinstance(stepsize, list):\n raise TypeError(\n 'For multi_step lr_scheduler, stepsize must '\n 'be a list, but got {}'.format(type(stepsize))\n )\n\n scheduler = MultiStepLRWithWarmUp(\n optimizer, milestones=stepsize, warmup_iters=warmup, frozen_iters=frozen, gamma=gamma,\n warmup_factor_base=warmup_factor_base\n )\n\n return scheduler", "def build_lr_scheduler(\n optimizer, lr_scheduler='single_step', stepsize=1, gamma=0.1, max_epoch=1\n):\n if lr_scheduler not in AVAI_SCH:\n raise ValueError(\n 'Unsupported scheduler: {}. Must be one of {}'.format(\n lr_scheduler, AVAI_SCH\n )\n )\n\n if lr_scheduler == 'single_step':\n if isinstance(stepsize, list):\n stepsize = stepsize[-1]\n\n if not isinstance(stepsize, int):\n raise TypeError(\n 'For single_step lr_scheduler, stepsize must '\n 'be an integer, but got {}'.format(type(stepsize))\n )\n\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=stepsize, gamma=gamma\n )\n\n elif lr_scheduler == 'multi_step':\n if not isinstance(stepsize, list):\n raise TypeError(\n 'For multi_step lr_scheduler, stepsize must '\n 'be a list, but got {}'.format(type(stepsize))\n )\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, milestones=stepsize, gamma=gamma\n )\n\n elif lr_scheduler == 'cosine':\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, float(max_epoch)\n )\n\n return scheduler", "def _get_lr_scheduler(project_parameters, optimizer):\n if project_parameters.lr_scheduler == 'StepLR':\n lr_scheduler = StepLR(optimizer=optimizer,\n step_size=project_parameters.step_size, gamma=project_parameters.gamma)\n elif project_parameters.lr_scheduler == 'CosineAnnealingLR':\n lr_scheduler = CosineAnnealingLR(\n optimizer=optimizer, T_max=project_parameters.step_size)\n return lr_scheduler", "def get_scheduler(optimizer, opt):\n \n epochs_no_decay = opt.epochs - opt.lr_linear\n lr_policy = opt.lr_policy\n \n if lr_policy == 'linear':\n def lr_lambda(epoch):\n return 1. - max(0, epoch - epochs_no_decay) / float(opt.lr_linear + 1)\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)\n elif lr_policy == 'step':\n # multiply by gamma every lr_decay_steps\n # for example lr_decay_steps=50 and initial learning = .5\n # then we have \n # lr = .5 for 0 <= epoch < 50;\n # lr = .05 for 50 <= epoch < 100;\n # lr = .005 for 100 <= epoch < 150;\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_step, gamma=.1)\n elif lr_policy == 'plateau':\n # Reduce learning rate when a metric has stopped improving. \n # Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. 
\n # This scheduler reads a metrics quantity and if no improvement \n # is seen for a ‘patience’ number of epochs, \n # the learning rate is reduced.\n # Parameters\n # - mode (str, default=min): In `min` mode, lr will be reduced when the quantity monitored has stopped decreasing; \n # in `max` mode, lr will be reduced when the quantity monitored has stopped increasing.\n # - factor (float, default=.1): Factor by which the learning rate will be reduced. new_lr = lr * factor.\n # - patience (int, default=10): Number of epochs with no improvement after which learning rate will be reduced. \n # - threshold (float): only decrease lr if the change in the quantitiy monitored is smaller than threshold. \n # Say we have threshold=0.001, if loss is $18.0$ on epoch $n$ and loss is $17.9999$ on epoch $n+1$,\n # then multiply current learning rate by the factor.\n # On the contrary, if the loss is 17.99, lr doesn't have to be changed.\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=.2, threshold=.01, patience=5)\n else:\n return NotImplementedError(f'learning rate policy {lr_policy} is not implemented')\n return scheduler", "def create_lr_scheduler(lr_scheduler_name: str, optimizer: torch.optim.Optimizer,\n lr_gamma: float, max_epochs: int) -> torch.optim.lr_scheduler._LRScheduler:\n if lr_scheduler_name.lower() == \"exponential\":\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, lr_gamma)\n elif lr_scheduler_name.lower() == \"linear\":\n lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: 1 - epoch / max_epochs)\n else:\n raise ValueError(\"SystemLog: Unknown learning rate scheduler {}\".format(lr_scheduler_name))\n\n return lr_scheduler", "def make_lr_scheduler(lr: float, final_lr: float, n_epochs: int,\n verbose: int = 1) -> keras.callbacks.LearningRateScheduler:\n schedule = build_schedule(lr, final_lr, n_epochs)\n return LearningRateScheduler(schedule=schedule, verbose=verbose)", "def get_lr_scheduler(self):\n try:\n scheduler_name = self.configs.OPTIM.LR_SCHEDULER.SCHEDULER_NAME\n except AttributeError:\n scheduler_name = None\n\n if scheduler_name is None:\n lr_scheduler = None\n\n elif scheduler_name == 'plateau':\n factor = self.configs.OPTIM.LR_SCHEDULER.FACTOR\n patience = self.configs.OPTIM.LR_SCHEDULER.PATIENCE\n\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer=self.optimizer,\n mode='min',\n factor=factor,\n patience=patience,\n verbose=False,\n threshold=1e-4,\n threshold_mode='rel',\n cooldown=0,\n min_lr=1e-6, # default min_lr is 0\n eps=1e-8,\n )\n\n elif scheduler_name == 'cyclic':\n base_lr = self.configs.OPTIM.LR_SCHEDULER.BASE_LR\n max_lr = self.configs.OPTIM.LR_SCHEDULER.MAX_LR\n\n lr_scheduler = torch.optim.lr_scheduler.CyclicLR(\n optimizer=self.optimizer,\n base_lr=base_lr,\n max_lr=max_lr,\n step_size_up=2000,\n step_size_down=None,\n mode='triangular',\n gamma=1.0,\n scale_fn=None,\n scale_mode='cycle',\n cycle_momentum=True,\n base_momentum=0.8,\n max_momentum=0.9,\n last_epoch=-1,\n )\n\n else:\n LOGGER.error('Invalid learning rate scheduler: %s', scheduler_name)\n raise NotImplementedError\n\n return lr_scheduler", "def build_lr_scheduler(optimizer: Optimizer, args: Namespace,\n total_epochs: List[int] = None) -> _LRScheduler:\n # Learning rate scheduler\n return NoamLR(\n optimizer=optimizer,\n warmup_epochs=[args.warmup_epochs],\n total_epochs=total_epochs or [args.epochs] * args.num_lrs,\n steps_per_epoch=args.train_data_size // args.batch_size,\n init_lr=[args.init_lr],\n 
max_lr=[args.max_lr],\n final_lr=[args.final_lr]\n )", "def get_scheduler(optimizer, opt):\n def lambda_rule(epoch):\n lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)\n return lr_l\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n return scheduler", "def check_lr_schedulers(self) -> None:\n # set default scheduler\n if (\n \"LR_SCHEDULER\" not in self.config\n or self.config[\"LR_SCHEDULER\"] == \"Identity\"\n ):\n self.config[\"LR_SCHEDULER\"] = \"Identity\"\n self.config[\"LR_SCHEDULER_PARAMS\"] = dict()\n\n lr_scheduler_names = get_class_names_in_files(\n \"src\" + os.path.sep + \"lr_schedulers.py\"\n )\n lr_scheduler_names.remove(\"LrScheduler\")\n\n # Check config regularizer exists\n assert self.config[\"LR_SCHEDULER\"] in lr_scheduler_names\n assert \"LR_SCHEDULER_PARAMS\" in self.config\n assert isinstance(self.config[\"LR_SCHEDULER_PARAMS\"], dict)\n\n if self.config[\"LR_SCHEDULER\"] == \"MultiStepLR\":\n # milestones: list[int]\n assert \"milestones\" in self.config[\"LR_SCHEDULER_PARAMS\"]\n for n in self.config[\"LR_SCHEDULER_PARAMS\"][\"milestones\"]:\n assert isinstance(n, int)\n\n assert \"gamma\" in self.config[\"LR_SCHEDULER_PARAMS\"]\n assert 0 < self.config[\"LR_SCHEDULER_PARAMS\"][\"gamma\"] <= 1.0\n assert isinstance(self.config[\"LR_SCHEDULER_PARAMS\"][\"gamma\"], float)\n\n elif self.config[\"LR_SCHEDULER\"] == \"WarmupCosineLR\":\n # set epochs: int\n self.config[\"LR_SCHEDULER_PARAMS\"][\"epochs\"] = self.config[\"EPOCHS\"]\n\n # set target_lr: float\n self.config[\"LR_SCHEDULER_PARAMS\"][\"target_lr\"] = self.config[\"LR\"]\n\n # warmp_epochs\n assert \"warmup_epochs\" in self.config[\"LR_SCHEDULER_PARAMS\"]\n assert (\n 0\n <= self.config[\"LR_SCHEDULER_PARAMS\"][\"warmup_epochs\"]\n <= self.config[\"EPOCHS\"]\n )\n assert isinstance(self.config[\"LR_SCHEDULER_PARAMS\"][\"warmup_epochs\"], int)\n\n # start_lr\n if self.config[\"LR_SCHEDULER_PARAMS\"][\"warmup_epochs\"] != 0:\n assert \"start_lr\" in self.config[\"LR_SCHEDULER_PARAMS\"]\n assert (\n 0\n < self.config[\"LR_SCHEDULER_PARAMS\"][\"start_lr\"]\n <= self.config[\"LR\"]\n )\n assert isinstance(self.config[\"LR_SCHEDULER_PARAMS\"][\"start_lr\"], float)\n\n # n_rewinding\n if \"n_rewinding\" not in self.config[\"LR_SCHEDULER_PARAMS\"]:\n self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"] = 1\n else:\n assert type(self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"]) is int\n assert self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"] > 0\n assert (\n self.config[\"EPOCHS\"]\n % self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"]\n == 0\n )\n\n # Check zero division in lr scheduling\n assert (\n self.config[\"EPOCHS\"]\n // self.config[\"LR_SCHEDULER_PARAMS\"][\"n_rewinding\"]\n > self.config[\"LR_SCHEDULER_PARAMS\"][\"warmup_epochs\"]\n )\n\n # min_lr\n if \"min_lr\" not in self.config[\"LR_SCHEDULER_PARAMS\"]:\n self.config[\"LR_SCHEDULER_PARAMS\"][\"min_lr\"] = 0.0\n else:\n assert type(self.config[\"LR_SCHEDULER_PARAMS\"][\"min_lr\"]) is float\n assert self.config[\"LR_SCHEDULER_PARAMS\"][\"min_lr\"] >= 0.0\n\n # decay\n if \"decay\" not in self.config[\"LR_SCHEDULER_PARAMS\"]:\n self.config[\"LR_SCHEDULER_PARAMS\"][\"decay\"] = 0.0\n else:\n assert type(self.config[\"LR_SCHEDULER_PARAMS\"][\"decay\"]) is float\n assert 0.0 <= self.config[\"LR_SCHEDULER_PARAMS\"][\"decay\"] < 1.0", "def _initScheduler(self) -> torch.optim.lr_scheduler.ReduceLROnPlateau:\n\n return torch.optim.lr_scheduler.ReduceLROnPlateau(\n self.optimizer, \n 
mode=cfg.training.scheduler_mode,\n factor=cfg.training.scheduler_factor,\n patience=cfg.training.scheduler_patience,\n threshold=cfg.training.scheduler_threshold\n )", "def lr_scheduler(self, lr_init, global_step):\n pass", "def get_lr_scheduler(scheduler_type: str,\n updates_per_checkpoint: int,\n learning_rate_half_life: int,\n learning_rate_reduce_factor: float,\n learning_rate_reduce_num_not_improved: int) -> Optional[LearningRateScheduler]:\n if scheduler_type is None:\n return None\n if scheduler_type == \"fixed-rate-inv-sqrt-t\":\n return LearningRateSchedulerInvSqrtT(updates_per_checkpoint, learning_rate_half_life)\n elif scheduler_type == \"fixed-rate-inv-t\":\n return LearningRateSchedulerInvT(updates_per_checkpoint, learning_rate_half_life)\n elif scheduler_type == \"plateau-reduce\":\n check_condition(learning_rate_reduce_factor is not None,\n \"learning_rate_reduce_factor needed for plateau-reduce scheduler\")\n check_condition(learning_rate_reduce_num_not_improved is not None,\n \"learning_rate_reduce_num_not_improved needed for plateau-reduce scheduler\")\n if learning_rate_reduce_factor >= 1.0:\n logger.warning(\"Not using plateau-reduce learning rate scheduling: learning_rate_reduce_factor == 1.0\")\n return None\n return LearningRateSchedulerPlateauReduce(learning_rate_reduce_factor, learning_rate_reduce_num_not_improved)\n else:\n raise ValueError(\"Unknown learning rate scheduler type %s.\" % scheduler_type)", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=100):\r\n\r\n if epoch % lr_decay_epoch == 0 and epoch > 1:\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = param_group['lr'] * 0.1\r\n\r\n return optimizer", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=100):\n\n if epoch % lr_decay_epoch == 0 and epoch > 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n\n return optimizer", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=50):\n if epoch % lr_decay_epoch == 0 and epoch > 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n return optimizer", "def lr_scheduler(optimizer, epoch, init_lr=0.1, lr_decay_epoch=50):\n if epoch % lr_decay_epoch == 0 and epoch > 1:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n return optimizer", "def get_scheduler(optimizer: Optimizer,\n hparams: Optional[Union[HParams, Dict[str, Any]]] = None) -> \\\n Optional[_LRScheduler]:\n if hparams is None or isinstance(hparams, dict):\n hparams = HParams(hparams, default_optimization_hparams())\n\n hparams_scheduler = hparams[\"learning_rate_decay\"]\n\n scheduler_type = hparams_scheduler[\"type\"]\n if scheduler_type == \"\" or scheduler_type is None:\n scheduler = None\n else:\n if isinstance(scheduler_type, _LRScheduler):\n scheduler_class = scheduler_type\n else:\n scheduler_modules = ['torch.optim.lr_scheduler',\n 'texar.torch.custom']\n try:\n scheduler_class = utils.check_or_get_class( # type: ignore\n scheduler_type, scheduler_modules, _LRScheduler)\n except TypeError:\n raise ValueError(\n \"Unrecognized lr_scheduler. 
Must be string name of the \"\n \"lr_scheduler class, or the class which is a subclass of \"\n \"torch.optim._LRScheduler.\")\n\n scheduler_kwargs = hparams_scheduler[\"kwargs\"].todict()\n scheduler_kwargs.update({\"optimizer\": optimizer})\n scheduler = scheduler_class(**scheduler_kwargs) # type: ignore\n\n return scheduler", "def _get_scheduler(\r\n self, optimizer, scheduler: str, warmup_steps: int, t_total: int):\r\n scheduler = scheduler.lower()\r\n if scheduler == 'constantlr':\r\n return transformers.get_constant_schedule(optimizer)\r\n elif scheduler == 'warmupconstant':\r\n # this uses warmup\r\n return transformers.get_constant_schedule_with_warmup(\r\n optimizer, num_warmup_steps=warmup_steps)\r\n elif scheduler == 'warmuplinear':\r\n # This uses warmup + lr-decay with t_total lr-decays\r\n # only this wrapper accepts num_trianing_steps\r\n # if you open the function get_linear_schedule_with_warmup\r\n # you will find that return\r\n # LambdaLR(optimizer, lr_lambda, last_epoch)\r\n # and last_epoch=-1. So it will end till\r\n # num_training_steps consume up\r\n # and you will see that each .step() consumes 1 training\r\n # step. You can see from\r\n # https://pytorch.org/docs/stable/optim.html\r\n # 'How to adjust Learning Rate'\r\n # When you call the step, the counter will reduce by 1,\r\n # and the learning rate will be adjusted accordingly.\r\n # Initial learning rate is given in the Optimiser.\r\n # Initially, at each step, the learning rate will be\r\n # adjusted by param_group in optimizer.param_groups:\r\n # param_group['lr'] = lr\r\n # Now it is implicitly adjusted by the scheduler\r\n return transformers.get_linear_schedule_with_warmup(\r\n optimizer, num_warmup_steps=warmup_steps,\r\n num_training_steps=t_total)\r\n elif scheduler == 'warmupcosine':\r\n return transformers.get_cosine_schedule_with_warmup(\r\n optimizer, num_warmup_steps=warmup_steps,\r\n num_training_steps=t_total)\r\n elif scheduler == 'warmupcosinewithhardrestarts':\r\n return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(\r\n optimizer,\r\n num_warmup_steps=warmup_steps,\r\n num_training_steps=t_total)\r\n else:\r\n raise ValueError(\"Unknown scheduler {}\".format(scheduler))", "def scheduler(epoch_idx, lr):\n new_lr = lr\n if (epoch_idx == 60 or epoch_idx == 120 or epoch_idx == 160\n or epoch_idx == 260 or epoch_idx == 320 or epoch_idx == 360):\n new_lr *= 0.2\n \"\"\"\n if epoch_idx == 200:\n new_lr = 0.1\n \"\"\"\n return new_lr", "def init_scheduler(self):\n gamma = self.config_dict.get(\"gamma\")\n if gamma is None:\n return None\n else:\n return torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=gamma)", "def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=decay):\n lr = init_lr * (0.1**(epoch // lr_decay_epoch))\n\n if epoch % lr_decay_epoch == 0:\n print('LR is set to {}'.format(lr))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n return optimizer", "def reschedule_learning_rate(model, epoch, scheduler):\n if epoch == 7:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.005)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n if epoch == 13:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.005)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n if epoch == 
19:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.002)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n if epoch == 25:\n optimizer = torch.optim.SGD(model.parameters(), lr=0.002)\n current_lr = next(iter(optimizer.param_groups))[\"lr\"]\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, 6, eta_min=current_lr / 100, last_epoch=-1\n )\n\n return model, scheduler", "def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=10):\r\n lr = init_lr * (0.8**(epoch // lr_decay_epoch))\r\n print('LR is set to {}'.format(lr))\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\n return optimizer", "def get_scheduler(optimizer, recipe):\n\n # <optimizer>, \"stages__*__lr_scheduler\"\n return get_instance(optimizer, **recipe)" ]
[ "0.8214888", "0.81373125", "0.8030262", "0.7877491", "0.7863203", "0.777814", "0.7759245", "0.76554245", "0.7646608", "0.7585435", "0.748087", "0.74216324", "0.7285573", "0.7153245", "0.7007013", "0.69302773", "0.6923065", "0.6863463", "0.68190587", "0.6761453", "0.6728476", "0.6728476", "0.66816854", "0.6656579", "0.6535839", "0.64993954", "0.6497161", "0.6482549", "0.6437417", "0.6433141" ]
0.81571466
1
Perform an OSD "action."
def _action(self, action, osd, info=None, **kwargs):
    body = {action: info}
    self.run_hooks('modify_body_for_action', body, **kwargs)
    url = '/osds/%s/action' % base.getid(osd)
    return self.api.client.post(url, body=body)
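For illustration, a self-contained sketch of the request this helper would produce; the endpoint path and body shape follow the function above, while the action name, OSD id, and JSON serialisation are assumptions:

import json

def build_action_request(action, osd_id, info=None):
    # Mirrors the body and URL construction of _action above.
    body = {action: info}
    url = '/osds/%s/action' % osd_id
    return url, json.dumps(body)

url, payload = build_action_request('reweight', '42', info={'weight': 0.8})
# url == '/osds/42/action'; payload == '{"reweight": {"weight": 0.8}}'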
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_action(self, client, action):\r\n client.deviceAction(action)", "def perform_actual_action(self, action):\n self.game.perform_action(action)", "def execute_action(self, agent, action):\n agent.bump = False\n agent.performance_measure -= 1\n \n if action == 'TurnRight':\n agent.heading = self.turn_heading(agent.heading, -1)\n elif action == 'TurnLeft':\n agent.heading = self.turn_heading(agent.heading, +1)\n elif action == 'Forward':\n self.move_to(agent, vector_add(self.heading_to_vector(agent.heading),\n agent.location))\n elif action == 'Grab':\n if self.some_things_at(agent.location, tclass=Gold):\n try:\n gold = self.list_things_at(agent.location, tclass=Gold)[0]\n agent.has_gold = True\n self.delete_thing(gold)\n except:\n print \"Error: Gold should be here, but couldn't find it!\"\n print 'All things:', self.list_things_at(agent.location)\n print 'Gold?:', self.list_things_at(agent.location, tclass=Gold)\n sys.exit(-1)\n\n elif action == 'Release':\n if agent.location == self.entrance:\n if agent.has_gold:\n agent.performance_measure += 1000\n self.done = True\n elif action == 'Shoot':\n if agent.has_arrow:\n agent.has_arrow = False\n agent.performance_measure -= 10\n self.shoot_arrow(agent)\n elif action == 'Stop':\n self.done = True\n \n print '\\nCurrent Location: ', agent.location\n print 'Heading: ', self.heading_to_str(agent.heading)\n print 'Reminder- Start Location:', self.entrance\n print ''\n print 'Percepts:'", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def system_api(self, action, in_min, at, all_re, vmhost, other_re, media, member_id):\n msg = None\n if action != 'zeroize':\n if (at == 'now' or (in_min == 0 and at is None)):\n if self.dev.timeout > 5:\n self.queue_message(\"log\", \"Decreasing device RPC timeout to 5 seconds.\")\n self.dev.timeout = 5\n\n try:\n self.sw = jnpr.junos.utils.sw.SW(self.dev)\n if action == 'reboot':\n if member_id is not None:\n for m_id in member_id:\n got = self.sw.reboot(in_min, at, all_re, None, vmhost, other_re, member_id=m_id)\n else:\n got = self.sw.reboot(in_min, at, all_re, None, vmhost, other_re)\n elif action == 'shutdown':\n got = self.sw.poweroff(in_min, at, None, all_re, other_re)\n elif action == 'halt':\n got = self.sw.halt(in_min, at, all_re, other_re)\n elif action == 'zeroize':\n got = self.sw.zeroize(all_re, media)\n else:\n raise AnsibleError('Relevant action not found')\n\n self.queue_message(\"log\", \"RPC executed\")\n if got is None:\n msg = 'Did not find expected RPC response.'\n else:\n msg = '%s successfully initiated. Response got %s' % (action, got)\n except (self.pyez_exception.RpcTimeoutError) as ex:\n try:\n self.close(raise_exceptions=True)\n # This means the device wasn't already disconnected.\n raise AnsibleError('%s failed. %s may not have been ' \\\n 'initiated.' % (action, action))\n except (self.pyez_exception.RpcError,\n self.pyez_exception.ConnectError):\n # This is expected. The device has already disconnected.\n msg = '%s succeeded.' % (action)\n except (self.pyez_exception.RpcError,\n self.pyez_exception.ConnectError) as ex:\n raise AnsibleError('%s failed. 
Error: %s' % (action, str(ex)))\n return msg", "def handleAction(self, action):\n\n if action.deviceAction == indigo.kUniversalAction.EnergyReset:\n # Reset energy consumed and returned\n self.resetEnergy()\n\n # \"Reset\" the ui value\n self.device.updateStateOnServer('accumEnergyTotal', 0.0)\n elif action.deviceAction == indigo.kUniversalAction.EnergyUpdate:\n # This will be handled by making a status request\n self.sendStatusRequestCommand()\n elif action.deviceAction == indigo.kDeviceAction.RequestStatus:\n self.sendStatusRequestCommand()\n else:\n Shelly.handleAction(self, action)", "def execute_action(self, agent, action):\n if action == 'Right':\n agent.location = loc_B\n agent.performance -= 1\n elif action == 'Left':\n agent.location = loc_A\n agent.performance -= 1\n elif action == 'Suck':\n if self.status[agent.location] == 'Dirty':\n agent.performance += 10\n self.status[agent.location] = 'Clean'", "def execute_action(self, agent, action):\n if action == 'Right':\n agent.location = loc_B\n agent.performance -= 1\n elif action == 'Left':\n agent.location = loc_A\n agent.performance -= 1\n elif action == 'Suck':\n if self.status[agent.location] == 'Dirty':\n agent.performance += 10\n self.status[agent.location] = 'Clean'", "def device_action(host, details, action):\n if details:\n pprint(cs.get_device_details(host))\n if action:\n pprint(cs.device_action(host, action))", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def onAction(*args):", "def test__get_start_osd_no_op(ouw_oc_map):\n next_run, version = ouw._get_start_osd(ouw_oc_map, cluster_name)\n assert not next_run\n assert not version", "def run_action(client: Client, args: Namespace):\n\n result = None\n\n if args.action == 'exec':\n result = client.run(args.command, *args.argument)\n elif args.action == 'say':\n result = client.say(args.message)\n elif args.action == 'fortune':\n result = client.fortune(\n short=not args.long, offensive=args.offensive)\n elif args.action == 'datetime':\n result = client.datetime(frmt=args.format)\n elif args.action == 'in-use':\n players = client.players\n\n if players.online:\n LOGGER.info('There are %i players online:', players.online)\n LOGGER.info(', '.join(players.names))\n else:\n LOGGER.warning('There are no players online.')\n exit(1)\n\n if result:\n LOGGER.info(result)", "def perform_action(self, action_id: int) -> None:\r\n ...", "def perform ( self, action, action_event = None ):\r\n getattr( self.editor, action.action )()", "def run(self):\n\n self._action.execute()", "def execute_action(self, agent, action):\n abstract", "def do_action(self, action, a=None, b=None):\n pass", "def perform_action(self, action_data):\n pass", "def execute_action(self, agent, action):\n raise NotImplementedError", "def execute_action(self, agent, action):\n raise NotImplementedError", "async def perform_action(self) -> None:", "def execute_action(self, a):\n x,y = self.agent\n self.agent = self._get_new_position(x,y,a)", "def apply_action(self, cmd_name, *args):\n\n action = Action(self.tahoma_device.url)\n action.add_command(cmd_name, *args)\n self.controller.apply_actions(\"HomeAssistant\", [action])", "def act(self, device):\n with open(device, 'r') as fd:\n result = fcntl.ioctl(fd, self.ioctl)\n if result:\n raise Exception(\"ioctl failed with result {0}\".format(result))", "def execute_action(self, action):\n if self.game_over or len(self.agent_locs) == 0:\n pass\n elif action.startswith(\"MOVE \"):\n direction = ORIENTATION[action[5:]]\n flip = 2 if direction == 6 
else 0\n if direction < 4:\n self.execute_actions(direction + 1)\n else:\n # Relative direction. Either forward (4) or backward (6)\n direction = self.orientation ^ flip\n self.execute_actions(direction + 1)\n self.orientation ^= flip\n self.game_over = self.has_exited().any()\n elif action.startswith(\"TURN \"):\n direction = ORIENTATION[action[5:]]\n self.orientation += 2 - direction\n self.orientation %= 4\n elif action.startswith(\"FACE \"):\n self.orientation = ORIENTATION[action[5:]]\n elif action.startswith(\"TOGGLE\"):\n if len(action) > 6:\n # Toggle in a particular direction\n direction = ORIENTATION[action[7:]]\n else:\n direction = self.orientation\n self.execute_actions(direction + 5)\n elif action in (\"RESTART\", \"ABORT LEVEL\", \"PREV LEVEL\", \"NEXT LEVEL\"):\n self.game_over = action\n return 0", "def do_device(self, args):\n self.device_command.cmdloop(\"Enter to device mode\")" ]
[ "0.6288845", "0.59816945", "0.5836515", "0.5802067", "0.5802067", "0.57797366", "0.574658", "0.56953174", "0.56953174", "0.5662237", "0.5661676", "0.5661676", "0.5661676", "0.5661676", "0.5660734", "0.56254244", "0.5604492", "0.5561712", "0.5526677", "0.5524998", "0.55161124", "0.5510922", "0.54729193", "0.54729193", "0.54710376", "0.54649824", "0.544605", "0.5441653", "0.54218745", "0.5410889" ]
0.6980433
0
The SubstanceEnzyme graph representing the collective metabolic network occurring in any organism of the clade. This includes each and every enzyme of every organism of this clade.
def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:
    graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)
    graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)
    return graph
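A runnable stub of the delegation-and-rename pattern this method uses; Graph, Group, and Clade here are simplified stand-ins for the project's real types, not the actual implementation:

class Graph:
    # Stand-in for SubstanceEnzymeGraph; only the name attribute matters here.
    def __init__(self):
        self.name = None

class Group:
    def collectiveEnzymeGraph(self, noMultifunctional, keepOnHeap):
        return Graph()

class Clade:
    def __init__(self, ncbiNames):
        self.group = Group()
        self.ncbiNames = ncbiNames

    def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes=True):
        graph = self.group.collectiveEnzymeGraph(
            noMultifunctional=excludeMultifunctionalEnzymes, keepOnHeap=True)
        graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)
        return graph

print(Clade(['Archaea']).collectiveMetabolismEnzymes().name)
# -> Collective metabolism enzymes Archaea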
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_graph_karateclub():\n all_members = set(range(34))\n club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21}\n # club2 = all_members - club1\n\n G = eg.Graph(name=\"Zachary's Karate Club\")\n for node in all_members:\n G.add_node(node+1)\n\n zacharydat = \"\"\"\\\n0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0\n1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0\n1 1 0 1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0\n1 1 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1\n0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 1\n0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1\n0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 1 0\"\"\"\n\n for row, line in enumerate(zacharydat.split('\\n')):\n thisrow = [int(b) for b in line.split()]\n for col, entry in enumerate(thisrow):\n if entry == 1:\n G.add_edge(row+1, col+1)\n\n # Add the name of each member's club as a node attribute.\n for v in G:\n G.nodes[v]['club'] = 'Mr. 
Hi' if v in club1 else 'Officer'\n return G", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def line_graph_forbidden_subgraphs():\n from sage.graphs.all import Graph\n from sage.graphs.generators.basic import ClawGraph\n graphs = [ClawGraph()]\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 2: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2, 3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3, 4]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 5: [2, 0, 1]\n }))\n\n graphs.append(Graph({\n 5: [0, 1, 2, 3, 4],\n 0: [1, 4],\n 2: [1, 3],\n 3: [4]\n }))\n\n graphs.append(Graph({\n 1: [0, 2, 3, 4],\n 3: [0, 4],\n 2: [4, 5],\n 4: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3, 4],\n 2: [3, 4],\n 3: [4]\n }))\n\n return graphs", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def edges(self):\r\n return self.__generate_edges()", "def edges(self):\n return self.generate_edges()", "def extract_diagram(self):\n nodes = []\n edges = []\n \n for clump in self.clumps:\n new_nodes, new_edges = clump.get_diagram_representation()\n nodes.extend(new_nodes)\n edges.extend(new_edges)\n #nodes.append(backend.JunctionNode(clump))\n # TODO: move to Tunnel.get_diagram_representation()\n for tunnel in self.tunnels:\n# print tunnel\n edges.append(TunnelEdge(tunnel))\n return nodes, edges", "def get_karate_club_data():\n\n # Edge list of Zachary's karate club.\n edge_list = [\n (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8),\n (0, 10), (0, 11), (0, 12), (0, 13), (0, 17), (0, 19), (0, 21), (0, 31),\n (1, 2), (1, 3), (1, 7), (1, 13), (1, 17), (1, 19), (1, 21), (1, 30),\n (2, 3), (2, 7), (2, 8), (2, 9), (2, 13), (2, 27), (2, 28), (2, 32),\n (3, 7), (3, 12), (3, 13), (4, 6), (4, 10), (5, 6), (5, 10), (5, 16),\n (6, 16), (8, 30), (8, 32), (8, 33), (9, 33), (13, 33), (14, 32), (14, 33),\n (15, 32), (15, 33), (18, 32), (18, 33), (19, 33), (20, 32), (20, 33),\n (22, 32), (22, 33), (23, 25), (23, 27), (23, 29), (23, 32), (23, 33),\n (24, 25), (24, 27), (24, 31), (25, 31), (26, 29), (26, 33), (27, 33),\n (28, 31), (28, 33), (29, 32), (29, 33), (30, 32), (30, 33), (31, 32),\n (31, 33), (32, 33)\n ]\n\n # Student-teacher assignment (before split) as in Zachary (1977).\n # Part-time karate instructor: Mr. 
Hi, node 0 (labeled as 0).\n # President: John A., node 33 (labeled as 1).\n node_labels = jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0,\n 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n \n return create_graph_data(edge_list=edge_list, node_labels=node_labels)", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def sage_graph(self):\n self.fe.load_cache()\n edges = []\n is_bipartite = self.variant.is_bipartite()\n for X in self.L:\n for Y in self.L:\n a = self.op_norm(X, Y)\n if not self.K.is_zero(a):\n for c in self.K.unit_group:\n d = a - c\n if X != Y or c < d or is_bipartite:\n edges.append(((X, c, False), (Y, d, is_bipartite)))\n if X == Y and not is_bipartite:\n break\n return sage.all.Graph(edges)", "def attack_vector_graph(violated_components):\n attack_vector_graph = nx.Graph()\n admissible_vertices = []\n for violated_component in violated_components:\n for attack_vector in violated_component[1]:\n vertex = attack_vector.db_name + \"-\" + attack_vector.db_id\n attack_vector_graph.add_node(vertex)\n admissible_vertices.append(vertex)\n\n capec_filtered_targets = filter_targets(attack_vector.related_attack_pattern, \"CAPEC-\")\n for capec_filtered_target in capec_filtered_targets:\n if capec_filtered_target in admissible_vertices:\n attack_vector_graph.add_edge(vertex, capec_filtered_target)\n\n cwe_filtered_targets = filter_targets(attack_vector.related_weakness, \"CWE-\")\n for cwe_filtered_target in cwe_filtered_targets:\n if cwe_filtered_target in admissible_vertices:\n attack_vector_graph.add_edge(vertex, cwe_filtered_target)\n\n cve_filtered_targets = filter_targets(attack_vector.related_vulnerability, \"\")\n for cve_filtered_target in cve_filtered_targets:\n if cve_filtered_target in admissible_vertices:\n attack_vector_graph.add_edge(vertex, cve_filtered_target)\n\n return attack_vector_graph", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = 
parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def _get_full_graph(self):", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def graph(self) -> dict:\n return self.flat_graph()", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def edges(self):\n return convert_chains_to_edges(self.chains)", "def graph(self):\n ...", "def edges(self):\n return self.show_edges()", "def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n 
nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0", "def graph(self):\n assert self._modeled, \"Need to do calc_covariance\"\n return self._graph", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n graph = parentGraph.union(childGraph, addCount = False, updateName = False)\n graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n parentEdges = parentGraph.getEdges()\n childEdges = childGraph.getEdges()\n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges)\n \n return graph", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def strongly_connected_components_subgraphs(self):\n return [self.subgraph(_) for _ in self.strongly_connected_components()]" ]
[ "0.6028162", "0.59044474", "0.57775235", "0.5567599", "0.554882", "0.5535332", "0.5522798", "0.5515686", "0.55137914", "0.55136275", "0.54814845", "0.5472295", "0.5457305", "0.5456937", "0.54244757", "0.54244757", "0.54244757", "0.5421997", "0.5420029", "0.5370936", "0.53633446", "0.5313891", "0.5299916", "0.5288808", "0.52852285", "0.52835613", "0.5270578", "0.5270461", "0.5265868", "0.5257306" ]
0.626008
0
The SubstanceEC graph representing the common metabolic network, shared among all organisms of the clade. This includes only EC numbers which occur in at least `majorityPercentageCoreMetabolism` % of all organisms of this clade.
def coreMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEcGraph:
    graph = self.group.majorityEcGraph(majorityPercentage = majorityPercentageCoreMetabolism, noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)
    graph.name = 'Core metabolism ECs ' + ' '.join(self.ncbiNames)
    return graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conservedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n graph = GeneFunctionConservation.getGraph(parentCoreMetabolism, childCoreMetabolism)\n graph.name = 'Conserved metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n return graph", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return 
neofunctionalisedMetabolism", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = 
defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def redundantECsForContributingNeofunctionalisation(self, \n majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, \n majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, \n eValue = defaultEValue, \n redundancyType: 'RedundancyType' = None,\n considerOnlyECs = None) -> Dict[Neofunctionalisation, Set[EcNumber]]:\n from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType\n \n if redundancyType is None:\n redundancyType = RedundancyType.default\n \n #- calculate \"neofunctionalised\" ECs\n neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()\n neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)\n \n #- calculate redundancy\n redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )\n redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)\n \n contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)\n contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())\n \n #- REPEAT for each function change consisting of \"neofunctionalised\" ECs, which also contribute to redundancy\n contributingNeofunctionalisations = dict()\n \n for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():\n #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered \"neofunctionalised\", and are in return contributing to redundancy \n \n if functionChange.ecA in contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy\n \n for 
neofunctionalisation in neofunctionalisations:\n currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)\n \n if currentSetOfContributedECs is None:\n currentSetOfContributedECs = set()\n contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs\n \n for ec in functionChange.ecPair:\n contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)\n if contributedECs is not None:\n currentSetOfContributedECs.update(contributedECs)\n \n return contributingNeofunctionalisations", "def unifiedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n \n graph = parentCoreMetabolism.union(childCoreMetabolism, addCount = False, updateName = False)\n graph.name = 'Unified metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n lostEdges = lostGraph.getEdges()\n \n addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n addedEdges = addedGraph.getEdges()\n \n conservedGraph = GeneFunctionConservation.getGraph(parentCoreMetabolism, childCoreMetabolism)\n conservedEdges = conservedGraph.getEdges() \n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.PINK, nodes = False, edges = conservedEdges)\n \n return graph", "def neofunctionalisations(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, eValue = defaultEValue, considerOnlyECs = None) -> Set[Neofunctionalisation]:\n # get neofunctionalisations \n return self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs).getNeofunctionalisations()", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' 
'.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n \n if colour is True:\n lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n lostEdges = lostGraph.getEdges()\n \n addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n addedEdges = addedGraph.getEdges()\n \n graph = lostGraph.union(addedGraph, addCount = False, updateName = False) \n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges)\n \n else: \n graph = GeneFunctionDivergence.getGraph(parentCoreMetabolism, childCoreMetabolism)\n \n graph.name = 'Diverged metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def collectiveMetabolism(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes, addEcDescriptions = False) -> SubstanceEcGraph:\n graph = self.group.collectiveEcGraph(noMultifunctional = excludeMultifunctionalEnzymes, addCount = True, keepOnHeap = True, addEcDescriptions = addEcDescriptions)\n graph.name = 'Collective metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def neofunctionalisationsForFunctionChange(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, eValue = defaultEValue, considerOnlyECs = None) -> 
Dict[FunctionChange, Set[Neofunctionalisation]]:\n # get neofunctionalisations\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100)) \n return NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)).getNeofunctionalisationsForFunctionChange(minimumOrganismsCount = minimumOrganismsCount)", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def 
lostMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism) \n graph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n graph.name = 'Lost metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n return graph", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def addedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n graph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n graph.name = 'Added metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n return graph", "def conservedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Tuple[Set[Tuple[Enzyme, Enzyme]]]:\n # get conserved metabolism\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = 
self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the conserved metabolism\n parentGeneDuplicatedConserved = set()\n childGeneDuplicatedConserved = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n parentGeneDuplicatedConserved.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n childGeneDuplicatedConserved.add(enzymeTuple)\n \n return (parentGeneDuplicatedConserved, childGeneDuplicatedConserved)", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def connected_component(self):\n t1 = datetime.datetime.now()\n nodes = set(x.hex for x in self.agents)\n result = []\n while nodes:\n node = nodes.pop()\n # This set will contain the next group of nodes connected to each other.\n group = {node}\n # Build a queue with this node in it.\n queue = [node]\n # Iterate the queue.\n # When it's empty, we finished visiting a group of connected nodes.\n while queue:\n # Consume the next item from the queue.\n node = queue.pop(0)\n # Fetch the neighbors.\n neighbors = set(x for x in node.fon if x.is_occupied == 1)\n # Remove the neighbors we already visited.\n neighbors.difference_update(group)\n # Remove the remaining nodes from the global set.\n nodes.difference_update(neighbors)\n # Add them to the group of connected nodes.\n group.update(neighbors)\n # Add them to the queue, so we visit them in the next iterations.\n 
queue.extend(neighbors)\n\n # Add the group to the list of groups.\n result.append(len(group))\n td = datetime.datetime.now() - t1\n print(\"calculated {} connected components in {} seconds\".format(len(result),td.total_seconds()))\n return len(result), np.histogram(result, self.cluster_hist_breaks)[0]", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def unifiedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n return parentGeneDuplicated.union(childGeneDuplicated)" ]
[ "0.6851328", "0.6829522", "0.67308336", "0.67119503", "0.6634819", "0.66100425", "0.6589145", "0.6467408", "0.624838", "0.611285", "0.60297996", "0.60168624", "0.60092956", "0.5978053", "0.5830879", "0.58009404", "0.575013", "0.5714629", "0.5646468", "0.5639464", "0.56288904", "0.5606735", "0.55988544", "0.55976266", "0.5565456", "0.5528811", "0.5448926", "0.54417694", "0.5421978", "0.54000723" ]
0.7358444
0
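A minimal usage sketch for the coreMetabolism pair above. Hedged: the Clade class, its import path, and the clade name are assumptions inferred from the FEV_KEGG-style identifiers appearing in the negatives (e.g. FEV_KEGG.Robustness.Topology.Redundancy); they are not confirmed by this dataset.

# Sketch only: module path and clade name are assumed, not verified.
from FEV_KEGG.Evolution.Clade import Clade  # assumed import path

clade = Clade('Enterobacteriales')  # hypothetical clade name
# EC graph of functions present in >= 80% of the clade's organisms:
coreEcGraph = clade.coreMetabolism(majorityPercentageCoreMetabolism = 80)
print(coreEcGraph.name)  # 'Core metabolism ECs ' + joined NCBI names, per the document field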
The number of organisms (leaf taxons) in this clade. Returns int: the number of organisms (leaf taxons) in this clade.
def organismsCount(self) -> int: return self.group.organismsCount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def leaf_count(self) -> int:\n if self.children == []:\n return 1\n else:\n return sum([x.leaf_count() for x in self.children])", "def n_trees(self):\n return len(self.data_kd)", "def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n", "def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n", "def higher_taxonomy(self):\n return self.metadata.groupby(['Higher Taxonomy']\n ).size().reset_index().rename(columns={0: 'Organisms'})", "def num_trees(self):\n return self._ll_tree_sequence.get_num_trees()", "def lower_taxonomy(self):\n return self.metadata.groupby(['Higher Taxonomy', 'Lower Taxonomy']\n ).size().reset_index().rename(columns={0: 'Organisms'})", "def n_children(self):\n ch = self.children\n return 0 if not ch else len(ch) + sum([c.n_children for c in ch])", "def num_trees(self) -> int:\n\n return len(self.nodes)", "def __len__(self):\n return len(self.subtrees())", "def natoms(self):\n return len(self.atoms)", "def count_leafs(self):\n\n def walk(block, count=0):\n # This is a non-ope leaf, count it\n if block.is_leaf and not block.is_open:\n count += 1\n\n # This is a non-leaf block. Recurse\n if not block.is_leaf:\n for child in block.blocks.values():\n count += walk(child)\n\n return count\n\n # Recursive walk and count\n return walk(self)", "def get_num_tigers(self) -> int:\n return len(self.get_all_tiger_positions())", "def __len__(self) -> int:\n return 1 + sum(len(child) for child in self.children)", "def n_atoms(self) -> int:\n return 0 if self.atoms is None else len(self.atoms)", "def numAtoms(self):\n\n\t\tnatoms = 0\n\t\tfor chain in self.chain:\n\t\t\tfor residue in chain.residue:\n\t\t\t\tnatoms += residue.numAtoms()\n\n\t\treturn natoms", "def numAtoms(self, flag=None):\n\n return len(self._getSubset(flag)) if flag else self._n_atoms", "def total_num_atoms(self):\n return self.GetNumberOfAtoms()", "def count_taxa_tree(tree_nxobj):\n\tnode_count = 0 #number of taxa in the tree\n\tfor node in tree_nxobj.preorder_node_iter():\n\t\tnode_count += 1\n\n\treturn node_count", "def get_num_atoms(self):\n\n return len(self.atoms)", "def numAtoms(self):\n return self.nAtoms", "def count_leaf(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 0 + self.get_left().count_leaf() + self.get_right().count_leaf()\n else:\n return 0 + self.get_left().count_leaf()\n else:\n return 0 + self.get_right().count_leaf()", "def __len__(self):\n return 1 + sum([len(child) for child in self.children])", "def n_clusters(self):\n return len(self.clusters)", "def n_atoms(self):\n return self._n_atoms", "def size(self):\r\n return len(atoms)", "def count(self):\n return self.__tree.node_count", "def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_amino_acids()\n return n", "def size(self):\n if len(self.children) == 0:\n return 1\n else:\n return 1 + sum([x.size() for x in self.children])", "def getTotalIndividualCount(self):\r\n return self._n" ]
[ "0.67566955", "0.6556457", "0.6517457", "0.6517457", "0.6510434", "0.64950323", "0.6437851", "0.6435725", "0.64108914", "0.6374372", "0.6369585", "0.6362572", "0.63609064", "0.63561183", "0.63489836", "0.63383377", "0.6318547", "0.63147897", "0.63098747", "0.63017595", "0.62791795", "0.62770486", "0.62768745", "0.6266408", "0.6257828", "0.6254769", "0.625356", "0.624897", "0.62465274", "0.6244341" ]
0.77582914
0
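The organismsCount value above feeds the majority thresholds used throughout the neighbouring snippets; a hedged sketch of that relationship (same assumed Clade API as in the previous sketch):

import math

clade = Clade('Enterobacteriales')  # hypothetical clade name
n = clade.organismsCount  # accessed without parentheses in the snippets, so likely a property
# threshold computation as it appears in neofunctionalisationsForFunctionChange above:
minimumOrganismsCount = math.ceil(n * (80 / 100))  # 80 = majorityPercentageNeofunctionalisation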
The SubstanceEnzyme graph of all gene-duplicated enzymes of the core metabolism.
def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:
    enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)

    geneDuplicationModel = SimpleGeneDuplication
    # geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)

    # filter core metabolism enzyme graph
    geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)

    # colour core metabolism
    if colour is not False:

        if colour is True:
            colourToUse = Export.Colour.GREEN
        else:
            colourToUse = colour

        geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes
        geneDuplicatedEnzymes = enzymeGraph
        Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())

    geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)

    return geneDuplicatedEnzymes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def geneDuplicatedEnzymesDict(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Dict[Enzyme, Set[GeneID]]:\n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIDsForEnzyme = geneDuplicationModel.getEnzymes(enzymeGraph, returnMatches = True, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n# if keepOnHeap is True:\n# self._geneDuplicatedEnzymesObject = geneIDsForEnzyme\n \n return geneIDsForEnzyme", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism 
neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def geneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n \n \n enzymes = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIdToEnzyme = dict()\n 
for enzyme in enzymes:\n geneIdToEnzyme[enzyme.geneID] = enzyme\n \n enzymePairs = geneDuplicationModel.getEnzymePairs(enzymes, ignoreDuplicatesOutsideSet = True, geneIdToEnzyme = geneIdToEnzyme, preCalculatedEnzymes = None)\n \n return enzymePairs", "def unifiedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n return parentGeneDuplicated.union(childGeneDuplicated)", "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def divergedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get diverged metabolism\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the diverged metabolism\n parentGeneDuplicatedDiverged = set()\n childGeneDuplicatedDiverged = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n parentGeneDuplicatedDiverged.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n childGeneDuplicatedDiverged.add(enzymeTuple)\n \n return parentGeneDuplicatedDiverged.union(childGeneDuplicatedDiverged)", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added 
metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def conservedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Tuple[Set[Tuple[Enzyme, Enzyme]]]:\n # get conserved metabolism\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the conserved metabolism\n parentGeneDuplicatedConserved = set()\n childGeneDuplicatedConserved = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n parentGeneDuplicatedConserved.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n childGeneDuplicatedConserved.add(enzymeTuple)\n \n return (parentGeneDuplicatedConserved, childGeneDuplicatedConserved)", "def edges(self):\r\n return self.__generate_edges()", "def unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n graph = parentGraph.union(childGraph, addCount = False, updateName = False)\n graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n parentEdges = parentGraph.getEdges()\n childEdges = childGraph.getEdges()\n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges)\n \n return graph", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = 
False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def lostMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get added metabolism\n lostMetabolismEnzymes = self.lostMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the lost metabolism\n geneDuplicatedLost = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in lostMetabolismEnzymes and enzymeTuple[1] in lostMetabolismEnzymes:\n geneDuplicatedLost.add(enzymeTuple)\n \n return geneDuplicatedLost", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def edges(self):\n return self.generate_edges()", "def line_graph_forbidden_subgraphs():\n from sage.graphs.all import Graph\n from sage.graphs.generators.basic import ClawGraph\n graphs = [ClawGraph()]\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 2: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2, 3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3, 4]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 5: [2, 0, 1]\n }))\n\n graphs.append(Graph({\n 5: [0, 1, 2, 3, 4],\n 0: [1, 4],\n 2: [1, 3],\n 3: [4]\n }))\n\n graphs.append(Graph({\n 1: [0, 2, 3, 4],\n 3: [0, 4],\n 2: [4, 5],\n 4: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3, 4],\n 2: [3, 4],\n 3: [4]\n }))\n\n return graphs", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n 
colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def edges(self):\n return self.__generate_edges()", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' 
'.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def _GetCatalyzingEnzymes(self):\n if self._catalyzing_enzymes is None:\n logging.debug('looking for enzymes catalyzing this reaction')\n self._catalyzing_enzymes = []\n for stored_reaction in self._GetAllStoredReactions():\n enzymes = stored_reaction.enzyme_set.all()\n self._catalyzing_enzymes.extend(enzymes)\n return self._catalyzing_enzymes" ]
[ "0.6935128", "0.6869094", "0.63864636", "0.63718253", "0.62681776", "0.6187902", "0.6169614", "0.61466265", "0.6134928", "0.61197543", "0.61049575", "0.60786825", "0.58720994", "0.5807282", "0.5740409", "0.5649418", "0.56281716", "0.5621698", "0.5607962", "0.5592912", "0.5568106", "0.556758", "0.5539905", "0.55290693", "0.5491371", "0.5491371", "0.5491371", "0.54566187", "0.545369", "0.54385036" ]
0.6968888
0
All gene-duplicated enzymes of the core metabolism, each pointing to the gene IDs of all its duplicates.
def geneDuplicatedEnzymesDict(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Dict[Enzyme, Set[GeneID]]:
    enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)
    geneDuplicationModel = SimpleGeneDuplication

    geneIDsForEnzyme = geneDuplicationModel.getEnzymes(enzymeGraph, returnMatches = True, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)

    # if keepOnHeap is True:
    #     self._geneDuplicatedEnzymesObject = geneIDsForEnzyme

    return geneIDsForEnzyme
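A minimal consumption sketch for the Dict[Enzyme, Set[GeneID]] mapping this method returns. The plain-dict stand-in and the gene ID strings below are assumptions for illustration only, not FEV_KEGG's actual Enzyme/GeneID objects:

# stand-in for the returned mapping: enzyme's gene ID -> gene IDs of all its duplicates
geneIDsForEnzyme = {
    'eco:b0002': {'eco:b3940', 'eco:b4024'},  # hypothetical E. coli gene IDs
    'eco:b3940': {'eco:b0002'},
}

for enzyme, duplicates in sorted(geneIDsForEnzyme.items()):
    # every gene-duplicated enzyme of the core metabolism points to all its duplicates
    print(enzyme, '->', ', '.join(sorted(duplicates)))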
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def geneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n \n \n enzymes = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIdToEnzyme = dict()\n for enzyme in enzymes:\n geneIdToEnzyme[enzyme.geneID] = enzyme\n \n enzymePairs = geneDuplicationModel.getEnzymePairs(enzymes, ignoreDuplicatesOutsideSet = True, geneIdToEnzyme = geneIdToEnzyme, preCalculatedEnzymes = None)\n \n return enzymePairs", "def unifiedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n return parentGeneDuplicated.union(childGeneDuplicated)", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def divergedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get diverged metabolism\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the diverged metabolism\n parentGeneDuplicatedDiverged = set()\n childGeneDuplicatedDiverged = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n parentGeneDuplicatedDiverged.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n childGeneDuplicatedDiverged.add(enzymeTuple)\n \n return parentGeneDuplicatedDiverged.union(childGeneDuplicatedDiverged)", "def lostMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get added metabolism\n lostMetabolismEnzymes = self.lostMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the lost metabolism\n geneDuplicatedLost = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in lostMetabolismEnzymes 
and enzymeTuple[1] in lostMetabolismEnzymes:\n geneDuplicatedLost.add(enzymeTuple)\n \n return geneDuplicatedLost", "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def __delete_duplicates(self):\n log = logging.getLogger()\n log.debug(\"\\n---> Duplicate check <---\")\n\n chromosomes = list(set(self.chromosomes))\n diff = self.size - len(chromosomes)\n\n if diff > 0:\n log.debug(\"---> Duplicate(s) found! <---\")\n for i in range(diff):\n chromosomes.append(\n Chromosome(self.__generate_random_gene_sequence(),\n self.environment))\n else:\n log.debug(\"---> No duplicates found! 
<---\")\n\n self.chromosomes = chromosomes", "def conservedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Tuple[Set[Tuple[Enzyme, Enzyme]]]:\n # get conserved metabolism\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the conserved metabolism\n parentGeneDuplicatedConserved = set()\n childGeneDuplicatedConserved = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n parentGeneDuplicatedConserved.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n childGeneDuplicatedConserved.add(enzymeTuple)\n \n return (parentGeneDuplicatedConserved, childGeneDuplicatedConserved)", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def unifiedMetabolismGeneDuplicatedEnzymes(self, 
majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def duplicate_subassemblies(self):\n dupes = set()\n top_dupes = defaultdict(set)\n\n # first, make a list of all duplicated non-leaf fragments\n for flow in self.fg.flows():\n for dirn in ('Input', 'Output'):\n asm = set(k for k in self.fg.fragments_with_flow(flow, direction=dirn) if k.term.is_fg)\n if len(asm) > 1:\n dupes |= asm\n\n # next, screen them down to top-level frags (frags whose parents are nonduplicated)\n for frag in dupes:\n if frag.reference_entity not in dupes:\n top_dupes[frag.flow, frag.direction].add(frag)\n\n # finally, generate the duplicate sets\n for v in top_dupes.values():\n yield v", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def merge_duplicate_nodes(self):\n merges={}\n xys={}\n for n in self.valid_node_iter():\n k=tuple(self.nodes['x'][n])\n if k in xys:\n merges[n]=xys[k]\n self.merge_nodes(xys[k],n)\n else:\n xys[k]=n\n return merges", "def __hash__(self):\n return hash(('genes', tuple(self.genes), self.environment))", "def __hash__(self):\n index_list = [allele.index for allele in self.genes]\n return hash(tuple(index_list))", "def 
known_mines(self):\n if len(self.cells)==self.count:\n return self.cells\n return set()\n #raise NotImplementedError", "def get_identical_nodes(self):\n\n return self._identical_nodes", "def clean_duplicate(self):\r\n self.elements = list(set(self.elements))\r\n self.elements = [e for e in self.elements if e != '']", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def _compute_sims(self):\n no_duplicates = defaultdict(list)\n for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():\n duplicate = no_duplicates[num]\n for couples in duplicate:\n if (lineset1, idx1) in couples or (lineset2, idx2) in couples:\n couples.add((lineset1, idx1))\n couples.add((lineset2, idx2))\n break\n else:\n duplicate.append({(lineset1, idx1), (lineset2, idx2)})\n sims = []\n for num, ensembles in no_duplicates.items():\n for couples in ensembles:\n sims.append((num, couples))\n sims.sort()\n sims.reverse()\n return sims", "def duplicates(self, x):\n return list(dict.fromkeys(x))", "def blaze(self):\n visited = set()\n tile_exits = dict((tile, {}) for tile in self.tiles)\n\n def visit(tile):\n # Randomized depth-first search of self.tiles.\n visited.add(tile)\n adj = self.adjacencies(tile, self.tiles)\n self.rand.shuffle(adj)\n for d, t in adj:\n if t not in visited:\n tile_exits[tile][d] = t\n tile_exits[t][self._inverted_dirs[d]] = tile\n visit(t)\n\n visit(next(iter(self.tiles)))\n return tile_exits", "def __iter__(self):\n seen = set()\n for elem, group in self._mapping.items():\n if elem not in seen:\n yield group\n seen.update(group)", "def visible_objects_and_duplis():\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()", "def clone(self):\n return _libsbml.ListOfGeneAssociations_clone(self)", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n yield (obj, obj.matrix_world.copy())\n\n if obj.instance_type != 'NONE':\n obj.dupli_list_create(scene)\n for dob in obj.dupli_list:\n obj_dupli = dob.object\n if obj_dupli.type == 'MESH':\n yield (obj_dupli, dob.matrix.copy())\n\n obj.dupli_list_clear()", "def complete_material_equivalences(self):\n for material in self.materials:\n material.geu = self\n for material_aux in self.materials:\n material.equivalent_materials.add(material_aux)" ]
[ "0.7099837", "0.7025168", "0.69302565", "0.6652433", "0.65650713", "0.65152824", "0.6477518", "0.6289955", "0.6230133", "0.62202966", "0.60999846", "0.59928745", "0.5987962", "0.58550674", "0.5808454", "0.5769425", "0.564335", "0.5591993", "0.5578701", "0.5500059", "0.5488227", "0.5488227", "0.54834104", "0.545136", "0.54494333", "0.54350245", "0.5406417", "0.5386712", "0.5383131", "0.5370069" ]
0.71266633
0
All gene-duplicated enzymes of the core metabolism, paired with each of their duplicates. If enzyme A is a duplicate of enzyme B and vice versa, this does not return both orderings, but only a single pair, with the "smaller" enzyme as the first value. An enzyme is "smaller" if its gene ID string is lexicographically smaller.
def geneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:
    enzymes = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()
    geneDuplicationModel = SimpleGeneDuplication

    geneIdToEnzyme = dict()
    for enzyme in enzymes:
        geneIdToEnzyme[enzyme.geneID] = enzyme

    enzymePairs = geneDuplicationModel.getEnzymePairs(enzymes, ignoreDuplicatesOutsideSet = True, geneIdToEnzyme = geneIdToEnzyme, preCalculatedEnzymes = None)

    return enzymePairs
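A minimal sketch of the pairing convention described in the query above: symmetric duplicate relations collapse into one tuple, ordered by gene ID string. All names here are illustrative assumptions, not the actual SimpleGeneDuplication implementation:

def canonicalPairs(duplicatesFor):
    # duplicatesFor: dict mapping a gene ID string -> set of duplicate gene ID strings
    pairs = set()
    for geneA, duplicates in duplicatesFor.items():
        for geneB in duplicates:
            if geneA == geneB:
                continue  # a gene never forms a duplicate pair with itself
            # order each pair so the lexicographically smaller gene ID comes first;
            # this makes (A, B) and (B, A) the same element of the set
            pairs.add((min(geneA, geneB), max(geneA, geneB)))
    return pairs

# (A, B) and (B, A) yield a single canonical pair
assert canonicalPairs({'eco:b0001': {'eco:b0002'},
                       'eco:b0002': {'eco:b0001'}}) == {('eco:b0001', 'eco:b0002')}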
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def divergedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get diverged metabolism\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the diverged metabolism\n parentGeneDuplicatedDiverged = set()\n childGeneDuplicatedDiverged = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n parentGeneDuplicatedDiverged.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n childGeneDuplicatedDiverged.add(enzymeTuple)\n \n return parentGeneDuplicatedDiverged.union(childGeneDuplicatedDiverged)", "def unifiedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n return parentGeneDuplicated.union(childGeneDuplicated)", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def conservedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Tuple[Set[Tuple[Enzyme, Enzyme]]]:\n # get conserved metabolism\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the conserved metabolism\n parentGeneDuplicatedConserved = set()\n childGeneDuplicatedConserved = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n parentGeneDuplicatedConserved.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n childGeneDuplicatedConserved.add(enzymeTuple)\n \n return (parentGeneDuplicatedConserved, childGeneDuplicatedConserved)", 
"def lostMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get added metabolism\n lostMetabolismEnzymes = self.lostMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the lost metabolism\n geneDuplicatedLost = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in lostMetabolismEnzymes and enzymeTuple[1] in lostMetabolismEnzymes:\n geneDuplicatedLost.add(enzymeTuple)\n \n return geneDuplicatedLost", "def geneDuplicatedEnzymesDict(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Dict[Enzyme, Set[GeneID]]:\n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIDsForEnzyme = geneDuplicationModel.getEnzymes(enzymeGraph, returnMatches = True, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n# if keepOnHeap is True:\n# self._geneDuplicatedEnzymesObject = geneIDsForEnzyme\n \n return geneIDsForEnzyme", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = 
divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = 
False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def filter_dups(self):\n def dups_filter():\n dups = set()\n for g1, g2, w in self.gen:\n if (min(g1, g2), max(g1, g2)) in dups:\n continue\n dups.add((min(g1, g2), max(g1, g2)))\n yield g1, g2, w\n return self.filter(dups_filter())", "def _compute_sims(self):\n no_duplicates = defaultdict(list)\n for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():\n duplicate = no_duplicates[num]\n for couples in duplicate:\n if (lineset1, idx1) in couples or (lineset2, idx2) in couples:\n couples.add((lineset1, idx1))\n couples.add((lineset2, idx2))\n break\n else:\n duplicate.append({(lineset1, idx1), (lineset2, idx2)})\n sims = []\n for num, ensembles in no_duplicates.items():\n for couples in ensembles:\n sims.append((num, couples))\n sims.sort()\n sims.reverse()\n return sims", "def __delete_duplicates(self):\n log = logging.getLogger()\n log.debug(\"\\n---> Duplicate check <---\")\n\n chromosomes = list(set(self.chromosomes))\n diff = self.size - len(chromosomes)\n\n if diff > 0:\n log.debug(\"---> Duplicate(s) found! <---\")\n for i in range(diff):\n chromosomes.append(\n Chromosome(self.__generate_random_gene_sequence(),\n self.environment))\n else:\n log.debug(\"---> No duplicates found! 
<---\")\n\n self.chromosomes = chromosomes", "def subsets_with_dup(s):\n r = [[]]\n for e in s:\n print 'r: %-55r e: %r' % (e,r)\n for x in r:\n a = sorted(x + [e])\n if not(a in r): r.append(a) \n return r", "def testDuplicate(self,permutations=True):\n # This algorithm is faster than encode,\n # but for nplex=2 enmagic2 would probably still be faster.\n if permutations:\n C = self.copy()\n C.sort(axis=1)\n else:\n C = self\n ind = sortByColumns(C)\n C = C.take(ind,axis=0)\n ok = (C != roll(C,1,axis=0)).any(axis=1)\n if not ok[0]: # all duplicates -> should result in one unique element\n ok[0] = True\n return ind,ok", "def __find_similar_pairs(self):\n size = len(self.__indexclusters)\n candidates = []\n for i in range(size):\n for j in range(i+1, size):\n simi = self.__cluster_simi(i, j)\n #print simi, self.__indexclusters[i],self.__indexclusters[j]\n if simi >= self.__threshold:\n candidates.append((simi, i, j))\n candidates.sort(reverse = True, key = lambda x: x[0])\n\n\n # filter overlapped pairs\n to_remove = set()\n appeared = set()\n for index, cand in enumerate(candidates):\n if cand[1] not in appeared and cand[2] not in appeared:\n appeared.add(cand[1])\n appeared.add(cand[2])\n else:\n to_remove.add(index)\n\n #print 'ahha'\n #print [(cand[1], cand[2]) for index, cand in enumerate(candidates) if index not in to_remove]\n\n return [(cand[1], cand[2]) for index, cand in enumerate(candidates)\n if index not in to_remove]", "def duplicate_subassemblies(self):\n dupes = set()\n top_dupes = defaultdict(set)\n\n # first, make a list of all duplicated non-leaf fragments\n for flow in self.fg.flows():\n for dirn in ('Input', 'Output'):\n asm = set(k for k in self.fg.fragments_with_flow(flow, direction=dirn) if k.term.is_fg)\n if len(asm) > 1:\n dupes |= asm\n\n # next, screen them down to top-level frags (frags whose parents are nonduplicated)\n for frag in dupes:\n if frag.reference_entity not in dupes:\n top_dupes[frag.flow, frag.direction].add(frag)\n\n # finally, generate the duplicate sets\n for v in top_dupes.values():\n yield v", "def remove_duplicate_interactions(interactions):\n # steric-clashes at the end\n sort_key = lambda i: i.name == 'steric-clash'\n\n interaction_map = dict()\n for interaction in interactions.sorted(sort_key):\n atom_indexes = interaction.atom_indexes.flatten()\n is_duplicate = atom_indexes in interaction_map\n if interaction.name != 'steric-clash' or not is_duplicate:\n interaction_map[atom_indexes] = interaction\n return tuple(interaction_map.values())", "def __hash__(self):\n index_list = [allele.index for allele in self.genes]\n return hash(tuple(index_list))", "def compute_allergens(foods):\n\n # Create a dictionary mapping allergens to lists\n # of ingredients that may contain that allergen\n allergen_foods = {}\n for ingredients, allergens in foods:\n for allergen in allergens:\n allergen_foods.setdefault(allergen, []).append(set(ingredients))\n\n # For each allergen, compute the intersection of the lists\n # computed above. 
This will give us the set of ingredienta\n # that could contain that allergen\n candidate_ingredients = {}\n for allergen in allergen_foods:\n candidate_ingredients[allergen] = set.intersection(*allergen_foods[allergen])\n\n # Repeatedly find an allergen that can only be matched to a single\n # ingredient, and remove that ingredient from the list of candidate\n # ingredients for all the other allergens.\n allergens = {}\n while len(candidate_ingredients) > 0:\n\n for single_allergen, cings in candidate_ingredients.items():\n if len(cings) == 1:\n ingredient = cings.pop()\n allergens[single_allergen] = ingredient\n break\n\n del candidate_ingredients[single_allergen] \n\n for allergen in candidate_ingredients:\n if allergen != single_allergen:\n ingredient = allergens[single_allergen]\n candidate_ingredients[allergen].discard(ingredient)\n\n return allergens", "def explain_phenotypically_similar_gene(self, gene_curie):\n matching_input_genes = {gene for gene, ssr in self.ssr.items() if gene_curie in {m.get_id() for m in ssr.matches}}\n df = pd.DataFrame()\n for gene in matching_input_genes:\n results = self.ssr[gene].get_results()\n results['input_gene'] = gene\n df = df.append(results.query(\"id == @gene_curie\"))\n\n return df", "def _findSamesetProteins(protToPeps, proteins=None):\n proteins = viewkeys(protToPeps) if proteins is None else proteins\n\n equalEvidence = ddict(set)\n for protein in proteins:\n peptides = protToPeps[protein]\n equalEvidence[tuple(sorted(peptides))].add(protein)\n equalProteins = list()\n for proteins in viewvalues(equalEvidence):\n if len(proteins) > 1:\n equalProteins.append(tuple(sorted(proteins)))\n return equalProteins", "def __hash__(self):\n return hash(('genes', tuple(self.genes), self.environment))", "def merge_duplicate_nodes(self):\n merges={}\n xys={}\n for n in self.valid_node_iter():\n k=tuple(self.nodes['x'][n])\n if k in xys:\n merges[n]=xys[k]\n self.merge_nodes(xys[k],n)\n else:\n xys[k]=n\n return merges", "def compress_dups(data, column):\n idx = defaultdict(list)\n for row in data:\n idx[row[column]].append(row)\n\n dedup = []\n\n for idx_row in sorted(idx.items()):\n dedup.append(avg_rows(idx_row[1]))\n return dedup", "def delete_duplicates(chromosomes, gene_pool):\n new_chromosomes = []\n for chromosome in chromosomes:\n new_chromosomes.append(tuple(chromosome))\n\n chromosomes = []\n new_chromosomes = list(set(new_chromosomes))\n diff = DEFAULT_POPULATION_SIZE - len(new_chromosomes)\n\n if diff > 0:\n for i in range(diff):\n chromosomes.append(generate_random_gene_sequence(gene_pool))\n\n for chromosome in new_chromosomes:\n chromosomes.append(list(chromosome))\n\n return chromosomes", "def eliminate_duplicates(iterable):\n class NoElement: pass\n\n prev_elem = NoElement\n for elem in sorted(iterable):\n if prev_elem is NoElement:\n prev_elem = elem\n yield elem\n continue\n\n if prev_elem != elem:\n prev_elem = elem\n yield elem", "def strand_to_enzymes(strand):\n\n def chunk_strand(strand):\n for idx in range(len(strand) / 2):\n yield strand[idx * 2:idx * 2 + 2]\n\n # translate the chunks to amino acids or punctuation\n amino_acids = [AminoAcid(TYPOGENETIC_CODE[chunk]) for chunk in chunk_strand(strand.strand)]\n\n enzymes = []\n current_enzyme = []\n for amino_acid in amino_acids:\n # if there is an AA chunk present, we finish off the current enzyme and start a new one\n if amino_acid.op == 'pun':\n enzymes.append(Enzyme(current_enzyme))\n current_enzyme = []\n else:\n current_enzyme.append(amino_acid)\n 
enzymes.append(Enzyme(current_enzyme))\n\n return filter(lambda e: len(e.amino_acids), enzymes) # filter out any empty enzyimes" ]
[ "0.74381024", "0.7337452", "0.7129101", "0.690591", "0.6749732", "0.66438884", "0.6258788", "0.6047571", "0.59916437", "0.583954", "0.5826309", "0.5580656", "0.5540774", "0.54498655", "0.52426744", "0.5234347", "0.5220493", "0.5200757", "0.5197881", "0.51871806", "0.51799005", "0.5175014", "0.51031214", "0.5058812", "0.505651", "0.5038041", "0.5035011", "0.5027619", "0.5020193", "0.5004277" ]
0.7607282
0
The SubstanceEcGraph of EC numbers belonging to function changes of neofunctionalised enzymes of the core metabolism. Only EC numbers which could have actually taken part in a function change are reported. This is because enzymes can have multiple EC numbers, while only some of them might be eligible for a function change. For example, consider enzyme A (1.2.3.4, 6.5.4.3) and enzyme B (1.2.3.4, 4.5.6.7). 1.2.3.4 can never change its function to itself, which leaves 1.2.3.4 ↔ 6.5.4.3, 1.2.3.4 ↔ 4.5.6.7, and 4.5.6.7 ↔ 6.5.4.3 as the possible function changes. This obviously requires a function to change into a single other function, without splitting or merging, which might be biologically inaccurate. However, this should happen rarely; besides, one could exclude all enzymes with multiple functions from the core metabolism in the first place.
def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:
    # get neofunctionalisations
    neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))

    # filter core metabolism EC graph
    coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)
    minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))
    neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)

    # colour core metabolism
    if colour is not False:
        if colour is True:
            colourToUse = Export.Colour.GREEN
        else:
            colourToUse = colour

        neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism
        neofunctionalisedMetabolism = coreMetabolism
        Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())

    neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)

    return neofunctionalisedMetabolism
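A minimal sketch of the eligibility rule described in the query above, reproducing its worked example. Function and variable names are illustrative assumptions, not the NeofunctionalisedECs implementation:

from itertools import product

def eligibleFunctionChanges(ecsA, ecsB):
    # all unordered EC pairs between two enzymes, excluding identical ECs:
    # an EC number can never change function to itself
    pairs = set()
    for ecA, ecB in product(ecsA, ecsB):
        if ecA != ecB:
            pairs.add(tuple(sorted((ecA, ecB))))
    return pairs

# enzyme A (1.2.3.4, 6.5.4.3) and enzyme B (1.2.3.4, 4.5.6.7) from the example
print(eligibleFunctionChanges({'1.2.3.4', '6.5.4.3'}, {'1.2.3.4', '4.5.6.7'}))
# -> {('1.2.3.4', '4.5.6.7'), ('1.2.3.4', '6.5.4.3'), ('4.5.6.7', '6.5.4.3')}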
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redundantECsForContributingNeofunctionalisation(self, \n majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, \n majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, \n eValue = defaultEValue, \n redundancyType: 'RedundancyType' = None,\n considerOnlyECs = None) -> Dict[Neofunctionalisation, Set[EcNumber]]:\n from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType\n \n if redundancyType is None:\n redundancyType = RedundancyType.default\n \n #- calculate \"neofunctionalised\" ECs\n neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()\n neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)\n \n #- calculate redundancy\n redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )\n redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)\n \n contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)\n contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())\n \n #- REPEAT for each function change consisting of \"neofunctionalised\" ECs, which also contribute to redundancy\n contributingNeofunctionalisations = dict()\n \n for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():\n #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered \"neofunctionalised\", and are in return contributing to redundancy \n \n if functionChange.ecA in contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy\n \n for neofunctionalisation in neofunctionalisations:\n currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)\n \n if currentSetOfContributedECs is None:\n currentSetOfContributedECs = set()\n contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs\n \n for ec in functionChange.ecPair:\n contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)\n if contributedECs is not None:\n currentSetOfContributedECs.update(contributedECs)\n \n return contributingNeofunctionalisations", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def EC(f):\n return dmp_ground_EC(f.rep, f.lev, f.dom)", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, 
majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def neofunctionalisationsForFunctionChange(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, eValue = defaultEValue, considerOnlyECs = None) -> Dict[FunctionChange, Set[Neofunctionalisation]]:\n # get neofunctionalisations\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100)) \n return NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)).getNeofunctionalisationsForFunctionChange(minimumOrganismsCount = minimumOrganismsCount)", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n 
Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def eci(self):\n num_ext_terms = len(self._subspace.external_terms) # check for extra terms\n coefs = self.coefs[:-num_ext_terms] if num_ext_terms else self.coefs[:]\n eci = coefs.copy()\n eci = eci / self._subspace.function_total_multiplicities\n return eci", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def CS2e(Type=\"DFA\"):\n CC50, CC51, CC52, CC53 = state('CC50'), state('CC51'), state('CC52'), state('CC53')\n for i in sigma:\n CC52.transit[i] = CC52\n CC53.transit[i] = CC53\n for i in sigma_1:\n CC50.transit[i] = CC50\n for i in sigma_2:\n CC50.transit[i] = CC53\n CC51.transit[i] = CC51\n for i in sigma_cc:\n CC50.transit[i] = CC51\n CC51.transit[i] = CC51\n for i in sigma_ncc:\n CC51.transit[i] = CC50\n for i in sigma_B_A:\n CC51.transit[i] = CC50\n for i in sigma_e:\n CC51.transit[i] = CC53\n for i in sigma_A:\n CC51.transit[i] = CC52\n if Type == \"pDFA\":\n CC5 = pDFA('CC5', sigma, [CC50, CC51, CC52, CC53], CC50, [CC52])\n else:\n CC5 = DFA('CC5', sigma, [CC50, CC51, CC52, CC53], CC50, [CC52])\n if (SIZEOF):\n EM_size[\"CS2e\"] = asizeof.asizeof(CC5)\n return CC5", "def geneDuplicatedEnzymes(self, 
majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def __repr__(self):\n return \"EC(%s, %s)\" % (str(self.coefficient), repr(self.basefield))", "def set_ec(self, etacalc):\n if not self.__thermodyn:\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[i][etacalc[i]])\n else:\n if not etacalc in self.__A2[0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2]\n \n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n 
C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] 
=-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 
0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n 
C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod in ['espresso']: C = -C/10.\n elif self.__cod in ['vasp','exciting','wien']: C = C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0\n elif self.__cod in ['emto']: C = C*self.__ToGPa/self.__V0\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n self.__C = C\n \n else:\n Cs = []\n for t in map(str,self.__T):#for t in range(len(self.__T)):\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[t][i][etacalc[i]])\n else:\n if not etacalc in self.__A2[t][0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[t][0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2[t]]\n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n 
if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = 
.5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC 
== 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n 
#--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod == 'espresso': C = -C/10.\n elif self.__cod in ['vasp','emto','exciting','wien']: C=C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0#C = C/4.\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n Cs.append(C)\n self.__C = Cs", "def true_anomaly_from_eccentric(e, E):\n\n return 2 * atan2(sqrt(1.0 + e) * sin(E / 2.0), sqrt(1.0 - e) * cos(E / 2.0))", "def _excitonic_coft_old(self,SS,AG,n):\n \n # FIXME: works only for 2 level molecules\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n # get number of monomeric basis states\n Na = 0\n for monomer in AG.monomers:\n Na += monomer.nel-1\n \n ct = numpy.zeros((Nt),dtype=numpy.complex128)\n #Na = AG.nmono\n for kk in range(Na):\n \n #nkk = AG.monomers[kk].egcf_mapping[0]\n \n for ll in range(Na):\n \n #nll = AG.monomers[ll].egcf_mapping[0]\n \n ct += ((SS[kk+1,n+1]**2)*(SS[ll+1,n+1]**2)*cfm.get_coft(kk,ll))\n #*AG.egcf_matrix.get_coft(nkk,nll))\n \n return ct", "def get_ecg_graph():\n titles = ['ecg1', 'ecg2', 'ecg3']\n colors = ['rgb(240,0,0)', 'rgb(0,240,0)', 'rgb(0,0,240)']\n update()\n signames_ecg = queries['signames_ecg']\n signals = queries['signals']\n latesthr = queries['latesthr']\n return html.Div(className='ecg', children=[\n html.Div(style={'display': 'flex', 'height': '40vh'},\n children=[dcc.Graph(\n id=titles[i] + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': signals[signame]['time'],\n 'y': signals[signame][titles[i]],\n 'mode': 'line', 'name': signame, 'line': {'color':colors[i]}}\n ],\n 'layout': {\n 'font': {'color':'#fff'},\n 'title': '{}-{}'.format(signame, titles[i]),\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'voltage (mv)', 'color': '#fff', 'showgrid': 'False', 'range': np.linspace(-2.5, 2.5, 10)},\n 'paper_bgcolor':'#000', 'plot_bgcolor':'#000'\n }\n }\n ) for i in range(len(titles))]\n +\n [html.Div(\n style={'justify-content': 'center', 'display': 'flex',\n 'align-items': 'center', 'width': '10vh', 'font-size': '30pt', 'color': 'white'},\n children=['{}'.format(latesthr[signame][0])])\n ]\n ) for signame in signames_ecg])", "def coreMetabolism(self, majorityPercentageCoreMetabolism = 
defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEcGraph:\n graph = self.group.majorityEcGraph(majorityPercentage = majorityPercentageCoreMetabolism, noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Core metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def make_euler_circuit(start_node, updated_graph_instance):\n\n current_edges_on_graph_list = make_edges_list(updated_graph_instance.edges_dict)\n\n current_node = start_node\n\n node_visit_order = [current_node]\n edge_visit_order = []\n\n # print(\"\\n\\n\\ncurrent_edges_on_graph_list:\", current_edges_on_graph_list)\n\n while len(current_edges_on_graph_list) > 0:\n\n # print(\"current_edges_on_graph_list:\", current_edges_on_graph_list)\n # while there are still edges on the graph, keep traversing\n\n current_bridges_on_graph = get_bridges(current_edges_on_graph_list)\n\n edges_conn_to_current_node = get_all_conn_edges_remaining_in_graph(\n current_node, current_edges_on_graph_list, updated_graph_instance.nodes_dict\n )\n\n edge_to_traverse = choose_edge_to_traverse(\n current_node, edges_conn_to_current_node, current_bridges_on_graph\n )\n\n if edge_to_traverse in current_edges_on_graph_list:\n\n current_edges_on_graph_list.remove(edge_to_traverse)\n\n else:\n\n current_edges_on_graph_list.remove(edge_to_traverse[::-1])\n\n edge_to_traverse_list = list(edge_to_traverse)\n # remove current node from edge to traverse\n edge_to_traverse_list.remove(current_node)\n # update current node to be the only node left in the edge list\n\n # update edge traveral list with edge just traversed\n edge_traversed = (current_node, edge_to_traverse_list[0])\n\n edge_visit_order.append(edge_traversed)\n\n current_node = edge_to_traverse_list[0]\n\n # add the new current node to the nodes visit order list\n node_visit_order.append(current_node)\n\n # add node visit order and edge_visit order to graph instance\n\n updated_graph_instance.node_visit_order = node_visit_order\n\n updated_graph_instance.edge_visit_order = edge_visit_order\n\n updated_graph_instance.node_geojson = make_node_geojson(updated_graph_instance)\n\n updated_graph_instance.edge_geojson = make_edge_geojson(updated_graph_instance)\n\n updated_graph_instance.route_geojson = make_route_geojson(updated_graph_instance)\n\n print(\"\\n\\n\\n\\n\\nROUTE COLLECTION\", updated_graph_instance.route_geojson)\n\n print(\"check done\")\n\n return updated_graph_instance", "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = 
self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def life_expectancy_graph(country_name):\r\n dat = read_data(pre_file_name())\r\n MainDct = dat[0]\r\n # MetaDct = dat[1]\r\n f_life_expectancy = {}\r\n for idx in MainDct:\r\n if idx == country_name or MainDct[idx].country_code == country_name:\r\n for idx1 in MainDct[idx].values:\r\n if idx1[1] != 0:\r\n f_life_expectancy[idx1[0]] = idx1[1]\r\n return f_life_expectancy", "def collectiveMetabolism(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes, addEcDescriptions = False) -> SubstanceEcGraph:\n graph = self.group.collectiveEcGraph(noMultifunctional = excludeMultifunctionalEnzymes, addCount = True, keepOnHeap = True, addEcDescriptions = addEcDescriptions)\n graph.name = 'Collective metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def Evac_minus_EF_from_charge(Evac_minus_Ei, ni, charge_from_dopants, net_charge):\r\n # eh_charge is the charge from electrons and holes only\r\n eh_charge = net_charge - charge_from_dopants\r\n \r\n if eh_charge > 30 * ni:\r\n # Plenty of holes, negligible electrons\r\n p = eh_charge\r\n return Evac_minus_Ei + kT_in_eV * math.log(p / ni)\r\n if eh_charge < -30 * ni:\r\n # Plenty of electrons, negligible holes\r\n n = -eh_charge\r\n return Evac_minus_Ei - kT_in_eV * math.log(n / ni)\r\n \r\n # Starting here, we are in the situation where BOTH holes and electrons\r\n # need to be taken into account. 
Solve the simultaneous equations\r\n # p * n = ni**2 and p - n = eh_charge to get p and n.\r\n \r\n def solve_quadratic_equation(a,b,c):\r\n \"\"\" return larger solution to ax^2 + bx + c = 0 \"\"\"\r\n delta = b**2 - 4 * a * c\r\n if delta < 0:\r\n raise ValueError(\"No real solution...that shouldn't happen!\")\r\n return (-b + math.sqrt(delta)) / (2*a)\r\n\r\n if eh_charge > 0:\r\n # Slightly more holes than electrons\r\n p = solve_quadratic_equation(1, -eh_charge, -ni**2)\r\n return Evac_minus_Ei + kT_in_eV * math.log(p / ni)\r\n else:\r\n # Slightly more electrons than holes\r\n n = solve_quadratic_equation(1, eh_charge, -ni**2)\r\n return Evac_minus_Ei - kT_in_eV * math.log(n / ni)", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def euclid_ccl(Omega_c, sigma8=0.83):\n\n cosmo_fid = ccl.Cosmology(Omega_c=Omega_c, Omega_b=0.045, h=0.71, sigma8=sigma8, n_s=0.963)\n\n dNdzs = np.zeros((nbins, z.size))\n shears = []\n \n for i in range(nbins):\n # edges of 1 equal width redshift bins, between 0 and 2\n zmin, zmax = i*(2./nbins), (i+1)*(2./nbins)\n # generate dNdz per bin\n dNdzs[i,:] = ccl.dNdz_tomog(z=z, zmin=zmin, zmax=zmax, pz_func=pz, dNdz_func = dNdz_true)\n # calculate the shear per bin\n gal_shapes = ccl.WeakLensingTracer(cosmo_fid, dndz=(z, dNdzs[i,:]))\n shears.append(gal_shapes)\n \n print ('Shears calculated, calculating power spectra now...')\n # calculate nbin*(nbin+1)/2 = 1 spectra from the shears\n Cls = []\n for i in range(nbins):\n for j in range(0,i+1):\n Cls.append(ccl.angular_cl(cosmo_fid, shears[i], shears[j], ells))\n \n return np.array(Cls), dNdzs", "def eccentricity(self):\n new_data = self._data[['pl_pnum', 'pl_orbper', 'pl_orbsmax',\n 'pl_masse', 'pl_orbeccen',\n 'pl_radj', 'pl_dens', 'st_teff',\n 'st_mass', 'st_rad']]\n new_data = new_data.dropna()\n\n features = new_data[['pl_pnum', 'pl_orbper', 'pl_orbsmax',\n 'pl_masse',\n 'pl_radj', 'pl_dens', 'st_teff',\n 'st_mass', 'st_rad']]\n labels = new_data['pl_orbeccen']\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.2)\n\n # Create an untrained model\n model = DecisionTreeRegressor()\n\n # Train it on the **training set**\n model.fit(features_train, labels_train)\n\n # Compute test accuracy\n test_predictions = model.predict(features_test)\n test_acc = mean_absolute_error(labels_test, test_predictions)\n test_acc_r2 = r2_score(labels_test, 
test_predictions)\n\n # Plot ML vs Actual\n fig, [ax1, ax2] = plt.subplots(2, figsize=(15, 12))\n\n sns.distplot(test_predictions, kde=False, ax=ax1)\n sns.distplot(labels_test, kde=False, ax=ax2)\n\n ax1.set_title('Distribution of Predicted Eccentricities of Orbits')\n ax1.set_xlabel('Eccentricity of Orbit')\n ax1.set_ylabel('Number of Planets')\n\n ax2.set_title('Distribution of Actual Eccentricities of Orbits')\n ax2.set_xlabel('Eccentricity of Orbit')\n ax2.set_ylabel('Number of Planets')\n\n plt.savefig('figures/ML_Eccentricity.png', bbox_inches='tight')\n\n return (test_acc, test_acc_r2)", "def eci(self):\n return self.__eci", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph" ]
[ "0.6066636", "0.5614624", "0.5553752", "0.5487054", "0.54392904", "0.54148823", "0.5396551", "0.53906614", "0.52966624", "0.52825737", "0.5221849", "0.5204589", "0.51931787", "0.5125437", "0.5115151", "0.51081955", "0.5107046", "0.5076023", "0.50700116", "0.50495934", "0.5038528", "0.5027768", "0.5005341", "0.4964105", "0.4962018", "0.49601495", "0.49533936", "0.49471894", "0.49267903", "0.49239126" ]
0.5634353
1
Get neofunctionalisation events of all enzymes in the core metabolism.
def neofunctionalisations(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, eValue = defaultEValue, considerOnlyECs = None) -> Set[Neofunctionalisation]: # get neofunctionalisations return self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs).getNeofunctionalisations()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def neofunctionalisationsForFunctionChange(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, eValue = defaultEValue, considerOnlyECs = None) -> Dict[FunctionChange, Set[Neofunctionalisation]]:\n # get neofunctionalisations\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100)) \n return NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)).getNeofunctionalisationsForFunctionChange(minimumOrganismsCount = minimumOrganismsCount)", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = 
self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def list_events():\n return [\n snow,\n mosquito,\n sun_heat,\n orage,\n overflowing,\n gathering,\n trampling,\n pollution,\n southern_wind,\n northern_wind,\n fog,\n sun\n ]", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def _GetCatalyzingEnzymes(self):\n if self._catalyzing_enzymes is None:\n logging.debug('looking for enzymes catalyzing this reaction')\n self._catalyzing_enzymes = []\n for stored_reaction in self._GetAllStoredReactions():\n enzymes = stored_reaction.enzyme_set.all()\n self._catalyzing_enzymes.extend(enzymes)\n return self._catalyzing_enzymes", "def getExcitonStates(self):\n energies, coefficients = self.force_field.getExcitonStates()\n # check that wavefunctions are orthogonal\n olap = np.dot(coefficients.transpose(), coefficients)\n err = la.norm(olap - np.eye(len(energies)))\n assert err < 1.0e-10, \"exciton wavefunctions not 
orthogonal, |S - Id|= %e\" % err\n return energies, coefficients", "def get_events(self):\n events = []\n for device in self:\n events.extend(self[device].get_events())\n return events", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def unisolvent_nodes(self):\r\n return self.grid.unisolvent_nodes", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def _process_egocentric(self, signal: egocentric.EgocentricSignal):\n output_signals = []\n output_signals += self._process_egocentric_direction(\n self._get_hparam('egocentric_direction_mode'),\n signal.xz_direction,\n signal.yz_direction)\n output_signals += self._process_egocentric_distance(\n self._get_hparam('egocentric_distance_mode'),\n signal.distance)\n return output_signals", "def get_incident_nodes(self):\n # return the set of incident edges\n return \\\n {\n self.first_incident_node,\n self.second_incident_node\n }", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + 
' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def events(self):\r\n return e.Events(self)", "def all_events(cls) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"allEvents\", [])", "def events(self):\r\n return ev.Events(self)", "def events(self):\r\n return ev.Events(self)", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def events(self) -> Sequence[Tuple[str, Sequence[Union[np.ndarray, bytes]]]]:\n return self._env.events()", "def midi_events(self):\n return reapy.core.item.midi_event.MIDIEventList(self)", "def eog_removal(self):\n print('ch_names are: ' + str(self.raw.ch_names))\n ch_name = input(\"Enter a channel for eog detection. Best if the channel is near eyes, like Fp1 and Fp2. All channels will be named like 'CH_1': \")\n eog_projs, eog_events = mne.preprocessing.compute_proj_eog(self.raw, n_grad=0, n_mag=0, n_eeg=1, ch_name=ch_name, reject = None)\n projs = eog_projs\n self.epochs.add_proj(projs)\n self.epochs.apply_proj()\n return self.epochs", "def events(self) -> Dict[EventCall, Set[Node]]:\n return self._events" ]
[ "0.59909326", "0.57868326", "0.57269204", "0.5654965", "0.5592295", "0.5470599", "0.54684854", "0.54546684", "0.54503614", "0.54164416", "0.54042697", "0.5348139", "0.5230972", "0.52255017", "0.52217627", "0.52217627", "0.5192401", "0.51829016", "0.5179526", "0.5167479", "0.5130801", "0.5123529", "0.51132625", "0.50985503", "0.50985503", "0.50947356", "0.5091494", "0.5084691", "0.50748825", "0.5049948" ]
0.6120901
0
Get neofunctionalisation events of all enzymes in the core metabolism, grouped by each possible function change event.
def neofunctionalisationsForFunctionChange(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, eValue = defaultEValue, considerOnlyECs = None) -> Dict[FunctionChange, Set[Neofunctionalisation]]:
    # get neofunctionalisations
    minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))
    return NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)).getNeofunctionalisationsForFunctionChange(minimumOrganismsCount = minimumOrganismsCount)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def neofunctionalisations(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, eValue = defaultEValue, considerOnlyECs = None) -> Set[Neofunctionalisation]:\n # get neofunctionalisations \n return self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs).getNeofunctionalisations()", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def events(self):\n\n events = []\n # Update the totals\n self.update_totals()\n\n # Replication - total number of bacteria of metabolism * prob of replication\n events.append((self.total_f * self.rates[P_REPLICATE_FAST], lambda f: self.replicate(BACTERIA_FAST)))\n events.append((self.total_s * self.rates[P_REPLICATE_SLOW], lambda f: self.replicate(BACTERIA_SLOW)))\n events.append((self.total_intra * self.rates[P_REPLICATE_INTRACELLULAR],\n lambda f: self.replicate(BACTERIA_INTRACELLULAR)))\n\n # Metabolism change - sum of (number of bacteria of metabolism in patch * o2 tension) * prob of change\n # TODO - check if this is ok\n events.append((self.total_f_o2 * self.rates[P_CHANGE_FAST_SLOW], lambda f: self.change(BACTERIA_SLOW)))\n events.append((self.total_s_o2 * self.rates[P_CHANGE_SLOW_FAST], lambda f: self.change(BACTERIA_FAST)))\n\n # Migrate - sum of (number of bacteria of metabolism in patch * degree of patch) * prob of migration\n events.append((self.total_f_degree * self.rates[P_MIGRATE_FAST], lambda f: self.migrate(BACTERIA_FAST)))\n events.append((self.total_s_degree * self.rates[P_MIGRATE_SLOW], lambda f: self.migrate(BACTERIA_SLOW)))\n\n # Recruit mac - num of nodes * prob of recruit\n events.append((len(self.nodes()) * self.rates[P_RECRUIT], lambda f: self.recruit_mac()))\n\n # Death of mac - total number of macs * prob of death\n events.append((self.total_mac_regular * self.rates[P_DEATH_REGULAR],\n lambda f: self.death_mac(MACROPHAGE_REGULAR)))\n events.append((self.total_mac_infected * self.rates[P_DEATH_INFECTED],\n lambda f: self.death_mac(MACROPHAGE_INFECTED)))\n\n # Mac ingest - sum of (number of bacteria of 
metabolism in patch * num of macrophages in patch) * prob of ingest\n events.append((self.total_regular_fast * self.rates[P_REGULAR_INGEST_FAST],\n lambda f: self.ingest(BACTERIA_FAST, MACROPHAGE_REGULAR)))\n events.append((self.total_regular_slow * self.rates[P_REGULAR_INGEST_SLOW],\n lambda f: self.ingest(BACTERIA_SLOW, MACROPHAGE_REGULAR)))\n events.append((self.total_infected_fast * self.rates[P_INFECTED_INGEST_FAST],\n lambda f: self.ingest(BACTERIA_FAST, MACROPHAGE_INFECTED)))\n events.append((self.total_infected_slow * self.rates[P_INFECTED_INGEST_SLOW],\n lambda f: self.ingest(BACTERIA_SLOW, MACROPHAGE_INFECTED)))\n\n # Activation\n events.append((self.total_activation * self.rates[P_ACTIVATION], lambda f: self.activate()))\n\n return events", "def _process_egocentric(self, signal: egocentric.EgocentricSignal):\n output_signals = []\n output_signals += self._process_egocentric_direction(\n self._get_hparam('egocentric_direction_mode'),\n signal.xz_direction,\n signal.yz_direction)\n output_signals += self._process_egocentric_distance(\n self._get_hparam('egocentric_distance_mode'),\n signal.distance)\n return output_signals", "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def get_events() -> list[Event]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Event)]", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, 
nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def get_events(self):\n events = []\n for device in self:\n events.extend(self[device].get_events())\n return events", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def all_events(cls) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"allEvents\", [])", "def list_events():\n return [\n snow,\n mosquito,\n sun_heat,\n orage,\n overflowing,\n gathering,\n trampling,\n pollution,\n southern_wind,\n northern_wind,\n fog,\n sun\n ]", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def all_gizmo_to_group():\n\n for n in nuke.allNodes():\n # Avoid scripted gizmo.\n if nuke.knobChangeds.get(n.Class()):\n continue\n\n gizmo_to_group(n)", "def events(self) -> Dict[EventCall, Set[Node]]:\n return self._events", "def redundantECsForContributingNeofunctionalisation(self, \n majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, \n majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, \n eValue = defaultEValue, \n redundancyType: 'RedundancyType' = None,\n considerOnlyECs = None) -> Dict[Neofunctionalisation, 
Set[EcNumber]]:\n from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType\n \n if redundancyType is None:\n redundancyType = RedundancyType.default\n \n #- calculate \"neofunctionalised\" ECs\n neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()\n neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)\n \n #- calculate redundancy\n redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )\n redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)\n \n contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)\n contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())\n \n #- REPEAT for each function change consisting of \"neofunctionalised\" ECs, which also contribute to redundancy\n contributingNeofunctionalisations = dict()\n \n for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():\n #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered \"neofunctionalised\", and are in return contributing to redundancy \n \n if functionChange.ecA in contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy\n \n for neofunctionalisation in neofunctionalisations:\n currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)\n \n if currentSetOfContributedECs is None:\n currentSetOfContributedECs = set()\n contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs\n \n for ec in functionChange.ecPair:\n contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)\n if contributedECs is not None:\n currentSetOfContributedECs.update(contributedECs)\n \n return contributingNeofunctionalisations", "def _extract_complement_events(self):\n\t\ttry:\n\t\t\ttable = self.hdf5file[fastq_paths[self.version]['complement'] % self.group]\n\t\t\tself.complement_events = [Event(x) for x in table['Events'][()]]\n\t\texcept Exception, e:\n\t\t\tself.complement_events = []", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def eog_removal(self):\n print('ch_names are: ' + str(self.raw.ch_names))\n ch_name = input(\"Enter a channel for eog detection. Best if the channel is near eyes, like Fp1 and Fp2. 
All channels will be named like 'CH_1': \")\n eog_projs, eog_events = mne.preprocessing.compute_proj_eog(self.raw, n_grad=0, n_mag=0, n_eeg=1, ch_name=ch_name, reject = None)\n projs = eog_projs\n self.epochs.add_proj(projs)\n self.epochs.apply_proj()\n return self.epochs", "def functions(self):\n return functions(self.startEA, self.endEA)", "def export_events_ioe(self):\n for event in self.positive_ids:\n pos_trans = ','.join(sorted(self.positive_ids[event]))\n all_trans = ','.join(list(set(sorted(self.positive_ids[event] + self.negative_ids[event]))))\n full_event = '{};{}:{}'.format(self.gene.name, self.etype, event)\n\n yield ('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(self.gene.chr, self.gene.name, full_event,\n pos_trans, all_trans),\n self.etype)", "def get_complement_events(self):\n\t\tif self.have_complements is False:\n\t\t\tself._extract_complement_events()\n\t\t\tself.have_complements = True\n\n\t\treturn self.complement_events", "def getExcitonStates(self):\n energies, coefficients = self.force_field.getExcitonStates()\n # check that wavefunctions are orthogonal\n olap = np.dot(coefficients.transpose(), coefficients)\n err = la.norm(olap - np.eye(len(energies)))\n assert err < 1.0e-10, \"exciton wavefunctions not orthogonal, |S - Id|= %e\" % err\n return energies, coefficients", "def get_events(self):\n disallowed = [ident(self.add_event.__func__), ident(ident)]\n self.frames = None\n\n return [item for item in self.events if item[2] not in disallowed]", "def events(self) -> Sequence[Tuple[str, Sequence[Union[np.ndarray, bytes]]]]:\n return self._env.events()", "def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)" ]
[ "0.5865203", "0.55186284", "0.54669976", "0.54044694", "0.5370907", "0.53479284", "0.532519", "0.52754444", "0.5167715", "0.5159104", "0.5134438", "0.5128673", "0.5123458", "0.5117896", "0.5097917", "0.5074114", "0.49859592", "0.49851054", "0.49779522", "0.49597242", "0.49597242", "0.49060076", "0.4888356", "0.48759714", "0.4859687", "0.48586753", "0.48584706", "0.48416004", "0.48295724", "0.48119137" ]
0.68809205
0
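The document field of the row above reduces to a threshold-and-group pattern: derive the minimum organism count from the majority percentage with ceil(count * percentage / 100), then keep only function changes whose neofunctionalisations meet that threshold. The sketch below re-expresses that pattern outside FEV_KEGG as an assumption-laden illustration; the `function_change` and `organism` attributes on the event objects are hypothetical stand-ins, not part of the original API.

import math
from collections import defaultdict

def group_by_function_change(neofunctionalisations, organisms_count,
                             majority_percentage_neofunctionalisation):
    # Same threshold as the document field: ceil(count * percentage / 100).
    minimum_organisms_count = math.ceil(
        organisms_count * (majority_percentage_neofunctionalisation / 100))

    # Group events by the function change they belong to (attribute name assumed).
    grouped = defaultdict(set)
    for event in neofunctionalisations:
        grouped[event.function_change].add(event)

    # Keep only function changes backed by enough distinct organisms (attribute name assumed).
    return {function_change: events
            for function_change, events in grouped.items()
            if len({event.organism for event in events}) >= minimum_organisms_count}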
Get neofunctionalisation events of all enzymes in the core metabolism which contribute to redundancy, pointing to the EC numbers that their function changes' EC numbers provide redundancy for.
def redundantECsForContributingNeofunctionalisation(self,
                                                    majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism,
                                                    majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation,
                                                    eValue = defaultEValue,
                                                    redundancyType: 'RedundancyType' = None,
                                                    considerOnlyECs = None) -> Dict[Neofunctionalisation, Set[EcNumber]]:
    from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType

    if redundancyType is None:
        redundancyType = RedundancyType.default

    #- calculate "neofunctionalised" ECs
    neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()
    neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)

    #- calculate redundancy
    redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )
    redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)

    contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)
    contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())

    #- REPEAT for each function change consisting of "neofunctionalised" ECs, which also contribute to redundancy
    contributingNeofunctionalisations = dict()

    for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():
        #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered "neofunctionalised", and are in return contributing to redundancy
        if functionChange.ecA in contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy
            for neofunctionalisation in neofunctionalisations:
                currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)

                if currentSetOfContributedECs is None:
                    currentSetOfContributedECs = set()
                    contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs

                for ec in functionChange.ecPair:
                    contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)
                    if contributedECs is not None:
                        currentSetOfContributedECs.update(contributedECs)

    return contributingNeofunctionalisations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def neofunctionalisationsForFunctionChange(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, eValue = defaultEValue, considerOnlyECs = None) -> Dict[FunctionChange, Set[Neofunctionalisation]]:\n # get neofunctionalisations\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100)) \n return NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)).getNeofunctionalisationsForFunctionChange(minimumOrganismsCount = minimumOrganismsCount)", "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def neofunctionalisations(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, eValue = defaultEValue, considerOnlyECs = None) -> Set[Neofunctionalisation]:\n # get neofunctionalisations \n return self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs).getNeofunctionalisations()", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n 
childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = 
defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def _excitonic_coft_old(self,SS,AG,n):\n \n # FIXME: works only for 2 level molecules\n \n c0 = 
AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n # get number of monomeric basis states\n Na = 0\n for monomer in AG.monomers:\n Na += monomer.nel-1\n \n ct = numpy.zeros((Nt),dtype=numpy.complex128)\n #Na = AG.nmono\n for kk in range(Na):\n \n #nkk = AG.monomers[kk].egcf_mapping[0]\n \n for ll in range(Na):\n \n #nll = AG.monomers[ll].egcf_mapping[0]\n \n ct += ((SS[kk+1,n+1]**2)*(SS[ll+1,n+1]**2)*cfm.get_coft(kk,ll))\n #*AG.egcf_matrix.get_coft(nkk,nll))\n \n return ct", "def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter", "def get_nonselfconsistent_energies(self, bee_type):\n assert bee_type == 'beefvdw'\n p = os.popen('grep -32 \"BEEF xc energy contributions\" OUTCAR | tail -32','r')\n s = p.readlines()\n p.close()\n xc = np.array([])\n for i, l in enumerate(s):\n l_ = float(l.split(\":\")[-1])\n xc = np.append(xc, l_)\n assert len(xc) == 32\n return xc", "def energy_function(self):\n E = 0\n for i in range(len(self.config)):\n for j in range(len(self.config)):\n s = self.config[i,j]\n #Calculate the impact of neighboring particle pairs\n neighbors = (self.config[(i+1)%L, j] +\n self.config[i, (j+1)%L] + \n self.config[(i-1)%L, j] + \n self.config[i, (j-1)%L])\n E += -J*s*neighbors\n #fix for extra neighbors\n return E/4", "def getExcitonStates(self):\n energies, coefficients = self.force_field.getExcitonStates()\n # check that wavefunctions are orthogonal\n olap = np.dot(coefficients.transpose(), coefficients)\n err = la.norm(olap - np.eye(len(energies)))\n assert err < 1.0e-10, \"exciton wavefunctions not orthogonal, |S - Id|= %e\" % err\n return energies, coefficients", "def _excitonic_coft_all(self,SS,AG):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n Nst = AG.HamOp.dim\n ct = numpy.zeros((Nst,Nt),dtype=numpy.complex128)\n\n # electronic states corresponding to single excited states\n import time\n timecount = 0\n elst = numpy.where(AG.which_band == 1)[0]\n start = time.time()\n for el1 in elst:\n for el2 in elst:\n coft = cfm.get_coft(el1-1,el2-1)\n start2 = time.time()\n for kk in AG.vibindices[el1]:\n for ll in AG.vibindices[el2]:\n ct[:,:] += numpy.dot(\n numpy.expand_dims((SS[kk,:]**2)*(SS[ll,:]**2),axis=1),\n numpy.expand_dims(coft,axis=0))\n stop2 = time.time()\n timecount += stop2 - start2\n stop = time.time()\n print(stop-start,stop-start - timecount)\n return ct", "def _get_gs_energies(self):\n energy = []\n for ground_state in self._ground_states:\n gs_energy = 0.0\n for key in ground_state[\"eci\"].keys():\n gs_energy += ground_state[\"eci\"][key] * ground_state[\"cf\"][key]\n energy.append(len(ground_state[\"atoms\"]) * gs_energy)\n return energy", "def getNumberOfEvents(self):\n whereClause = \"ecc_id = 1 and r_power = 0 and n = 2\"\n Nevent = self.db.selectFromTable(\"eccentricities\", \"count()\", whereClause)\n return Nevent[0][0]", "def noe_calcs(df):\n noe_status = ['off','on']\n off_mean, on_mean = [df[i].mean(1) for i in noe_status]\n off_sdev, on_sdev = [df[i].std(1) for i in noe_status]\n noe_val = on_mean/off_mean\n noe_err = np.sqrt( (noe_val**2) * ( (on_sdev/on_mean)**2 + \\\n (off_sdev/off_mean)**2 ) )\n return pd.concat([noe_val,noe_err], 
axis=1, keys=['noe','err'])", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def true_anomaly_from_eccentric(e, E):\n\n return 2 * atan2(sqrt(1.0 + e) * sin(E / 2.0), sqrt(1.0 - e) * cos(E / 2.0))", "def _excitonic_reorg_energy(self, SS, AG, n):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n rg = 0.0\n \n # electronic states corresponding to single excited states\n elst = numpy.where(AG.which_band == 1)[0]\n for el1 in elst:\n reorg = cfm.get_reorganization_energy(el1-1,el1-1)\n for kk in AG.vibindices[el1]:\n rg += ((SS[kk,n]**2)*(SS[kk,n]**2)*reorg)\n return rg", "def get_number_of_effective_electrons(self, nat=None, plasmon_energy=None,\n cumulative=True):\n\n if plasmon_energy is None and nat is not None:\n m0 = constants.value(\"electron mass\")\n epsilon0 = constants.epsilon_0 # Vacuum permittivity [F/m]\n hbar = constants.hbar # Reduced Plank constant [J·s]\n k = 2 * epsilon0 * m0 / (np.pi * nat * hbar ** 2)\n elif plasmon_energy is not None and nat is None:\n k = 8*(np.pi*plasmon_energy**2)**-1\n else:\n raise AttributeError(\"Either nat or plasmon_energy should be given,\"\n \" just one of them, not both parameters.\")\n\n axis = self.axes_manager.signal_axes[0]\n if cumulative is False:\n dneff1 = k * simps((-1. / self.data).imag * axis.axis,\n x=axis.axis,\n axis=axis.index_in_array)\n dneff2 = k * simps(self.data.imag * axis.axis,\n x=axis.axis,\n axis=axis.index_in_array)\n neff1 = self._get_navigation_signal(data=dneff1)\n neff2 = self._get_navigation_signal(data=dneff2)\n else:\n neff1 = self._deepcopy_with_new_data(\n k * cumtrapz((-1. 
/ self.data).imag * axis.axis,\n x=axis.axis,\n axis=axis.index_in_array,\n initial=0))\n neff2 = self._deepcopy_with_new_data(\n k * cumtrapz(self.data.imag * axis.axis,\n x=axis.axis,\n axis=axis.index_in_array,\n initial=0))\n\n # Prepare return\n neff1.metadata.General.title = (r'$n_{eff}$ from energy-loss')\n neff2.metadata.General.title = (r'$n_{eff}$ from absorption')\n\n return neff1, neff2", "def get_o_energies(mol):\n try:\n ev_to_hartree = 1./convertor(1,'hartree','eV')\n g=hack_parser.Gaussian(mol.calc.log, loglevel=50)\n d=g.parse()\n #lm, hm, lr\n o_component_es = np.array(d.oniomenergies)\n except AttributeError:\n return 0\n\n return (ev_to_hartree * o_component_es * [-1,1,1]).sum(axis=1)", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def nelectrons(self):\n return sum(self)", "def _excitonic_coft(self,SS,AG,n):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n ct = numpy.zeros((Nt),dtype=numpy.complex128)\n\n # electronic states corresponding to single excited states\n elst = numpy.where(AG.which_band == 1)[0]\n for el1 in elst:\n for el2 in elst:\n if cfm.cpointer[el1-1,el2-1] == 0:\n continue\n coft = cfm.get_coft(el1-1,el2-1) \n for kk in AG.vibindices[el1]:\n for ll in AG.vibindices[el2]:\n ct += ((SS[kk,n]**2)*(SS[ll,n]**2)*coft)\n return ct", "def events(self):\n\n events = []\n # Update the totals\n self.update_totals()\n\n # Replication - total number of bacteria of metabolism * prob of replication\n events.append((self.total_f * self.rates[P_REPLICATE_FAST], lambda f: self.replicate(BACTERIA_FAST)))\n events.append((self.total_s * self.rates[P_REPLICATE_SLOW], lambda f: self.replicate(BACTERIA_SLOW)))\n events.append((self.total_intra * self.rates[P_REPLICATE_INTRACELLULAR],\n lambda f: self.replicate(BACTERIA_INTRACELLULAR)))\n\n # Metabolism change - sum of (number of bacteria of metabolism in patch * o2 tension) * prob of change\n # TODO - check if this is ok\n events.append((self.total_f_o2 * self.rates[P_CHANGE_FAST_SLOW], lambda f: self.change(BACTERIA_SLOW)))\n events.append((self.total_s_o2 * self.rates[P_CHANGE_SLOW_FAST], lambda f: self.change(BACTERIA_FAST)))\n\n # Migrate - sum of (number of bacteria of metabolism in patch * degree of patch) * prob of migration\n events.append((self.total_f_degree * self.rates[P_MIGRATE_FAST], lambda f: self.migrate(BACTERIA_FAST)))\n events.append((self.total_s_degree * self.rates[P_MIGRATE_SLOW], lambda f: self.migrate(BACTERIA_SLOW)))\n\n # Recruit mac - num of nodes * prob of recruit\n events.append((len(self.nodes()) * self.rates[P_RECRUIT], lambda f: self.recruit_mac()))\n\n # Death of mac - total number of macs * prob of death\n events.append((self.total_mac_regular * self.rates[P_DEATH_REGULAR],\n lambda f: 
self.death_mac(MACROPHAGE_REGULAR)))\n events.append((self.total_mac_infected * self.rates[P_DEATH_INFECTED],\n lambda f: self.death_mac(MACROPHAGE_INFECTED)))\n\n # Mac ingest - sum of (number of bacteria of metabolism in patch * num of macrophages in patch) * prob of ingest\n events.append((self.total_regular_fast * self.rates[P_REGULAR_INGEST_FAST],\n lambda f: self.ingest(BACTERIA_FAST, MACROPHAGE_REGULAR)))\n events.append((self.total_regular_slow * self.rates[P_REGULAR_INGEST_SLOW],\n lambda f: self.ingest(BACTERIA_SLOW, MACROPHAGE_REGULAR)))\n events.append((self.total_infected_fast * self.rates[P_INFECTED_INGEST_FAST],\n lambda f: self.ingest(BACTERIA_FAST, MACROPHAGE_INFECTED)))\n events.append((self.total_infected_slow * self.rates[P_INFECTED_INGEST_SLOW],\n lambda f: self.ingest(BACTERIA_SLOW, MACROPHAGE_INFECTED)))\n\n # Activation\n events.append((self.total_activation * self.rates[P_ACTIVATION], lambda f: self.activate()))\n\n return events", "def eclogite_foliated():\n\n rho = 3300.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 203.45; C[0,1] = 67.76; C[0,2] = 64.47; C[0,3] = 0.08; C[0,4] = 1.9; C[0,5] = -0.4\n C[1,0] = C[0,1]; C[1,1] = 220.58; C[1,2] = 63.65; C[1,3] = 0.46; C[1,4] = 0.59; C[1,5] = 0.06\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 189.75; C[2,3] = 0.13; C[2,4] = 0.95; C[2,5] = -0.2\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 66.32; C[3,4] = -0.27; C[3,5] = 0.73\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.77; C[4,5] = -0.02\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 70.75\n\n return C, rho", "def _process_egocentric(self, signal: egocentric.EgocentricSignal):\n output_signals = []\n output_signals += self._process_egocentric_direction(\n self._get_hparam('egocentric_direction_mode'),\n signal.xz_direction,\n signal.yz_direction)\n output_signals += self._process_egocentric_distance(\n self._get_hparam('egocentric_distance_mode'),\n signal.distance)\n return output_signals" ]
[ "0.6847851", "0.6301637", "0.6094617", "0.60497826", "0.5845832", "0.58000714", "0.5776784", "0.5648149", "0.54955536", "0.547115", "0.5464117", "0.5397465", "0.5397401", "0.5384487", "0.5378686", "0.5366517", "0.5358685", "0.5353077", "0.53527224", "0.5348591", "0.53361887", "0.52696407", "0.5259403", "0.52461934", "0.52209026", "0.5212362", "0.5200353", "0.5198639", "0.51962477", "0.5191627" ]
0.6713973
1
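The accumulation loop in the document field above follows a dict-of-sets pattern: for every function change whose EC numbers contribute to redundancy, each of its neofunctionalisations collects the ECs that those EC numbers provide redundancy for. Below is a condensed sketch of the same pattern with the FEV_KEGG types replaced by plain mappings; the `ecA`/`ecB`/`ecPair` attribute names come from the document field, while the function name and parameter shapes are assumptions.

def collect_contributed_ecs(neofunctionalisations_for_function_change,
                            contributed_ecs_for_ec):
    # neofunctionalisations_for_function_change: function change (with ecA, ecB,
    # ecPair attributes) -> set of neofunctionalisations.
    # contributed_ecs_for_ec: contributing EC number -> set of EC numbers it
    # provides redundancy for (its keys play the role of
    # contributingNeofunctionalisedECs in the original).
    contributing = {}
    for function_change, neofunctionalisations in neofunctionalisations_for_function_change.items():
        # Only function changes whose ECs contribute to redundancy are reported.
        if function_change.ecA in contributed_ecs_for_ec or function_change.ecB in contributed_ecs_for_ec:
            for neofunctionalisation in neofunctionalisations:
                # setdefault condenses the get/None/create dance of the original.
                bucket = contributing.setdefault(neofunctionalisation, set())
                for ec in function_change.ecPair:
                    bucket.update(contributed_ecs_for_ec.get(ec, set()))
    return contributing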
All names/paths in NCBI taxonomy used to create the parent clade.
def parentNCBInames(self):
    return self.parentClade.ncbiNames
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def childNCBInames(self):\n return self.childClade.ncbiNames", "def display_all_paths(taxonomy):\n for i,entry in enumerate(taxonomy):\n print \"For nodeId : {} :: NodeName : {} \" .format(entry['nodeId'], entry['nodeName'])\n parentId = entry['parentId']\n parentName = entry['parentName']\n while parentId != None:\n print \"ParentId : {} :: ParentName : {}\" .format(parentId, parentName)\n # Search for nodeId == parentId\n for temp in taxonomy:\n if temp['nodeId'] == parentId:\n parentId = temp['parentId']\n parentName = temp['parentName']\n break\n if i == 5:\n break", "def get_rdp_taxonomy(self):\r\n # RDP uses 0 for the parent ID of the root node\r\n if self.parent is None:\r\n parent_id = 0\r\n else:\r\n parent_id = self.parent.id\r\n\r\n # top rank name must be norank, and bottom rank must be genus\r\n if self.depth == 0:\r\n rank_name = \"norank\"\r\n elif self.children:\r\n rank_name = self.taxonomic_ranks[self.depth]\r\n else:\r\n rank_name = \"genus\"\r\n\r\n fields = [\r\n self.id, self.name, parent_id, self.depth, rank_name]\r\n taxonomy_str = '*'.join(map(str, fields)) + \"\\n\"\r\n\r\n # Recursively append lines from sorted list of subtrees\r\n child_names = sorted(self.children.keys())\r\n subtrees = [self.children[name] for name in child_names]\r\n for subtree in subtrees:\r\n taxonomy_str += subtree.get_rdp_taxonomy()\r\n return taxonomy_str", "def parents(self, taxonomy):\n\n p = defaultdict(list)\n for taxon_id, taxa in taxonomy.items():\n p[taxon_id] = taxa\n for i, taxon in enumerate(taxa):\n if i != 0:\n p[taxon] = taxa[0:i]\n\n return p", "def test_make_taxonomy(self):\n basic_test_runner(self, 'taxonomy')", "def nsrTaxonomy():\r\n # Input file\r\n taxonomyFile = pd.read_csv(args.indir+\"/\"+args.infile1, header=2,\r\n sep=\"\\t\", encoding=\"utf8\")\r\n\r\n # Parse taxonomic names into their elementary components\r\n taxonomy = taxonomyFile.loc[taxonomyFile['rank'] == 'soort']\r\n taxonList = []\r\n for taxon in taxonomy['scientific_name']:\r\n parser = taxonParser(taxon)\r\n if not parser or parser is False:\r\n pass\r\n else:\r\n taxonList.append(parser)\r\n\r\n # Write taxonomy to file\r\n index = 0\r\n with io.open(par_path+\"/results/nsr_species.csv\", \"w\", encoding=\"utf-8\") as outfile:\r\n outfile.write('\"species_id\",\"species_name\",\"identification_reference\"\\n')\r\n for i in taxonList:\r\n binomial = ' '.join(str(i).split()[:2])\r\n authorship = ' '.join(str(i).split()[2:])\r\n outfile.write('%s,%s,\"%s\"\\n' % (index, binomial, authorship))\r\n index += 1\r\n\r\n return taxonList", "def __init__(self, name, parents, cpt):\n\t\tself.parents = parents\n\t\tself.name = name\n\t\tself.cpt = cpt", "def generate_path(self):\n ontology = []\n for item in self.parent.get_ancestors():\n if item.level != 0:\n ontology.append(item.slug)\n\n if self.parent.level != 0:\n ontology.append(self.parent.slug)\n\n ontology.append(self.slug)\n\n return '/' + '/'.join(ontology) + '/'", "def children(self, taxon, taxonomy):\n\n c = set()\n for taxon_id, taxa in taxonomy.items():\n if taxon in taxa:\n\n if taxon.startswith('s__'):\n c.add(taxon_id)\n else:\n taxon_index = taxa.index(taxon)\n for child in taxa[taxon_index + 1:]:\n if len(child) > 3: # not just an empty prefix\n c.add(child)\n\n return c", "def init_taxon():\n if not exists('./data/taxdmp.zip'):\n ftp = FTP('ftp.ncbi.nih.gov')\n ftp.login()\n ftp.cwd('pub/taxonomy')\n ftp.retrbinary('RETR taxdmp.zip', open('./data/taxdmp.zip', 'wb').write)\n ftp.quit\n with ZipFile('./data/taxdmp.zip', 'r') as 
dumpfile:\n dumpfile.extractall(path='./data/')\n taxon_id = dict()\n data = list()\n name = dict()\n specie = list()\n son = dict()\n greatson = dict()\n parent = dict()\n rank = dict()\n global taxon\n taxon = list()\n with open('./data/names.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n if add[0] not in name or add[2] == 'scientific name':\n name[add[0]] = add[1]\n with open('./data/nodes.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n # 1696063|Sarcocystis corvusi||scientific name|\n taxon_id[add[0]] = add[1]\n rank[add[0]] = add[3]\n if add[2] == 'species':\n specie.append(add[0])\n for specie in specie:\n record = [specie, ]\n while taxon_id[specie] != '1':\n record.append(taxon_id[specie])\n specie = taxon_id[specie]\n # if '33090' in record:\n # record.pop()\n # record.pop()\n data.append(record)\n for data in data:\n for n in range(len(data)):\n if data[n] not in parent:\n parent[data[n]] = data[(n + 1):]\n if n == 0:\n continue\n if data[n] not in son:\n son[data[n]] = {data[n - 1], }\n else:\n son[data[n]].add(data[n - 1])\n if data[n] not in greatson:\n greatson[data[n]] = {data[0], }\n else:\n greatson[data[n]].add(data[0])\n for specie in name.items():\n if specie[0] not in son:\n son[specie[0]] = set()\n if specie[0] not in parent:\n parent[specie[0]] = list()\n if specie[0] not in greatson:\n greatson[specie[0]] = set()\n record = [specie[0], name[specie[0]], rank[specie[0]], son[specie[0]], parent[specie[0]], greatson[specie[0]]]\n taxon.append(record)\n\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS taxon (Id TEXT, Name TEXT, Rank TEXT, Son TEXT, Parent TEXT, GreatSon TEXT);')\n for line in taxon:\n son = ' '.join(line[3])\n parent = ' '.join(line[4])\n greatson = ' '.join(line[5])\n cur.execute('INSERT INTO taxon (Id, Name, Rank, Son, Parent, GreatSon) VALUES (?, ?, ?, ?, ?, ?);',\n (line[0], line[1], line[2], son, parent, greatson))\n con.commit()\n cur.close()\n con.close()\n print('Done.\\n')", "def get_taxonomy(): # noqa: E501\n return 'do some magic!'", "def findDiscripancies(taxonomy):\n i = 0\n for entry in taxonomy:\n if entry['parentName'] != None:\n print entry['nodeName']\n if entry['nodeName'].lower() == entry['parentName'].lower():\n i += 1\n print \"No of same nodes = {} \" .format(i)", "def __init__(self):\n\t\tself.clust_idx_part = -1\n\t\t\"\"\"\n\t\tthis list stores the indices of input trees having this taxon\n\t\t\"\"\"\n\t\tself.Support_Tree_List = []", "def _parents(self, prefix):\n if self.inherit:\n suffix = self.inherit.name\n value = self.tracconfig.get(\n self.section, '%s.%s' % (prefix, suffix), default=None)\n if value:\n return self._parents_to_list(value)\n return None", "def create_taxonomy(dataset_name, attr, dataset=[]):\n #path = os.getcwd()\n\n path_in = os.getcwd()\n pattern = '^.*/thesis-data-anonymisation/'\n path_top = re.search(pattern, path_in).group(0)\n\n path = path_top +'data'\n\n if len(dataset_name) > 0:\n prefix = '../data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '../data/hierarchy_'\n\n postfix = '.csv'\n\n try:\n file = open(path + '/' + prefix + attr + postfix, 'r')\n except FileNotFoundError:\n if len(dataset_name) > 0:\n prefix = '/data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '/data/hierarchy_'\n file = open(path+prefix + attr + postfix, 'r')\n\n 
taxonomy = {}\n #dataset_group = dataset.groupby(attr).groups\n\n lines_in = file.readlines()\n file.close()\n lines = [line.strip().split(';') for line in lines_in]\n max_height = max([len(line) for line in lines])\n try:\n float(lines[0][0])\n is_numeric = True\n except ValueError:\n is_numeric = False\n for line in lines:\n #try:\n # if is_numeric:\n # dataset_group[int(line[0])]\n # else:\n # dataset_group[line[0]]\n #except KeyError:\n # continue\n line.reverse()\n for i, val in enumerate(line):\n is_leaf = False\n if val == '*':\n node = TaxNode(val, None, is_numeric, is_leaf)\n else:\n if i == len(line) - 1:\n is_leaf = True\n\n node = TaxNode(val, taxonomy[line[i - 1]][-1], is_numeric, is_leaf)\n try:\n current_nodes = taxonomy[val]\n already_added = False\n for current_node in current_nodes:\n if current_node.parent is None:\n already_added = True\n elif current_node.parent.value == node.parent.value:\n already_added = True\n if not already_added:\n taxonomy[val].append(node)\n except KeyError:\n taxonomy[val] = [node] # Saves the nodes in a list in case of several parents (only valid for nodes with several parents!!!)\n hierarchy = Taxonomy(taxonomy, max_height)\n\n return hierarchy", "def children(self): # noqa: ANN201", "def get_path(self) -> list:\n path = []\n if self.parent:\n path = [a.name for a in self.parent.ancestors(include_self=True)]\n\n return path + [self.name]", "def load_nodes(path):\n global parents\n with open(path, 'r') as r:\n for line in r:\n (taxid, parent, other) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 2)\n parents[taxid] = parent", "def QualifyParentNames(self):\n\n if self._parents_qualified:\n return\n for entity_type in self.valid_types_map.values():\n fq_tuplemap = {}\n for parent in entity_type.unqualified_parent_names:\n fq_tuple = self._BuildQualifiedParentTuple(parent)\n fq_name = f'{fq_tuple.namespace}/{fq_tuple.typename}'\n fq_tuplemap[fq_name] = fq_tuple\n entity_type.parent_names = fq_tuplemap\n self._parents_qualified = True", "def discover_taxa(self,\n treefile,\n schema,\n preserve_underscores):\n for tree in dendropy.Tree.yield_from_files([treefile],\n schema=schema,\n preserve_underscores=preserve_underscores,\n ignore_unrecognized_keyword_arguments=True,\n ):\n return tree.taxon_namespace", "def read_from_tree(self, tree, warnings=True):\n\n if isinstance(tree, str):\n tree = dendropy.Tree.get_from_path(tree,\n schema='newick',\n rooting=\"force-rooted\",\n preserve_underscores=True)\n\n taxonomy = {}\n for leaf in tree.leaf_node_iter():\n taxa = []\n\n node = leaf.parent_node\n while node:\n if node.label:\n taxa_str = node.label\n if ':' in taxa_str:\n taxa_str = taxa_str.split(':')[1]\n\n if not is_float(taxa_str):\n if taxa_str[-1] == ';':\n taxa_str = taxa_str[:-1]\n\n # check for concatenated ranks of the form: p__Crenarchaeota__c__Thermoprotei\n for prefix in Taxonomy.rank_prefixes:\n split_str = '__' + prefix\n if split_str in taxa_str:\n taxa_str = taxa_str.replace(split_str, ';' + prefix)\n\n # appears to be an internal label and not simply a support value\n taxa = [x.strip() for x in taxa_str.split(';')] + taxa\n node = node.parent_node\n\n if warnings and len(taxa) > 7:\n self.logger.warning(\n 'Invalid taxonomy string read from tree for taxon %s: %s' % (leaf.taxon.label, ';'.join(taxa)))\n\n # check if genus name should be appended to species label\n if len(taxa) == 7:\n genus = taxa[5][3:]\n species = taxa[6][3:]\n if genus not in species and len(species.split()) == 1:\n taxa[6] = 's__' + genus + ' ' + 
species\n\n taxa = self.fill_trailing_ranks(taxa)\n taxonomy[leaf.taxon.label] = taxa\n\n return taxonomy", "def get_ancestor_agency_id_terms(self):\n return # osid.search.terms.IdTerm", "def get_children(self, parent):\n\n child_names = []\n for child in parent.children:\n child_names.append(child.label)\n return child_names", "def parse_taxonomy( seq_id, lineage, key_dictionary ):\n\tif seq_id in sti_dict:\n\t\ttax_id = sti_dict[ seq_id ]\n\t\ttax_names = [ tax_id ] #list of taxon names\n\telse:\n\t\ttax_id = str( seq_id )\n\t\ttax_names = [ tax_id ] #list of taxon names\n\ttax_numbers = [ seq_id ]\n\tis_A_list = [] #store is_A relationships\n\n\twhile lineage != '1': #forces traversal through the tri file until we get to the root of taxonomy\n\t\t#print lineage\n\t\tif lineage == '0': #need this to process the root in the tri file. \n\t\t\tbreak\n\t\tis_A_list = [lineage] + is_A_list\n\t\ttax_numbers = [lineage] + tax_numbers\n\t\tif lineage in sti_dict: #we have the next taxonomic representative in the sti file\n\t\t\ttax_id = sti_dict[ lineage ]\n\t\t\ttax_names = [tax_id] + tax_names #append tax_id to front of list\n\t\telse: #the taxon does not have a sequence representative. \n\t\t\ttax_id = str( lineage ) \n\t\t\ttax_names = [tax_id] + tax_names\n\t\t#now process to next lineage\n\t\tlineage = tri_dict[ lineage ] \n\n\n\ttax_names = ['root'] + tax_names #append tax_id to front of list\n\ttax_numbers = [lineage] + tax_numbers\n\tis_A_list = ['0'] + [lineage] + is_A_list\n\n\t#now append all of these reuslts to the final dictionary, which will be keyed \n\t#off of the tax_numbers list (unique IDs for each taxonomic level.\n\n\tfor i in xrange( len( tax_numbers ) ):\n\t\tid = tax_numbers[i]\n\t\tif id in key_dictionary:\n\t\t\tpass\n\t\telse:\n\t\t\tparent = is_A_list[i]\n\t\t\tlevel = i #taxonomic level (how far down in levels are we?)\n\t\t\tnames = process_names( tax_names[:i+1] )\n\t\t\tkey_dictionary[ id ] = [ parent, level, names ]\n\n\treturn( key_dictionary )", "def get_descendant_agency_terms(self):\n return # osid.authentication.AgencyQueryInspector", "def taxonomy_files(self):\n location=self.place.capitalize()+'-'+str(self.year)+'-'\n no_of_ideograms=self.OTU.make_tree(location,self.start_level,self.plot_level)\n return no_of_ideograms", "def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)", "def __init__(self, ncbiNames: 'e.g. Enterobacter or Proteobacteria/Gammaproteobacteria. Allows list of names, e.g. 
[\"Gammaproteobacteria\", \"/Archaea\"]', excludeUnclassified = defaultExcludeUnclassified, oneOrganismPerSpecies = defaultOneOrganismPerSpecies):\n taxonomy = NCBI.getTaxonomy()\n \n if isinstance(ncbiNames, str):\n ncbiNames = [ncbiNames]\n \n self.ncbiNames = ncbiNames\n \n allOrganisms = set()\n for ncbiName in ncbiNames:\n organisms = taxonomy.getOrganismAbbreviationsByPath(ncbiName, exceptPaths=('unclassified' if excludeUnclassified else None), oneOrganismPerSpecies=oneOrganismPerSpecies)\n if organisms is None or len(organisms) == 0:\n raise ValueError(\"No clade of this path found: \" + ncbiName)\n allOrganisms.update(organisms)\n \n self.group = Group( allOrganisms )\n \n self._lastNeofunctionalisedEnzymesCache = None\n self._lastGeneDuplicatedEnzymesMatches = None", "def __init__(self, parent, child, excludeUnclassified = defaultExcludeUnclassified):\n # read first NCBI name from Clade object, if necessary\n if isinstance(parent, Clade):\n parentNCBIname = parent.ncbiNames[0]\n elif not isinstance(parent, str):\n # must be iterable, else fail\n parentNCBIname = parent[0]\n \n if isinstance(child, Clade):\n childNCBIname = child.ncbiNames[0]\n elif not isinstance(child, str):\n # must be iterable, else fail\n childNCBIname = child[0]\n \n # check if child is really a child of parent\n taxonomy = NCBI.getTaxonomy()\n parentNode = taxonomy.searchNodesByPath(parentNCBIname, exceptPaths=('unclassified' if excludeUnclassified else None))\n if parentNode is None or len(parentNode) == 0:\n raise ValueError(\"No clade of this path found: \" + parentNCBIname)\n else: # only consider first element\n parentNode = parentNode[0]\n \n childNode = taxonomy.searchNodesByPath(childNCBIname, exceptPaths=('unclassified' if excludeUnclassified else None))\n if childNode is None or len(childNode) == 0:\n raise ValueError(\"No clade of this path found: \" + childNCBIname)\n else: # only consider first element\n childNode = childNode[0]\n \n foundParent = False\n for ancestor in childNode.ancestors:\n if Taxonomy.nodePath2String(ancestor) == Taxonomy.nodePath2String(parentNode):\n foundParent = True\n break\n \n if foundParent == False:\n raise ValueError(\"Child is not a descendant of parent.\")\n \n super().__init__(parent, child, excludeUnclassified)", "def get_taxonomy_info(self,taxonomy_path):\n Taxon = 'Taxon'\n p1 = '.*[Tt][Aa][Xx][Oo].*'\n pattern = re.compile(p1)\n feature_id = 'Feature ID'\n p2 = '.*[Ii][Dd].*'\n pattern2 = re.compile(p2)\n \n try:\n taxonomy_df = pd.read_csv(taxonomy_path, sep= '\\t')\n print('valid taxonomy file')\n except:\n print('unvalid taxonomy path')\n for ele in taxonomy_df.columns:\n if len(pattern.findall(ele)) >0:\n if pattern.findall(ele)[0]>3 :\n Taxon = ele\n else:\n pass\n break\n for ele in taxonomy_df.columns:\n if len(pattern2.findall(ele)) > 0:\n if len(pattern2.findall(ele)[0])>3 :\n feature_id = ele\n else:\n pass\n break\n taxonomy_df = taxonomy_df.set_index(feature_id)\n self.lineage = taxonomy_df[Taxon]" ]
[ "0.66094685", "0.5948672", "0.59455013", "0.5883354", "0.58186686", "0.57299733", "0.5665493", "0.5624762", "0.55911696", "0.55900514", "0.55431145", "0.554038", "0.55155194", "0.5509813", "0.55068725", "0.55050105", "0.5497012", "0.5494448", "0.54918534", "0.546851", "0.5436032", "0.53887016", "0.5378879", "0.53710556", "0.53612095", "0.53594625", "0.5347124", "0.53456783", "0.53426194", "0.5330114" ]
0.6904
0
All names/paths in the NCBI taxonomy used to create the child clade.
def childNCBInames(self):
    return self.childClade.ncbiNames
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self, taxon, taxonomy):\n\n c = set()\n for taxon_id, taxa in taxonomy.items():\n if taxon in taxa:\n\n if taxon.startswith('s__'):\n c.add(taxon_id)\n else:\n taxon_index = taxa.index(taxon)\n for child in taxa[taxon_index + 1:]:\n if len(child) > 3: # not just an empty prefix\n c.add(child)\n\n return c", "def children(self): # noqa: ANN201", "def display_all_paths(taxonomy):\n for i,entry in enumerate(taxonomy):\n print \"For nodeId : {} :: NodeName : {} \" .format(entry['nodeId'], entry['nodeName'])\n parentId = entry['parentId']\n parentName = entry['parentName']\n while parentId != None:\n print \"ParentId : {} :: ParentName : {}\" .format(parentId, parentName)\n # Search for nodeId == parentId\n for temp in taxonomy:\n if temp['nodeId'] == parentId:\n parentId = temp['parentId']\n parentName = temp['parentName']\n break\n if i == 5:\n break", "def test_make_taxonomy(self):\n basic_test_runner(self, 'taxonomy')", "def parentNCBInames(self):\n return self.parentClade.ncbiNames", "def taxon_children(self, taxonomy):\n\n taxon_children = defaultdict(set)\n for taxon_id, taxa in taxonomy.items():\n for i, taxon in enumerate(taxa):\n if len(taxon) == 3:\n continue # just rank prefix\n\n if len(taxa) > i + 1 and len(taxa[i + 1]) != 3:\n taxon_children[taxon].add(taxa[i + 1])\n\n if len(taxa) > self.rank_index['s__']:\n taxon = taxa[self.rank_index['s__']]\n if taxon != 's__':\n taxon_children[taxon].add(taxon_id)\n\n return taxon_children", "def get_rdp_taxonomy(self):\r\n # RDP uses 0 for the parent ID of the root node\r\n if self.parent is None:\r\n parent_id = 0\r\n else:\r\n parent_id = self.parent.id\r\n\r\n # top rank name must be norank, and bottom rank must be genus\r\n if self.depth == 0:\r\n rank_name = \"norank\"\r\n elif self.children:\r\n rank_name = self.taxonomic_ranks[self.depth]\r\n else:\r\n rank_name = \"genus\"\r\n\r\n fields = [\r\n self.id, self.name, parent_id, self.depth, rank_name]\r\n taxonomy_str = '*'.join(map(str, fields)) + \"\\n\"\r\n\r\n # Recursively append lines from sorted list of subtrees\r\n child_names = sorted(self.children.keys())\r\n subtrees = [self.children[name] for name in child_names]\r\n for subtree in subtrees:\r\n taxonomy_str += subtree.get_rdp_taxonomy()\r\n return taxonomy_str", "def Children(self) -> _n_1_t_2:", "def nsrTaxonomy():\r\n # Input file\r\n taxonomyFile = pd.read_csv(args.indir+\"/\"+args.infile1, header=2,\r\n sep=\"\\t\", encoding=\"utf8\")\r\n\r\n # Parse taxonomic names into their elementary components\r\n taxonomy = taxonomyFile.loc[taxonomyFile['rank'] == 'soort']\r\n taxonList = []\r\n for taxon in taxonomy['scientific_name']:\r\n parser = taxonParser(taxon)\r\n if not parser or parser is False:\r\n pass\r\n else:\r\n taxonList.append(parser)\r\n\r\n # Write taxonomy to file\r\n index = 0\r\n with io.open(par_path+\"/results/nsr_species.csv\", \"w\", encoding=\"utf-8\") as outfile:\r\n outfile.write('\"species_id\",\"species_name\",\"identification_reference\"\\n')\r\n for i in taxonList:\r\n binomial = ' '.join(str(i).split()[:2])\r\n authorship = ' '.join(str(i).split()[2:])\r\n outfile.write('%s,%s,\"%s\"\\n' % (index, binomial, authorship))\r\n index += 1\r\n\r\n return taxonList", "def read_from_tree(self, tree, warnings=True):\n\n if isinstance(tree, str):\n tree = dendropy.Tree.get_from_path(tree,\n schema='newick',\n rooting=\"force-rooted\",\n preserve_underscores=True)\n\n taxonomy = {}\n for leaf in tree.leaf_node_iter():\n taxa = []\n\n node = leaf.parent_node\n while node:\n if 
node.label:\n taxa_str = node.label\n if ':' in taxa_str:\n taxa_str = taxa_str.split(':')[1]\n\n if not is_float(taxa_str):\n if taxa_str[-1] == ';':\n taxa_str = taxa_str[:-1]\n\n # check for concatenated ranks of the form: p__Crenarchaeota__c__Thermoprotei\n for prefix in Taxonomy.rank_prefixes:\n split_str = '__' + prefix\n if split_str in taxa_str:\n taxa_str = taxa_str.replace(split_str, ';' + prefix)\n\n # appears to be an internal label and not simply a support value\n taxa = [x.strip() for x in taxa_str.split(';')] + taxa\n node = node.parent_node\n\n if warnings and len(taxa) > 7:\n self.logger.warning(\n 'Invalid taxonomy string read from tree for taxon %s: %s' % (leaf.taxon.label, ';'.join(taxa)))\n\n # check if genus name should be appended to species label\n if len(taxa) == 7:\n genus = taxa[5][3:]\n species = taxa[6][3:]\n if genus not in species and len(species.split()) == 1:\n taxa[6] = 's__' + genus + ' ' + species\n\n taxa = self.fill_trailing_ranks(taxa)\n taxonomy[leaf.taxon.label] = taxa\n\n return taxonomy", "def get_children(self, parent):\n\n child_names = []\n for child in parent.children:\n child_names.append(child.label)\n return child_names", "def children(self):\n return [self.cut]", "def get_taxonomy(): # noqa: E501\n return 'do some magic!'", "def create_taxonomy(dataset_name, attr, dataset=[]):\n #path = os.getcwd()\n\n path_in = os.getcwd()\n pattern = '^.*/thesis-data-anonymisation/'\n path_top = re.search(pattern, path_in).group(0)\n\n path = path_top +'data'\n\n if len(dataset_name) > 0:\n prefix = '../data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '../data/hierarchy_'\n\n postfix = '.csv'\n\n try:\n file = open(path + '/' + prefix + attr + postfix, 'r')\n except FileNotFoundError:\n if len(dataset_name) > 0:\n prefix = '/data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '/data/hierarchy_'\n file = open(path+prefix + attr + postfix, 'r')\n\n taxonomy = {}\n #dataset_group = dataset.groupby(attr).groups\n\n lines_in = file.readlines()\n file.close()\n lines = [line.strip().split(';') for line in lines_in]\n max_height = max([len(line) for line in lines])\n try:\n float(lines[0][0])\n is_numeric = True\n except ValueError:\n is_numeric = False\n for line in lines:\n #try:\n # if is_numeric:\n # dataset_group[int(line[0])]\n # else:\n # dataset_group[line[0]]\n #except KeyError:\n # continue\n line.reverse()\n for i, val in enumerate(line):\n is_leaf = False\n if val == '*':\n node = TaxNode(val, None, is_numeric, is_leaf)\n else:\n if i == len(line) - 1:\n is_leaf = True\n\n node = TaxNode(val, taxonomy[line[i - 1]][-1], is_numeric, is_leaf)\n try:\n current_nodes = taxonomy[val]\n already_added = False\n for current_node in current_nodes:\n if current_node.parent is None:\n already_added = True\n elif current_node.parent.value == node.parent.value:\n already_added = True\n if not already_added:\n taxonomy[val].append(node)\n except KeyError:\n taxonomy[val] = [node] # Saves the nodes in a list in case of several parents (only valid for nodes with several parents!!!)\n hierarchy = Taxonomy(taxonomy, max_height)\n\n return hierarchy", "def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n 
for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)", "def get_taxonomy_info(self,taxonomy_path):\n Taxon = 'Taxon'\n p1 = '.*[Tt][Aa][Xx][Oo].*'\n pattern = re.compile(p1)\n feature_id = 'Feature ID'\n p2 = '.*[Ii][Dd].*'\n pattern2 = re.compile(p2)\n \n try:\n taxonomy_df = pd.read_csv(taxonomy_path, sep= '\\t')\n print('valid taxonomy file')\n except:\n print('unvalid taxonomy path')\n for ele in taxonomy_df.columns:\n if len(pattern.findall(ele)) >0:\n if pattern.findall(ele)[0]>3 :\n Taxon = ele\n else:\n pass\n break\n for ele in taxonomy_df.columns:\n if len(pattern2.findall(ele)) > 0:\n if len(pattern2.findall(ele)[0])>3 :\n feature_id = ele\n else:\n pass\n break\n taxonomy_df = taxonomy_df.set_index(feature_id)\n self.lineage = taxonomy_df[Taxon]", "def init_taxon():\n if not exists('./data/taxdmp.zip'):\n ftp = FTP('ftp.ncbi.nih.gov')\n ftp.login()\n ftp.cwd('pub/taxonomy')\n ftp.retrbinary('RETR taxdmp.zip', open('./data/taxdmp.zip', 'wb').write)\n ftp.quit\n with ZipFile('./data/taxdmp.zip', 'r') as dumpfile:\n dumpfile.extractall(path='./data/')\n taxon_id = dict()\n data = list()\n name = dict()\n specie = list()\n son = dict()\n greatson = dict()\n parent = dict()\n rank = dict()\n global taxon\n taxon = list()\n with open('./data/names.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n if add[0] not in name or add[2] == 'scientific name':\n name[add[0]] = add[1]\n with open('./data/nodes.dmp', 'r') as dumpfile:\n raw = dumpfile.read().split(sep='\\n')\n raw.pop()\n for record in raw:\n add = record.replace('\\t', '').split(sep='|')\n # 1696063|Sarcocystis corvusi||scientific name|\n taxon_id[add[0]] = add[1]\n rank[add[0]] = add[3]\n if add[2] == 'species':\n specie.append(add[0])\n for specie in specie:\n record = [specie, ]\n while taxon_id[specie] != '1':\n record.append(taxon_id[specie])\n specie = taxon_id[specie]\n # if '33090' in record:\n # record.pop()\n # record.pop()\n data.append(record)\n for data in data:\n for n in range(len(data)):\n if data[n] not in parent:\n parent[data[n]] = data[(n + 1):]\n if n == 0:\n continue\n if data[n] not in son:\n son[data[n]] = {data[n - 1], }\n else:\n son[data[n]].add(data[n - 1])\n if data[n] not in greatson:\n greatson[data[n]] = {data[0], }\n else:\n greatson[data[n]].add(data[0])\n for specie in name.items():\n if specie[0] not in son:\n son[specie[0]] = set()\n if specie[0] not in parent:\n parent[specie[0]] = list()\n if specie[0] not in greatson:\n greatson[specie[0]] = set()\n record = [specie[0], name[specie[0]], rank[specie[0]], son[specie[0]], parent[specie[0]], greatson[specie[0]]]\n taxon.append(record)\n\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n cur.execute(\n 'CREATE TABLE IF NOT EXISTS taxon (Id TEXT, Name TEXT, Rank TEXT, Son TEXT, Parent TEXT, GreatSon TEXT);')\n for line in taxon:\n son = ' '.join(line[3])\n parent = ' '.join(line[4])\n greatson = ' '.join(line[5])\n cur.execute('INSERT INTO taxon (Id, Name, Rank, Son, Parent, GreatSon) VALUES (?, ?, ?, ?, ?, ?);',\n (line[0], line[1], line[2], son, parent, greatson))\n con.commit()\n cur.close()\n con.close()\n print('Done.\\n')", "def writeTaxonomies( self ):\n\n self.logger.info( 'writeTaxonomies: START' )\n\n self.logger.info( 'writeTaxonomies: keggreader.getAllOrganisms(): START' )\n\n organisms = self.reader.getAllOrganisms()\n\n self.logger.info( 'writeTaxonomies: 
keggreader.getAllOrganisms(): DONE' )\n\n taxonomies = {} \n\n taxonomyFile = self.openInsertFile( 'taxonomiesInsert.psql' )\n\n self.logger.info( 'writeTaxonomies: We got ' + str(len(organisms)) + ' organisms and our insert file is taxonomiesInsert.psql' )\n\n\n for organism,taxonomyData in organisms.iteritems():\n for tax in taxonomyData['lineage']:\n\n taxonomies[ tax['name'] ] = { 'name': tax['name'], 'tax_id': tax['tax_id'], 'type': tax['type'] } \n\n\n self.logger.info( 'writeTaxonomies: We got ' + str(len(taxonomies)) + ' taxonomies.' )\n\n\n for taxonomy,taxData in taxonomies.iteritems():\n taxonomyInserted = self.writeFile( taxonomyFile, 'taxonomies', [ str(taxData['name']), str(taxData['tax_id']), str(taxData['type']) ] )\n self.taxonomiesInserted[ taxData['name'] ] = taxonomyInserted\n\n self.logger.info( 'writeTaxonomies: DONE' )", "def get_mds_children(node):\n children=node.getChildren()\n return {get_mds_shortname(child):child for child in children}", "def children_names(node):\n\n return map(History.name, History.children(node))", "def taxonomy_files(self):\n location=self.place.capitalize()+'-'+str(self.year)+'-'\n no_of_ideograms=self.OTU.make_tree(location,self.start_level,self.plot_level)\n return no_of_ideograms", "def get_descendant_agency_terms(self):\n return # osid.authentication.AgencyQueryInspector", "def get_descendant_agency_id_terms(self):\n return # osid.search.terms.IdTerm", "def path_entries(self):", "def __init__(self, parent, child, excludeUnclassified = defaultExcludeUnclassified):\n # read first NCBI name from Clade object, if necessary\n if isinstance(parent, Clade):\n parentNCBIname = parent.ncbiNames[0]\n elif not isinstance(parent, str):\n # must be iterable, else fail\n parentNCBIname = parent[0]\n \n if isinstance(child, Clade):\n childNCBIname = child.ncbiNames[0]\n elif not isinstance(child, str):\n # must be iterable, else fail\n childNCBIname = child[0]\n \n # check if child is really a child of parent\n taxonomy = NCBI.getTaxonomy()\n parentNode = taxonomy.searchNodesByPath(parentNCBIname, exceptPaths=('unclassified' if excludeUnclassified else None))\n if parentNode is None or len(parentNode) == 0:\n raise ValueError(\"No clade of this path found: \" + parentNCBIname)\n else: # only consider first element\n parentNode = parentNode[0]\n \n childNode = taxonomy.searchNodesByPath(childNCBIname, exceptPaths=('unclassified' if excludeUnclassified else None))\n if childNode is None or len(childNode) == 0:\n raise ValueError(\"No clade of this path found: \" + childNCBIname)\n else: # only consider first element\n childNode = childNode[0]\n \n foundParent = False\n for ancestor in childNode.ancestors:\n if Taxonomy.nodePath2String(ancestor) == Taxonomy.nodePath2String(parentNode):\n foundParent = True\n break\n \n if foundParent == False:\n raise ValueError(\"Child is not a descendant of parent.\")\n \n super().__init__(parent, child, excludeUnclassified)", "def test_taxonomy(n=5):\n ecoli_file = join(this_dir, \"e_coli_core.xml.gz\")\n ids = [\"Escherichia_coli_{}\".format(i) for i in range(1, n + 1)]\n taxa = pd.DataFrame({\"id\": ids})\n taxa[\"genus\"] = \"Escherichia\"\n taxa[\"species\"] = \"Eschericia coli\"\n taxa[\"reactions\"] = 95\n taxa[\"metabolites\"] = 72\n taxa[\"file\"] = ecoli_file\n return taxa", "def findDiscripancies(taxonomy):\n i = 0\n for entry in taxonomy:\n if entry['parentName'] != None:\n print entry['nodeName']\n if entry['nodeName'].lower() == entry['parentName'].lower():\n i += 1\n print \"No of same nodes = {} \" 
.format(i)", "def children():\n return {\n \"charges\": Charge,\n \"codes\": Code,\n \"comments\": Comment,\n \"links\": Link,\n \"parties\": Party,\n \"rates\": Rate,\n \"references\": Reference,\n }", "def get_children(self, *types: str) -> List[TgnObject]:\n pass", "def __init__(self):\n\t\tself.clust_idx_part = -1\n\t\t\"\"\"\n\t\tthis list stores the indices of input trees having this taxon\n\t\t\"\"\"\n\t\tself.Support_Tree_List = []" ]
[ "0.626776", "0.6162786", "0.60941124", "0.60593927", "0.6052144", "0.5884141", "0.5847267", "0.56961423", "0.56444347", "0.5616592", "0.56161517", "0.55935484", "0.55766165", "0.5564394", "0.55252403", "0.54684275", "0.5455569", "0.54430205", "0.5435376", "0.54307735", "0.53972065", "0.5392369", "0.5382705", "0.5377566", "0.5374456", "0.5363834", "0.53116775", "0.530848", "0.5291235", "0.5290313" ]
0.6842796
0
SubstanceEcGraph of the conserved core metabolism.
def conservedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEcGraph:
    parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)
    childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)
    graph = GeneFunctionConservation.getGraph(parentCoreMetabolism, childCoreMetabolism)
    graph.name = 'Conserved metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)
    return graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coreMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEcGraph:\n graph = self.group.majorityEcGraph(majorityPercentage = majorityPercentageCoreMetabolism, noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Core metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def graph(self):\n ...", "def graph(self):\n assert self._modeled, \"Need to do calc_covariance\"\n return self._graph", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def 
collectiveMetabolism(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes, addEcDescriptions = False) -> SubstanceEcGraph:\n graph = self.group.collectiveEcGraph(noMultifunctional = excludeMultifunctionalEnzymes, addCount = True, keepOnHeap = True, addEcDescriptions = addEcDescriptions)\n graph.name = 'Collective metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def get_ecg_graph():\n titles = ['ecg1', 'ecg2', 'ecg3']\n colors = ['rgb(240,0,0)', 'rgb(0,240,0)', 'rgb(0,0,240)']\n update()\n signames_ecg = queries['signames_ecg']\n signals = queries['signals']\n latesthr = queries['latesthr']\n return html.Div(className='ecg', children=[\n html.Div(style={'display': 'flex', 'height': '40vh'},\n children=[dcc.Graph(\n id=titles[i] + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': signals[signame]['time'],\n 'y': 
signals[signame][titles[i]],\n 'mode': 'line', 'name': signame, 'line': {'color':colors[i]}}\n ],\n 'layout': {\n 'font': {'color':'#fff'},\n 'title': '{}-{}'.format(signame, titles[i]),\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'voltage (mv)', 'color': '#fff', 'showgrid': 'False', 'range': np.linspace(-2.5, 2.5, 10)},\n 'paper_bgcolor':'#000', 'plot_bgcolor':'#000'\n }\n }\n ) for i in range(len(titles))]\n +\n [html.Div(\n style={'justify-content': 'center', 'display': 'flex',\n 'align-items': 'center', 'width': '10vh', 'font-size': '30pt', 'color': 'white'},\n children=['{}'.format(latesthr[signame][0])])\n ]\n ) for signame in signames_ecg])", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def unifiedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n \n graph = parentCoreMetabolism.union(childCoreMetabolism, addCount = False, updateName = False)\n graph.name = 'Unified metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n lostEdges = lostGraph.getEdges()\n \n addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n addedEdges = addedGraph.getEdges()\n \n conservedGraph = GeneFunctionConservation.getGraph(parentCoreMetabolism, childCoreMetabolism)\n conservedEdges = conservedGraph.getEdges() \n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges)\n 
Export.addColourAttribute(graph, colour = Export.Colour.PINK, nodes = False, edges = conservedEdges)\n \n return graph", "def _get_full_graph(self):", "def concentration(self):\n return [node.concentration for node in self]", "def semantics_subgraph(self) -> DiGraph:\n\n return self.graph.subgraph(list(self.semantics_nodes))", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n \n if colour is True:\n lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n lostEdges = lostGraph.getEdges()\n \n addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n addedEdges = addedGraph.getEdges()\n \n graph = lostGraph.union(addedGraph, addCount = False, updateName = False) \n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges)\n 
Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges)\n \n else: \n graph = GeneFunctionDivergence.getGraph(parentCoreMetabolism, childCoreMetabolism)\n \n graph.name = 'Diverged metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def all_subconstituents(self, compute=False):\n out = {}\n for i in range(self._.d+1):\n try:\n out[i] = self.subconstituent(i, compute=compute)\n except IndexError:\n pass\n return out", "def c_edges(self):\n self.compute_c_edges(self)\n return self._c_edges", "def addedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n graph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n graph.name = 'Added metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n return graph", "def graph(self):\n return self.__graph", "def gen_graph(self):", "def sub_graph_merging(self):", "def sage_graph(self):\n self.fe.load_cache()\n edges = []\n is_bipartite = self.variant.is_bipartite()\n for X in self.L:\n for Y in self.L:\n a = self.op_norm(X, Y)\n if not self.K.is_zero(a):\n for c in self.K.unit_group:\n d = a - c\n if X != Y or c < d or is_bipartite:\n edges.append(((X, c, False), (Y, d, is_bipartite)))\n if X == Y and not is_bipartite:\n break\n return sage.all.Graph(edges)", "def set_ec(self, etacalc):\n if not self.__thermodyn:\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[i][etacalc[i]])\n else:\n if not etacalc in self.__A2[0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2]\n \n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n 
#--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 
'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 
0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = 
np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod in ['espresso']: C = 
-C/10.\n elif self.__cod in ['vasp','exciting','wien']: C = C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0\n elif self.__cod in ['emto']: C = C*self.__ToGPa/self.__V0\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n self.__C = C\n \n else:\n Cs = []\n for t in map(str,self.__T):#for t in range(len(self.__T)):\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[t][i][etacalc[i]])\n else:\n if not etacalc in self.__A2[t][0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[t][0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2[t]]\n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + 
A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = 
.5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 
0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures 
--------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod == 'espresso': C = -C/10.\n elif self.__cod in ['vasp','emto','exciting','wien']: C=C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0#C = C/4.\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S 
= np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n Cs.append(C)\n self.__C = Cs", "def coaccessible_components(self):\n DG = self.digraph().reverse()\n coaccessible_states = DG.breadth_first_search(\n [_.label() for _ in self.iter_final_states()])\n return self.induced_sub_finite_state_machine(\n [self.state(_) for _ in coaccessible_states])", "def setConstruction(self):\n self.graph_construction=[]\n self.graph_display=[]\n self.graph_drawing=self.cleanGraph(self.graph_drawing)\n #self.graph_drawing=self.takeGraphSamples(self.graph_drawing)[:]\n print(self.graph_drawing)\n #self.coefficients=self.discreteComplexDecomposeGraph(self.graph_drawing)\n self.graph_drawing=self.graph_drawing[:len(self.graph_drawing)-1+len(self.graph_drawing)%2]\n self.coefficients=self.numpyFourierTransform(self.graph_drawing)\n self.graph_drawing=self.numpyInverseFourierTransform(self.coefficients)\n #print(len(self.coefficients))\n #self.graph_construction=self.discreteComplexComposeGraph(self.coefficients,0)\n self.graph_construction=self.graph_drawing\n self.time=0\n self.step=0\n print(self.coefficients)", "def plot_mcc_tree():\n t = ete2.Tree(\"mcct.nex\")\n ts = ete2.treeview.TreeStyle()\n ts.show_scale = False\n ts.show_leaf_name = False\n ts.show_branch_support = False\n ts.scale = 500\n margin = 10\n ts.margin_top = margin\n ts.margin_bottom = margin\n ts.margin_left = margin\n ts.margin_right = margin\n\n germ_style = ete2.NodeStyle()\n germ_style[\"bgcolor\"] = \"LightSteelBlue\"\n proto_germ = t.get_common_ancestor(\"Danish\", \"Norwegian\",\"Icelandic\",\"Swedish\", \"Dutch\", \"German\", \"English\")\n proto_germ.set_style(germ_style)\n\n bs_style = ete2.NodeStyle()\n bs_style[\"bgcolor\"] = \"Moccasin\"\n proto_bs = t.get_common_ancestor(\"Bulgarian\", \"Czech\",\"Polish\",\"Russian\")\n proto_bs.set_style(bs_style)\n\n ital_style = ete2.NodeStyle()\n ital_style[\"bgcolor\"] = \"DarkSeaGreen\"\n proto_ital = t.get_common_ancestor(\"French\", \"Romanian\", \"Italian\", \"Portuguese\", \"Spanish\")\n proto_ital.set_style(ital_style)\n\n t.render(\"mcct.eps\", style_func, tree_style=ts, dpi=600, units=\"px\", w=2250)", "def scc(self):\n return self.to_ddm().scc()" ]
[ "0.6580189", "0.6125802", "0.5889315", "0.5867898", "0.5865846", "0.5857007", "0.5807236", "0.5736168", "0.57251334", "0.5632387", "0.5575763", "0.5573248", "0.5559867", "0.5544407", "0.5537933", "0.55150014", "0.5493232", "0.5482677", "0.5473975", "0.5453563", "0.5412237", "0.5386772", "0.5328551", "0.53113675", "0.530257", "0.5282288", "0.52791506", "0.52766126", "0.52697474", "0.5269382" ]
0.6567516
1
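The elastic-constants routine in the record above finishes by averaging the Voigt and Reuss bounds into Hill moduli. As a minimal standalone sketch of that final averaging step (the function name and return layout below are illustrative, not part of the original class; it assumes C is a symmetric 6x6 stiffness matrix in Voigt notation, already converted to GPa):

import numpy as np

def voigt_reuss_hill(C):
    # Voigt bound (uniform-strain assumption), as in self.BV / self.GV above
    C = np.asarray(C, dtype=float)
    BV = (C[0, 0] + C[1, 1] + C[2, 2] + 2 * (C[0, 1] + C[0, 2] + C[1, 2])) / 9
    GV = ((C[0, 0] + C[1, 1] + C[2, 2]) - (C[0, 1] + C[0, 2] + C[1, 2])
          + 3 * (C[3, 3] + C[4, 4] + C[5, 5])) / 15
    # Reuss bound (uniform-stress assumption) works on the compliance matrix
    S = np.linalg.inv(C)
    BR = 1 / (S[0, 0] + S[1, 1] + S[2, 2] + 2 * (S[0, 1] + S[0, 2] + S[1, 2]))
    GR = 15 / (4 * (S[0, 0] + S[1, 1] + S[2, 2])
               - 4 * (S[0, 1] + S[0, 2] + S[1, 2])
               + 3 * (S[3, 3] + S[4, 4] + S[5, 5]))
    # Hill values are the arithmetic means of the two bounds
    BH = 0.5 * (BV + BR)
    GH = 0.5 * (GV + GR)
    EH = 9. * BH * GH / (3. * BH + GH)
    nuH = (1.5 * BH - GH) / (3. * BH + GH)
    return BH, GH, EH, nuH

The Voigt bound assumes uniform strain across grains, the Reuss bound uniform stress; the Hill moduli are simply their means, which is exactly what the self.BH / self.GH / self.EH / self.nuH lines in the record compute.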
SubstanceEC graph of the added core metabolism.
def addedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEcGraph: parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism) childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism) graph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism) graph.name = 'Added metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames) return graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coreMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEcGraph:\n graph = self.group.majorityEcGraph(majorityPercentage = majorityPercentageCoreMetabolism, noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Core metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def collectiveMetabolism(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes, addEcDescriptions = False) -> SubstanceEcGraph:\n graph = self.group.collectiveEcGraph(noMultifunctional = excludeMultifunctionalEnzymes, addCount = True, keepOnHeap = True, addEcDescriptions = addEcDescriptions)\n graph.name = 'Collective metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def graph(self):\n ...", "def unifiedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n \n graph = parentCoreMetabolism.union(childCoreMetabolism, addCount = False, updateName = False)\n graph.name = 'Unified metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n lostEdges = lostGraph.getEdges()\n \n addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n addedEdges = addedGraph.getEdges()\n \n conservedGraph = GeneFunctionConservation.getGraph(parentCoreMetabolism, childCoreMetabolism)\n conservedEdges = conservedGraph.getEdges() \n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.PINK, nodes = False, edges = conservedEdges)\n \n return graph", "def conservedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> 
SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n graph = GeneFunctionConservation.getGraph(parentCoreMetabolism, childCoreMetabolism)\n graph.name = 'Conserved metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n return graph", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, 
majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n \n if colour is True:\n lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n lostEdges = lostGraph.getEdges()\n \n addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n addedEdges = addedGraph.getEdges()\n \n graph = lostGraph.union(addedGraph, addCount = False, updateName = False) \n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges)\n \n else: \n graph = GeneFunctionDivergence.getGraph(parentCoreMetabolism, childCoreMetabolism)\n \n graph.name = 'Diverged metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def sub_graph_merging(self):", "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = 
False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def gen_graph(self):", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def populate_graph(self):", "def semantics_subgraph(self) -> DiGraph:\n\n return self.graph.subgraph(list(self.semantics_nodes))", "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def _get_full_graph(self):", "def sub_graph_merging(self):\n raise NotImplementedError()", "def lostMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = 
self.childClade.coreMetabolism(majorityPercentageCoreMetabolism) \n graph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n graph.name = 'Lost metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n return graph", "def get_ecg_graph():\n titles = ['ecg1', 'ecg2', 'ecg3']\n colors = ['rgb(240,0,0)', 'rgb(0,240,0)', 'rgb(0,0,240)']\n update()\n signames_ecg = queries['signames_ecg']\n signals = queries['signals']\n latesthr = queries['latesthr']\n return html.Div(className='ecg', children=[\n html.Div(style={'display': 'flex', 'height': '40vh'},\n children=[dcc.Graph(\n id=titles[i] + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': signals[signame]['time'],\n 'y': signals[signame][titles[i]],\n 'mode': 'line', 'name': signame, 'line': {'color':colors[i]}}\n ],\n 'layout': {\n 'font': {'color':'#fff'},\n 'title': '{}-{}'.format(signame, titles[i]),\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'voltage (mv)', 'color': '#fff', 'showgrid': 'False', 'range': np.linspace(-2.5, 2.5, 10)},\n 'paper_bgcolor':'#000', 'plot_bgcolor':'#000'\n }\n }\n ) for i in range(len(titles))]\n +\n [html.Div(\n style={'justify-content': 'center', 'display': 'flex',\n 'align-items': 'center', 'width': '10vh', 'font-size': '30pt', 'color': 'white'},\n children=['{}'.format(latesthr[signame][0])])\n ]\n ) for signame in signames_ecg])", "def build_graph(self):\n pass", "def add_graph(self):\n \n self.cd_sampling = None\n \n if \"CD\" in self.algorithm:\n\n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples()\n \n if \"CSS\" in self.algorithm and self.mf_steps > 0: \n \n self.add_mf_updates()\n \n elif \"CSS\" in self.algorithm and self.gibbs_steps > 0:\n \n self.add_cd_samples()\n \n if self.num_hidden ==0:\n \n self.cd_sampling = self.get_cd_samples() \n \n self.add_objective()\n\n self.add_grad_updates() \n \n if self.report_p_tilda:\n \n self.add_p_tilda()\n \n self.add_pseudo_cost_measure()\n\n self.optimize = self.optimization_step()", "def test_append_unreactive_structure(self):\n\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('CCO'), # a control species\n Species().from_smiles('[N]=O'),\n Species().from_adjacency_list(\"\"\"1 O u1 p2 c0 {2,S}\n 2 N u0 p2 c0 {1,S}\"\"\"), # a non-representative structure of '[N]=O'\n ]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), 2)\n self.assertEquals(len(cerm.index_species_dict), 2)\n self.assertEquals(len(cerm.index_species_dict[1].molecule), 1)\n self.assertTrue(cerm.index_species_dict[1].molecule[0].reactive)\n self.assertEquals(len(cerm.index_species_dict[2].molecule), 1)\n self.assertTrue(cerm.index_species_dict[2].molecule[0].reactive)", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = 
colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def _build_computation_graph(self):\n raise NotImplementedError", "def plot_mcc_tree():\n t = ete2.Tree(\"mcct.nex\")\n ts = ete2.treeview.TreeStyle()\n ts.show_scale = False\n ts.show_leaf_name = False\n ts.show_branch_support = False\n ts.scale = 500\n margin = 10\n ts.margin_top = margin\n ts.margin_bottom = margin\n ts.margin_left = margin\n ts.margin_right = margin\n\n germ_style = ete2.NodeStyle()\n germ_style[\"bgcolor\"] = \"LightSteelBlue\"\n proto_germ = t.get_common_ancestor(\"Danish\", \"Norwegian\",\"Icelandic\",\"Swedish\", \"Dutch\", \"German\", \"English\")\n proto_germ.set_style(germ_style)\n\n bs_style = ete2.NodeStyle()\n bs_style[\"bgcolor\"] = \"Moccasin\"\n proto_bs = t.get_common_ancestor(\"Bulgarian\", \"Czech\",\"Polish\",\"Russian\")\n proto_bs.set_style(bs_style)\n\n ital_style = ete2.NodeStyle()\n ital_style[\"bgcolor\"] = \"DarkSeaGreen\"\n proto_ital = t.get_common_ancestor(\"French\", \"Romanian\", \"Italian\", \"Portuguese\", \"Spanish\")\n proto_ital.set_style(ital_style)\n\n t.render(\"mcct.eps\", style_func, tree_style=ts, dpi=600, units=\"px\", w=2250)" ]
[ "0.6702209", "0.6497114", "0.61611027", "0.61203563", "0.6050524", "0.59818614", "0.5975307", "0.5904523", "0.58224356", "0.58078855", "0.57479113", "0.574062", "0.56759965", "0.560662", "0.5584677", "0.5557808", "0.5547269", "0.54960835", "0.54925925", "0.54916406", "0.5462262", "0.5452654", "0.54393333", "0.5427306", "0.5419302", "0.54073685", "0.5395441", "0.5376947", "0.5336413", "0.5310419" ]
0.6712265
0
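The addedMetabolism document above delegates the comparison itself to GeneFunctionAddition.getGraph. As a hedged sketch of what such a parent-to-child comparison reduces to — the helper names below are hypothetical, not the library's API, and they assume graph objects whose getEdges() returns a set of edges:

def added_edges(parent_graph, child_graph):
    # gained by the child clade: present only in the child core metabolism
    return child_graph.getEdges() - parent_graph.getEdges()

def lost_edges(parent_graph, child_graph):
    # lost on the way to the child: present only in the parent core metabolism
    return parent_graph.getEdges() - child_graph.getEdges()

def conserved_edges(parent_graph, child_graph):
    # kept by both parent and child clades
    return parent_graph.getEdges() & child_graph.getEdges()

The divergedMetabolism, conservedMetabolism and unifiedMetabolism functions appearing in the surrounding records are built from these same three edge sets, differing only in which of them is kept and how the result is coloured.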
SubstanceEC graph of the unified core metabolisms. The lost metabolism of the parent is coloured in blue, the added metabolism of the child in red, and the conserved metabolism of both in pink, matching the Export.addColourAttribute calls in the function body. The colouring is realised by adding a 'colour' attribute to each edge. Nodes are not coloured.
def unifiedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph: parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism) childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism) graph = parentCoreMetabolism.union(childCoreMetabolism, addCount = False, updateName = False) graph.name = 'Unified metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames) if colour is True: lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism) lostEdges = lostGraph.getEdges() addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism) addedEdges = addedGraph.getEdges() conservedGraph = GeneFunctionConservation.getGraph(parentCoreMetabolism, childCoreMetabolism) conservedEdges = conservedGraph.getEdges() Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges) Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges) Export.addColourAttribute(graph, colour = Export.Colour.PINK, nodes = False, edges = conservedEdges) return graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def divergedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n \n if colour is True:\n lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n lostEdges = lostGraph.getEdges()\n \n addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n addedEdges = addedGraph.getEdges()\n \n graph = lostGraph.union(addedGraph, addCount = False, updateName = False) \n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges)\n \n else: \n graph = GeneFunctionDivergence.getGraph(parentCoreMetabolism, childCoreMetabolism)\n \n graph.name = 'Diverged metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n graph = parentGraph.union(childGraph, addCount = False, updateName = False)\n graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n parentEdges = parentGraph.getEdges()\n childEdges = childGraph.getEdges()\n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges)\n \n return graph", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = 
self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' 
+ ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n 
self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def sub_graph_merging(self):", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised 
ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def plot_mcc_tree():\n t = ete2.Tree(\"mcct.nex\")\n ts = ete2.treeview.TreeStyle()\n ts.show_scale = False\n ts.show_leaf_name = False\n ts.show_branch_support = False\n ts.scale = 500\n margin = 10\n ts.margin_top = margin\n ts.margin_bottom = margin\n ts.margin_left = margin\n ts.margin_right = margin\n\n germ_style = ete2.NodeStyle()\n germ_style[\"bgcolor\"] = \"LightSteelBlue\"\n proto_germ = t.get_common_ancestor(\"Danish\", \"Norwegian\",\"Icelandic\",\"Swedish\", \"Dutch\", \"German\", \"English\")\n proto_germ.set_style(germ_style)\n\n bs_style = ete2.NodeStyle()\n bs_style[\"bgcolor\"] = \"Moccasin\"\n proto_bs = t.get_common_ancestor(\"Bulgarian\", \"Czech\",\"Polish\",\"Russian\")\n proto_bs.set_style(bs_style)\n\n ital_style = ete2.NodeStyle()\n ital_style[\"bgcolor\"] = \"DarkSeaGreen\"\n proto_ital = t.get_common_ancestor(\"French\", \"Romanian\", \"Italian\", \"Portuguese\", \"Spanish\")\n proto_ital.set_style(ital_style)\n\n t.render(\"mcct.eps\", style_func, tree_style=ts, dpi=600, units=\"px\", w=2250)", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = 
plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def ShouldInheritColours(self):\r\n\r\n return False", "def conservedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n graph = GeneFunctionConservation.getGraph(parentCoreMetabolism, childCoreMetabolism)\n graph.name = 'Conserved metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n return graph", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, 
nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def ShouldInheritColours(self):\n\n return False", "def main(G): \n try:\n val_map = {'A': 1.0,\n 'D': 0.5714285714285714,\n 'H': 0.0}\n values = [val_map.get(node, 0.45) for node in G.nodes()]\n edge_colors = 'k'\n \n edge_labels=dict([((u,v,),d['weight'])\n for u,v,d in G.edges(data=True)])\n pos=nx.spring_layout(G) # positions for all nodes \n 
nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)\n nx.draw(G,pos, node_color = values, node_size=15,edge_color=edge_colors,edge_cmap=plt.cm.Reds)\n pylab.show()\n\n for ite in range(len(G.nodes())):\n \n Iterations = ite \n SL = SIG.Single_linkage(G, Iterations)\n pos=nx.spring_layout(G) # positions for all nodes\n node_colors = ['b','g','r','y','c','k','m','w']\n for i in range(len(G)):\n node_colors.append('w')\n \n # nodes\n C_list = SL.fit_predict(G)[-1,:]\n for Clust in range(C_list.shape[1]):\n nx.draw_networkx_nodes(G,pos,\n nodelist = list(C_list[0,Clust]),\n node_color=node_colors[Clust],\n node_size=80,\n alpha=0.8)\n \n # edges\n nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)\n nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)\n \n plt.axis('off')\n plt.savefig(\"labels_and_colors.png\") # save as png\n plt.show() # display\n print \"in level :\",ite \n print SL.__str__()\n\n\n except SIG.Single_linkage_Error:\n \n print( \"Got an imput error, please change the input and try it again.\" )", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def colour_node(instance, reaction_colour='darkgrey', Xc_colour='orange', waste_colour='red', res_colour='limegreen', InPr_colour='lightblue'):\n G, mapping = instance.network()\n\n # relabel\n G = nx.relabel_nodes(G, mapping)\n\n node_dict_mapped = nodes_mapped(instance)\n\n waste, resources, intmed_products = instance.amenities()\n\n colour_map = []\n\n for nd in G:\n # print(\"nd\",nd)\n for nd_label, ammentity in node_dict_mapped.items():\n # print(\"nd_label\",nd_label)\n if nd_label == nd:\n # print(nd, nd_label)\n\n if ammentity == \"r\":\n colour_map.append(reaction_colour)\n\n elif ammentity == \"Xc\":\n colour_map.append(Xc_colour)\n\n elif ammentity 
== \"w\":\n colour_map.append(waste_colour)\n\n elif ammentity == \"Xr\":\n colour_map.append(res_colour)\n\n elif ammentity == \"InPr\":\n colour_map.append(InPr_colour)\n return colour_map", "def graph(self, context=None, **kwargs):\n graph = Dot(graph_type=\"digraph\", rankdir=(\"LR\" if context is None else \"TB\"))\n machine_graph = Subgraph(\n graph_name=\"cluster_machine\", graph_type=\"digraph\", label=\"MACHINE\"\n )\n\n for current_state in sorted(self._states):\n node_args = {}\n shape = \"circle\"\n shape = (\"double\" if current_state in self._end_states else \"\") + shape\n node_args[\"shape\"] = shape\n\n if context is not None and current_state == self._current_state:\n node_args[\"fillcolor\"] = \"cyan\"\n node_args[\"style\"] = \"filled\"\n\n machine_graph.add_node(Node(current_state, **node_args))\n\n machine_graph.add_node(Node(\"0\", shape=\"point\"))\n machine_graph.add_edge(Edge(\"0\", self._start_state))\n\n for current_state in self._states:\n transitions = self._transitions.get(current_state)\n if transitions:\n for current_symbol in transitions:\n next_symbol, direction, next_state = transitions.get(current_symbol)\n label = f\"'{current_symbol}' '{next_symbol}' {'R' if direction else 'L'}\"\n\n edge_args = {}\n if (\n context\n and current_state == self._current_state\n and current_symbol == self._tape[self._head]\n ):\n edge_args[\"color\"] = \"cyan\"\n machine_graph.add_edge(\n Edge(current_state, next_state, label=label, **edge_args)\n )\n\n graph.add_subgraph(machine_graph)\n if context is not None:\n tape_graph = Subgraph(\n graph_name=\"cluster_tape\", graph_type=\"digraph\", label=\"TAPE\"\n )\n tape = []\n for index in range(-4 + self._head, 5 + self._head):\n tape.append(\n f\"<t{index}> {self._tape[index] if 0 <= index < len(self._tape) else self._blank_symbol}\"\n )\n\n tape_graph.add_node(Node(\"tape\", label=\"|\".join(tape), shape=\"record\"))\n tape_graph.add_node(Node(\"t0\", shape=\"point\"))\n tape_graph.add_edge(Edge(\"t0\", f\"tape:t{self._head}\"))\n graph.add_subgraph(tape_graph)\n\n if kwargs.get(\"filename\"):\n graph.write(kwargs.get(\"filename\"), format=\"png\")\n return f\"Graph saved to {kwargs.get('filename')}\"\n\n return graph", "def coreMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEcGraph:\n graph = self.group.majorityEcGraph(majorityPercentage = majorityPercentageCoreMetabolism, noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Core metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def graph(self):\n ..." ]
[ "0.66438884", "0.653458", "0.6519239", "0.64919496", "0.63315225", "0.63292813", "0.6266335", "0.6138613", "0.60851824", "0.60435796", "0.59619445", "0.5899194", "0.5817667", "0.5779564", "0.57427", "0.57339853", "0.5727987", "0.5700911", "0.5666281", "0.563707", "0.5624694", "0.5614577", "0.5586562", "0.553503", "0.55262595", "0.55129826", "0.55108833", "0.55038565", "0.5457332", "0.5455517" ]
0.6774996
0
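Aside (editorial, not a dataset row): the records above and below follow the schema declared in each row's metadata, where the triplet objective pairs the query (a docstring-style description) with its document (the matching source snippet) against the negatives list (near-miss snippets, one negative_score each). As a minimal, illustrative sketch of how such a row is typically consumed — not part of the dataset, with make_triplets as a hypothetical helper name and a heavily abbreviated example row — one could expand each record into (anchor, positive, negative) training triplets:

# Illustrative only, not dataset content. Expands one row of this corpus
# into (anchor, positive, negative) triplets, following the declared
# objective: {"triplet": [["query", "document", "negatives"]]}.
# `make_triplets` and the abbreviated example row are hypothetical.
from typing import Dict, Iterator, List, Tuple

def make_triplets(row: Dict) -> Iterator[Tuple[str, str, str]]:
    anchor: str = row["query"]                # natural-language description
    positive: str = row["document"]           # the matching code snippet
    negatives: List[str] = row["negatives"]   # hard-negative code snippets
    for negative in negatives:
        yield anchor, positive, negative

row = {
    "query": "SubstanceEnzyme graph derived from the unified core metabolisms.",
    "document": "def unifiedMetabolismEnzymes(self, ...): ...",
    "negatives": ["def unifiedMetabolismNeofunctionalisedEnzymes(self, ...): ..."],
}

for a, p, n in make_triplets(row):
    print(len(a), len(p), len(n))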
SubstanceEnzyme graph derived from the unified core metabolisms. The lost metabolism of the parent is coloured in blue, the conserved metabolism of both in red, and the added metabolism of the child in pink. The colouring is realised by adding a 'colour' attribute to each edge. Nodes are not coloured.
def unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) graph = parentGraph.union(childGraph, addCount = False, updateName = False) graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames) if colour is True: parentEdges = parentGraph.getEdges() childEdges = childGraph.getEdges() Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges) Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges) return graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def unifiedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n \n graph = parentCoreMetabolism.union(childCoreMetabolism, addCount = False, updateName = False)\n graph.name = 'Unified metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n lostEdges = lostGraph.getEdges()\n \n addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n addedEdges = addedGraph.getEdges()\n \n conservedGraph = GeneFunctionConservation.getGraph(parentCoreMetabolism, childCoreMetabolism)\n conservedEdges = conservedGraph.getEdges() \n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.PINK, nodes = False, edges = conservedEdges)\n \n return graph", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = 
conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def sub_graph_merging(self):", "def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def ShouldInheritColours(self):\r\n\r\n return False", "def divergedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n \n if colour is True:\n lostGraph = GeneFunctionLoss.getGraph(parentCoreMetabolism, childCoreMetabolism)\n lostEdges = lostGraph.getEdges()\n \n addedGraph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n addedEdges = addedGraph.getEdges()\n \n graph = lostGraph.union(addedGraph, addCount = False, updateName = False) \n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = lostEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = addedEdges)\n \n else: \n graph = GeneFunctionDivergence.getGraph(parentCoreMetabolism, childCoreMetabolism)\n \n graph.name = 'Diverged metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = 
self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def test_set_children_styles():\n src1 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))\n src2 = magpy.magnet.Cylinder((1, 2, 3), (1, 2))\n col = src1 + src2\n col.set_children_styles(magnetization_show=False)\n assert (\n src1.style.magnetization.show is False\n and src1.style.magnetization.show is False\n ), \"\"\"failed updating styles to children\"\"\"\n with pytest.raises(ValueError):\n col.set_children_styles(bad_input=\"somevalue\")", "def ShouldInheritColours(self):\n\n return False", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = 
self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def draw(self, model):\n graph = model.graph\n ants = model.ants\n sugar = model.sugar\n nest = model.nest\n\n colors = {node: \"y\" for node in graph.nodes}\n colors[nest] = \"b\"\n colors[sugar] = \"r\"\n for ant in ants:\n colors[ant.position] = \"k\"\n\n weights = [graph[u][v][\"weight\"] / 5 for u, v in graph.edges()]\n super().draw(graph, node_color=colors.values(), width=weights)#, arrows=True)", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = 
self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def testSetParent(self):\n for child in self.color_corrections + self.color_decisions:\n self.assertEqual(\n None,\n child.parent\n )\n\n self.node.append_children(\n self.color_corrections + self.color_decisions\n )\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )\n child.parent = 'banana'\n self.assertEqual(\n 'banana',\n child.parent\n )\n\n self.node.set_parentage()\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 
'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def support_tree(self):\n if not self.is_atomistic():\n raise ValueError(\"lattice is not atomistic\")\n\n class_order = topological_sort(self._hase_diagram)\n objects = self.above(self.bottom)\n representatives = {element: element for element in objects}\n classes = {element: {element} for element in objects}\n tree = Graph(objects)\n n_connected_parts = len(objects)\n colors = {obj: i for i, obj in enumerate(objects)}\n\n class_order_position = 1\n while n_connected_parts > 1:\n class_order_position += 1\n current_class_index = class_order[class_order_position]\n if current_class_index not in objects:\n predecessors = list(self.under(current_class_index))\n first_class_representative = representatives[predecessors[0]]\n second_class_representative = representatives[predecessors[1]]\n representatives[current_class_index] = random.choice(\n [first_class_representative, second_class_representative])\n if colors[first_class_representative] != colors[second_class_representative]:\n tree.update(((first_class_representative, second_class_representative),))\n color_to_change = colors[second_class_representative]\n color_to_keep = colors[first_class_representative]\n for element in colors:\n if colors[element] == color_to_change:\n colors[element] = color_to_keep\n n_connected_parts -= 1\n classes[current_class_index] = classes[predecessors[0]].union(classes[predecessors[1]])\n return tree", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def flip_subtree_colours(subtree):\n subtree.colour = not subtree.colour\n subtree.left.colour = not subtree.left.colour\n subtree.right.colour = not subtree.right.colour", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def add_ancestor_edges(graph, node, color):\n out_neighbor, = 
graph.get_deductive_out_neighbors(node)\n for in_neighbor in graph.get_deductive_in_neighbors(node):\n graph.add_ancestor_edge(out_neighbor, in_neighbor, path=[0, color])\n graph.set_node_attribute(in_neighbor, graph.ANCESTOR_TARGET, True)", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 3)\n self.small_tree.add_edge(4, 3)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(0, 1) # deg(0) = 1\n\n self.deterministic_graph.add_edge(1, 2) # deg(1) = 2\n\n self.deterministic_graph.add_edge(2, 3)\n self.deterministic_graph.add_edge(2, 4) # deg(2) = 3\n\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(3, 6) # deg(3) = 4\n\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(4, 6)\n self.deterministic_graph.add_edge(4, 7) # deg(4) = 5\n\n self.deterministic_graph.add_edge(5, 6)\n self.deterministic_graph.add_edge(5, 7)\n self.deterministic_graph.add_edge(5, 8)\n self.deterministic_graph.add_edge(5, 9) # deg(5) = 6\n\n self.deterministic_graph.add_edge(6, 7)\n self.deterministic_graph.add_edge(6, 8)\n self.deterministic_graph.add_edge(6, 9) # deg(6) = 6\n\n self.deterministic_graph.add_edge(7, 8)\n self.deterministic_graph.add_edge(7, 9) # deg(7) = 5\n\n self.deterministic_graph.add_edge(8, 9) # deg(8) = 4", "def testAppendChildDecision(self):\n self.node.append_child(self.color_decisions[0])\n\n self.assertEqual(\n [self.color_decisions[0]],\n self.node.color_decisions\n )", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, 
majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def sub_graph_merging(self):\n raise NotImplementedError()", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)" ]
[ "0.6252327", "0.60120577", "0.5928985", "0.5926421", "0.58765775", "0.58404446", "0.5823357", "0.58075684", "0.57982713", "0.5772567", "0.57336694", "0.5562557", "0.5510959", "0.54624", "0.5434476", "0.5418058", "0.5414687", "0.54030573", "0.54009295", "0.5381006", "0.5378098", "0.5338047", "0.53350586", "0.53257763", "0.5324028", "0.5321988", "0.53117174", "0.5286863", "0.52720255", "0.5264216" ]
0.64225346
0
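Aside (editorial, not a dataset row): a pattern recurs throughout the queries and documents of these records — build the union of a parent and a child core-metabolism graph, then record provenance by writing a 'colour' attribute onto edges while leaving nodes uncoloured, so conserved (shared) edges end up with whichever colour is applied last. The sketch below is a rough stand-in using plain networkx, since the SubstanceEnzymeGraph and Export types referenced in the records are not included here; node names and colour values are illustrative:

# Illustrative stand-in, not the library's code: the union-then-colour
# pattern described by the surrounding records, with networkx in place of
# SubstanceEnzymeGraph / Export. Node names and colours are made up.
import networkx as nx

def add_colour_attribute(graph: nx.Graph, colour: str, edges) -> None:
    # Colour edges only; nodes deliberately stay uncoloured.
    for u, v in edges:
        graph.edges[u, v]["colour"] = colour

parent = nx.Graph([("glucose", "g6p"), ("g6p", "f6p")])   # parent core metabolism
child = nx.Graph([("g6p", "f6p"), ("f6p", "f16bp")])      # child core metabolism

graph = nx.compose(parent, child)                  # union, like graph.union(...)
add_colour_attribute(graph, "blue", parent.edges)  # lost + conserved edges
add_colour_attribute(graph, "red", child.edges)    # added edges; overwrites conserved

for u, v, data in graph.edges(data=True):
    print(u, "->", v, data["colour"])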
SubstanceEnzyme graph of gene-duplicated enzymes, derived from the added core metabolism. First, the added core metabolism is calculated. Then, the enzymes associated with the added EC numbers are extracted from the child's enzyme metabolism.
def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph: parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism) childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism) addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism) childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs) childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames) return childGraph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = 
self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n graph = parentGraph.union(childGraph, addCount = False, updateName = False)\n graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n parentEdges = parentGraph.getEdges()\n childEdges = childGraph.getEdges()\n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges)\n \n return graph", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' 
'.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def unifiedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n return parentGeneDuplicated.union(childGeneDuplicated)", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n 
Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get diverged metabolism\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the diverged metabolism\n parentGeneDuplicatedDiverged = set()\n childGeneDuplicatedDiverged = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n parentGeneDuplicatedDiverged.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n childGeneDuplicatedDiverged.add(enzymeTuple)\n \n return parentGeneDuplicatedDiverged.union(childGeneDuplicatedDiverged)", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", 
"def sub_graph_merging(self):", "def geneDuplicatedEnzymesDict(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Dict[Enzyme, Set[GeneID]]:\n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIDsForEnzyme = geneDuplicationModel.getEnzymes(enzymeGraph, returnMatches = True, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n# if keepOnHeap is True:\n# self._geneDuplicatedEnzymesObject = geneIDsForEnzyme\n \n return geneIDsForEnzyme", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def get_eddy_instances(parents, root_nodes, gate_dist):\n\tsnodes = []\n\te_instances = dict()\n\tchild_score = dict()\n\tfor parent in parents:\n\t\tfor child in parent.children:\n\t\t\tif is_term(child):\n\t\t\t\tcontinue\n\t\t\tchild.score = parent.score + max(score(hyp, gate_dist) for hyp in child.tracks())\n\t\t\tchild_score[child] = (child.score, parent)\n\t\t\tif child.obj in e_instances:\n\t\t\t\te_instances[child.obj].append(child)\n\t\t\telse:\n\t\t\t\te_instances[child.obj] = [child]\n\tfor node in root_nodes:\n\t\tnode.score = max(score(hyp, gate_dist) for hyp in node.tracks())\n\t\tchild_score[node] = (node.score, None)\n\t\tif node.obj in e_instances:\n\t\t\te_instances[node.obj].append(node)\n\t\telse:\n\t\t\te_instances[node.obj] = [node]\n\treturn (e_instances, child_score)", "def addedMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEcGraph:\n 
parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n graph = GeneFunctionAddition.getGraph(parentCoreMetabolism, childCoreMetabolism)\n graph.name = 'Added metabolism ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n return graph", "def connect_cells(dfte,vari):\n # Create the variabel cell for mother, grand mother and grand grand mother\n if 'g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'parent_cell') #Create genealogy\n if 'g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_parent_cell')\n if 'g_g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_g_parent_cell')\n #give unique index to all cells\n dfte['uid'] = dfte['cell']+dfte['time_sec'].apply(lambda x: str(x))\n vac=[];sc=[];uid = []\n # Create a vecotor for the variable of interest of cell,mother,grand mother and grand grand mother and an unique identifier of it\n for c,idx in enumerate(dfte['cell'].unique()):\n dau = dfte.loc[dfte['cell']==idx]\n pc = dau['parent_cell'].iloc[0]\n mum = dfte.loc[dfte['cell']==pc]\n gpc = dau['g_parent_cell'].iloc[0]\n gmum = dfte.loc[dfte['cell']==gpc]\n ggpc = dau['g_g_parent_cell'].iloc[0]\n ggmum = dfte.loc[dfte['cell']==ggpc]\n gggpc = dau['g_g_g_parent_cell'].iloc[0]\n gggmum = dfte.loc[dfte['cell']==gggpc]\n fte = lambda x: x[['{}'.format(vari),'uid']].values\n tmp = np.vstack([fte(gggmum),fte(ggmum),fte(gmum),fte(mum),fte(dau)])\n vac.append(tmp[:,0])\n uid.append(tmp[:,1])\n sc.append(['super_cell_{}'.format(c)]*len(tmp))\n return pd.DataFrame({'super_cell':np.hstack(sc),'uid':np.hstack(uid),'{}'.format(vari):np.hstack(vac)})", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = 
defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def bigger_ex(self):\n rc, node_ids = self.simple_ex()\n self.assertListEqual(rc.locus_position, \n [0.0, 1.0, 2.0, 3.0])\n # Input is pairs of\n # offspringID parentID startingPloidy rec1 rec2 ....\n # in pairs of offpsring chromosomes\n lines_list = [\"\"\"\n 1 0 1\n 1 0 0\n 2 0 1 0\n 2 0 0 1\n 3 0 0 0\n 3 0 1 1\n \"\"\", \"\"\"\n 4 2 0 0 1\n 4 1 1 0\n 5 1 1 0\n 5 2 0 0 1 2\n \"\"\"]\n for lines in lines_list:\n rc.increment_time()\n rc.collect_recombs(lines)\n rc.args.update_times()\n return rc", "def annotate(self):\n logger.debug(f\"found ckt:{self.hier_graph_dict}\")\n\n names = list(self.hier_graph_dict)\n\n for name in names:\n circuit_name= name\n G1 = self.hier_graph_dict[name][\"graph\"]\n self._group_block_const(G1,circuit_name)\n self._group_cap_const(G1,circuit_name)\n\n for circuit_name in list(self.hier_graph_dict.keys()):\n logger.debug(f\"START MATCHING in circuit: {circuit_name}\")\n circuit = self.hier_graph_dict[circuit_name]\n G1 = circuit[\"graph\"]\n # map and reduce graph to dictionary\n mapped_graph_list = self._mapped_graph_list(G1, circuit_name, self.pg )\n const_list = self.hier_graph_dict[circuit_name]['constraints']\n self.hier_graph_dict[circuit_name][\"graph\"] = self._reduce_graph(G1, circuit_name, mapped_graph_list, const_list)\n \n for const in list(const_list):\n self._check_const_length(self.hier_graph_dict[circuit_name].constraints,const)\n check_nodes(self.hier_graph_dict)\n logger.debug(f\"Grest ckt is {circuit['graph'].nodes(data=True)}\")\n if circuit_name not in self.no_array:\n symmetry_blocks = FindSymmetry(circuit[\"graph\"], circuit[\"ports\"], circuit[\"ports_weight\"], self.stop_points)\n for symm_blocks in symmetry_blocks.values():\n logger.debug(f\"generated constraints: {pprint.pformat(symm_blocks, indent=4)}\")\n if isinstance(symm_blocks, dict) and \"graph\" in symm_blocks.keys():\n logger.debug(f\"added new hierarchy: {symm_blocks['name']} {symm_blocks['graph'].nodes()}\")\n self.hier_graph_dict[symm_blocks['name']] = symm_blocks\n assert False, \"Don't 
understand what's being deleted here\"\n del self.hier_graph_dict[symm_blocks['name']]['name']\n\n self.lib_names = [lib_ele['name'] for lib_ele in self.lib]\n for ckt_name, circuit in self.hier_graph_dict.items():\n if 'id' in self.hier_graph_dict[ckt_name] and len(self.hier_graph_dict[ckt_name]['id']) > 1:\n copies = len(self.hier_graph_dict[ckt_name]['id'])\n self.lib_names += [ckt_name + '_type' + str(n) for n in range(copies)]\n return self.lib_names", "def _endx(self, parents):\n ALPHA = (1.-2*0.35**2)**0.5/2.\n BETA = 0.35/(self.n_gene-1)**0.5\n\n child = np.empty(self.n_gene+1)\n\n t1 = (parents[1, :self.n_gene]-parents[0, :self.n_gene]) / 2.\n t2 = np.random.normal(scale=ALPHA) * (\n parents[1, :self.n_gene] - parents[0, :self.n_gene]\n )\n t3 = np.sum(\n np.random.normal(scale=BETA, size=self.n_gene)[:, np.newaxis]\n * (\n parents[2:, :self.n_gene] - (\n np.sum(parents[2:, :self.n_gene], axis=0) / self.n_gene\n )\n ), axis=0\n )\n child[:self.n_gene] = t1 + t2 + t3\n\n return child", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def geneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n \n \n enzymes = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIdToEnzyme = dict()\n for enzyme in enzymes:\n geneIdToEnzyme[enzyme.geneID] = enzyme\n \n enzymePairs = geneDuplicationModel.getEnzymePairs(enzymes, ignoreDuplicatesOutsideSet = True, geneIdToEnzyme = geneIdToEnzyme, preCalculatedEnzymes = None)\n \n return enzymePairs" ]
[ "0.67369306", "0.6330515", "0.6303329", "0.62048954", "0.6128838", "0.6119623", "0.6048695", "0.5975053", "0.5937219", "0.5809199", "0.57253855", "0.5707537", "0.56158155", "0.5594471", "0.55635613", "0.5519335", "0.5513304", "0.5461732", "0.53989816", "0.5391257", "0.53775907", "0.5364856", "0.529633", "0.5288653", "0.5286445", "0.5271865", "0.52569604", "0.52527577", "0.52451205", "0.5242083" ]
0.69637746
0
SubstanceEnzymeGraph of gene-duplicated enzymes, derived from the lost core metabolism. First, the lost core metabolism is calculated. Then, the gene-duplicated enzymes associated with the lost EC numbers are extracted from the parent's enzyme metabolism.
def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:
    parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)
    childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)
    lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)

    parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)
    parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)

    return parentGraph
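A minimal usage sketch for the document above, assuming a clade-pair object that exposes this method; the constructor name `CladePair`, the clade names, and the 80% majority threshold are illustrative assumptions, not part of the dataset record.

# Hypothetical usage (names are assumptions, not from this record):
pair = CladePair(parent='Enterobacterales', child='Escherichia')  # assumed constructor

# Parent enzymes whose EC numbers are absent from the child's core metabolism,
# restricted to enzymes the parent's gene-duplication model flags as duplicated.
lost_graph = pair.lostMetabolismGeneDuplicatedEnzymes(majorityPercentageCoreMetabolism=80)

print(lost_graph.name)               # 'Lost metabolism gene-duplicated enzymes ...'
print(len(lost_graph.getEnzymes()))  # count of affected enzymes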
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = 
self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def 
unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n graph = parentGraph.union(childGraph, addCount = False, updateName = False)\n graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n parentEdges = parentGraph.getEdges()\n childEdges = childGraph.getEdges()\n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges)\n \n return graph", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = 
False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def hierarchical_decomposition(self):\n\n vertex_height = dict()\n current_height = 0\n next_neighbor = dict()\n current_degree = {x: len(self._hase_diagram(x)) for x in self}\n sup_irreducibles = set(self.sup_irreducible)\n\n while sup_irreducibles:\n current_chains = set()\n deleted_sup_irreducibles = set()\n for current_sup_irreducible in sup_irreducibles:\n possible_chain = set()\n vertex = current_sup_irreducible\n is_hierarchical = False\n while current_degree[vertex] <= 1:\n possible_chain.add(vertex)\n if current_degree[vertex] == 0 or vertex in current_chains:\n is_hierarchical = True\n break\n vertex = next_neighbor.get(vertex, list(self.above(vertex))[0])\n if is_hierarchical:\n deleted_sup_irreducibles.add(current_sup_irreducible)\n current_chains.update(possible_chain)\n\n for x in current_chains:\n vertex_height[x] = current_height\n for y in self.under(x):\n current_degree[y] -= 1\n if current_degree[y] == 1:\n for z in self.above(y):\n if z not in vertex_height:\n next_neighbor[y] = z\n break\n current_height += 1\n\n sup_irreducibles.difference_update(deleted_sup_irreducibles)\n vertex_height[self.bottom] = max(vertex_height.values()) + 1\n return vertex_height", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = 
conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def unifiedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n return parentGeneDuplicated.union(childGeneDuplicated)", "def divergedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get diverged metabolism\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the diverged metabolism\n parentGeneDuplicatedDiverged = set()\n childGeneDuplicatedDiverged = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n parentGeneDuplicatedDiverged.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n childGeneDuplicatedDiverged.add(enzymeTuple)\n \n return parentGeneDuplicatedDiverged.union(childGeneDuplicatedDiverged)", "def geneDuplicatedEnzymesDict(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Dict[Enzyme, Set[GeneID]]:\n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIDsForEnzyme = geneDuplicationModel.getEnzymes(enzymeGraph, returnMatches = True, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n# if keepOnHeap is True:\n# self._geneDuplicatedEnzymesObject = geneIDsForEnzyme\n \n return geneIDsForEnzyme", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = 
Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def bigger_ex(self):\n rc, node_ids = self.simple_ex()\n self.assertListEqual(rc.locus_position, \n [0.0, 1.0, 2.0, 3.0])\n # Input is pairs of\n # offspringID parentID startingPloidy rec1 rec2 ....\n # in pairs of offpsring chromosomes\n lines_list = [\"\"\"\n 1 0 1\n 1 0 0\n 2 0 1 0\n 2 0 0 1\n 3 0 0 0\n 3 0 1 1\n \"\"\", \"\"\"\n 4 2 0 0 1\n 4 1 1 0\n 5 1 1 0\n 5 2 0 0 1 2\n \"\"\"]\n for lines in lines_list:\n rc.increment_time()\n rc.collect_recombs(lines)\n rc.args.update_times()\n return rc", "def sub_graph_merging(self):", "def get_eddy_instances(parents, root_nodes, gate_dist):\n\tsnodes = []\n\te_instances = dict()\n\tchild_score = dict()\n\tfor parent in parents:\n\t\tfor child in parent.children:\n\t\t\tif is_term(child):\n\t\t\t\tcontinue\n\t\t\tchild.score = parent.score + max(score(hyp, gate_dist) for hyp in child.tracks())\n\t\t\tchild_score[child] = (child.score, parent)\n\t\t\tif child.obj in e_instances:\n\t\t\t\te_instances[child.obj].append(child)\n\t\t\telse:\n\t\t\t\te_instances[child.obj] = [child]\n\tfor node in root_nodes:\n\t\tnode.score = max(score(hyp, gate_dist) for hyp in node.tracks())\n\t\tchild_score[node] = (node.score, None)\n\t\tif node.obj in e_instances:\n\t\t\te_instances[node.obj].append(node)\n\t\telse:\n\t\t\te_instances[node.obj] = [node]\n\treturn (e_instances, child_score)", "def test_append_unreactive_structure(self):\n\n cerm = CoreEdgeReactionModel()\n\n spcs = [Species().from_smiles('CCO'), # a control species\n Species().from_smiles('[N]=O'),\n Species().from_adjacency_list(\"\"\"1 O u1 p2 c0 {2,S}\n 2 N u0 p2 c0 {1,S}\"\"\"), # a non-representative structure of '[N]=O'\n ]\n\n for spc in spcs:\n cerm.make_new_species(spc)\n\n self.assertEquals(len(cerm.species_dict), 2)\n self.assertEquals(len(cerm.index_species_dict), 2)\n self.assertEquals(len(cerm.index_species_dict[1].molecule), 1)\n self.assertTrue(cerm.index_species_dict[1].molecule[0].reactive)\n self.assertEquals(len(cerm.index_species_dict[2].molecule), 1)\n self.assertTrue(cerm.index_species_dict[2].molecule[0].reactive)", "def connect_cells(dfte,vari):\n # Create the variabel cell for mother, grand mother and grand grand mother\n if 'g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'parent_cell') #Create genealogy\n if 'g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_parent_cell')\n if 'g_g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_g_parent_cell')\n #give unique index to all cells\n dfte['uid'] = dfte['cell']+dfte['time_sec'].apply(lambda x: str(x))\n vac=[];sc=[];uid = []\n # Create a vecotor for the variable of interest of cell,mother,grand mother and grand grand mother and an unique identifier of it\n for c,idx in enumerate(dfte['cell'].unique()):\n dau = dfte.loc[dfte['cell']==idx]\n pc = dau['parent_cell'].iloc[0]\n mum = dfte.loc[dfte['cell']==pc]\n gpc = dau['g_parent_cell'].iloc[0]\n gmum = dfte.loc[dfte['cell']==gpc]\n ggpc = dau['g_g_parent_cell'].iloc[0]\n ggmum = dfte.loc[dfte['cell']==ggpc]\n gggpc = dau['g_g_g_parent_cell'].iloc[0]\n gggmum = dfte.loc[dfte['cell']==gggpc]\n fte = lambda x: x[['{}'.format(vari),'uid']].values\n tmp = np.vstack([fte(gggmum),fte(ggmum),fte(gmum),fte(mum),fte(dau)])\n 
vac.append(tmp[:,0])\n uid.append(tmp[:,1])\n sc.append(['super_cell_{}'.format(c)]*len(tmp))\n return pd.DataFrame({'super_cell':np.hstack(sc),'uid':np.hstack(uid),'{}'.format(vari):np.hstack(vac)})", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = 
defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def _generateEdges(self, separate_frames=False):\n\n # 3D edges\n Xe = []\n Ye = []\n Ze = []\n\n if not separate_frames:\n if self.showcuts:\n self.nxgraph.add_nodes_from(range(len(self.df)))\n for index, curr in self.df.iterrows():\n if curr['first']:\n self._symbol += ['circle']\n # skip root node\n if curr['number'] == 1:\n continue\n # found first LP solution of a new child node\n # parent is last LP of parent node\n parent = self.df[self.df['number'] == curr['parent']].iloc[-1]\n else:\n # found an improving LP solution at the same node as before\n self._symbol += ['diamond']\n parent = self.df.iloc[index - 1]\n\n Xe += [float(parent['x']), curr['x'], None]\n Ye += [float(parent['y']), curr['y'], None]\n Ze += [float(parent['objval']), curr['objval'], None]\n self.nxgraph.add_edge(parent.name, curr.name)\n else:\n self.nxgraph.add_nodes_from(list(self.df['number']))\n for index, curr in self.df.iterrows():\n self._symbol += ['circle']\n if curr['number'] == 1:\n continue\n parent = self.df[self.df['number'] == curr['parent']]\n Xe += [float(parent['x']), curr['x'], None]\n Ye += [float(parent['y']), curr['y'], None]\n Ze += [float(parent['objval']), curr['objval'], None]\n self.nxgraph.add_edge(parent.iloc[0]['number'], curr['number'])\n\n else:\n max_age = self.df['age'].max()\n for i in range(1, max_age + 1):\n tmp = self.df[self.df['age'] == i]\n Xe_ = []\n Ye_ = []\n Ze_ = []\n for index, curr in tmp.iterrows():\n if curr['first']:\n self._symbol += ['circle']\n # skip root node\n if curr['number'] == 1:\n continue\n # found first LP solution of a new child node\n # parent is last LP of parent node\n parent = self.df[self.df['number'] == curr['parent']].iloc[-1]\n else:\n # found an improving LP 
solution at the same node as before\n self._symbol += ['diamond']\n parent = self.df.iloc[index - 1]\n\n Xe_ += [float(parent['x']), curr['x'], None]\n Ye_ += [float(parent['y']), curr['y'], None]\n Ze_ += [float(parent['objval']), curr['objval'], None]\n Xe.append(Xe_)\n Ye.append(Ye_)\n Ze.append(Ze_)\n\n self.Xe = Xe\n self.Ye = Ye\n self.Ze = Ze", "def _endx(self, parents):\n ALPHA = (1.-2*0.35**2)**0.5/2.\n BETA = 0.35/(self.n_gene-1)**0.5\n\n child = np.empty(self.n_gene+1)\n\n t1 = (parents[1, :self.n_gene]-parents[0, :self.n_gene]) / 2.\n t2 = np.random.normal(scale=ALPHA) * (\n parents[1, :self.n_gene] - parents[0, :self.n_gene]\n )\n t3 = np.sum(\n np.random.normal(scale=BETA, size=self.n_gene)[:, np.newaxis]\n * (\n parents[2:, :self.n_gene] - (\n np.sum(parents[2:, :self.n_gene], axis=0) / self.n_gene\n )\n ), axis=0\n )\n child[:self.n_gene] = t1 + t2 + t3\n\n return child" ]
[ "0.6755201", "0.6535308", "0.62986535", "0.62453306", "0.6057391", "0.60101473", "0.5982564", "0.5913863", "0.5899153", "0.5793252", "0.57070243", "0.56880486", "0.5584127", "0.5576912", "0.55608404", "0.5558294", "0.5490599", "0.5468845", "0.54171175", "0.54139686", "0.5386362", "0.53774834", "0.53535324", "0.5341034", "0.531099", "0.52948", "0.52684575", "0.5248408", "0.52454114", "0.5240281" ]
0.66248214
1
Pairs of gene-duplicated enzymes, derived from the conserved core metabolism. First, the conserved core metabolism is calculated. Then, the enzymes associated with the conserved EC numbers are extracted from the collective parent's and child's metabolism individually. Then, for parent and child, the gene-duplicated enzyme pairs are calculated. Finally, the gene-duplicated enzyme pairs where both enzymes are in the conserved core metabolism are reported.
def conservedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Tuple[Set[Tuple[Enzyme, Enzyme]]]:
    # get conserved metabolism
    conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()

    # get gene-duplicate enzyme pairs
    parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)
    childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)

    # filter gene-duplicated enzyme pairs for the ones with both enzymes in the conserved metabolism
    parentGeneDuplicatedConserved = set()
    childGeneDuplicatedConserved = set()

    for enzymeTuple in parentGeneDuplicated:
        if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:
            parentGeneDuplicatedConserved.add(enzymeTuple)

    for enzymeTuple in childGeneDuplicated:
        if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:
            childGeneDuplicatedConserved.add(enzymeTuple)

    return (parentGeneDuplicatedConserved, childGeneDuplicatedConserved)
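A short, hedged sketch of consuming the returned tuple; the `pair` object is the same assumption as in the sketch above, and the rest is plain Python set arithmetic.

# Hypothetical follow-up (assumed CladePair object as above):
parent_pairs, child_pairs = pair.conservedMetabolismGeneDuplicatedEnzymePairs(
    majorityPercentageCoreMetabolism=80
)

# Compare duplication events seen on both lineages against one-sided ones.
shared = parent_pairs & child_pairs
parent_only = parent_pairs - child_pairs
child_only = child_pairs - parent_pairs

print(len(shared), len(parent_only), len(child_only))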
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def divergedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get diverged metabolism\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the diverged metabolism\n parentGeneDuplicatedDiverged = set()\n childGeneDuplicatedDiverged = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n parentGeneDuplicatedDiverged.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n childGeneDuplicatedDiverged.add(enzymeTuple)\n \n return parentGeneDuplicatedDiverged.union(childGeneDuplicatedDiverged)", "def unifiedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n return parentGeneDuplicated.union(childGeneDuplicated)", "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def geneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n \n \n enzymes = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIdToEnzyme = dict()\n for enzyme in enzymes:\n geneIdToEnzyme[enzyme.geneID] = enzyme\n \n enzymePairs = geneDuplicationModel.getEnzymePairs(enzymes, ignoreDuplicatesOutsideSet = True, geneIdToEnzyme = geneIdToEnzyme, preCalculatedEnzymes = None)\n \n return enzymePairs", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in 
addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = 
GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def geneDuplicatedEnzymesDict(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Dict[Enzyme, Set[GeneID]]:\n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIDsForEnzyme = geneDuplicationModel.getEnzymes(enzymeGraph, returnMatches = True, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n# if keepOnHeap is True:\n# self._geneDuplicatedEnzymesObject = geneIDsForEnzyme\n \n return geneIDsForEnzyme", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name 
= 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def lostMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get added metabolism\n lostMetabolismEnzymes = self.lostMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the lost metabolism\n geneDuplicatedLost = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in lostMetabolismEnzymes and enzymeTuple[1] in lostMetabolismEnzymes:\n geneDuplicatedLost.add(enzymeTuple)\n \n return geneDuplicatedLost", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = 
childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def find_common_interactor():\n # 1. filter the unique fusion gene pairs.\n # fusionGenePair = pd.read_csv(\"./fusionGenePair.csv\", header=0, sep=' ')\n # unique_fusionGenePair = fusionGenePair.drop_duplicates()\n # unique_fusionGenePair.to_csv(\"./uniqueFusion.csv\", sep=' ', index=False)\n unique_fusionGenePair = pd.read_csv(\"./uniqueFusion.csv\", sep=' ', header=0)\n\n # 2. for each gene pairs, get all the interactors each partner has.\n\n # Store the 5' partner gene and 3' partner gene in two lists.\n FivePartnerGenelist = []\n ThreePartnerGenelist = []\n for index, row in unique_fusionGenePair.iterrows():\n FivePartnerGenelist.append(row['5_PARTNER_GENE'])\n ThreePartnerGenelist.append(row['3_PARTNER_GENE'])\n # Get the unique gene in each pair\n uniqueFPGL = list(OrderedDict.fromkeys(FivePartnerGenelist))\n uniqueTPGL = list(OrderedDict.fromkeys(ThreePartnerGenelist))\n uniqueGene = list(OrderedDict.fromkeys(uniqueTPGL + uniqueFPGL))\n\n # Find each gene's interactor in the PPI datasets\n PPIS = pd.read_csv(\"./IID results/PPIs_final.tsv\", sep='\\t', header=0)\n\n # Put each gene interactor into a dictionary.\n geneIntDic = {}\n for item in uniqueGene:\n for index, row in PPIS.iterrows():\n if row['Query Symbol'] == item:\n if item in geneIntDic:\n geneIntDic[item].append(row['Partner Symbol'])\n else:\n key = item\n geneIntDic.setdefault(key, [])\n geneIntDic[item].append(row['Partner Symbol'])\n if row['Partner Symbol'] == item:\n if item in geneIntDic:\n geneIntDic[item].append(row['Query Symbol'])\n else:\n key = item\n geneIntDic.setdefault(key, [])\n geneIntDic[item].append(row['Query Symbol'])\n w = csv.writer(open(\"./geneIntDic.csv\", \"w\"))\n for key, val in geneIntDic.items():\n w.writerow([key, val])", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n 
\n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def compute_perm(parents):\n # Function written by M. 
Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L167\n\n # Order of last layer is random (chosen by the clustering algorithm).\n indices = []\n if len(parents) > 0:\n M_last = max(parents[-1]) + 1\n indices.append(list(range(M_last)))\n\n for parent in parents[::-1]:\n #print('parent: {}'.format(parent))\n\n # Fake nodes go after real ones.\n pool_singeltons = len(parent)\n\n indices_layer = []\n for i in indices[-1]:\n indices_node = list(np.where(parent == i)[0])\n assert 0 <= len(indices_node) <= 2\n #print('indices_node: {}'.format(indices_node))\n\n # Add a node to go with a singelton.\n if len(indices_node) == 1:\n indices_node.append(pool_singeltons)\n pool_singeltons += 1\n #print('new singelton: {}'.format(indices_node))\n # Add two nodes as children of a singelton in the parent.\n elif len(indices_node) == 0:\n indices_node.append(pool_singeltons+0)\n indices_node.append(pool_singeltons+1)\n pool_singeltons += 2\n #print('singelton childrens: {}'.format(indices_node))\n\n indices_layer.extend(indices_node)\n indices.append(indices_layer)\n\n # Sanity checks.\n for i,indices_layer in enumerate(indices):\n M = M_last*2**i\n # Reduction by 2 at each layer (binary tree).\n assert len(indices[0] == M)\n # The new ordering does not omit an indice.\n assert sorted(indices_layer) == list(range(M))\n\n return indices[::-1]", "def redundantECsForContributingNeofunctionalisation(self, \n majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, \n majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, \n eValue = defaultEValue, \n redundancyType: 'RedundancyType' = None,\n considerOnlyECs = None) -> Dict[Neofunctionalisation, Set[EcNumber]]:\n from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType\n \n if redundancyType is None:\n redundancyType = RedundancyType.default\n \n #- calculate \"neofunctionalised\" ECs\n neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()\n neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)\n \n #- calculate redundancy\n redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )\n redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)\n \n contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)\n contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())\n \n #- REPEAT for each function change consisting of \"neofunctionalised\" ECs, which also contribute to redundancy\n contributingNeofunctionalisations = dict()\n \n for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():\n #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered \"neofunctionalised\", and are in return contributing to redundancy \n \n if functionChange.ecA in contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy\n \n for neofunctionalisation in neofunctionalisations:\n currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)\n \n if currentSetOfContributedECs is 
None:\n currentSetOfContributedECs = set()\n contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs\n \n for ec in functionChange.ecPair:\n contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)\n if contributedECs is not None:\n currentSetOfContributedECs.update(contributedECs)\n \n return contributingNeofunctionalisations", "def recombination(parents):\n\n # pick 5 random numbers that add up to 1\n random_values = np.random.dirichlet(np.ones(5),size=1)[0]\n\n # those random values will serve as weights for the genes 2 offspring get (whole arithmetic recombination)\n offspring1 = random_values[0] * parents[0] + random_values[1] * parents[1] + random_values[2] * parents[2] + random_values[3] * parents[3] + \\\n random_values[4] * parents[4]\n\n # repeat for offspring 2\n random_values = np.random.dirichlet(np.ones(5),size=1)[0]\n offspring2 = random_values[0] * parents[0] + random_values[1] * parents[1] + random_values[2] * parents[2] + random_values[3] * parents[3] + \\\n random_values[4] * parents[4]\n\n # the other 2 offspring will come from 4-point crossover\n random_points = np.sort(np.random.randint(1, parents[0].shape[0]-2, 4))\n\n # to make it so that it won't always be p1 who gives the first portion of DNA etc, we shuffle the parents\n np.random.shuffle(parents)\n\n # add the genes together\n offspring3 = np.concatenate((parents[0][0:random_points[0]], parents[1][random_points[0]:random_points[1]], parents[2][random_points[1]:random_points[2]],\\\n parents[3][random_points[2]:random_points[3]], parents[4][random_points[3]:]))\n\n # repeat for offspring 4\n random_points = np.sort(np.random.randint(1, parents[0].shape[0]-2, 4))\n np.random.shuffle(parents)\n offspring4 = np.concatenate((parents[0][0:random_points[0]], parents[1][random_points[0]:random_points[1]], parents[2][random_points[1]:random_points[2]],\\\n parents[3][random_points[2]:random_points[3]], parents[4][random_points[3]:]))\n\n # return the offspring\n return np.concatenate(([offspring1], [offspring2], [offspring3], [offspring4]))", "def connect_cells(dfte,vari):\n # Create the variabel cell for mother, grand mother and grand grand mother\n if 'g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'parent_cell') #Create genealogy\n if 'g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_parent_cell')\n if 'g_g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_g_parent_cell')\n #give unique index to all cells\n dfte['uid'] = dfte['cell']+dfte['time_sec'].apply(lambda x: str(x))\n vac=[];sc=[];uid = []\n # Create a vecotor for the variable of interest of cell,mother,grand mother and grand grand mother and an unique identifier of it\n for c,idx in enumerate(dfte['cell'].unique()):\n dau = dfte.loc[dfte['cell']==idx]\n pc = dau['parent_cell'].iloc[0]\n mum = dfte.loc[dfte['cell']==pc]\n gpc = dau['g_parent_cell'].iloc[0]\n gmum = dfte.loc[dfte['cell']==gpc]\n ggpc = dau['g_g_parent_cell'].iloc[0]\n ggmum = dfte.loc[dfte['cell']==ggpc]\n gggpc = dau['g_g_g_parent_cell'].iloc[0]\n gggmum = dfte.loc[dfte['cell']==gggpc]\n fte = lambda x: x[['{}'.format(vari),'uid']].values\n tmp = np.vstack([fte(gggmum),fte(ggmum),fte(gmum),fte(mum),fte(dau)])\n vac.append(tmp[:,0])\n uid.append(tmp[:,1])\n sc.append(['super_cell_{}'.format(c)]*len(tmp))\n return pd.DataFrame({'super_cell':np.hstack(sc),'uid':np.hstack(uid),'{}'.format(vari):np.hstack(vac)})", "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = 
defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def crossover(parent1, parent2):\n child = parent1.clone()\n for k in range(parent1.num_input + parent1.num_output):\n if np.random.randint(2) == 1:\n child.identifiers[k] = parent2.identifiers[k]\n child.inhibitors[k] = parent2.inhibitors[k]\n child.enhancers[k] = parent2.enhancers[k]\n\n child.identifiers = child.identifiers[:(child.num_input +\n child.num_output)]\n child.inhibitors = child.inhibitors[:(child.num_input + child.num_output)]\n child.enhancers = child.enhancers[:(child.num_input + child.num_output)]\n\n p1range = list(range(parent1.num_input + parent1.num_output,\n parent1.size()))\n random.shuffle(p1range)\n p2range = list(range(parent2.num_input + parent2.num_output,\n parent2.size()))\n random.shuffle(p2range)\n\n p1remaining = deepcopy(p1range)\n\n # Crossing regulatory\n p1_gene_count = 0\n p2_gene_count = 0\n for p1idx in p1range:\n min_dist = config.CROSSOVER_THRESHOLD\n paired_idx = None\n for p2idx in p2range:\n gdist = parent1.protein_distance(parent2, p1idx, p2idx)\n if gdist < min_dist:\n min_dist = gdist\n paired_idx = p2idx\n if paired_idx is not None:\n if np.random.randint(2) == 0:\n chosen_parent = parent1\n chosen_idx = p1idx\n p1_gene_count += 1\n else:\n chosen_parent = parent2\n chosen_idx = p2idx\n p2_gene_count += 1\n child.identifiers = np.append(\n child.identifiers, chosen_parent.identifiers[chosen_idx])\n child.inhibitors = np.append(\n child.inhibitors, chosen_parent.inhibitors[chosen_idx])\n child.enhancers = np.append(\n child.enhancers, chosen_parent.enhancers[chosen_idx])\n # Remove from consideration again\n p2range = list(set(p2range) - set([p2idx]))\n p1remaining = list(set(p1remaining) - set([p1idx]))\n\n # Add remaining material\n if child.size() == (child.num_input + child.num_output):\n prob = 0.5\n else:\n prob = p1_gene_count / (p1_gene_count + p2_gene_count)\n\n chosen_parent = parent2\n chosen_range = p2range\n if np.random.random() < prob:\n chosen_parent = parent1\n chosen_range = p1remaining\n\n for idx in chosen_range:\n child.identifiers = np.append(child.identifiers,\n chosen_parent.identifiers[idx])\n child.inhibitors = np.append(child.inhibitors,\n chosen_parent.inhibitors[idx])\n child.enhancers = np.append(child.enhancers,\n chosen_parent.enhancers[idx])\n\n child.num_regulatory = child.size() - (child.num_input + child.num_output)\n\n # Cross dynamics\n if np.random.random() < 0.5:\n child.beta = parent1.beta\n else:\n child.beta = parent2.beta\n\n if np.random.random() < 0.5:\n child.delta = parent1.delta\n else:\n child.delta = parent2.delta\n\n return child", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n 
childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def get_eddy_instances(parents, root_nodes, gate_dist):\n\tsnodes = []\n\te_instances = dict()\n\tchild_score = dict()\n\tfor parent in parents:\n\t\tfor child in parent.children:\n\t\t\tif is_term(child):\n\t\t\t\tcontinue\n\t\t\tchild.score = parent.score + max(score(hyp, gate_dist) for hyp in child.tracks())\n\t\t\tchild_score[child] = (child.score, parent)\n\t\t\tif child.obj in e_instances:\n\t\t\t\te_instances[child.obj].append(child)\n\t\t\telse:\n\t\t\t\te_instances[child.obj] = [child]\n\tfor node in root_nodes:\n\t\tnode.score = max(score(hyp, gate_dist) for hyp in node.tracks())\n\t\tchild_score[node] = (node.score, None)\n\t\tif node.obj in e_instances:\n\t\t\te_instances[node.obj].append(node)\n\t\telse:\n\t\t\te_instances[node.obj] = [node]\n\treturn (e_instances, child_score)", "def unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n graph = parentGraph.union(childGraph, addCount = False, updateName = False)\n graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n parentEdges = parentGraph.getEdges()\n childEdges = 
childGraph.getEdges()\n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges)\n \n return graph" ]
[ "0.70549107", "0.69402546", "0.6706098", "0.6682713", "0.6562122", "0.64990956", "0.63584626", "0.6323621", "0.62530905", "0.61966795", "0.61394745", "0.6038204", "0.60298336", "0.5788429", "0.5784712", "0.56987387", "0.56817615", "0.56580746", "0.564051", "0.555223", "0.55367935", "0.55034804", "0.54902744", "0.54356956", "0.5427842", "0.5412742", "0.5400876", "0.5369773", "0.5368349", "0.5345703" ]
0.703319
1
Pairs of gene-duplicated enzymes, derived from the diverged core metabolism. First, the diverged core metabolism is calculated. Then, the enzymes associated with the diverged EC numbers are extracted from the collective parent's and child's metabolism individually. Then, for parent and child, the gene-duplicated enzyme pairs are calculated. Finally, the gene-duplicated enzyme pairs where both enzymes are in the diverged core metabolism are reported.
def divergedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:
    # get diverged metabolism
    divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()

    # get gene-duplicate enzyme pairs
    parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)
    childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)

    # filter gene-duplicated enzyme pairs for the ones with both enzymes in the diverged metabolism
    parentGeneDuplicatedDiverged = set()
    childGeneDuplicatedDiverged = set()

    for enzymeTuple in parentGeneDuplicated:
        if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:
            parentGeneDuplicatedDiverged.add(enzymeTuple)

    for enzymeTuple in childGeneDuplicated:
        if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:
            childGeneDuplicatedDiverged.add(enzymeTuple)

    return parentGeneDuplicatedDiverged.union(childGeneDuplicatedDiverged)
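The pair-filtering step in the document above does not depend on the KEGG-specific classes; a self-contained sketch with plain strings standing in for Enzyme objects shows the same pattern (all identifiers and data below are made up for illustration, not taken from the dataset):

# Sketch of the filtering step: keep only pairs whose two members are
# both in the diverged enzyme set, then union the parent and child results.
divergedEnzymes = {"eco:b0001", "eco:b0002", "sso:SSO0001"}

parentPairs = {("eco:b0001", "eco:b0002"), ("eco:b0001", "eco:b9999")}
childPairs = {("sso:SSO0001", "eco:b0002")}

def keepPairsWithin(pairs, enzymes):
    # a pair survives only if both of its enzymes are in the given set
    return {pair for pair in pairs if pair[0] in enzymes and pair[1] in enzymes}

result = keepPairsWithin(parentPairs, divergedEnzymes) | keepPairsWithin(childPairs, divergedEnzymes)
print(result)  # {('eco:b0001', 'eco:b0002'), ('sso:SSO0001', 'eco:b0002')} -- set order may vary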
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unifiedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n return parentGeneDuplicated.union(childGeneDuplicated)", "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def geneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) 
-> Set[Tuple[Enzyme, Enzyme]]:\n \n \n enzymes = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIdToEnzyme = dict()\n for enzyme in enzymes:\n geneIdToEnzyme[enzyme.geneID] = enzyme\n \n enzymePairs = geneDuplicationModel.getEnzymePairs(enzymes, ignoreDuplicatesOutsideSet = True, geneIdToEnzyme = geneIdToEnzyme, preCalculatedEnzymes = None)\n \n return enzymePairs", "def conservedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Tuple[Set[Tuple[Enzyme, Enzyme]]]:\n # get conserved metabolism\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the conserved metabolism\n parentGeneDuplicatedConserved = set()\n childGeneDuplicatedConserved = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n parentGeneDuplicatedConserved.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n childGeneDuplicatedConserved.add(enzymeTuple)\n \n return (parentGeneDuplicatedConserved, childGeneDuplicatedConserved)", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = 
defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def geneDuplicatedEnzymesDict(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Dict[Enzyme, Set[GeneID]]:\n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIDsForEnzyme = geneDuplicationModel.getEnzymes(enzymeGraph, returnMatches = True, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n# if keepOnHeap is True:\n# self._geneDuplicatedEnzymesObject = geneIDsForEnzyme\n \n return geneIDsForEnzyme", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def conservedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism gene-duplicated 
enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = 
divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def lostMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get added metabolism\n lostMetabolismEnzymes = self.lostMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the lost metabolism\n geneDuplicatedLost = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in lostMetabolismEnzymes and enzymeTuple[1] in lostMetabolismEnzymes:\n geneDuplicatedLost.add(enzymeTuple)\n \n return geneDuplicatedLost", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def get_eddy_instances(parents, root_nodes, gate_dist):\n\tsnodes = []\n\te_instances = dict()\n\tchild_score = dict()\n\tfor parent in parents:\n\t\tfor child in parent.children:\n\t\t\tif is_term(child):\n\t\t\t\tcontinue\n\t\t\tchild.score = parent.score + max(score(hyp, gate_dist) for hyp in child.tracks())\n\t\t\tchild_score[child] = (child.score, parent)\n\t\t\tif child.obj in e_instances:\n\t\t\t\te_instances[child.obj].append(child)\n\t\t\telse:\n\t\t\t\te_instances[child.obj] = [child]\n\tfor node in root_nodes:\n\t\tnode.score = max(score(hyp, gate_dist) for hyp in node.tracks())\n\t\tchild_score[node] = (node.score, None)\n\t\tif node.obj in e_instances:\n\t\t\te_instances[node.obj].append(node)\n\t\telse:\n\t\t\te_instances[node.obj] = [node]\n\treturn (e_instances, child_score)", "def recombination(parents):\n\n # pick 5 random numbers that add up to 1\n random_values = np.random.dirichlet(np.ones(5),size=1)[0]\n\n # those random values will serve as weights for the genes 2 offspring get (whole arithmetic recombination)\n offspring1 = random_values[0] * parents[0] + random_values[1] * parents[1] + random_values[2] * parents[2] + random_values[3] * parents[3] + \\\n random_values[4] * parents[4]\n\n # repeat for offspring 2\n random_values = np.random.dirichlet(np.ones(5),size=1)[0]\n offspring2 = random_values[0] * parents[0] + random_values[1] * parents[1] + random_values[2] * parents[2] + random_values[3] * parents[3] + \\\n random_values[4] * parents[4]\n\n # the other 2 offspring will come from 4-point crossover\n random_points = 
np.sort(np.random.randint(1, parents[0].shape[0]-2, 4))\n\n # to make it so that it won't always be p1 who gives the first portion of DNA etc, we shuffle the parents\n np.random.shuffle(parents)\n\n # add the genes together\n offspring3 = np.concatenate((parents[0][0:random_points[0]], parents[1][random_points[0]:random_points[1]], parents[2][random_points[1]:random_points[2]],\\\n parents[3][random_points[2]:random_points[3]], parents[4][random_points[3]:]))\n\n # repeat for offspring 4\n random_points = np.sort(np.random.randint(1, parents[0].shape[0]-2, 4))\n np.random.shuffle(parents)\n offspring4 = np.concatenate((parents[0][0:random_points[0]], parents[1][random_points[0]:random_points[1]], parents[2][random_points[1]:random_points[2]],\\\n parents[3][random_points[2]:random_points[3]], parents[4][random_points[3]:]))\n\n # return the offspring\n return np.concatenate(([offspring1], [offspring2], [offspring3], [offspring4]))", "def redundantECsForContributingNeofunctionalisation(self, \n majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, \n majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, \n eValue = defaultEValue, \n redundancyType: 'RedundancyType' = None,\n considerOnlyECs = None) -> Dict[Neofunctionalisation, Set[EcNumber]]:\n from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType\n \n if redundancyType is None:\n redundancyType = RedundancyType.default\n \n #- calculate \"neofunctionalised\" ECs\n neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()\n neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)\n \n #- calculate redundancy\n redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )\n redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)\n \n contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)\n contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())\n \n #- REPEAT for each function change consisting of \"neofunctionalised\" ECs, which also contribute to redundancy\n contributingNeofunctionalisations = dict()\n \n for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():\n #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered \"neofunctionalised\", and are in return contributing to redundancy \n \n if functionChange.ecA in contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy\n \n for neofunctionalisation in neofunctionalisations:\n currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)\n \n if currentSetOfContributedECs is None:\n currentSetOfContributedECs = set()\n contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs\n \n for ec in functionChange.ecPair:\n contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)\n if contributedECs is not None:\n currentSetOfContributedECs.update(contributedECs)\n \n return contributingNeofunctionalisations", "def lostMetabolismNeofunctionalisedEnzymes(self, 
majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def compute_perm(parents):\n # Function written by M. Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L167\n\n # Order of last layer is random (chosen by the clustering algorithm).\n indices = []\n if len(parents) > 0:\n M_last = max(parents[-1]) + 1\n indices.append(list(range(M_last)))\n\n for parent in parents[::-1]:\n #print('parent: {}'.format(parent))\n\n # Fake nodes go after real ones.\n pool_singeltons = len(parent)\n\n indices_layer = []\n for i in indices[-1]:\n indices_node = list(np.where(parent == i)[0])\n assert 0 <= len(indices_node) <= 2\n #print('indices_node: {}'.format(indices_node))\n\n # Add a node to go with a singelton.\n if len(indices_node) == 1:\n indices_node.append(pool_singeltons)\n pool_singeltons += 1\n #print('new singelton: {}'.format(indices_node))\n # Add two nodes as children of a singelton in the parent.\n elif len(indices_node) == 0:\n indices_node.append(pool_singeltons+0)\n indices_node.append(pool_singeltons+1)\n pool_singeltons += 2\n #print('singelton childrens: {}'.format(indices_node))\n\n indices_layer.extend(indices_node)\n indices.append(indices_layer)\n\n # Sanity checks.\n for i,indices_layer in enumerate(indices):\n M = M_last*2**i\n # Reduction by 2 at each layer (binary tree).\n assert len(indices[0] == M)\n # The new ordering does not omit an indice.\n assert sorted(indices_layer) == list(range(M))\n\n return indices[::-1]", "def unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n graph = parentGraph.union(childGraph, addCount = False, updateName = False)\n graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n parentEdges = parentGraph.getEdges()\n childEdges = childGraph.getEdges()\n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges)\n \n return graph", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = 
self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def find_common_interactor():\n # 1. filter the unique fusion gene pairs.\n # fusionGenePair = pd.read_csv(\"./fusionGenePair.csv\", header=0, sep=' ')\n # unique_fusionGenePair = fusionGenePair.drop_duplicates()\n # unique_fusionGenePair.to_csv(\"./uniqueFusion.csv\", sep=' ', index=False)\n unique_fusionGenePair = pd.read_csv(\"./uniqueFusion.csv\", sep=' ', header=0)\n\n # 2. 
for each gene pairs, get all the interactors each partner has.\n\n # Store the 5' partner gene and 3' partner gene in two lists.\n FivePartnerGenelist = []\n ThreePartnerGenelist = []\n for index, row in unique_fusionGenePair.iterrows():\n FivePartnerGenelist.append(row['5_PARTNER_GENE'])\n ThreePartnerGenelist.append(row['3_PARTNER_GENE'])\n # Get the unique gene in each pair\n uniqueFPGL = list(OrderedDict.fromkeys(FivePartnerGenelist))\n uniqueTPGL = list(OrderedDict.fromkeys(ThreePartnerGenelist))\n uniqueGene = list(OrderedDict.fromkeys(uniqueTPGL + uniqueFPGL))\n\n # Find each gene's interactor in the PPI datasets\n PPIS = pd.read_csv(\"./IID results/PPIs_final.tsv\", sep='\\t', header=0)\n\n # Put each gene interactor into a dictionary.\n geneIntDic = {}\n for item in uniqueGene:\n for index, row in PPIS.iterrows():\n if row['Query Symbol'] == item:\n if item in geneIntDic:\n geneIntDic[item].append(row['Partner Symbol'])\n else:\n key = item\n geneIntDic.setdefault(key, [])\n geneIntDic[item].append(row['Partner Symbol'])\n if row['Partner Symbol'] == item:\n if item in geneIntDic:\n geneIntDic[item].append(row['Query Symbol'])\n else:\n key = item\n geneIntDic.setdefault(key, [])\n geneIntDic[item].append(row['Query Symbol'])\n w = csv.writer(open(\"./geneIntDic.csv\", \"w\"))\n for key, val in geneIntDic.items():\n w.writerow([key, val])", "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def connect_cells(dfte,vari):\n # Create the variabel cell for mother, grand mother and grand grand mother\n if 'g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'parent_cell') #Create genealogy\n if 'g_g_parent_cell' not in 
dfte.columns:\n dfte = rl.genalogy(dfte,'g_parent_cell')\n if 'g_g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_g_parent_cell')\n #give unique index to all cells\n dfte['uid'] = dfte['cell']+dfte['time_sec'].apply(lambda x: str(x))\n vac=[];sc=[];uid = []\n # Create a vecotor for the variable of interest of cell,mother,grand mother and grand grand mother and an unique identifier of it\n for c,idx in enumerate(dfte['cell'].unique()):\n dau = dfte.loc[dfte['cell']==idx]\n pc = dau['parent_cell'].iloc[0]\n mum = dfte.loc[dfte['cell']==pc]\n gpc = dau['g_parent_cell'].iloc[0]\n gmum = dfte.loc[dfte['cell']==gpc]\n ggpc = dau['g_g_parent_cell'].iloc[0]\n ggmum = dfte.loc[dfte['cell']==ggpc]\n gggpc = dau['g_g_g_parent_cell'].iloc[0]\n gggmum = dfte.loc[dfte['cell']==gggpc]\n fte = lambda x: x[['{}'.format(vari),'uid']].values\n tmp = np.vstack([fte(gggmum),fte(ggmum),fte(gmum),fte(mum),fte(dau)])\n vac.append(tmp[:,0])\n uid.append(tmp[:,1])\n sc.append(['super_cell_{}'.format(c)]*len(tmp))\n return pd.DataFrame({'super_cell':np.hstack(sc),'uid':np.hstack(uid),'{}'.format(vari):np.hstack(vac)})", "def add_children_to_parents(self, mutated_pop_dict, mating_pop_dict):\n\n print('Combining parent and child generations')\n\n merged_networks_dict = OrderedDict()\n\n for id, G in mutated_pop_dict.items():\n new_id = ''.join(\n [random.choice(string.ascii_letters + string.digits)\n for i in range(10)]\n )\n merged_networks_dict[new_id] = copy.deepcopy(G)\n for id, G in mating_pop_dict.items():\n merged_networks_dict[id] = copy.deepcopy(G)\n\n return merged_networks_dict" ]
[ "0.68335646", "0.6791156", "0.6528488", "0.647306", "0.6471037", "0.6426987", "0.6238266", "0.6163832", "0.61556786", "0.61163086", "0.60860956", "0.59248537", "0.5918993", "0.5840949", "0.58134377", "0.5805267", "0.5802707", "0.5529467", "0.5491607", "0.54788053", "0.54776627", "0.5476002", "0.53985447", "0.53440773", "0.533263", "0.53249145", "0.5292884", "0.52899534", "0.52750945", "0.5257302" ]
0.7014142
0
Pairs of gene-duplicated enzymes, derived from the unified core metabolisms.
def unifiedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:
    parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)
    childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)
    return parentGeneDuplicated.union(childGeneDuplicated)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def geneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n \n \n enzymes = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIdToEnzyme = dict()\n for enzyme in enzymes:\n geneIdToEnzyme[enzyme.geneID] = enzyme\n \n enzymePairs = geneDuplicationModel.getEnzymePairs(enzymes, ignoreDuplicatesOutsideSet = True, geneIdToEnzyme = geneIdToEnzyme, preCalculatedEnzymes = None)\n \n return enzymePairs", "def addedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]: \n # get added metabolism\n addedMetabolismEnzymes = self.addedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the added metabolism\n geneDuplicatedAdded = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in addedMetabolismEnzymes and enzymeTuple[1] in addedMetabolismEnzymes:\n geneDuplicatedAdded.add(enzymeTuple)\n \n return geneDuplicatedAdded", "def divergedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Set[Tuple[Enzyme, Enzyme]]:\n # get diverged metabolism\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the diverged metabolism\n parentGeneDuplicatedDiverged = set()\n childGeneDuplicatedDiverged = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n parentGeneDuplicatedDiverged.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in divergedMetabolismEnzymes and enzymeTuple[1] in divergedMetabolismEnzymes:\n childGeneDuplicatedDiverged.add(enzymeTuple)\n \n return parentGeneDuplicatedDiverged.union(childGeneDuplicatedDiverged)", "def joint_pairs(self):\n return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], #17 body keypoints\n [20-3, 23-3], [21-3, 24-3], [22-3, 25-3], [26-3, 42-3], [27-3, 41-3], [28-3, 40-3], [29-3, 39-3], [30-3, 38-3], \n [31-3, 37-3], [32-3, 36-3], [33-3, 35-3], [43-3, 52-3], [44-3, 51-3], [45-3, 50-3], [46-3, 49-3], [47-3, 48-3], \n [62-3, 71-3], [63-3, 70-3], [64-3, 69-3], [65-3, 68-3], [66-3, 73-3], [67-3, 72-3], [57-3, 61-3], [58-3, 60-3],\n [74-3, 80-3], [75-3, 79-3], [76-3, 78-3], [87-3, 89-3], [93-3, 91-3], [86-3, 90-3], [85-3, 81-3], [84-3, 82-3],\n [94-3, 115-3], [95-3, 116-3], [96-3, 117-3], [97-3, 118-3], [98-3, 119-3], [99-3, 120-3], [100-3, 121-3],\n [101-3, 122-3], [102-3, 123-3], [103-3, 124-3], [104-3, 125-3], [105-3, 126-3], [106-3, 127-3], [107-3, 128-3],\n [108-3, 129-3], [109-3, 130-3], [110-3, 131-3], [111-3, 132-3], [112-3, 133-3], [113-3, 134-3], [114-3, 135-3]]", "def lostMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> 
Set[Tuple[Enzyme, Enzyme]]:\n # get added metabolism\n lostMetabolismEnzymes = self.lostMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicated enzyme pairs\n geneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the lost metabolism\n geneDuplicatedLost = set()\n \n for enzymeTuple in geneDuplicated:\n if enzymeTuple[0] in lostMetabolismEnzymes and enzymeTuple[1] in lostMetabolismEnzymes:\n geneDuplicatedLost.add(enzymeTuple)\n \n return geneDuplicatedLost", "def joint_pairs(self):\n return ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))", "def conservedMetabolismGeneDuplicatedEnzymePairs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Tuple[Set[Tuple[Enzyme, Enzyme]]]:\n # get conserved metabolism\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism).getEnzymes()\n \n # get gene-duplicate enzyme pairs\n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymePairs(majorityPercentageCoreMetabolism)\n \n # filter gene-duplicated enzyme pairs for the ones with both enzymes in the conserved metabolism\n parentGeneDuplicatedConserved = set()\n childGeneDuplicatedConserved = set()\n \n for enzymeTuple in parentGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n parentGeneDuplicatedConserved.add(enzymeTuple)\n \n for enzymeTuple in childGeneDuplicated:\n if enzymeTuple[0] in conservedMetabolismEnzymes and enzymeTuple[1] in conservedMetabolismEnzymes:\n childGeneDuplicatedConserved.add(enzymeTuple)\n \n return (parentGeneDuplicatedConserved, childGeneDuplicatedConserved)", "def geneDuplicatedEnzymesDict(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> Dict[Enzyme, Set[GeneID]]:\n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n geneDuplicationModel = SimpleGeneDuplication\n \n geneIDsForEnzyme = geneDuplicationModel.getEnzymes(enzymeGraph, returnMatches = True, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n# if keepOnHeap is True:\n# self._geneDuplicatedEnzymesObject = geneIDsForEnzyme\n \n return geneIDsForEnzyme", "def get_3away_pairs(kmers):\n k = len(kmers[0])\n if k == 1 or k==2:\n return []\n if k == 3:\n return [pair for pair in combinations(kmers, 2) if pair[0][0] != pair[1][0] and pair[0][1] != pair[1][1] and pair[0][2] != pair[1][2]]\n k_L = k//2\n k_R = k-k_L\n kmer_L_hashes = defaultdict(list)\n kmer_R_hashes = defaultdict(list)\n pairs = []\n kmers_L = []\n kmers_R = []\n for i, kmer in enumerate(kmers):\n kmer_L = kmer[:k_L]\n kmer_R = kmer[k_L:]\n #print(kmer_L)\n #print(kmer_R)\n kmers_L.append(kmer_L)\n kmers_R.append(kmer_R)\n kmer_L_hashes[kmer_to_int(kmer_L)] += [i]\n kmer_R_hashes[kmer_to_int(kmer_R)] += [i]\n for kmer_L_hash in kmer_L_hashes.values(): #same in first half\n if len(kmer_L_hash) > 1:\n kmer_L = kmers[kmer_L_hash[0]][:k_L] #first half\n pairs += [tuple(kmer_L + kmer for kmer in pair) for pair in get_3away_pairs([kmers[i][k_L:] for i in kmer_L_hash])] #differ by 3 in second half\n for kmer_R_hash in kmer_R_hashes.values(): #same in second half\n if len(kmer_R_hash) > 1:\n kmer_R = kmers[kmer_R_hash[0]][k_L:] #second half\n #print(kmer_R)\n pairs += [tuple(kmer + kmer_R 
for kmer in pair) for pair in get_3away_pairs([kmers[i][:k_L] for i in kmer_R_hash])] #differ by 3 in first half\n possible_pairs = []\n possible_pairs_L = get_1away_pairs(kmers_L)\n possible_pairs_R = get_2away_pairs(kmers_R)\n #print(kmers_L)\n #print(kmers_R)\n #print(possible_pairs_L)\n #print(possible_pairs_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n possible_pairs = []\n possible_pairs_L = get_2away_pairs(kmers_L)\n possible_pairs_R = get_1away_pairs(kmers_R)\n for possible_pair_L in possible_pairs_L:\n for possible_pair_R in possible_pairs_R:\n possible_kmer1 = possible_pair_L[0]+possible_pair_R[0]\n possible_kmer2 = possible_pair_L[1]+possible_pair_R[1]\n if possible_kmer1 in kmers and possible_kmer2 in kmers:\n pairs += [(possible_kmer1, possible_kmer2)]\n return(pairs)", "def setAtomPairs(self):\n atomPairs = []\n for item in self.condensedProperDihedrals:\n dih = item[0]\n atom1 = dih.atoms[0]\n atom2 = dih.atoms[3]\n pair = [atom1, atom2]\n if atomPairs.count(pair) == 0:\n atomPairs.append(pair)\n self.atomPairs = atomPairs # [[atom1, atom2], ...]\n self.printDebug(\"atomPairs done\")", "def bone_pairs(self):\n return ((0, 3), (1, 4), (2, 5), (10, 13), (11, 14), (12, 15))", "def genPrimerPairs_5Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 5\\' extension half-asstemers')\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[10:12]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_f10 = forwPrimer5_3[:10]\n print(f\"First 10 Nucleotides of forward primer: {forwPrimer_f10}\")\n\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_f10)):\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n\n revPrimer5_3 = revPrimer_f10 + forwPrimer_f10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def get_best_combinations(indiv_chunks_1, indiv_chunks_2, pair_chunks_1, pair_chunks_2):\r\n\thyplo_collection=[]\r\n\tindiv_dict=defaultdict(list)\r\n\tfor i in xrange(0, len(indiv_chunks_1)):\r\n\t\tdecom=get_chunk_combination(indiv_chunks_1[i], indiv_chunks_2[i], pair_chunks_1[i], pair_chunks_2[i])\r\n\t\tindiv_combie=indiv_chunks_1[i]+ indiv_chunks_2[i]\r\n\t\thyplo_collection+=list(itertools.chain.from_iterable(decom))\r\n\t\tindiv_dict[indiv_combie]+=decom\r\n\treturn em(indiv_dict, hyplo_collection)", "def effective_pairs(self):\n out = 0\n hdmat = self.hdmatrix()\n for i in xrange(len(hdmat[0])):\n for j in xrange(i+1, len(hdmat[0])): \n out += hdmat[i,j]**2\n return out", "def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 
Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def getmulticombos(peeps):\n\n\tret = []\n\n\tfor p in peeps:\n\t\tu,s = getcombos(p)\n\n\t\tbestu = getbesttriplet(u)\n\t\tbests = getbesttriplet(s)\n\n\t\tret.append((bestu, bests))\n\n\treturn ret", "def get_all_potential_edges(self) -> Dict[str,\n Tuple[int, int, int, int]]:\n orig_rows = self.tile_rows\n\n ret = dict()\n\n for i in range(0, 4):\n self.rotate_right(i)\n for j in range(0, 2):\n self.flip_l_r(j)\n for k in range(0, 2):\n self.flip_t_b(k)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'rr{i}_lr{j}_tb{k}'] = edges\n\n self.tile_rows = orig_rows\n\n for j in range(0, 2):\n self.flip_l_r(j)\n for i in range(0, 4):\n self.rotate_right(i)\n for k in range(0, 2):\n self.flip_t_b(k)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'lr{j}_rr{i}_tb{k}'] = edges\n\n self.tile_rows = orig_rows\n\n for j in range(0, 2):\n self.flip_l_r(j)\n for k in range(0, 2):\n self.flip_t_b(k)\n for i in range(0, 4):\n self.rotate_right(i)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'lr{j}_tb{k}_rr{i}'] = edges\n\n self.tile_rows = orig_rows\n\n for k in range(0, 2):\n self.flip_t_b(k)\n for j in range(0, 2):\n self.flip_l_r(j)\n for i in range(0, 4):\n self.rotate_right(i)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'tb{k}_lr{j}_rr{i}'] = edges\n\n self.tile_rows = orig_rows\n\n for k in range(0, 2):\n self.flip_t_b(k)\n for i in range(0, 4):\n self.rotate_right(i)\n for j in range(0, 2):\n self.flip_l_r(j)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'tb{k}_rr{i}_lr{j}'] = edges\n\n self.tile_rows = orig_rows\n\n for i in range(0, 4):\n self.rotate_right(i)\n for k in range(0, 2):\n self.flip_t_b(k)\n for j in range(0, 2):\n self.flip_l_r(j)\n edges = self.get_current_edges()\n if edges not in ret.values():\n ret[f'rr{i}_tb{k}_lr{j}'] = edges\n\n self.tile_rows = orig_rows\n\n return ret", "def divergedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = 
childGeneDuplicated.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentGeneDuplicated.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childGeneDuplicated.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism gene-duplicated enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def perm_vs_hyp():\n\n return [\"P\",\"P\",\"P\",\"P\",\"P\"]", "def generate_pairs(self, all_walks):\n logging.info(['edge_types before generate pairs', self.edge_types])\n\n pairs = []\n skip_window = self.config['win_size'] // 2\n for layer_id, e_type in enumerate(self.edge_types):\n walks = all_walks[e_type]\n for walk in tqdm.tqdm(walks):\n for i in range(len(walk)):\n for j in range(1, skip_window + 1):\n if i - j >= 0 and walk[i] != walk[i - j]:\n neg_nodes = self.graph[e_type].sample_nodes(\n self.config['neg_num'])\n pairs.append(\n (walk[i], walk[i - j], *neg_nodes, layer_id))\n if i + j < len(walk) and walk[i] != walk[i + j]:\n neg_nodes = self.graph[e_type].sample_nodes(\n self.config['neg_num'])\n pairs.append(\n (walk[i], walk[i + j], *neg_nodes, layer_id))\n return pairs", "def _build_pairs_for_eval(self):\n rec = list()\n for idx1 in range(len(self)):\n idx2, is_similar = self._get_sec_idx_and_is_similar(idx1)\n rec.append((idx2, is_similar))\n self._pairs_for_eval = rec", "def get_2pairs():\n\n done = 0\n while not done:\n r0 = int(random(GRID_CELLS))\n c0 = int(random(GRID_CELLS))\n\n r1 = int(random(GRID_CELLS))\n c1 = int(random(GRID_CELLS))\n done = 1\n\n if random(1) < 0.5:\n # move one cell right\n ra1 = r0 + 1\n rb1 = r1 + 1\n ra0, rb0 = r0, r1\n ca0, cb0 = c0, c1\n ca1, cb1 = c0, c1\n\n if ra1 >= GRID_CELLS or rb1 >= GRID_CELLS:\n done = 0\n else: # move down:\n ca1 = c0 + 1\n cb1 = c1 + 1\n ca0, cb0 = c0, c1\n ra0, rb0 = r0, r1\n ra1, rb1 = r0, r1\n if ca1 >= GRID_CELLS or cb1 >= GRID_CELLS:\n done = 0\n\n return [((ra0, ca0), (rb0, cb0)), ((ra1, ca1), (rb1, cb1))]", "def get_pairs(self):\n self.get_locations()\n self.choices = {}\n for host, pathogens in self.locations.iteritems():\n if len(pathogens) > 1:\n for pair in combinations(pathogens, 2):\n self.choices.update({pair: host}) # pairs of pathogens in same host", "def eo_edges(self):\n logger.info(\"eo_edges called\")\n permutations = []\n original_state = self.state[:]\n original_solution = self.solution[:]\n tmp_solution_len = len(self.solution)\n\n # Build a list of the wing strings at each midge\n wing_strs = []\n\n for _, square_index, partner_index in midges_recolor_tuples_555:\n square_value = self.state[square_index]\n partner_value = self.state[partner_index]\n wing_str = square_value + partner_value\n wing_str = wing_str_map[square_value + partner_value]\n wing_strs.append(wing_str)\n\n # build a list of all possible EO permutations...an even number of edges must be high\n for num in range(4096):\n num = 
str(bin(num)).lstrip(\"0b\").zfill(12)\n if num.count(\"1\") % 2 == 0:\n permutations.append(list(map(int, num)))\n\n # Put all 2048 starting states in a file and point ida-via-graph\n # at the file so it can solve all of them and apply the one that is the shortest.\n lr_center_stage_states = []\n eo_outer_orbit_states = []\n eo_inner_orbit_states = []\n\n for permutation in permutations:\n must_be_uppercase = []\n must_be_lowercase = []\n self.state = original_state[:]\n\n for wing_str, uppercase in zip(wing_strs, permutation):\n if uppercase:\n must_be_uppercase.append(wing_str)\n else:\n must_be_lowercase.append(wing_str)\n\n # logger.info(\"%s: %s permutation %s\" % (self, index, \"\".join(map(str, permutation))))\n self.edges_flip_orientation(must_be_uppercase, must_be_lowercase)\n\n # build lists of the states that we need to find state_indexes for\n lr_center_stage_states.append(self.lt_phase3_lr_center_stage.state())\n eo_outer_orbit_states.append(self.lt_phase3_eo_outer_orbit.state())\n eo_inner_orbit_states.append(self.lt_phase3_eo_inner_orbit.state())\n\n # now we have a huge list of states to lookup, do a binary search on multiple states at once (this is drastically faster\n # than binary searching for them individually). state_index_multiple() will return a dict where the state is the key\n # and the state_index is the value.\n lr_center_stage_eo_inner_orbit_state_indexes = self.lt_phase3_lr_center_stage.state_index_multiple(\n lr_center_stage_states\n )\n eo_outer_orbit_state_indexes = self.lt_phase3_eo_outer_orbit.state_index_multiple(eo_outer_orbit_states)\n eo_inner_orbit_state_indexes = self.lt_phase3_eo_inner_orbit.state_index_multiple(eo_inner_orbit_states)\n\n # build a list of tuples of the state indexes\n pt_state_indexes = []\n for lr_center_stage_eo_inner_orbit_state, eo_outer_orbit_state, eo_inner_orbit_state in zip(\n lr_center_stage_states, eo_outer_orbit_states, eo_inner_orbit_states\n ):\n pt_state_indexes.append(\n (\n lr_center_stage_eo_inner_orbit_state_indexes[lr_center_stage_eo_inner_orbit_state],\n eo_outer_orbit_state_indexes[eo_outer_orbit_state],\n eo_inner_orbit_state_indexes[eo_inner_orbit_state],\n )\n )\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n # When solve_via_c is passed pt_state_indexes (2048 lines of states in this case), it will try all 2048 of them\n # to find the state that has the shortest solution.\n self.lt_phase3.solve_via_c(pt_states=pt_state_indexes)\n\n self.print_cube_add_comment(\"edges EOed into high/low groups\", tmp_solution_len)\n self.post_eo_state = self.state[:]\n self.post_eo_solution = self.solution[:]\n\n # re-color the cube so that the edges are oriented correctly so we can\n # pair 4-edges then 8-edges. 
After all edge pairing is done we will uncolor\n # the cube and re-apply the solution.\n self.edges_flip_orientation(wing_strs, [])\n self.highlow_edges_print()", "def _pair_indices(self):\n indices_src = []\n indices_dst = []\n for i in range(self.walk_len):\n for j in range(max(i - self.l, 0), i):\n indices_src.append(i)\n indices_dst.append(j)\n for j in range(i + 1, min(i + self.r + 1, self.walk_len)):\n indices_src.append(i)\n indices_dst.append(j)\n return indices_src, indices_dst", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def _generate_pileups(self):\n pass", "def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def generate_pairs(self, _list_d):\n\n length = len(_list_d)\n result_list = {}\n\n for i in range(length):\n for j in xrange(i+1,length):\n l = len(result_list)\n result_list[l] = ((i, _list_d[i]),(j, _list_d[j]))\n\n return result_list" ]
[ "0.66328585", "0.6220458", "0.61605203", "0.59667987", "0.59406346", "0.59398246", "0.592409", "0.58372545", "0.58349013", "0.5752567", "0.5708292", "0.5680166", "0.5635978", "0.55386114", "0.5525389", "0.548303", "0.5480818", "0.5471715", "0.54622984", "0.54544353", "0.545331", "0.5451427", "0.5437494", "0.5418222", "0.5392066", "0.5381583", "0.53751504", "0.5372016", "0.53652364", "0.5341737" ]
0.6404736
1
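Read on its own, the document code of the record above reduces to a plain set union over the gene-duplicated enzyme pairs of the parent and child clades. The standalone sketch below illustrates that pattern; the Clade machinery is out of scope here, so Enzyme objects are stood in for by strings and every pair value is hypothetical.

# Minimal sketch of the union pattern in unifiedMetabolismGeneDuplicatedEnzymePairs;
# Enzyme objects are replaced by plain strings, and all pairs are hypothetical.
parent_pairs = {("enzA", "enzB"), ("enzC", "enzD")}  # parent clade's gene-duplicated pairs
child_pairs = {("enzC", "enzD"), ("enzE", "enzF")}   # child clade's gene-duplicated pairs

unified_pairs = parent_pairs.union(child_pairs)  # pairs shared by both clades are kept once
print(sorted(unified_pairs))
# [('enzA', 'enzB'), ('enzC', 'enzD'), ('enzE', 'enzF')]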
SubstanceEnzyme graph of neofunctionalised enzymes, derived from the added core metabolism.
def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:
    parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)
    childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)
    addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)
    childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)
    childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)
    return childGraph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n graph = parentGraph.union(childGraph, addCount = False, updateName = False)\n graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n parentEdges = parentGraph.getEdges()\n childEdges = childGraph.getEdges()\n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges)\n \n return graph", "def 
edges(self):\n return self.dovetails + self.containments + self.internals", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = 
divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def line_graph_forbidden_subgraphs():\n from sage.graphs.all import Graph\n from sage.graphs.generators.basic import ClawGraph\n graphs = [ClawGraph()]\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 2: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2, 3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3, 4]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 5: [2, 0, 1]\n }))\n\n graphs.append(Graph({\n 5: [0, 1, 2, 3, 4],\n 0: [1, 4],\n 2: [1, 3],\n 3: [4]\n }))\n\n graphs.append(Graph({\n 1: [0, 2, 3, 4],\n 3: [0, 4],\n 2: [4, 5],\n 4: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3, 4],\n 2: [3, 4],\n 3: [4]\n }))\n\n return graphs", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n 
\n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def sub_graph_merging(self):", "def test_edges(self):\n return self._test_edges", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def _get_full_graph(self):", "def get_karate_club_data():\n\n # Edge list of Zachary's karate club.\n edge_list = [\n (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8),\n (0, 10), (0, 11), (0, 12), (0, 13), (0, 17), (0, 19), (0, 21), (0, 31),\n (1, 2), (1, 3), (1, 7), (1, 13), (1, 17), (1, 19), (1, 21), (1, 30),\n (2, 3), (2, 7), (2, 8), (2, 9), (2, 13), (2, 27), (2, 28), (2, 32),\n (3, 7), (3, 12), (3, 13), (4, 6), (4, 10), (5, 6), (5, 10), (5, 16),\n (6, 16), (8, 30), (8, 32), (8, 33), (9, 33), (13, 33), (14, 32), (14, 33),\n (15, 32), (15, 33), (18, 32), (18, 33), (19, 33), (20, 32), (20, 33),\n (22, 32), (22, 33), (23, 25), (23, 27), (23, 29), (23, 32), (23, 33),\n (24, 25), (24, 27), (24, 31), (25, 31), (26, 29), (26, 33), (27, 33),\n (28, 31), (28, 33), (29, 32), (29, 33), (30, 32), (30, 33), (31, 32),\n (31, 33), (32, 33)\n ]\n\n # Student-teacher assignment (before split) as in Zachary (1977).\n # Part-time 
karate instructor: Mr. Hi, node 0 (labeled as 0).\n # President: John A., node 33 (labeled as 1).\n node_labels = jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0,\n 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n \n return create_graph_data(edge_list=edge_list, node_labels=node_labels)", "def get_graph_karateclub():\n all_members = set(range(34))\n club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21}\n # club2 = all_members - club1\n\n G = eg.Graph(name=\"Zachary's Karate Club\")\n for node in all_members:\n G.add_node(node+1)\n\n zacharydat = \"\"\"\\\n0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0\n1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0\n1 1 0 1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0\n1 1 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1\n0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 1\n0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1\n0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 1 0\"\"\"\n\n for row, line in enumerate(zacharydat.split('\\n')):\n thisrow = [int(b) for b in line.split()]\n for col, entry in enumerate(thisrow):\n if entry == 1:\n G.add_edge(row+1, col+1)\n\n # Add the name of each member's club as a node attribute.\n for v in G:\n G.nodes[v]['club'] = 'Mr. 
Hi' if v in club1 else 'Officer'\n return G", "def cell_edges(self):", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def graph(self):\n ...", "def FeynmanSubgraphs(graph, model):\n model.SetTypes(graph)\n model.checktadpoles = False\n graph.FindSubgraphs(model)\n\n subs_toremove = subgraphs.DetectSauseges(graph._subgraphs)\n graph.RemoveSubgaphs(subs_toremove)\n\n subgraphs.RemoveTadpoles(graph)", "def _subgraph_isomorphism_matcher(digraph, nxpattern, node_pred, edge_pred):\n graph_matcher = iso.DiGraphMatcher(digraph, nxpattern, node_match=node_pred, edge_match=edge_pred)\n yield from graph_matcher.subgraph_isomorphisms_iter()", "def build_fully_biconnected_test_graph():\n graph = build_biconnected_test_graph()\n\n # Connect the first and third components to create a ring, converting everything into a single biconnected component\n graph.new_edge(1, 12)\n\n return graph", "def neopentane():\n coords = [\n [0.000000, 0.0, 0.0],\n [0.881905, 0.881905, 0.881905],\n [-0.881905, -0.881905, 0.881905],\n [0.881905, -0.881905, -0.881905],\n [-0.881905, 0.881905, -0.881905],\n [-1.524077, 0.276170, -1.524077],\n [1.524077, 1.524077, 0.276170],\n [1.524077, -0.276170, -1.524077],\n [1.524077, 0.276170, 1.524077],\n [-1.524077, -0.276170, 1.524077],\n [1.524077, -1.524077, -0.276170],\n [-0.276170, 1.524077, -1.524077],\n [0.276170, 1.524077, 1.524077],\n [0.276170, -1.524077, -1.524077],\n [-0.276170, -1.524077, 1.524077],\n [-1.524077, 1.524077, -0.276170],\n [-1.524077, -1.524077, 0.276170],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in 
enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)", "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def edges(self):\r\n return self.__generate_edges()" ]
[ "0.62832594", "0.6032561", "0.59919894", "0.5936504", "0.58536375", "0.5780906", "0.5709709", "0.56339985", "0.56321144", "0.561988", "0.56182474", "0.5617661", "0.5614264", "0.5597388", "0.5581094", "0.5547819", "0.5524394", "0.54902864", "0.5435478", "0.54295534", "0.5349954", "0.53368026", "0.5318905", "0.5315167", "0.52545947", "0.52425486", "0.52136767", "0.52112937", "0.5194748", "0.51700544" ]
0.6251779
1
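The records directly above and below follow the same added/lost pattern: compute the parent and child core metabolisms, take the EC numbers gained (GeneFunctionAddition) or lost (GeneFunctionLoss) between them, and filter the clade's neofunctionalised-enzyme graph down to those ECs via keepEnzymesByEC. Assuming the two getECs helpers reduce to opposite set differences over EC numbers (an assumption; the real arguments are graph objects, not sets), a minimal sketch of that step is:

# Hypothetical EC-number sets standing in for the parent and child core metabolisms.
parent_ecs = {"1.1.1.1", "2.7.1.1", "6.3.1.2"}
child_ecs = {"1.1.1.1", "2.7.1.1", "4.2.1.2"}

added_ecs = child_ecs - parent_ecs  # gained by the child clade: {'4.2.1.2'}
lost_ecs = parent_ecs - child_ecs   # no longer in the child clade: {'6.3.1.2'}
print(added_ecs, lost_ecs)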
SubstanceEnzyme graph of neofunctionalised enzymes, derived from the lost core metabolism.
def lostMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:
    parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)
    childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)
    lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)
    parentGraph = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)
    parentGraph.name = 'Lost metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)
    return parentGraph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def collectiveMetabolismEnzymes(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraph(noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Collective metabolism enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def unifiedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def line_graph_forbidden_subgraphs():\n from sage.graphs.all import Graph\n from sage.graphs.generators.basic import ClawGraph\n graphs = [ClawGraph()]\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 2: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2, 3]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3],\n 1: [2, 3],\n 4: [2],\n 5: [3, 4]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 3, 4],\n 1: [2, 3, 4],\n 3: [4],\n 5: [2, 0, 1]\n }))\n\n graphs.append(Graph({\n 5: [0, 1, 2, 3, 4],\n 0: [1, 4],\n 2: [1, 3],\n 3: [4]\n }))\n\n graphs.append(Graph({\n 1: [0, 2, 3, 4],\n 3: [0, 4],\n 2: [4, 5],\n 4: [5]\n }))\n\n graphs.append(Graph({\n 0: [1, 2, 
3],\n 1: [2, 3, 4],\n 2: [3, 4],\n 3: [4]\n }))\n\n return graphs", "def unifiedMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n parentGraph = self.parentClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n childGraph = self.childClade.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n graph = parentGraph.union(childGraph, addCount = False, updateName = False)\n graph.name = 'Unified metabolism enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n if colour is True:\n parentEdges = parentGraph.getEdges()\n childEdges = childGraph.getEdges()\n \n Export.addColourAttribute(graph, colour = Export.Colour.BLUE, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.RED, nodes = False, edges = childEdges)\n \n return graph", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def lostMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(lostECs)\n parentGraph.name = 'Lost metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = 
defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def geneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph:\n \n \n \n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism)\n \n geneDuplicationModel = SimpleGeneDuplication\n# geneDuplicationModel = SimpleGroupGeneDuplication(sameGroupOrganisms = self.group)\n \n # filter core metabolism enzyme graph \n geneDuplicatedEnzymes = geneDuplicationModel.filterEnzymes(enzymeGraph, eValue = defaultEValue, ignoreDuplicatesOutsideSet = True, preCalculatedEnzymes = None)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n geneDuplicatedEnzymesOnly = geneDuplicatedEnzymes\n geneDuplicatedEnzymes = enzymeGraph\n Export.addColourAttribute(geneDuplicatedEnzymes, colourToUse, nodes = False, edges = 
geneDuplicatedEnzymesOnly.getEdges())\n \n geneDuplicatedEnzymes.name = 'Gene-duplicated core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return geneDuplicatedEnzymes", "def get_karate_club_data():\n\n # Edge list of Zachary's karate club.\n edge_list = [\n (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8),\n (0, 10), (0, 11), (0, 12), (0, 13), (0, 17), (0, 19), (0, 21), (0, 31),\n (1, 2), (1, 3), (1, 7), (1, 13), (1, 17), (1, 19), (1, 21), (1, 30),\n (2, 3), (2, 7), (2, 8), (2, 9), (2, 13), (2, 27), (2, 28), (2, 32),\n (3, 7), (3, 12), (3, 13), (4, 6), (4, 10), (5, 6), (5, 10), (5, 16),\n (6, 16), (8, 30), (8, 32), (8, 33), (9, 33), (13, 33), (14, 32), (14, 33),\n (15, 32), (15, 33), (18, 32), (18, 33), (19, 33), (20, 32), (20, 33),\n (22, 32), (22, 33), (23, 25), (23, 27), (23, 29), (23, 32), (23, 33),\n (24, 25), (24, 27), (24, 31), (25, 31), (26, 29), (26, 33), (27, 33),\n (28, 31), (28, 33), (29, 32), (29, 33), (30, 32), (30, 33), (31, 32),\n (31, 33), (32, 33)\n ]\n\n # Student-teacher assignment (before split) as in Zachary (1977).\n # Part-time karate instructor: Mr. Hi, node 0 (labeled as 0).\n # President: John A., node 33 (labeled as 1).\n node_labels = jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0,\n 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n \n return create_graph_data(edge_list=edge_list, node_labels=node_labels)", "def addedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism gene-duplicated enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def divergedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n divergedMetabolismEnzymes = self.divergedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = divergedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism 
neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def _get_full_graph(self):", "def get_graph_karateclub():\n all_members = set(range(34))\n club1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 16, 17, 19, 21}\n # club2 = all_members - club1\n\n G = eg.Graph(name=\"Zachary's Karate Club\")\n for node in all_members:\n G.add_node(node+1)\n\n zacharydat = \"\"\"\\\n0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 1 0 0\n1 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0\n1 1 0 1 0 0 0 1 1 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 1 0\n1 1 1 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n1 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 0 0 1 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 1 0 0\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 1\n0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1\n0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 1\n0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1\n1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 1 1\n0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 1 0 0 1 0 1 0 1 1 0 0 0 0 0 1 1 1 0 1\n0 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 0 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 1 0\"\"\"\n\n for row, line in enumerate(zacharydat.split('\\n')):\n thisrow = [int(b) for b in line.split()]\n for col, entry in enumerate(thisrow):\n if entry == 1:\n G.add_edge(row+1, col+1)\n\n # Add the name of each member's club as a node attribute.\n for v in G:\n G.nodes[v]['club'] = 'Mr. 
Hi' if v in club1 else 'Officer'\n return G", "def sub_graph_merging(self):", "def test_edges(self):\n return self._test_edges", "def build_fully_biconnected_test_graph():\n graph = build_biconnected_test_graph()\n\n # Connect the first and third components to create a ring, converting everything into a single biconnected component\n graph.new_edge(1, 12)\n\n return graph", "def cell_edges(self):", "def FeynmanSubgraphs(graph, model):\n model.SetTypes(graph)\n model.checktadpoles = False\n graph.FindSubgraphs(model)\n\n subs_toremove = subgraphs.DetectSauseges(graph._subgraphs)\n graph.RemoveSubgaphs(subs_toremove)\n\n subgraphs.RemoveTadpoles(graph)", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def graph(self):\n ...", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def unifiedMetabolismGeneDuplicatedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False) -> SubstanceEnzymeGraph: \n parentGeneDuplicated = self.parentClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childGeneDuplicated = self.childClade.geneDuplicatedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is False:\n graph = parentGeneDuplicated.union(childGeneDuplicated, addCount = False, updateName = False)\n \n else:\n unifiedMetabolismEnzymes = self.unifiedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentGeneDuplicated.getEdges()\n childEdges = childGeneDuplicated.getEdges()\n \n graph = unifiedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n return graph", "def _subgraph_isomorphism_matcher(digraph, nxpattern, node_pred, edge_pred):\n graph_matcher = iso.DiGraphMatcher(digraph, nxpattern, node_match=node_pred, edge_match=edge_pred)\n yield from 
graph_matcher.subgraph_isomorphisms_iter()", "def testGraph():\n STATES = {\n 'CT': ('MA', 'RI'),\n 'MA': ('CT', 'NH', 'RI', 'VT'),\n 'ME': ('NH',),\n 'NH': ('MA', 'ME', 'VT'),\n 'RI': ('CT', 'MA'),\n 'VT': ('MA', 'NH')\n }\n\n # add all the edges to the graph\n northeast = Graph()\n for state, neighbors in STATES.items():\n for neighbor in neighbors:\n # this automatically creates a new vertices if not already present\n northeast.add_edge(state, neighbor)\n\n # display the vertices, which will show the connected neighbors.\n # this will call the __iter__() method to get the Vertex objects.\n for state in northeast:\n print(state)\n\n print(northeast)\n print(northeast.get_vertex_keys())\n\n # check the __contains__() method\n print('MA in northeast (True)?', 'MA' in northeast)\n print('CA in northeast (False)?', 'CA' in northeast)\n\n # test getVertex()\n print('MA vertex:', northeast.getVertex('MA'))", "def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs" ]
[ "0.619215", "0.61078405", "0.59768206", "0.58706874", "0.5782916", "0.5741197", "0.57312053", "0.5701871", "0.56801665", "0.56514645", "0.56266373", "0.56028616", "0.56004995", "0.55960053", "0.5593098", "0.55900544", "0.5576522", "0.55513394", "0.5510709", "0.54889655", "0.53502786", "0.5304797", "0.5297331", "0.5290658", "0.5281541", "0.5269899", "0.526345", "0.52321863", "0.52021533", "0.5195999" ]
0.61164635
1
SubstanceEC graph of "neofunctionalised" EC numbers, derived from the added core metabolism.
def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:
    parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)
    childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)
    addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)

    childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)
    childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)

    return childGraph
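For orientation, a minimal sketch of the computation above, assuming GeneFunctionAddition.getECs(parent, child) returns the EC numbers present in the child core metabolism but absent from the parent (a set difference), and that removeAllECsExcept acts as an intersection filter. This is not the FEV_KEGG implementation; the EC numbers are hypothetical:

# Minimal sketch: EC graphs modelled as plain sets of EC-number strings,
# just to illustrate the "added ECs" filtering step of the document above.
def added_ecs(parent_core: set, child_core: set) -> set:
    # EC numbers gained by the child clade relative to the parent
    return child_core - parent_core

def added_neofunctionalised_ecs(parent_core: set, child_core: set,
                                child_neofunctionalised: set) -> set:
    # keep only neofunctionalised ECs that belong to the added core metabolism
    return child_neofunctionalised & added_ecs(parent_core, child_core)

parent = {'1.1.1.1', '2.7.1.1'}
child = {'1.1.1.1', '2.7.1.1', '4.2.1.11'}
neofunctionalised = {'4.2.1.11', '2.7.1.1'}
print(added_neofunctionalised_ecs(parent, child, neofunctionalised))  # {'4.2.1.11'}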
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n 
Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def coreMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEcGraph:\n graph = self.group.majorityEcGraph(majorityPercentage = 
majorityPercentageCoreMetabolism, noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Core metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def redundantECsForContributingNeofunctionalisation(self, \n majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, \n majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, \n eValue = defaultEValue, \n redundancyType: 'RedundancyType' = None,\n considerOnlyECs = None) -> Dict[Neofunctionalisation, Set[EcNumber]]:\n from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType\n \n if redundancyType is None:\n redundancyType = RedundancyType.default\n \n #- calculate \"neofunctionalised\" ECs\n neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()\n neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)\n \n #- calculate redundancy\n redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )\n redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)\n \n contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)\n contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())\n \n #- REPEAT for each function change consisting of \"neofunctionalised\" ECs, which also contribute to redundancy\n contributingNeofunctionalisations = dict()\n \n for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():\n #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered \"neofunctionalised\", and are in return contributing to redundancy \n \n if functionChange.ecA in contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy\n \n for neofunctionalisation in neofunctionalisations:\n currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)\n \n if currentSetOfContributedECs is None:\n currentSetOfContributedECs = set()\n contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs\n \n for ec in functionChange.ecPair:\n contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)\n if contributedECs is not None:\n currentSetOfContributedECs.update(contributedECs)\n \n return contributingNeofunctionalisations", "def __repr__(self):\n return \"EC(%s, %s)\" % (str(self.coefficient), repr(self.basefield))", "def collectiveMetabolism(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes, addEcDescriptions = False) -> SubstanceEcGraph:\n graph = self.group.collectiveEcGraph(noMultifunctional = excludeMultifunctionalEnzymes, addCount = True, keepOnHeap = True, addEcDescriptions = addEcDescriptions)\n graph.name = 'Collective metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def get_ecg_graph():\n titles = ['ecg1', 'ecg2', 'ecg3']\n colors = ['rgb(240,0,0)', 'rgb(0,240,0)', 'rgb(0,0,240)']\n update()\n signames_ecg = queries['signames_ecg']\n signals = queries['signals']\n latesthr = queries['latesthr']\n return html.Div(className='ecg', children=[\n html.Div(style={'display': 'flex', 'height': 
'40vh'},\n children=[dcc.Graph(\n id=titles[i] + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': signals[signame]['time'],\n 'y': signals[signame][titles[i]],\n 'mode': 'line', 'name': signame, 'line': {'color':colors[i]}}\n ],\n 'layout': {\n 'font': {'color':'#fff'},\n 'title': '{}-{}'.format(signame, titles[i]),\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'voltage (mv)', 'color': '#fff', 'showgrid': 'False', 'range': np.linspace(-2.5, 2.5, 10)},\n 'paper_bgcolor':'#000', 'plot_bgcolor':'#000'\n }\n }\n ) for i in range(len(titles))]\n +\n [html.Div(\n style={'justify-content': 'center', 'display': 'flex',\n 'align-items': 'center', 'width': '10vh', 'font-size': '30pt', 'color': 'white'},\n children=['{}'.format(latesthr[signame][0])])\n ]\n ) for signame in signames_ecg])", "def NND_eta( eqCat, dConst, verbose = False, **kwargs):\n #-------------------------------set args and kwargs----------------------------------------------- \n rmax = 500 # in km\n tmax = 20 # in years\n M0 = 0\n if 'M0' in kwargs.keys() and kwargs['M0'] is not None:\n M0 = kwargs['M0']\n if 'rmax' in kwargs.keys() and kwargs['rmax'] is not None:\n rmax = kwargs['rmax']\n if 'tmax' in kwargs.keys() and kwargs['tmax'] is not None:\n tmax = kwargs['tmax']\n #-----------------------------add small uncertainty to X in case events are colocated-------------------------- \n if 'correct_co_located' in kwargs.keys() and kwargs['correct_co_located'] == True:\n vUncer = np.random.randn( eqCat.size())*1e-10\n eqCat.data['X'] += vUncer\n eqCat.data['Time'] += abs( vUncer)#time has to stay positive otherwise parent-offspring gets switched\n #------------------------------------------------------------------------------\n aNND = np.zeros( eqCat.size())\n vID_p = np.zeros( eqCat.size())\n vID_c = np.zeros( eqCat.size())\n deltaMag = (eqCat.data['Mag'] - M0)\n \n for jC in range( eqCat.size()):\n if verbose == True:\n print 'event %i of %i'%( jC+1, eqCat.size())\n # interevent times: take events that happend before t_i \n # child - parent > 0 \n tau = eqCat.data['Time'][jC] - eqCat.data['Time']\n sel_tau_par = tau > 0\n if sel_tau_par.sum() > 0:\n\n vcurr_ID = np.arange( eqCat.size(), dtype = int)[sel_tau_par]\n vR = np.sqrt( (eqCat.data['X'][jC] - eqCat.data['X'][vcurr_ID])**2 + (eqCat.data['Y'][jC] - eqCat.data['Y'][vcurr_ID])**2 )\n # haversine distance\n # = projUtils.haversine( eqCat.data['Lon'][jC], eqCat.data['Lat'][jC],eqCat.data['Lon'][curr_vID], eqCat.data['Lat'][curr_vID] ) \n sel_r_par = vR < rmax\n if sel_r_par.sum() > 0:\n vcurr_ID = vcurr_ID[sel_r_par]\n curr_Eta = tau[vcurr_ID]* (vR[sel_r_par]**dConst['D']) *( 10**(-dConst['b']*deltaMag[vcurr_ID]))\n sel_min = curr_Eta == curr_Eta.min()\n aNND[jC] = curr_Eta[sel_min][0]\n vID_p[jC] = eqCat.data['N'][vcurr_ID][sel_min][0]\n vID_c[jC] = eqCat.data['N'][jC]\n #print 'parent', eqCat.data['N'][vcurr_ID][sel_min][0], 'offspring', eqCat.data['N'][jC]\n #print 'parent', eqCat.data['Time'][vcurr_ID][sel_min][0], 'offspring', eqCat.data['Time'][jC]\n\n if sel_min.sum() > 1:\n print aNND[jC], curr_Eta[sel_min], eqCat.data['N'][vcurr_ID][sel_min]\n print eqCat.data['Lon'][vcurr_ID][sel_min], eqCat.data['Lat'][vcurr_ID][sel_min]\n print eqCat.data['X'][vcurr_ID][sel_min], eqCat.data['Y'][vcurr_ID][sel_min]\n sel2 = aNND > 0\n if np.logical_not(sel2).sum() > 0:\n print 'remove %i offspring without prior parents in catalog'%(np.logical_not(sel2).sum())\n #raise ValueError, error_str\n # remove events with aNND < 0; 
i.e. event at the beginning with no preceding parent\n return { 'aNND' : aNND[sel2], 'aEqID_p' : vID_p[sel2], 'aEqID_c' : vID_c[sel2]}", "def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/26')", "def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/27')", "def c_edges(self):\n self.compute_c_edges(self)\n return self._c_edges", "def complex_network_mapping(graph):\n vect = []\n\n n = nx.number_of_nodes(graph)\n e = nx.number_of_edges(graph)\n print n, e\n\n# adj = nx.adjacency_matrix(graph).toarray()\n# adj_bin = np.where(adj > 0, 1., 0.)\n# adj_conn = 1 - adj\n adj_bin = nx.adjacency_matrix(graph).toarray()\n adj_bin = np.array(adj_bin, dtype=np.float)\n\n # Node Betweenness binary\n bt_bin = nx.betweenness_centrality(graph).values()\n avg_btb = np.mean(bt_bin)\n vect.append(avg_btb)\n\n # Edge betweenness\n ebt = np.array(nx.edge_betweenness_centrality(graph).values())\n vect.append(np.mean(ebt))\n\n # Eigen vector centrality binary\n evc_bin = eigenvector_centrality_und(adj_bin)\n avg_evcb = np.mean(evc_bin)\n vect.append(avg_evcb)\n\n # Flow coefficient\n _, flow_bin, _ = flow_coef_bd(adj_bin)\n avg_flow = np.mean(flow_bin)\n vect.append(avg_flow)\n\n # Kcoreness centrality\n kcor_bin, _ = kcoreness_centrality_bu(adj_bin)\n avg_kcor = np.mean(kcor_bin)\n vect.append(avg_kcor)\n\n # Degree assortivity\n dac = nx.degree_assortativity_coefficient(graph)\n vect.append(dac)\n\n # Page rank centrality\n# pgr_wei = pagerank_centrality(adj_bin, d=0.85)\n# avg_pgr = np.mean(pgr_wei)\n# vect.append(avg_pgr)\n\n # Rich club coefficient\n# rcc = nx.rich_club_coefficient(graph).values()\n# avg_rcc = np.mean(rcc)\n# vect.append(avg_rcc)\n\n # Transitivity\n tr = nx.transitivity(graph)\n vect.append(tr)\n\n # average clustering\n avg_clst = nx.average_clustering(graph)\n vect.append(avg_clst)\n\n glb_ef = efficiency_bin(adj_bin)\n vect.append(glb_ef)\n\n return vect", "def set_ec(self, etacalc):\n if not self.__thermodyn:\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[i][etacalc[i]])\n else:\n if not etacalc in self.__A2[0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2]\n \n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures 
---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] 
=-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 
0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 
0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n 
C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod in ['espresso']: C = -C/10.\n elif self.__cod in ['vasp','exciting','wien']: C = C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0\n elif self.__cod in ['emto']: C = C*self.__ToGPa/self.__V0\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n self.__C = C\n \n else:\n Cs = []\n for t in map(str,self.__T):#for t in range(len(self.__T)):\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[t][i][etacalc[i]])\n else:\n if not etacalc in self.__A2[t][0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[t][0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2[t]]\n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n 
C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures 
---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 
0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 
5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli 
---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod == 'espresso': C = -C/10.\n elif self.__cod in ['vasp','emto','exciting','wien']: C=C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0#C = C/4.\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n Cs.append(C)\n self.__C = Cs", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def cell_edges(self):", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def all_subconstituents(self, compute=False):\n out = {}\n for i in range(self._.d+1):\n try:\n out[i] = self.subconstituent(i, compute=compute)\n except IndexError:\n pass\n return out", "def _nelec(self):\n pd = self.particle_distribution(self._gam * mec2)\n return pd.to(1/mec2_unit).value", "def eci(self):\n num_ext_terms = len(self._subspace.external_terms) # check for extra terms\n coefs = 
self.coefs[:-num_ext_terms] if num_ext_terms else self.coefs[:]\n eci = coefs.copy()\n eci = eci / self._subspace.function_total_multiplicities\n return eci", "def compute_edge_logits(self):", "def coreMetabolismEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEnzymeGraph:\n graph = self.group.collectiveEnzymeGraphByEcMajority(majorityPercentage = majorityPercentageCoreMetabolism, majorityTotal = None, noMultifunctional = excludeMultifunctionalEnzymes)\n graph.name = 'Core metabolism Enzymes ' + ' '.join(self.ncbiNames)\n return graph", "def colored_edges(genome):\n edges = []\n for chromo in genome:\n nodes = [0] + chromosome_to_cycle(chromo)\n nodes.append(nodes[1])\n for j in range(1, len(chromo) + 1):\n edges.append((nodes[2 * j], nodes[2 * j + 1]))\n\n return edges", "def nE(self):\n return int(self.vnE.sum())", "def sage_graph(self):\n self.fe.load_cache()\n edges = []\n is_bipartite = self.variant.is_bipartite()\n for X in self.L:\n for Y in self.L:\n a = self.op_norm(X, Y)\n if not self.K.is_zero(a):\n for c in self.K.unit_group:\n d = a - c\n if X != Y or c < d or is_bipartite:\n edges.append(((X, c, False), (Y, d, is_bipartite)))\n if X == Y and not is_bipartite:\n break\n return sage.all.Graph(edges)", "def conservedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, colour = False):\n conservedMetabolismEnzymes = self.conservedMetabolismEnzymes(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolismEnzymes\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolismEnzymes[0].removeAllEnzymesExcept(parentNeofunctionalised.getEnzymes())\n childGraph = conservedMetabolismEnzymes[1].removeAllEnzymesExcept(childNeofunctionalised.getEnzymes())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised enzymes *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def connected_component(self):\n t1 = datetime.datetime.now()\n nodes = set(x.hex for x in self.agents)\n result = []\n while nodes:\n node = nodes.pop()\n # This set will contain the next group of nodes connected to each other.\n group = {node}\n # Build a queue with this node in it.\n queue = [node]\n # Iterate the queue.\n # When it's empty, we finished visiting a group of connected nodes.\n while queue:\n # Consume the next item from the queue.\n node = queue.pop(0)\n # Fetch the neighbors.\n neighbors = set(x for x in node.fon if x.is_occupied == 1)\n # Remove the neighbors we already 
visited.\n neighbors.difference_update(group)\n # Remove the remaining nodes from the global set.\n nodes.difference_update(neighbors)\n # Add them to the group of connected nodes.\n group.update(neighbors)\n # Add them to the queue, so we visit them in the next iterations.\n queue.extend(neighbors)\n\n # Add the group to the list of groups.\n result.append(len(group))\n td = datetime.datetime.now() - t1\n print(\"calculated {} connected components in {} seconds\".format(len(result),td.total_seconds()))\n return len(result), np.histogram(result, self.cluster_hist_breaks)[0]", "def calEachCrossflowAllAxialNode():\n AxialNodeno = 14 # axial node number in CFD data\n Nodes = []\n base = 'Node'\n for i in range(0, AxialNodeno):\n Nodes.append(base+str(i))\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n lateralFactors = []\n for node in Nodes:\n lateralFactors.append(crossFlow[node]/0.8)\n #need to judge the sign of lateral flow according to CTF rule!!\n gapsToFlip = [2,4,6,7,9,11,13,14,16,18,20,21] #gaps in y direction\n gapsToFlipIndex = [x - 1 for x in gapsToFlip]\n for factors in lateralFactors:\n for index in gapsToFlipIndex:\n factors[index] = -factors[index] \n #note: lateralFactors is a list of list\n \n #below calculate factors averaged over all subchannels\n crossFlowAveFactor = crossFlow.apply(abs).mean(axis = 0)/0.8\n lateralFactorsAvelist = []\n for i in range(0,14):\n base = []\n for j in range(0,24):\n base.append(crossFlowAveFactor[i])\n lateralFactorsAvelist.append(base)\n \n \n for i in range(0, 14):\n for j in range(0, 24):\n #note, in the original model there is only one sign for all source\n #terms in one sub-channel. therefore -- sign(crossFlow.iloc[j,2])\n lateralFactorsAvelist[i][j] = lateralFactorsAvelist[i][j] *sign(crossFlow.iloc[j,2]) \n for each in lateralFactorsAvelist:\n for index in gapsToFlipIndex:\n each[index] = -each[index] \n \n \n return lateralFactors, lateralFactorsAvelist" ]
[ "0.6423988", "0.6371142", "0.63001156", "0.62050164", "0.6046099", "0.5791749", "0.5715767", "0.55602425", "0.5521478", "0.5479102", "0.54652435", "0.5458608", "0.54561746", "0.54515", "0.5428723", "0.5406992", "0.53999406", "0.53977454", "0.52835554", "0.5282024", "0.526819", "0.52627015", "0.525324", "0.52465224", "0.52282745", "0.52223384", "0.52139306", "0.5208815", "0.5199048", "0.51972765" ]
0.65178627
0
Two SubstanceEC graphs of "neofunctionalised" EC numbers, derived from the diverged core metabolism.
def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):
    divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)
    
    parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)
    childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)
    
    if colour is True:
        parentEdges = parentNeofunctionalised.getEdges()
        childEdges = childNeofunctionalised.getEdges()
        
        graph = divergedMetabolism
        
        Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)
        Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)
        
        graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)
        
        return graph
    else:
        parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())
        childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())
        
        parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)
        childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'
        
        return (parentGraph, childGraph)
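A minimal usage sketch for the method above, assuming a `CladePair`-style wrapper around a parent and a child clade. The class, its import path, and the clade names are assumptions; only the method name, its `colour` default, and the tuple-vs-single-graph return behaviour come from the document itself.

```python
# Hypothetical driver; CladePair, its import path, and the clade names
# are assumptions -- only the method's behaviour is taken from the document.
from FEV_KEGG.Evolution.Clade import CladePair  # module path assumed

cladePair = CladePair('Enterobacterales', 'Escherichia coli')

# colour=False (the default) returns a (parentGraph, childGraph) tuple
# of SubstanceEcGraphs restricted to "neofunctionalised" EC numbers.
parentGraph, childGraph = cladePair.divergedMetabolismNeofunctionalisedECs()
print(parentGraph.name)  # 'Diverged metabolism neofunctionalised ECs *...* -> ...'

# colour=True returns a single coloured SubstanceEcGraph instead
# (parent edges green, child edges yellow).
colouredGraph = cladePair.divergedMetabolismNeofunctionalisedECs(colour=True)
```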
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph: \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is False:\n graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)\n \n else:\n unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)\n \n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = unifiedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, 
majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def get_ecg_graph():\n titles = ['ecg1', 'ecg2', 'ecg3']\n colors = ['rgb(240,0,0)', 'rgb(0,240,0)', 'rgb(0,0,240)']\n update()\n signames_ecg = queries['signames_ecg']\n signals = queries['signals']\n latesthr = queries['latesthr']\n return html.Div(className='ecg', children=[\n html.Div(style={'display': 'flex', 'height': '40vh'},\n children=[dcc.Graph(\n id=titles[i] + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': signals[signame]['time'],\n 'y': signals[signame][titles[i]],\n 'mode': 'line', 'name': signame, 'line': {'color':colors[i]}}\n ],\n 'layout': {\n 'font': {'color':'#fff'},\n 'title': '{}-{}'.format(signame, titles[i]),\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'voltage (mv)', 'color': '#fff', 'showgrid': 'False', 'range': np.linspace(-2.5, 2.5, 10)},\n 'paper_bgcolor':'#000', 'plot_bgcolor':'#000'\n }\n }\n ) for i in range(len(titles))]\n +\n [html.Div(\n style={'justify-content': 'center', 'display': 'flex',\n 'align-items': 'center', 'width': '10vh', 'font-size': '30pt', 'color': 'white'},\n 
children=['{}'.format(latesthr[signame][0])])\n ]\n ) for signame in signames_ecg])", "def redundantECsForContributingNeofunctionalisation(self, \n majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, \n majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, \n eValue = defaultEValue, \n redundancyType: 'RedundancyType' = None,\n considerOnlyECs = None) -> Dict[Neofunctionalisation, Set[EcNumber]]:\n from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType\n \n if redundancyType is None:\n redundancyType = RedundancyType.default\n \n #- calculate \"neofunctionalised\" ECs\n neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()\n neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)\n \n #- calculate redundancy\n redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )\n redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)\n \n contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)\n contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())\n \n #- REPEAT for each function change consisting of \"neofunctionalised\" ECs, which also contribute to redundancy\n contributingNeofunctionalisations = dict()\n \n for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():\n #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered \"neofunctionalised\", and are in return contributing to redundancy \n \n if functionChange.ecA in contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy\n \n for neofunctionalisation in neofunctionalisations:\n currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)\n \n if currentSetOfContributedECs is None:\n currentSetOfContributedECs = set()\n contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs\n \n for ec in functionChange.ecPair:\n contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)\n if contributedECs is not None:\n currentSetOfContributedECs.update(contributedECs)\n \n return contributingNeofunctionalisations", "def uncertainty_ee(self,e1,e2):\n # reco\n unc = (self._eleRecoWeight[(e1.pt(),e1.eta())][1]/self._eleRecoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleRecoWeight[(e2.pt(),e2.eta())][1]/self._eleRecoWeight[(e2.pt(),e2.eta())][0])**2\n # id-isolation\n unc += (self._eleIdIsoWeight[(e1.pt(),e1.eta())][1]/self._eleIdIsoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleIdIsoWeight[(e2.pt(),e2.eta())][1]/self._eleIdIsoWeight[(e2.pt(),e2.eta())][0])**2\n # trigger (approximate)\n unc += (abs(self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n 
(self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n unc += ((self._ele8TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n #outcome\n return sqrt(unc)", "def algo_Euclide_etendu(a,b):\n r1=a\n u1=1\n v1=0\n r2=b\n u2=0\n v2=1\n while r2!=0 : #Invariants de boucle : r1=u1*a+v1*b et r2=u2*a+v2*b\n q=r1//r2\n rs=r1 ; us=u1 ; vs=v1 #Variables de sauvegarde\n r1=r2 ; u1=u2 ; v1=v2\n r2 = rs - q*r2 # r2 <- Reste de la division euclidienne de r1 par r2\n u2 = us - q*u2\n v2 = vs - q*v2\n return(r1,u1,v1) #On prend le dernier reste non nul.", "def intersectConics(E1, E2):\n\n P = np.array([])\n r1 = matrix_rank(E1)\n r2 = matrix_rank(E2)\n \n if(r1==3 and r2==3):\n P = completeIntersection(E1,E2) \n else:\n if (r2 < 3): #E2 is degenerate\n defE = E2\n fullE = E1\n else:\n defE = E1 #E1 is degenerate\n fullE = E2\n m, l = decomposeDegenerateConic(defE)\n P1 = intersectConicLine(fullE,m)\n P2 = intersectConicLine(fullE,l)\n P = np.array([P1, P2])\n points_x = []\n points_y = []\n for i in range(2):\n P1 = P[i]\n if(P1.size!=0):\n for j in range(P1.shape[0]):\n points_x.append(P1[j,0]/P1[j,2])\n points_y.append(P1[j,1]/P1[j,2])\n return points_x, points_y", "def sub_graph_merging(self):", "def betweenness_pol(G, ms):\n dict_eb = nx.edge_betweenness_centrality(G, k = int(0.75*len(G)))\n #n_pivots = min(1000, len(G))\n #dict_eb = nx.edge_betweenness_centrality(G, k=n_pivots)\n \n cut_edges = []\n rest_edges = []\n \n BCC_dist = []\n\n for e in G.edges():\n s, t = e\n\n if ms[s] != ms[t]:\n cut_edges += [e]\n else:\n rest_edges += [e]\n\n cut_ebc = [dict_eb[e] for e in cut_edges]\n rest_ebc = [dict_eb[e] for e in rest_edges]\n \n if len(cut_ebc) <= 1:\n print(\"Error in the gap!\")\n return -1\n \n kernel_for_cut = scipy.stats.gaussian_kde(cut_ebc, \"silverman\")\n kernel_for_rest = scipy.stats.gaussian_kde(rest_ebc, \"silverman\")\n\n BCC = []\n \n for _ in range(10):\n cut_dist = kernel_for_cut.resample(10000)[0]\n rest_dist = kernel_for_rest.resample(10000)[0]\n\n cut_dist = [max(0.00001, value) for value in cut_dist]\n rest_dist = [max(0.00001, value) for value in rest_dist]\n\n kl_divergence = scipy.stats.entropy(rest_dist, cut_dist)\n\n BCCval = 1-2.71828**(-kl_divergence)\n \n BCC.append(BCCval)\n \n return sum(BCC)/len(BCC)", "def cell_edges2d_cartesian(self, axis2):", "def plot_ecdf(self, variant_one, variant_two):\n if variant_one == variant_two:\n raise ValueError('variant_one and variant_two cannot be the same')\n if variant_one not in self.posteriors.keys() or \\\n variant_two not in self.posteriors.keys():\n raise ValueError(('Variants must only be a value in column '\n '{}'.format(self.bucket_col_name)))\n\n if variant_one in self.ecdf.keys() and \\\n variant_two in self.ecdf[variant_one].keys():\n self._plot_ecdf(numerator_name=variant_one,\n denominator_name=variant_two)\n plt.ylabel('Cumulative Lift: {0} vs {1}'\n .format(variant_two, variant_one))\n else:\n 
self._plot_ecdf(numerator_name=variant_two,\n denominator_name=variant_one)\n plt.ylabel('Cumulative Lift: {0} vs {1}'\n .format(variant_one, variant_two))", "def __repr__(self):\n return \"EC(%s, %s)\" % (str(self.coefficient), repr(self.basefield))", "def division_euclidienne(n1, n2):", "def question_2():\r\n comparison_graph = er_algorithm(1000, random.uniform(0, 1))\r\n in_degree_dist = utility_graph.in_degree_distribution(comparison_graph)\r\n normalized_dist = utility_graph.normalize_distribution(in_degree_dist)\r\n\r\n utility_graph.plot_log_log_scatter(normalized_dist,\r\n 'ER Algorithm In-degree Distribution',\r\n 'in-degree log-base-10',\r\n 'normalized distribution log-base-10')", "def CS2e(Type=\"DFA\"):\n CC50, CC51, CC52, CC53 = state('CC50'), state('CC51'), state('CC52'), state('CC53')\n for i in sigma:\n CC52.transit[i] = CC52\n CC53.transit[i] = CC53\n for i in sigma_1:\n CC50.transit[i] = CC50\n for i in sigma_2:\n CC50.transit[i] = CC53\n CC51.transit[i] = CC51\n for i in sigma_cc:\n CC50.transit[i] = CC51\n CC51.transit[i] = CC51\n for i in sigma_ncc:\n CC51.transit[i] = CC50\n for i in sigma_B_A:\n CC51.transit[i] = CC50\n for i in sigma_e:\n CC51.transit[i] = CC53\n for i in sigma_A:\n CC51.transit[i] = CC52\n if Type == \"pDFA\":\n CC5 = pDFA('CC5', sigma, [CC50, CC51, CC52, CC53], CC50, [CC52])\n else:\n CC5 = DFA('CC5', sigma, [CC50, CC51, CC52, CC53], CC50, [CC52])\n if (SIZEOF):\n EM_size[\"CS2e\"] = asizeof.asizeof(CC5)\n return CC5", "def test_efficiency_disconnected_nodes(self):\n assert_equal(nx.efficiency(self.G1, 1, 2), 0)", "def _nelec(self):\n pd = self.particle_distribution(self._gam * mec2)\n return pd.to(1/mec2_unit).value", "def eccentricity(self):\n new_data = self._data[['pl_pnum', 'pl_orbper', 'pl_orbsmax',\n 'pl_masse', 'pl_orbeccen',\n 'pl_radj', 'pl_dens', 'st_teff',\n 'st_mass', 'st_rad']]\n new_data = new_data.dropna()\n\n features = new_data[['pl_pnum', 'pl_orbper', 'pl_orbsmax',\n 'pl_masse',\n 'pl_radj', 'pl_dens', 'st_teff',\n 'st_mass', 'st_rad']]\n labels = new_data['pl_orbeccen']\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.2)\n\n # Create an untrained model\n model = DecisionTreeRegressor()\n\n # Train it on the **training set**\n model.fit(features_train, labels_train)\n\n # Compute test accuracy\n test_predictions = model.predict(features_test)\n test_acc = mean_absolute_error(labels_test, test_predictions)\n test_acc_r2 = r2_score(labels_test, test_predictions)\n\n # Plot ML vs Actual\n fig, [ax1, ax2] = plt.subplots(2, figsize=(15, 12))\n\n sns.distplot(test_predictions, kde=False, ax=ax1)\n sns.distplot(labels_test, kde=False, ax=ax2)\n\n ax1.set_title('Distribution of Predicted Eccentricities of Orbits')\n ax1.set_xlabel('Eccentricity of Orbit')\n ax1.set_ylabel('Number of Planets')\n\n ax2.set_title('Distribution of Actual Eccentricities of Orbits')\n ax2.set_xlabel('Eccentricity of Orbit')\n ax2.set_ylabel('Number of Planets')\n\n plt.savefig('figures/ML_Eccentricity.png', bbox_inches='tight')\n\n return (test_acc, test_acc_r2)", "def ordinal_difference(evs, evs2):\n \n sigma = calc_sigma(evs)\n \n # print sigma\n # ordinal difference\n ordDiff = zeros(vecLen-1, dtype='d')\n for j in range(vecLen-1):\n ordDiff[j] = absolute(evs[j] - evs2[j]) / sigma[j]\n del j\n return ordDiff", "def set_ec(self, etacalc):\n if not self.__thermodyn:\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n 
A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[i][etacalc[i]])\n else:\n if not etacalc in self.__A2[0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2]\n \n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n 
#--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 
0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 
0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures 
----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod in ['espresso']: C = -C/10.\n elif self.__cod in ['vasp','exciting','wien']: C = C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0\n elif self.__cod in ['emto']: C = C*self.__ToGPa/self.__V0\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n self.__C = C\n \n else:\n Cs = []\n for t in map(str,self.__T):#for t in range(len(self.__T)):\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[t][i][etacalc[i]])\n else:\n if not etacalc in self.__A2[t][0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[t][0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2[t]]\n \n #%%%--- Cubic structures 
---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 
2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 
0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 
2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures 
-----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod == 'espresso': C = -C/10.\n elif self.__cod in ['vasp','emto','exciting','wien']: C=C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0#C = C/4.\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n Cs.append(C)\n self.__C = Cs", "def NND_eta( eqCat, dConst, verbose = False, **kwargs):\n #-------------------------------set args and kwargs----------------------------------------------- \n rmax = 500 # in km\n tmax = 20 # in years\n M0 = 0\n if 'M0' in kwargs.keys() and kwargs['M0'] is not None:\n M0 = kwargs['M0']\n if 'rmax' in kwargs.keys() and kwargs['rmax'] is not None:\n rmax = kwargs['rmax']\n if 'tmax' in kwargs.keys() and kwargs['tmax'] is not None:\n tmax = kwargs['tmax']\n #-----------------------------add small uncertainty to X in case events are colocated-------------------------- \n if 'correct_co_located' in kwargs.keys() and kwargs['correct_co_located'] == True:\n vUncer = np.random.randn( eqCat.size())*1e-10\n eqCat.data['X'] += vUncer\n eqCat.data['Time'] += abs( vUncer)#time has to stay positive otherwise parent-offspring gets switched\n #------------------------------------------------------------------------------\n aNND = 
np.zeros( eqCat.size())\n vID_p = np.zeros( eqCat.size())\n vID_c = np.zeros( eqCat.size())\n deltaMag = (eqCat.data['Mag'] - M0)\n \n for jC in range( eqCat.size()):\n if verbose == True:\n print 'event %i of %i'%( jC+1, eqCat.size())\n # interevent times: take events that happend before t_i \n # child - parent > 0 \n tau = eqCat.data['Time'][jC] - eqCat.data['Time']\n sel_tau_par = tau > 0\n if sel_tau_par.sum() > 0:\n\n vcurr_ID = np.arange( eqCat.size(), dtype = int)[sel_tau_par]\n vR = np.sqrt( (eqCat.data['X'][jC] - eqCat.data['X'][vcurr_ID])**2 + (eqCat.data['Y'][jC] - eqCat.data['Y'][vcurr_ID])**2 )\n # haversine distance\n # = projUtils.haversine( eqCat.data['Lon'][jC], eqCat.data['Lat'][jC],eqCat.data['Lon'][curr_vID], eqCat.data['Lat'][curr_vID] ) \n sel_r_par = vR < rmax\n if sel_r_par.sum() > 0:\n vcurr_ID = vcurr_ID[sel_r_par]\n curr_Eta = tau[vcurr_ID]* (vR[sel_r_par]**dConst['D']) *( 10**(-dConst['b']*deltaMag[vcurr_ID]))\n sel_min = curr_Eta == curr_Eta.min()\n aNND[jC] = curr_Eta[sel_min][0]\n vID_p[jC] = eqCat.data['N'][vcurr_ID][sel_min][0]\n vID_c[jC] = eqCat.data['N'][jC]\n #print 'parent', eqCat.data['N'][vcurr_ID][sel_min][0], 'offspring', eqCat.data['N'][jC]\n #print 'parent', eqCat.data['Time'][vcurr_ID][sel_min][0], 'offspring', eqCat.data['Time'][jC]\n\n if sel_min.sum() > 1:\n print aNND[jC], curr_Eta[sel_min], eqCat.data['N'][vcurr_ID][sel_min]\n print eqCat.data['Lon'][vcurr_ID][sel_min], eqCat.data['Lat'][vcurr_ID][sel_min]\n print eqCat.data['X'][vcurr_ID][sel_min], eqCat.data['Y'][vcurr_ID][sel_min]\n sel2 = aNND > 0\n if np.logical_not(sel2).sum() > 0:\n print 'remove %i offspring without prior parents in catalog'%(np.logical_not(sel2).sum())\n #raise ValueError, error_str\n # remove events with aNND < 0; i.e. event at the beginning with no preceding parent\n return { 'aNND' : aNND[sel2], 'aEqID_p' : vID_p[sel2], 'aEqID_c' : vID_c[sel2]}", "def connect_cells(dfte,vari):\n # Create the variabel cell for mother, grand mother and grand grand mother\n if 'g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'parent_cell') #Create genealogy\n if 'g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_parent_cell')\n if 'g_g_g_parent_cell' not in dfte.columns:\n dfte = rl.genalogy(dfte,'g_g_parent_cell')\n #give unique index to all cells\n dfte['uid'] = dfte['cell']+dfte['time_sec'].apply(lambda x: str(x))\n vac=[];sc=[];uid = []\n # Create a vecotor for the variable of interest of cell,mother,grand mother and grand grand mother and an unique identifier of it\n for c,idx in enumerate(dfte['cell'].unique()):\n dau = dfte.loc[dfte['cell']==idx]\n pc = dau['parent_cell'].iloc[0]\n mum = dfte.loc[dfte['cell']==pc]\n gpc = dau['g_parent_cell'].iloc[0]\n gmum = dfte.loc[dfte['cell']==gpc]\n ggpc = dau['g_g_parent_cell'].iloc[0]\n ggmum = dfte.loc[dfte['cell']==ggpc]\n gggpc = dau['g_g_g_parent_cell'].iloc[0]\n gggmum = dfte.loc[dfte['cell']==gggpc]\n fte = lambda x: x[['{}'.format(vari),'uid']].values\n tmp = np.vstack([fte(gggmum),fte(ggmum),fte(gmum),fte(mum),fte(dau)])\n vac.append(tmp[:,0])\n uid.append(tmp[:,1])\n sc.append(['super_cell_{}'.format(c)]*len(tmp))\n return pd.DataFrame({'super_cell':np.hstack(sc),'uid':np.hstack(uid),'{}'.format(vari):np.hstack(vac)})", "def hr_egn(A, B, R, x0):\n # A - Adjacency matrix, np.ndarray (N,N)\n # B - A 2D or 3D matrix with all payoff matrices, np.ndarray (S,S,N)\n # R - Relationship or preference matrix, np.ndarray (N,N)\n # x0 - Initial state of our system, np.ndarray (N,S), must be double\n\n # 
Number of players\n N = A[:, 0].size\n # Number of strategies\n S = x0[0, :].size\n # Degree and degree of preferences\n d = np.zeros([N, 2])\n d[:, 0] = np.dot(A, np.ones(N))\n\n for v in range(N):\n d[v, 1] = np.dot(np.ceil(np.abs(R[v, :])), A[v, :])\n\n # Player v neighborhood\n k = np.zeros([N, S], dtype='double')\n for v in range(N):\n for u in range(N):\n k[v, :] = np.add(k[v, :], np.multiply(A[v, u], x0[u, :]))\n # Weights the neighborhood\n k[v, :] = np.multiply(np.divide(1, d[v, 0]), k[v, :])\n\n # This variable is the increments that x0 receives, the derivative\n x = np.zeros([N, S], dtype='double')\n # This is the unit vector with 1 in some entry\n es = np.zeros(S, dtype='int')\n\n # Phi and gamma\n p = 0\n g = 0\n\n # Auxiliary variables for better clarity\n aux1 = 0\n aux2 = 0\n\n # Here is the derivative calculation\n # We first test if all payoffs are the same so we do less comparisons\n if B.ndim == 2:\n for v in range(N):\n for s in range(S):\n # Set es value\n es[s] = 1\n for u in range(N):\n if v == u:\n # Same payoff personal equation\n # First we will do the dot products\n # e_s*B*k_v\n aux1 = np.dot(es, np.dot(B, k[v, :]))\n # x_v*B*k_v\n aux2 = np.dot(x0[v, :], np.dot(B, k[v, :]))\n # Finally we subtract them to multiply by r_vv\n p = np.multiply(R[v, u], np.subtract(aux1, aux2))\n elif A[v, u] != 0:\n # Same payoff social equation\n # x_u*B*e_s\n aux1 = np.dot(x0[u, :], np.dot(B, es))\n # x_u*B*x_v\n aux2 = np.dot(x0[u, :], np.dot(B, x0[v, :]))\n # Subtract then multiply\n aux1 = np.subtract(aux1, aux2)\n aux2 = np.multiply(R[v, u], A[v, u])\n g = np.add(g, np.multiply(aux2, aux1))\n # Weights the social part\n if d[v, 1] != 0:\n g = np.multiply(np.divide(1, d[v, 1]), g)\n # Estimates the derivative\n x[v, s] = np.multiply(x0[v, s], np.add(p, g))\n # Prepare variables to next iteration\n p = 0\n g = 0\n es[s] = 0\n else:\n for v in range(N):\n for s in range(S):\n # Same thing as before, but now with individual payoffs\n es[s] = 1\n for u in range(N):\n if v == u:\n # Individual payoffs personal equation\n # e_s*B_v*k_v\n aux1 = np.dot(es, np.dot(B[:, :, v], k[v, :]))\n # x_u*B_v*k_v\n aux2 = np.dot(x0[v, :], np.dot(B[:, :, v], k[v, :]))\n p = np.multiply(R[v, u], np.subtract(aux1, aux2))\n elif A[v, u] != 0:\n # Individual payoffs social equation\n # x_u*B_u*e_s\n aux1 = np.dot(x0[u, :], np.dot(B[:, :, u], es))\n # x_u*B_u*x_v\n aux2 = np.dot(x0[u, :], np.dot(B[:, :, u], x0[v, :]))\n # Subtract then multiply\n aux1 = np.subtract(aux1, aux2)\n aux2 = np.multiply(R[v, u], A[v, u])\n g = np.add(g, np.multiply(aux2, aux1))\n # Weights the social part\n if d[v, 1] != 0:\n g = np.multiply(np.divide(1, d[v, 1]), g)\n # Estimates the derivative\n x[v, s] = np.multiply(x0[v, s], np.add(p, g))\n # Prepare variables to next iteration\n p = 0\n g = 0\n es[s] = 0\n return x", "def tao_BC_and_LEM_lines(epics):\n bc1_e0=epics.caget('SIOC:SYS0:ML00:AO483')*1e6\n bc2_e0=epics.caget('SIOC:SYS0:ML00:AO489')*1e9\n l3_e0 =epics.caget('SIOC:SYS0:ML00:AO500')*1e9\n \n # Charge in LTU\n q_after_horn_cutting = epics.caget('SIOC:SYS0:ML00:CALC252')*1e-12 # pC -> C\n bc1_offset=epics.caget('BMLN:LI21:235:MOTR')*1e-3\n bc2_offset=epics.caget('BMLN:LI24:805:MOTR')*1e-3\n \n bc1_current=epics.caget('SIOC:SYS0:ML00:AO485')\n bc2_current=epics.caget('SIOC:SYS0:ML00:AO195')\n \n # Catch bad settings\n if bc1_current==0:\n print('Warning: BC1 current is zero!')\n bc1_sigma_z = 0\n else:\n # Assumes parabolic distribution\n bc1_sigma_z = q_after_horn_cutting*299792458 / sqrt(10) / bc1_current\n\n if 
bc2_current==0:\n print('Warning: BC1 current is zero!')\n bc2_sigma_z = 0\n else:\n # Assumes Gaussian distribution\n bc2_sigma_z = q_after_horn_cutting*299792458 / sqrt(12) / bc2_current \n \n lines = []\n lines.append('set dat BC1.energy[1]|meas = '+str(bc1_e0))\n lines.append('set dat BC2.energy[1]|meas = '+str(bc2_e0))\n lines.append('set dat L3.energy[2]|meas = '+str(l3_e0))\n lines.append('set dat BC1.offset[1]|meas = '+str(bc1_offset))\n lines.append('set dat BC2.offset[1]|meas = '+str(bc2_offset))\n \n lines.append(f'! Charge after horn cutting: {q_after_horn_cutting*1e12:10.4} pC')\n lines.append(f'! For BC1 current {bc1_current} A')\n lines.append('set dat BC1.beam[1]|meas = '+str( bc1_sigma_z))\n lines.append(f'! For BC2 current {bc2_current} A')\n lines.append('set dat BC2.beam[1]|meas = '+str( bc2_sigma_z)) \n\n return lines", "def epidote():\n\n rho = 3465.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 211.5; C[0,1] = 65.6; C[0,2] = 43.2; C[0,3] = 0.; C[0,4] = -6.5; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 239.; C[1,2] = 43.6; C[1,3] = 0.; C[1,4] = -10.4; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 202.1; C[2,3] = 0.; C[2,4] = -20.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.1; C[3,4] = 0.; C[3,5] = -2.3\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 43.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.5\n\n return C, rho", "def add_electronetivity_and_lone_pair(df):\n assert 'atom_0' in df.columns\n assert 'atom_1' in df.columns\n electro_df = get_electonegativity()\n electro_df.index.name = 'atom_0'\n df = df.join(electro_df, how='left', on='atom_0')\n df.rename({\n 'Electronegativity': 'Electronegativity_0'\n },\n axis=1,\n inplace=True)\n\n electro_df.index.name = 'atom_1'\n df = df.join(electro_df, how='left', on='atom_1')\n df.rename({\n 'Electronegativity': 'Electronegativity_1'\n },\n axis=1,\n inplace=True)\n df['Electronegativity_diff'] = df['Electronegativity_1'] - df[\n 'Electronegativity_0']\n\n # lone pair\n df['atom_0_lone_pair'] = df['atom_0'].map(get_lone_pair())\n df['atom_1_lone_pair'] = df['atom_1'].map(get_lone_pair())\n return df", "def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter" ]
[ "0.6214158", "0.60957927", "0.59478533", "0.58485806", "0.57059896", "0.5507641", "0.5501157", "0.54937583", "0.54590195", "0.5301212", "0.52908236", "0.52827936", "0.5239076", "0.5235464", "0.5215645", "0.52147555", "0.52099645", "0.5206502", "0.5192779", "0.5181326", "0.5179808", "0.5170709", "0.51515466", "0.5138134", "0.51235694", "0.5122695", "0.51097876", "0.5109335", "0.50935864", "0.5092792" ]
0.61443126
1
SubstanceEC graph of "neofunctionalised" EC numbers, derived from the unified core metabolisms.
def unifiedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False) -> SubstanceEcGraph:
    # "neofunctionalised" ECs of the parent's and the child's core metabolism
    parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)
    childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)
    
    if colour is False:
        # plain union of both "neofunctionalised" EC graphs
        graph = parentNeofunctionalised.union(childNeofunctionalised, addCount = False, updateName = False)
    
    else:
        # colour the unified core metabolism: parent's edges green, child's edges yellow
        unifiedMetabolism = self.unifiedMetabolism(majorityPercentageCoreMetabolism, colour = True)
        
        parentEdges = parentNeofunctionalised.getEdges()
        childEdges = childNeofunctionalised.getEdges()
        
        graph = unifiedMetabolism
        
        Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)
        Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)
    
    graph.name = 'Unified metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)
    
    return graph
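A minimal usage sketch for the document above, assuming FEV_KEGG's clade-pair API; the import path, constructor arguments, and clade names below are assumptions for illustration, not taken from this record.

# Hypothetical sketch: import path, constructor signature, and clade names
# are assumptions about the FEV_KEGG API, not part of this dataset record.
from FEV_KEGG.Evolution.Clade import CladePair  # assumed module path

# Compare a parent clade with one of its child clades (example names).
cladePair = CladePair('Enterobacterales', 'Escherichia')  # assumed constructor

# Union of both clades' "neofunctionalised" EC graphs, coloured by origin.
graph = cladePair.unifiedMetabolismNeofunctionalisedECs(colour = True)
print(graph.name)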
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def neofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEcGraph:\n # get neofunctionalisations \n neofunctionalisedECs = NeofunctionalisedECs(self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs))\n \n # filter core metabolism EC graph\n coreMetabolism = self.coreMetabolism(majorityPercentageCoreMetabolism)\n minimumOrganismsCount = math.ceil(self.organismsCount * (majorityPercentageNeofunctionalisation / 100))\n \n neofunctionalisedMetabolism = neofunctionalisedECs.filterGraph(coreMetabolism, minimumEcDifference = None, minimumOrganismsCount = minimumOrganismsCount)\n \n # colour core metabolism\n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = coreMetabolism\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def addedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def conservedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n conservedMetabolism = self.conservedMetabolism(majorityPercentageCoreMetabolism)\n \n parentNeofunctionalised= self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = conservedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = conservedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = conservedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n 
parentGraph.name = 'Conserved metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Conserved metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def divergedMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, colour = False):\n divergedMetabolism = self.divergedMetabolism(majorityPercentageCoreMetabolism, colour = colour)\n \n parentNeofunctionalised = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n childNeofunctionalised = self.childClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False)\n \n if colour is True:\n parentEdges = parentNeofunctionalised.getEdges()\n childEdges = childNeofunctionalised.getEdges()\n \n graph = divergedMetabolism\n \n Export.addColourAttribute(graph, colour = Export.Colour.GREEN, nodes = False, edges = parentEdges)\n Export.addColourAttribute(graph, colour = Export.Colour.YELLOW, nodes = False, edges = childEdges)\n \n graph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return graph\n else:\n parentGraph = divergedMetabolism[0].removeAllECsExcept(parentNeofunctionalised.getECs())\n childGraph = divergedMetabolism[1].removeAllECsExcept(childNeofunctionalised.getECs())\n \n parentGraph.name = 'Diverged metabolism neofunctionalised ECs *' + ' '.join(self.parentNCBInames) + '* -> ' + ' '.join(self.childNCBInames)\n childGraph.name = 'Diverged metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> *' + ' '.join(self.childNCBInames) + '*'\n \n return (parentGraph, childGraph)", "def lostMetabolismNeofunctionalisedECs(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation) -> SubstanceEcGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n lostECs = GeneFunctionLoss.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n parentGraph = self.parentClade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, colour = False).removeAllECsExcept(lostECs)\n parentGraph.name = 'Lost metabolism neofunctionalised ECs ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return parentGraph", "def redundantECsForContributingNeofunctionalisation(self, \n majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, \n majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, \n eValue = defaultEValue, \n redundancyType: 'RedundancyType' = None,\n considerOnlyECs = None) -> Dict[Neofunctionalisation, Set[EcNumber]]:\n from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType\n \n if redundancyType is None:\n redundancyType = RedundancyType.default\n \n #- calculate \"neofunctionalised\" ECs\n neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, 
majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()\n neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)\n \n #- calculate redundancy\n redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )\n redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)\n \n contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)\n contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())\n \n #- REPEAT for each function change consisting of \"neofunctionalised\" ECs, which also contribute to redundancy\n contributingNeofunctionalisations = dict()\n \n for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():\n #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered \"neofunctionalised\", and are in return contributing to redundancy \n \n if functionChange.ecA in contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy\n \n for neofunctionalisation in neofunctionalisations:\n currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)\n \n if currentSetOfContributedECs is None:\n currentSetOfContributedECs = set()\n contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs\n \n for ec in functionChange.ecPair:\n contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)\n if contributedECs is not None:\n currentSetOfContributedECs.update(contributedECs)\n \n return contributingNeofunctionalisations", "def coreMetabolism(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes) -> SubstanceEcGraph:\n graph = self.group.majorityEcGraph(majorityPercentage = majorityPercentageCoreMetabolism, noMultifunctional = excludeMultifunctionalEnzymes, keepOnHeap = True)\n graph.name = 'Core metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def __repr__(self):\n return \"EC(%s, %s)\" % (str(self.coefficient), repr(self.basefield))", "def complex_network_mapping(graph):\n vect = []\n\n n = nx.number_of_nodes(graph)\n e = nx.number_of_edges(graph)\n print n, e\n\n# adj = nx.adjacency_matrix(graph).toarray()\n# adj_bin = np.where(adj > 0, 1., 0.)\n# adj_conn = 1 - adj\n adj_bin = nx.adjacency_matrix(graph).toarray()\n adj_bin = np.array(adj_bin, dtype=np.float)\n\n # Node Betweenness binary\n bt_bin = nx.betweenness_centrality(graph).values()\n avg_btb = np.mean(bt_bin)\n vect.append(avg_btb)\n\n # Edge betweenness\n ebt = np.array(nx.edge_betweenness_centrality(graph).values())\n vect.append(np.mean(ebt))\n\n # Eigen vector centrality binary\n evc_bin = eigenvector_centrality_und(adj_bin)\n avg_evcb = np.mean(evc_bin)\n vect.append(avg_evcb)\n\n # Flow coefficient\n _, flow_bin, _ = flow_coef_bd(adj_bin)\n avg_flow = np.mean(flow_bin)\n vect.append(avg_flow)\n\n # Kcoreness centrality\n kcor_bin, _ = kcoreness_centrality_bu(adj_bin)\n avg_kcor = np.mean(kcor_bin)\n vect.append(avg_kcor)\n\n # Degree assortivity\n dac = nx.degree_assortativity_coefficient(graph)\n vect.append(dac)\n\n # Page rank centrality\n# pgr_wei = pagerank_centrality(adj_bin, d=0.85)\n# avg_pgr = np.mean(pgr_wei)\n# 
vect.append(avg_pgr)\n\n # Rich club coefficient\n# rcc = nx.rich_club_coefficient(graph).values()\n# avg_rcc = np.mean(rcc)\n# vect.append(avg_rcc)\n\n # Transitivity\n tr = nx.transitivity(graph)\n vect.append(tr)\n\n # average clustering\n avg_clst = nx.average_clustering(graph)\n vect.append(avg_clst)\n\n glb_ef = efficiency_bin(adj_bin)\n vect.append(glb_ef)\n\n return vect", "def cell_edges(self):", "def c_edges(self):\n self.compute_c_edges(self)\n return self._c_edges", "def get_ecg_graph():\n titles = ['ecg1', 'ecg2', 'ecg3']\n colors = ['rgb(240,0,0)', 'rgb(0,240,0)', 'rgb(0,0,240)']\n update()\n signames_ecg = queries['signames_ecg']\n signals = queries['signals']\n latesthr = queries['latesthr']\n return html.Div(className='ecg', children=[\n html.Div(style={'display': 'flex', 'height': '40vh'},\n children=[dcc.Graph(\n id=titles[i] + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': signals[signame]['time'],\n 'y': signals[signame][titles[i]],\n 'mode': 'line', 'name': signame, 'line': {'color':colors[i]}}\n ],\n 'layout': {\n 'font': {'color':'#fff'},\n 'title': '{}-{}'.format(signame, titles[i]),\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'voltage (mv)', 'color': '#fff', 'showgrid': 'False', 'range': np.linspace(-2.5, 2.5, 10)},\n 'paper_bgcolor':'#000', 'plot_bgcolor':'#000'\n }\n }\n ) for i in range(len(titles))]\n +\n [html.Div(\n style={'justify-content': 'center', 'display': 'flex',\n 'align-items': 'center', 'width': '10vh', 'font-size': '30pt', 'color': 'white'},\n children=['{}'.format(latesthr[signame][0])])\n ]\n ) for signame in signames_ecg])", "def set_ec(self, etacalc):\n if not self.__thermodyn:\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[i][etacalc[i]])\n else:\n if not etacalc in self.__A2[0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2]\n \n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = 
C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures 
---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = .5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 
0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 
5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli 
---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod in ['espresso']: C = -C/10.\n elif self.__cod in ['vasp','exciting','wien']: C = C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0\n elif self.__cod in ['emto']: C = C*self.__ToGPa/self.__V0\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = ((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n self.__C = C\n \n else:\n Cs = []\n for t in map(str,self.__T):#for t in range(len(self.__T)):\n C = np.zeros((6,6))\n \n LC = self.__structures.items()[0][1].LC\n if self.__mthd == 'Energy':\n if type(etacalc)==list:\n A2=[]\n for i in range(len(etacalc)):\n A2.append(self.__A2[t][i][etacalc[i]])\n else:\n if not etacalc in self.__A2[t][0].keys(): raise ValueError('Please coose one of %s'%(self.__A2[t][0].keys()))\n A2 = [a2[etacalc] for a2 in self.__A2[t]]\n \n #%%%--- Cubic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'CI' or \\\n LC == 'CII'):\n C[0,0] =-2.*(A2[0]-3.*A2[1])/3.\n C[1,1] = C[0,0]\n C[2,2] = C[0,0]\n C[3,3] = A2[2]/6.\n C[4,4] = C[3,3]\n C[5,5] = C[3,3]\n C[0,1] = (2.*A2[0]-3.*A2[1])/3.\n C[0,2] = C[0,1]\n C[1,2] = C[0,1]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Hexagonal structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = 2./3.*A2[0] + 4./3.*A2[1] - 2.*A2[2] - 2.*A2[3]\n C[0,2] = 1./6.*A2[0] - 2./3.*A2[1] + 0.5*A2[2]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[2]\n C[3,3] =-0.5*A2[2] + 0.5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'RI'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Rhombohedral II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 
'RII'):\n C[0,0] = 2.*A2[3]\n C[0,1] = A2[1]- 2.*A2[3]\n C[0,2] = .5*( A2[0] - A2[1] - A2[2])\n C[0,3] = .5*(-A2[3] - A2[4] + A2[5])\n C[0,4] = .5*(-A2[3] - A2[4] + A2[6])\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[1,3] =-C[0,3]\n C[1,4] =-C[0,4] \n C[2,2] = 2.*A2[2]\n C[3,3] = .5*A2[4]\n C[3,5] =-C[0,4]\n C[4,4] = C[3,3]\n C[4,5] = C[0,3]\n C[5,5] = .5*(C[0,0] - C[0,1])\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal I structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TI'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[3]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[3]\n C[0,2] = A2[0]/6.-2.*A2[1]/3.+.5*A2[3]\n C[1,1] = C[0,0]\n C[1,2] = C[0,2]\n C[2,2] = 2.*A2[3]\n C[3,3] = .5*A2[4]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Tetragonal II structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'TII'):\n C[0,0] = (A2[0]+2.*A2[1])/3.+.5*A2[2]-A2[4]\n C[1,1] = C[0,0]\n C[0,1] = (A2[0]+2.*A2[1])/3.-.5*A2[2]-A2[4]\n C[0,2] = A2[0]/6.-(2./3.)*A2[1]+.5*A2[4]\n C[0,5] = (-A2[2]+A2[3]-A2[6])/4.\n C[1,2] = C[0,2]\n C[1,5] =-C[0,5]\n C[2,2] = 2.*A2[4]\n C[3,3] = .5*A2[5]\n C[4,4] = C[3,3]\n C[5,5] = .5*A2[6]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Orthorhombic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'O'):\n C[0,0] = 2.*A2[0]/3.+4.*A2[1]/3.+A2[3]-2.*A2[4]-2.*A2[5]\n C[0,1] = 1.*A2[0]/3.+2.*A2[1]/3.-.5*A2[3]-A2[5]\n C[0,2] = 1.*A2[0]/3.-2.*A2[1]/3.+4.*A2[2]/3.-.5*A2[3]-A2[4]\n C[1,1] = 2.*A2[4]\n C[1,2] =-2.*A2[1]/3.-4.*A2[2]/3.+.5*A2[3]+A2[4]+A2[5]\n C[2,2] = 2.*A2[5]\n C[3,3] = .5*A2[6]\n C[4,4] = .5*A2[7]\n C[5,5] = .5*A2[8]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Monoclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'M'):\n C[0,0] = 2.*A2[0]/3.+8.*(A2[1]+A2[2])/3.-2.*(A2[5]+A2[8]+A2[9])\n C[0,1] = A2[0]/3.+4.*(A2[1]+A2[2])/3.-2.*A2[5]-A2[9]\n C[0,2] =(A2[0]-4.*A2[2])/3.+A2[5]-A2[8]\n C[0,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.+.5*(A2[5]+A2[7]+A2[8]+A2[9]-A2[12])\n C[1,1] = 2.*A2[8]\n C[1,2] =-4.*(2.*A2[1]+A2[2])/3.+2.*A2[5]+A2[8]+A2[9]+A2[12]\n C[1,5] =-1.*A2[0]/6.-2.*(A2[1]+A2[2])/3.-.5*A2[3]+A2[5]+.5*(A2[7]+A2[8]+A2[9])\n C[2,2] = 2.*A2[9]\n C[2,5] =-1.*A2[0]/6.+2.*A2[1]/3.-.5*(A2[3]+A2[4]-A2[7]-A2[8]-A2[9]-A2[12])\n C[3,3] = .5*A2[10]\n C[3,4] = .25*(A2[6]-A2[10]-A2[11])\n C[4,4] = .5*A2[11]\n C[5,5] = .5*A2[12]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n #%%%--- Triclinic structures ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if (LC == 'N'):\n C[0,0] = 2.*A2[0]\n C[0,1] = 1.*(-A2[0]-A2[1]+A2[6])\n C[0,2] = 1.*(-A2[0]-A2[2]+A2[7])\n C[0,3] = .5*(-A2[0]-A2[3]+A2[8]) \n C[0,4] = .5*(-A2[0]+A2[9]-A2[4])\n C[0,5] = .5*(-A2[0]+A2[10]-A2[5])\n C[1,1] = 2.*A2[1]\n C[1,2] = 1.*(A2[11]-A2[1]-A2[2])\n C[1,3] = 
.5*(A2[12]-A2[1]-A2[3])\n C[1,4] = .5*(A2[13]-A2[1]-A2[4])\n C[1,5] = .5*(A2[14]-A2[1]-A2[5])\n C[2,2] = 2.*A2[2] \n C[2,3] = .5*(A2[15]-A2[2]-A2[3])\n C[2,4] = .5*(A2[16]-A2[2]-A2[4])\n C[2,5] = .5*(A2[17]-A2[2]-A2[5])\n C[3,3] = .5*A2[3]\n C[3,4] = .25*(A2[18]-A2[3]-A2[4])\n C[3,5] = .25*(A2[19]-A2[3]-A2[5])\n C[4,4] = .5*A2[4]\n C[4,5] = .25*(A2[20]-A2[4]-A2[5])\n C[5,5] = .5*A2[5]\n #--------------------------------------------------------------------------------------------------------------------------------\n \n elif self.__mthd == 'Stress':\n \n if (LC == 'CI' or \\\n LC == 'CII'):\n Matrix = np.mat([[1.0, 5.0, 0.0],\n [2.0, 4.0, 0.0],\n [3.0, 3.0, 0.0],\n [0.0, 0.0, 4.0],\n [0.0, 0.0, 5.0],\n [0.0, 0.0, 6.0]])\n \n if (LC == 'HI' or \\\n LC == 'HII'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0],\n [ 2, 1, 3, 0, 0],\n [ 0, 0, 3, 3, 0],\n [ 0, 0, 0, 0, 4],\n [ 0, 0, 0, 0, 5],\n [ 3,-3, 0, 0, 0],\n [ 3,-5,-1, 0, 0],\n [-5, 3,-1, 0, 0],\n [ 0, 0,-2,-1, 0],\n [ 0, 0, 0, 0, 6],\n [ 0, 0, 0, 0, 2],\n [-2, 2, 0, 0, 0]])\n \n if (LC == 'RI'):\n Matrix = np.mat([[ 1, 2, 3, 4, 0, 0],\n [ 2, 1, 3,-4, 0, 0],\n [ 0, 0, 3, 0, 3, 0],\n [ 0, 0, 0,-1, 0, 4],\n [ 0, 0, 0, 6, 0, 5],\n [ 3,-3, 0, 5, 0, 0],\n [ 3,-5,-1, 6, 0, 0],\n [-5, 3,-1,-6, 0, 0],\n [ 0, 0,-2, 0,-1, 0],\n [ 0, 0, 0, 8, 0, 6],\n [ 0, 0, 0,-4, 0, 2],\n [-2, 2, 0, 2, 0, 0]])\n \n if (LC == 'RII'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 0, 0],\n [ 2, 1, 3,-4,-5, 0, 0],\n [ 0, 0, 3, 0, 0, 3, 0],\n [ 0, 0, 0,-1,-6, 0, 4],\n [ 0, 0, 0, 6,-1, 0, 5],\n [ 3,-3, 0, 5,-4, 0, 0],\n [ 3,-5,-1, 6, 2, 0, 0],\n [-5, 3,-1,-6,-2, 0, 0],\n [ 0, 0,-2, 0, 0,-1, 0],\n [ 0, 0, 0, 8, 4, 0, 6],\n [ 0, 0, 0,-4, 8, 0, 2],\n [-2, 2, 0, 2,-6, 0, 0]])\n \n if (LC == 'TI'):\n Matrix = np.mat([[ 1, 2, 3, 0, 0, 0],\n [ 2, 1, 3, 0, 0, 0],\n [ 0, 0, 3, 3, 0, 0],\n [ 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0, 0, 0, 6],\n [ 3,-5,-1, 0, 0, 0],\n [-5, 3,-1, 0, 0, 0],\n [ 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 0, 0,-4]])\n \n if (LC == 'TII'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0],\n [ 2, 1, 3,-6, 0, 0, 0],\n [ 0, 0, 3, 0, 3, 0, 0],\n [ 0, 0, 0, 0, 0, 4, 0],\n [ 0, 0, 0, 0, 0, 5, 0],\n [ 0, 0, 0,-1, 0, 0, 6],\n [ 3,-5,-1,-4, 0, 0, 0],\n [-5, 3,-1, 4, 0, 0, 0],\n [ 0, 0,-2, 0,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 6, 0],\n [ 0, 0, 0, 0, 0, 2, 0],\n [ 0, 0, 0, 8, 0, 0,-4]])\n \n if (LC == 'O'):\n Matrix = np.mat([[1, 2, 3, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 2, 3, 0, 0, 0, 0],\n [0, 0, 1, 0, 2, 3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 6],\n [3,-5,-1, 0, 0, 0, 0, 0, 0],\n [0, 3, 0,-5,-1, 0, 0, 0, 0],\n [0, 0, 3, 0,-5,-1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-4],\n [5, 4, 6, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 4, 6, 0, 0, 0, 0],\n [0, 0, 5, 0, 4, 6, 0, 0, 0],\n [0, 0, 0, 0, 0, 0,-2, 0, 0],\n [0, 0, 0, 0, 0, 0, 0,-1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0,-3]])\n \n if (LC == 'M'):\n Matrix = np.mat([[ 1, 2, 3, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 2, 3, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 2, 0, 3, 6, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 5, 0],\n [ 0, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0, 6],\n [-2, 1, 4,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 1, 4,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 1, 0, 4,-5, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-3, 6, 0],\n [ 0, 0, 0,-2, 0, 0, 1, 0, 4, 0, 0,-5, 0],\n [ 3,-5,-1,-4, 0, 0, 0, 
0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0,-5,-1,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0,-5, 0,-1,-4, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 2, 0],\n [ 0, 0, 0, 3, 0, 0,-5, 0,-1, 0, 0,-4, 0],\n [-4,-6, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0,-6, 5, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0,-6, 0, 5, 2, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,-3, 0],\n [ 0, 0, 0,-4, 0, 0,-6, 0, 5, 0, 0, 2, 0],\n [ 5, 4, 6,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 4, 6,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 4, 0, 6,-3, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-2,-1, 0],\n [ 0, 0, 0, 5, 0, 0, 4, 0, 6, 0, 0,-3, 0]])\n \n if (LC == 'N'):\n Matrix = np.mat([[ 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 1, 0, 0, 0, 0, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 5, 6, 0, 0, 0],\n [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6, 0],\n [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 4, 0, 5, 6],\n [-2, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-2, 0, 0, 0, 0, 1, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4,-3, 6,-5, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 6,-5, 0, 0, 0],\n [ 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5, 0],\n [ 0, 0, 0, 0, 0,-2, 0, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0,-3, 0, 6,-5],\n [ 3,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 3, 0, 0, 0, 0,-5,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 6, 2,-4, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 2,-4, 0, 0, 0],\n [ 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4, 0],\n [ 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,-5, 0, 0, 0,-1, 0, 0, 6, 0, 2,-4],\n [-4,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-4, 0, 0, 0, 0,-6, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 1,-3, 2, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1,-3, 2, 0, 0, 0],\n [ 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2, 0],\n [ 0, 0, 0, 0, 0,-4, 0, 0, 0, 0,-6, 0, 0, 0, 5, 0, 0, 1, 0,-3, 2],\n [ 5, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 5, 0, 0, 0, 0, 4, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6,-2,-1,-3, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2,-1,-3, 0, 0, 0],\n [ 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3, 0],\n [ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0, 0, 0, 6, 0, 0,-2, 0,-1,-3],\n [-6, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0,-6, 0, 0, 0, 0, 3,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 5,-4, 1, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5,-4, 1, 0, 0, 0],\n [ 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1, 0],\n [ 0, 0, 0, 0, 0,-6, 0, 0, 0, 0, 3, 0, 0, 0,-2, 0, 0, 5, 0,-4, 1]])\n \n sigma = np.array(self.__sigma[etacalc])\n \n ci = np.linalg.lstsq(Matrix,sigma)\n \n #-- Cubic structures ------------------------------------------------------------------------------\n if (LC == 'CI' or \\\n LC == 'CII'):\n \n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[3,3]=ci[0][2]\n 
C[1,1]=C[0,0]\n C[2,2]=C[0,0]\n C[0,2]=C[0,1]\n C[1,2]=C[0,1]\n C[4,4]=C[3,3]\n C[5,5]=C[3,3]\n \n #-- Hexagonal Structures --------------------------------------------------------------------------\n if (LC == 'HI' or \\\n LC == 'HII'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[2,2]=ci[0][3]\n C[3,3]=ci[0][4]\n C[1,1]=C[0,0]\n C[1,2]=C[0,2]\n C[4,4]=C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral I Structures ---------------------------------------------------------------------\n if (LC == 'RI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Rhombohedral II Structures --------------------------------------------------------------------\n if (LC == 'RII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,3]= ci[0][3]\n C[0,4]= ci[0][4]\n C[2,2]= ci[0][5]\n C[3,3]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,3]=-C[0,3]\n C[4,5]= C[0,3]\n C[1,4]=-C[0,4]\n C[3,5]=-C[0,4]\n C[4,4]= C[3,3]\n C[5,5]=0.5*(C[0,0]-C[0,1])\n \n #-- Tetragonal I Structures -----------------------------------------------------------------------\n if (LC == 'TI'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[2,2]= ci[0][3]\n C[3,3]= ci[0][4]\n C[5,5]= ci[0][5]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[4,4]= C[3,3]\n \n #-- Tetragonal II Structures ----------------------------------------------------------------------\n if (LC == 'TII'):\n C[0,0]= ci[0][0]\n C[0,1]= ci[0][1]\n C[0,2]= ci[0][2]\n C[0,5]= ci[0][3]\n C[2,2]= ci[0][4]\n C[3,3]= ci[0][5]\n C[5,5]= ci[0][6]\n C[1,1]= C[0,0]\n C[1,2]= C[0,2]\n C[1,5]=-C[0,5]\n C[4,4]= C[3,3]\n \n #-- Orthorhombic Structures -----------------------------------------------------------------------\n if (LC == 'O'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[1,1]=ci[0][3]\n C[1,2]=ci[0][4]\n C[2,2]=ci[0][5]\n C[3,3]=ci[0][6]\n C[4,4]=ci[0][7]\n C[5,5]=ci[0][8]\n \n #-- Monoclinic Structures -------------------------------------------------------------------------\n if (LC == 'M'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,5]=ci[0][3]\n C[1,1]=ci[0][4]\n C[1,2]=ci[0][5]\n C[1,5]=ci[0][6]\n C[2,2]=ci[0][7]\n C[2,5]=ci[0][8]\n C[3,3]=ci[0][9]\n C[3,4]=ci[0][10]\n C[4,4]=ci[0][11]\n C[5,5]=ci[0][12]\n \n #-- Triclinic Structures --------------------------------------------------------------------------\n if (LC == 'N'):\n C[0,0]=ci[0][0]\n C[0,1]=ci[0][1]\n C[0,2]=ci[0][2]\n C[0,3]=ci[0][3]\n C[0,4]=ci[0][4]\n C[0,5]=ci[0][5]\n C[1,1]=ci[0][6]\n C[1,2]=ci[0][7]\n C[1,3]=ci[0][8]\n C[1,4]=ci[0][9]\n C[1,5]=ci[0][10]\n C[2,2]=ci[0][11]\n C[2,3]=ci[0][12]\n C[2,4]=ci[0][13]\n C[2,5]=ci[0][14]\n C[3,3]=ci[0][15]\n C[3,4]=ci[0][16]\n C[3,5]=ci[0][17]\n C[4,4]=ci[0][18]\n C[4,5]=ci[0][19]\n C[5,5]=ci[0][20]\n #--------------------------------------------------------------------------------------------------\n \n \n \n for i in range(5):\n for j in range(i+1,6):\n C[j,i] = C[i,j] \n #%%%--- Calculating the elastic moduli ---%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n if self.__cod == 'espresso': C = -C/10.\n elif self.__cod in ['vasp','emto','exciting','wien']: C=C*self.__vToGPa/self.__V0#C = C/4.#C=C*self.__CONV/self.__V0#C = C/4.\n self.BV = (C[0,0]+C[1,1]+C[2,2]+2*(C[0,1]+C[0,2]+C[1,2]))/9\n self.GV = 
((C[0,0]+C[1,1]+C[2,2])-(C[0,1]+C[0,2]+C[1,2])+3*(C[3,3]+C[4,4]+C[5,5]))/15\n self.EV = (9*self.BV*self.GV)/(3*self.BV+self.GV)\n self.nuV= (1.5*self.BV-self.GV)/(3*self.BV+self.GV)\n self.S = np.linalg.inv(C)\n self.BR = 1/(self.S[0,0]+self.S[1,1]+self.S[2,2]+2*(self.S[0,1]+self.S[0,2]+self.S[1,2]))\n self.GR =15/(4*(self.S[0,0]+self.S[1,1]+self.S[2,2])-4*(self.S[0,1]+self.S[0,2]+self.S[1,2])+3*(self.S[3,3]+self.S[4,4]+self.S[5,5]))\n self.ER = (9*self.BR*self.GR)/(3*self.BR+self.GR)\n self.nuR= (1.5*self.BR-self.GR)/(3*self.BR+self.GR)\n self.BH = 0.50*(self.BV+self.BR)\n self.GH = 0.50*(self.GV+self.GR)\n self.EH = (9.*self.BH*self.GH)/(3.*self.BH+self.GH)\n self.nuH= (1.5*self.BH-self.GH)/(3.*self.BH+self.GH)\n self.AVR= 100.*(self.GV-self.GR)/(self.GV+self.GR)\n #--------------------------------------------------------------------------------------------------------------------------------\n Cs.append(C)\n self.__C = Cs", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def collectiveMetabolism(self, excludeMultifunctionalEnzymes = defaultExcludeMultifunctionalEnzymes, addEcDescriptions = False) -> SubstanceEcGraph:\n graph = self.group.collectiveEcGraph(noMultifunctional = excludeMultifunctionalEnzymes, addCount = True, keepOnHeap = True, addEcDescriptions = addEcDescriptions)\n graph.name = 'Collective metabolism ECs ' + ' '.join(self.ncbiNames)\n return graph", "def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/26')", "def compute_edge_logits(self):\n TODO('https://github.com/posterior/treecat/issues/27')", "def NND_eta( eqCat, dConst, verbose = False, **kwargs):\n #-------------------------------set args and kwargs----------------------------------------------- \n rmax = 500 # in km\n tmax = 20 # in years\n M0 = 0\n if 'M0' in kwargs.keys() and kwargs['M0'] is not None:\n M0 = kwargs['M0']\n if 'rmax' in kwargs.keys() and kwargs['rmax'] is not None:\n rmax = kwargs['rmax']\n if 'tmax' in kwargs.keys() and kwargs['tmax'] is not None:\n tmax = kwargs['tmax']\n #-----------------------------add small uncertainty to X in case events are colocated-------------------------- \n if 'correct_co_located' in kwargs.keys() and kwargs['correct_co_located'] == True:\n vUncer = np.random.randn( eqCat.size())*1e-10\n eqCat.data['X'] += vUncer\n eqCat.data['Time'] += abs( vUncer)#time has to stay positive otherwise parent-offspring gets switched\n #------------------------------------------------------------------------------\n aNND = np.zeros( eqCat.size())\n vID_p = np.zeros( eqCat.size())\n vID_c = np.zeros( eqCat.size())\n deltaMag = (eqCat.data['Mag'] - M0)\n \n for jC in range( eqCat.size()):\n if verbose == True:\n print 'event %i of %i'%( jC+1, eqCat.size())\n # interevent times: take events that happend before t_i \n # child - parent > 0 \n tau = eqCat.data['Time'][jC] - eqCat.data['Time']\n sel_tau_par = tau > 0\n if sel_tau_par.sum() > 0:\n\n vcurr_ID = np.arange( eqCat.size(), dtype = int)[sel_tau_par]\n vR = np.sqrt( (eqCat.data['X'][jC] - eqCat.data['X'][vcurr_ID])**2 + (eqCat.data['Y'][jC] - eqCat.data['Y'][vcurr_ID])**2 )\n # haversine distance\n # = projUtils.haversine( eqCat.data['Lon'][jC], eqCat.data['Lat'][jC],eqCat.data['Lon'][curr_vID], eqCat.data['Lat'][curr_vID] ) \n sel_r_par = vR < rmax\n if sel_r_par.sum() > 0:\n vcurr_ID = vcurr_ID[sel_r_par]\n curr_Eta = tau[vcurr_ID]* (vR[sel_r_par]**dConst['D']) *( 10**(-dConst['b']*deltaMag[vcurr_ID]))\n sel_min = curr_Eta == curr_Eta.min()\n aNND[jC] = 
curr_Eta[sel_min][0]\n vID_p[jC] = eqCat.data['N'][vcurr_ID][sel_min][0]\n vID_c[jC] = eqCat.data['N'][jC]\n #print 'parent', eqCat.data['N'][vcurr_ID][sel_min][0], 'offspring', eqCat.data['N'][jC]\n #print 'parent', eqCat.data['Time'][vcurr_ID][sel_min][0], 'offspring', eqCat.data['Time'][jC]\n\n if sel_min.sum() > 1:\n print aNND[jC], curr_Eta[sel_min], eqCat.data['N'][vcurr_ID][sel_min]\n print eqCat.data['Lon'][vcurr_ID][sel_min], eqCat.data['Lat'][vcurr_ID][sel_min]\n print eqCat.data['X'][vcurr_ID][sel_min], eqCat.data['Y'][vcurr_ID][sel_min]\n sel2 = aNND > 0\n if np.logical_not(sel2).sum() > 0:\n print 'remove %i offspring without prior parents in catalog'%(np.logical_not(sel2).sum())\n #raise ValueError, error_str\n # remove events with aNND < 0; i.e. event at the beginning with no preceding parent\n return { 'aNND' : aNND[sel2], 'aEqID_p' : vID_p[sel2], 'aEqID_c' : vID_c[sel2]}", "def colored_edges(genome):\n edges = []\n for chromo in genome:\n nodes = [0] + chromosome_to_cycle(chromo)\n nodes.append(nodes[1])\n for j in range(1, len(chromo) + 1):\n edges.append((nodes[2 * j], nodes[2 * j + 1]))\n\n return edges", "def neofunctionalisations(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, eValue = defaultEValue, considerOnlyECs = None) -> Set[Neofunctionalisation]:\n # get neofunctionalisations \n return self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs).getNeofunctionalisations()", "def all_subconstituents(self, compute=False):\n out = {}\n for i in range(self._.d+1):\n try:\n out[i] = self.subconstituent(i, compute=compute)\n except IndexError:\n pass\n return out", "def display_energy_levels_0d(diagram, num_atoms, atoms, h_poly):\n h = eval_hamiltonian(num_atoms, h_poly, (1, 1))\n\n e, v = eigensystem(h)\n\n left = 0\n bottom = 0\n right = max([len(row) for row in diagram.split('\\n')])\n top = len(diagram.split('\\n'))\n\n plot_rows = numpy.ceil(math.sqrt(num_atoms+1))\n plot_cols = plot_rows\n\n for i in range(num_atoms):\n matplotlib.pyplot.subplot(plot_rows, plot_cols, i+1, axisbg=\"#000000\")\n y = [atom[0] for atom in atoms]\n x = [atom[1] for atom in atoms]\n c = numpy.abs(v[i]*v[i])\n\n matplotlib.pyplot.title('E = %f' % numpy.real(e[i]), fontsize = 10)\n norm = matplotlib.colors.Normalize(vmin = min(c),\n vmax = max(0.0001, max(c)))\n #x = [0,0,1,1]\n #y = [0,1,0,1]\n #c = [1,2,3,4]\n matplotlib.pyplot.hexbin(x, y, C = c,\n gridsize = (right-left, top-bottom),\n extent = (left, right, bottom, top),\n cmap = matplotlib.pyplot.get_cmap(\"gray\"),\n norm = norm\n )\n\n matplotlib.pyplot.subplot(plot_rows, plot_cols, num_atoms+1)\n matplotlib.pyplot.scatter(num_atoms*[0], e, s = 0.1)", "def addedMetabolismNeofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism) -> SubstanceEnzymeGraph:\n parentCoreMetabolism = self.parentClade.coreMetabolism(majorityPercentageCoreMetabolism)\n childCoreMetabolism = self.childClade.coreMetabolism(majorityPercentageCoreMetabolism)\n addedECs = GeneFunctionAddition.getECs(parentCoreMetabolism, childCoreMetabolism)\n \n childGraph = self.childClade.neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, colour = False).keepEnzymesByEC(addedECs)\n childGraph.name = 'Added metabolism neofunctionalised enzymes ' + ' '.join(self.parentNCBInames) + ' -> ' + ' '.join(self.childNCBInames)\n \n return childGraph", "def neofunctionalisedEnzymes(self, majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, 
colour = False, eValue = defaultEValue, considerOnlyECs = None) -> SubstanceEnzymeGraph:\n # get neofunctionalisations \n neofunctionalisedEnzymes = self._neofunctionalisedEnzymes(majorityPercentageCoreMetabolism, eValue, considerOnlyECs)\n \n # filter core metabolism enzyme graph\n enzymeGraph = self.coreMetabolismEnzymes(majorityPercentageCoreMetabolism) \n neofunctionalisedMetabolism = neofunctionalisedEnzymes.filterGraph(enzymeGraph, minimumEcDifference = None)\n \n # colour core metabolism \n if colour is not False:\n \n if colour is True:\n colourToUse = Export.Colour.GREEN\n else:\n colourToUse = colour\n \n neofunctionalisedMetabolismOnly = neofunctionalisedMetabolism\n neofunctionalisedMetabolism = enzymeGraph\n Export.addColourAttribute(neofunctionalisedMetabolism, colourToUse, nodes = False, edges = neofunctionalisedMetabolismOnly.getEdges())\n \n neofunctionalisedMetabolism.name = 'Neofunctionalised core metabolism enzymes ' + ' '.join(self.ncbiNames)\n \n return neofunctionalisedMetabolism", "def vnE(self):\n return np.array(\n [x for x in [self.nEx, self.nEy, self.nEz] if x is not None],\n dtype=int\n )", "def get_band_edges():\n # Vacuum level energy from LOCPOT.\n locpot = Locpot.from_file('LOCPOT')\n evac = max(locpot.get_average_along_axis(2))\n\n vasprun = Vasprun('vasprun.xml')\n bs = vasprun.get_band_structure()\n eigenvals = vasprun.eigenvalues\n efermi = vasprun.efermi - evac\n\n if bs.is_metal():\n edges = {'up_cbm': None, 'up_vbm': None, 'dn_cbm': None, 'dn_vbm': None,\n 'efermi': efermi}\n\n elif bs.is_spin_polarized:\n up_cbm = min(\n [min([e[0] for e in eigenvals[Spin.up][i] if not e[1]])\n for i in range(len(eigenvals[Spin.up]))]) - evac\n up_vbm = max(\n [max([e[0] for e in eigenvals[Spin.up][i] if e[1]])\n for i in range(len(eigenvals[Spin.up]))]) - evac\n dn_cbm = min(\n [min([e[0] for e in eigenvals[Spin.down][i] if not e[1]])\n for i in range(len(eigenvals[Spin.down]))]) - evac\n dn_vbm = max(\n [max([e[0] for e in eigenvals[Spin.down][i] if e[1]])\n for i in range(len(eigenvals[Spin.down]))]) - evac\n edges = {'up_cbm': up_cbm, 'up_vbm': up_vbm, 'dn_cbm': dn_cbm,\n 'dn_vbm': dn_vbm, 'efermi': efermi}\n\n else:\n cbm = bs.get_cbm()['energy'] - evac\n vbm = bs.get_vbm()['energy'] - evac\n edges = {'up_cbm': cbm, 'up_vbm': vbm, 'dn_cbm': cbm, 'dn_vbm': vbm,\n 'efermi': efermi}\n\n return edges", "def sage_graph(self):\n self.fe.load_cache()\n edges = []\n is_bipartite = self.variant.is_bipartite()\n for X in self.L:\n for Y in self.L:\n a = self.op_norm(X, Y)\n if not self.K.is_zero(a):\n for c in self.K.unit_group:\n d = a - c\n if X != Y or c < d or is_bipartite:\n edges.append(((X, c, False), (Y, d, is_bipartite)))\n if X == Y and not is_bipartite:\n break\n return sage.all.Graph(edges)", "def _get_full_graph(self):", "def nE(self):\n return int(self.vnE.sum())", "def edges(self):\n return map(Edge, self._top_exp.edges())" ]
[ "0.6325263", "0.6321526", "0.62352514", "0.6154268", "0.6030487", "0.5650482", "0.5579346", "0.55359584", "0.55139494", "0.5471832", "0.5470082", "0.54345083", "0.54121524", "0.53877515", "0.53301024", "0.53161114", "0.5314621", "0.5297274", "0.5284816", "0.5276731", "0.526976", "0.5254203", "0.5225095", "0.5213531", "0.5208045", "0.5200646", "0.51907206", "0.5168163", "0.5167861", "0.51625115" ]
0.6479725
0
Two clades in the NCBI taxonomy; 'child' is assumed to be younger and must be nested somewhere inside 'parent'.
def __init__(self, parent, child, excludeUnclassified = defaultExcludeUnclassified):
    # read first NCBI name from Clade object, if necessary
    if isinstance(parent, Clade):
        parentNCBIname = parent.ncbiNames[0]
    elif not isinstance(parent, str): # must be iterable, else fail
        parentNCBIname = parent[0]
    else: # plain NCBI name given as a string; use it directly so the name is always bound
        parentNCBIname = parent

    if isinstance(child, Clade):
        childNCBIname = child.ncbiNames[0]
    elif not isinstance(child, str): # must be iterable, else fail
        childNCBIname = child[0]
    else: # plain NCBI name given as a string
        childNCBIname = child

    # check if child is really a child of parent
    taxonomy = NCBI.getTaxonomy()

    parentNode = taxonomy.searchNodesByPath(parentNCBIname, exceptPaths=('unclassified' if excludeUnclassified else None))
    if parentNode is None or len(parentNode) == 0:
        raise ValueError("No clade of this path found: " + parentNCBIname)
    else: # only consider first element
        parentNode = parentNode[0]

    childNode = taxonomy.searchNodesByPath(childNCBIname, exceptPaths=('unclassified' if excludeUnclassified else None))
    if childNode is None or len(childNode) == 0:
        raise ValueError("No clade of this path found: " + childNCBIname)
    else: # only consider first element
        childNode = childNode[0]

    foundParent = False
    for ancestor in childNode.ancestors:
        if Taxonomy.nodePath2String(ancestor) == Taxonomy.nodePath2String(parentNode):
            foundParent = True
            break

    if not foundParent:
        raise ValueError("Child is not a descendant of parent.")

    super().__init__(parent, child, excludeUnclassified)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, parent, child, excludeUnclassified = defaultExcludeUnclassified, oneOrganismPerSpecies = defaultOneOrganismPerSpecies):\n # read NCBI names from Clade object, if necessary\n if isinstance(parent, Clade):\n self.parentClade = parent\n else:\n self.parentClade = Clade(parent, excludeUnclassified, oneOrganismPerSpecies=oneOrganismPerSpecies)\n \n if isinstance(child, Clade):\n self.childClade = child\n else:\n self.childClade = Clade(child, excludeUnclassified, oneOrganismPerSpecies=oneOrganismPerSpecies)", "def children(self, taxon, taxonomy):\n\n c = set()\n for taxon_id, taxa in taxonomy.items():\n if taxon in taxa:\n\n if taxon.startswith('s__'):\n c.add(taxon_id)\n else:\n taxon_index = taxa.index(taxon)\n for child in taxa[taxon_index + 1:]:\n if len(child) > 3: # not just an empty prefix\n c.add(child)\n\n return c", "def add_hierarchy(self, parent, edge, child): # XXX DEPRECATED\n if type(parent) != rdflib.URIRef:\n parent = self.check_thing(parent)\n\n if type(edge) != rdflib.URIRef:\n edge = self.check_thing(edge)\n\n if type(child) != infixowl.Class:\n if type(child) != rdflib.URIRef:\n child = self.check_thing(child)\n child = infixowl.Class(child, graph=self.g)\n\n restriction = infixowl.Restriction(edge, graph=self.g, someValuesFrom=parent)\n child.subClassOf = [restriction] + [c for c in child.subClassOf]", "def test_make_taxonomy(self):\n basic_test_runner(self, 'taxonomy')", "def add_relatives(self, child2parent, idx2word):\n for child, parent in child2parent.items():\n if parent not in (0, -1):\n parent_word = idx2word[parent]\n parent_word.add_child(child)\n child.parent = parent_word", "def is_allowed_to_have_child_terms(self):\n return self._is_allowed_to_have_child_terms", "def _is_child(self, parent, child): # type: (str, str) -> bool\n return child != parent and child.startswith(parent + \".\")", "def is_allowed_to_have_child_terms(self, is_allowed_to_have_child_terms):\n self._is_allowed_to_have_child_terms = is_allowed_to_have_child_terms", "def Children(self) -> _n_1_t_2:", "def findDiscripancies(taxonomy):\n i = 0\n for entry in taxonomy:\n if entry['parentName'] != None:\n print entry['nodeName']\n if entry['nodeName'].lower() == entry['parentName'].lower():\n i += 1\n print \"No of same nodes = {} \" .format(i)", "def taxon_children(self, taxonomy):\n\n taxon_children = defaultdict(set)\n for taxon_id, taxa in taxonomy.items():\n for i, taxon in enumerate(taxa):\n if len(taxon) == 3:\n continue # just rank prefix\n\n if len(taxa) > i + 1 and len(taxa[i + 1]) != 3:\n taxon_children[taxon].add(taxa[i + 1])\n\n if len(taxa) > self.rank_index['s__']:\n taxon = taxa[self.rank_index['s__']]\n if taxon != 's__':\n taxon_children[taxon].add(taxon_id)\n\n return taxon_children", "def test_process_label_in_node(self):\n tree = Node(children=[\n Node(\"Defining secret phrase.\", label=['AB', 'a']),\n Node(\"Has secret phrase. 
Then some other content\", \n label=['AB', 'b'])\n ], label=['AB'])\n t = Terms(tree)\n t.scoped_terms = {\n ('AB',): [Ref(\"secret phrase\", \"AB-a\", (9,22))]\n }\n # Term is defined in the first child\n self.assertEqual([], t.process(tree.children[0]))\n self.assertEqual(1, len(t.process(tree.children[1])))", "def cu_for_new_child(self,instance,undo=True):\n\t\tself.utility.increment_counts(instance)\n\t\tself.create_new_child(instance)\n\t\tcu = self.utility.category_utility()\n\t\tif undo:\n\t\t\tself.tree.children.pop()\n\t\t\tself.utility.decrement_counts(instance)\n\t\treturn cu", "def gen_child(self, g, ng, child):\n with About(child.debug, self.relation):\n self.remap_node((g, child), g, child, ng, ng.apply())", "def test_field_resolution_multiple_inheritance_with_child_field_defined(self):\n child_field = ChildWithMultipleParentsAndTheSameField()._get_fields()[\"name\"]\n self.assertEqual(child_field.default, \"C\")", "def is_child_of(self, *args):\n return _ida_hexrays.cexpr_t_is_child_of(self, *args)", "def __post_init__(self) -> None:\n arity = self.root.arity\n length = len(self.children)\n if arity != length:\n raise ValueError(\n 'Incorrect number of child terms: '\n f'Expected {arity}, found {length}'\n )", "def _del_node_two_children(self, parent, node):\n succ = self._get_successor(node)\n self.delete(succ._data)\n succ._rkid = node._rkid\n succ._lkid = node._lkid\n if node._rkid:\n node._rkid._parent = succ\n if node._lkid:\n node._lkid._parent = succ\n if node is not self._root:\n if parent._rkid is node:\n parent._rkid = succ\n succ._parent = parent\n else:\n parent._lkid = succ\n succ._parent = parent\n else:\n self._root = succ\n succ._parent = None", "def search_category(self):\n return _(self.child_class)", "def test_grandchildren():\n\n # note c.upto(\"status\").desired.grandchildren\n # this is the same as *c.upto(\"status\").desired in python3.5+\n res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.upto(\"status\").desired.grandchildren))\n assert \"type\" in res\n assert \"reason\" in res\n assert \"version\" in res\n assert \"image\" in res\n assert \"force\" in res", "def get_code_per_child(self, obj, child):\n return []", "def set_child(self,b):\n if b.isChecked() == True:\n self.mother = False\n self.child = True\n else:\n self.mother = True\n self.child = False", "def get_taxonomy(): # noqa: E501\n return 'do some magic!'", "def test_environmentReverseInheritance(self):\n # In the child only\n node = create_node(\"somewhere\", \"myservice\", \"parent:child\")\n disco = create_disco()\n disco.onMessage(None, NodeActive(node))\n # Parent can't find it\n self.assertEqual(resolve(disco, \"myservice\", \"1.0\", \"parent\"), None)", "def children(self): # noqa: ANN201", "def __init__(self, child_type = None):\r\n super().__init__()\r\n self.__child_dict = collections.OrderedDict()\r\n self.__child_type = child_type\r\n self.__mykeys = ()\r\n self.__parent = None", "def create_child(self, value, fun=None):\n child = TaxonomyTree(value, self.weights, fun)\n self.children[value] = child\n child.parent = self\n child.level = self.level + 1\n child.__root = self.__root\n self.__root.__fast_find.append((child.fun, child))\n return child", "def cladistic(tree, taxa):\n tips = []\n taxa = set(taxa)\n for tip in tree.tips():\n if tip.name in taxa:\n tips.append(tip)\n n = len(taxa)\n if len(tips) < n:\n raise ValueError('Taxa not found in the tree.')\n return ('uni' if n == 1 else\n ('mono' if len(tree.lca(tips).subset()) == n else\n 'poly'))", "def 
testMotherChild(self):\n attr = self.session.create_visit_attr()\n\n self.util.stringTypeTest(self, attr, \"mother_child\")\n\n self.util.stringPropertyTest(self, attr, \"mother_child\")", "def childNCBInames(self):\n return self.childClade.ncbiNames" ]
[ "0.5975638", "0.58271253", "0.5484291", "0.5396265", "0.5390652", "0.53713834", "0.53584284", "0.53490835", "0.5338156", "0.53251886", "0.5263821", "0.5227132", "0.51992923", "0.5184101", "0.51213396", "0.5080763", "0.50803787", "0.5078497", "0.50769305", "0.5075885", "0.50741947", "0.5056136", "0.50552154", "0.50527424", "0.50440645", "0.50069416", "0.49847037", "0.4984668", "0.4952457", "0.49428636" ]
0.68073165
0
Compare the current scene's reference versions with the metadata and update them as needed
def sceneRefCheck(silent=False):
    uptodate = True
    logger.debug('init sceneChecking...')

    currentProject = database.getCurrentProject()
    projName = pm.fileInfo.get('projectName')
    if currentProject != projName:
        logger.error('This file is from a project different from the current project')
        return

    item = Item(fromScene=True)  # get current scene metadata

    # compare references and metadata and create lists of references to add, delete, update and replace
    logger.debug('creating lists of changes...')
    refOnSceneList = pm.getReferences()
    toDelete = [x for x in refOnSceneList if x not in item.components]
    toAdd = [x for x in item.components if x not in refOnSceneList and x != 'cam']
    toReplace = [x for x in item.components if item.components[x]['task'] != item.components[x]['proxyMode']]
    refToCheckUpdate = [x for x in refOnSceneList if x not in toDelete and x not in toReplace]
    toUpdate = {}

    # create the list of references to update depending on the assemble mode
    logger.debug('check update...')
    for ns in refToCheckUpdate:
        logger.info('updating ns:%s' % ns)
        if item.components[ns]['assembleMode'] == 'camera':
            continue
        if item.components[ns]['assembleMode'] == 'reference':
            logger.debug('reference')
            start_time = time.time()
            component = ReferenceComponent(ns, item.components[ns], parent=item)
            toUpdate[ns] = component.updateVersion(refOnSceneList[ns])
            elapsed_time = time.time() - start_time
            logger.debug('%s Total info' % elapsed_time)
        if item.components[ns]['assembleMode'] == 'xlo':
            component = XloComponent(ns, item.components[ns], parent=item)
            toUpdate[ns] = component.updateVersion(refOnSceneList[ns])
        if item.components[ns]['assembleMode'] == 'cache':
            cache = CacheComponent(ns, item.components[ns], parent=item)
            toUpdate[ns] = cache.updateVersion(refOnSceneList[ns])

    # If not in silent mode, show dialogs so the user can choose which references should be processed
    logger.debug('prompt if needed')
    if not silent:
        if toDelete:
            uptodate = False
            toDelete = pm.layoutDialog(ui=lambda: refCheckPrompt(toDelete, 'delete')).split(',')
        if toAdd:
            uptodate = False
            toAdd = pm.layoutDialog(ui=lambda: refCheckPrompt(toAdd, 'add')).split(',')
        if toReplace:
            uptodate = False
            toReplace = pm.layoutDialog(ui=lambda: refCheckPrompt(toReplace, 'replace')).split(',')

        upDateList = [x for x, y in toUpdate.iteritems() if y]
        if upDateList:
            uptodate = False
            upDateList = pm.layoutDialog(ui=lambda: refCheckPrompt(upDateList, 'update')).split(',')
            toUpdate = {x: y for x, y in toUpdate.iteritems() if x in upDateList}
        else:
            toUpdate = {}

        if uptodate:
            pm.confirmDialog(title='Scene Check', ma='center', message='Versions ok!',
                             button=['OK'], defaultButton='OK', dismissString='OK')

    logger.debug('processing...')
    # Do the processing
    # delete
    logger.debug('toDelete:%s' % toDelete)
    for ns in toDelete:
        refOnSceneList[ns].remove()

    # add
    logger.debug('toAdd:%s' % toAdd)
    for ns in toAdd:
        if item.components[ns]['assembleMode'] == 'camera':
            continue
        if item.components[ns]['assembleMode'] == 'reference':
            component = ReferenceComponent(ns, item.components[ns], parent=item)
            component.addToScene()
        elif item.components[ns]['assembleMode'] == 'xlo':
            component = XloComponent(ns, item.components[ns], parent=item)
            component.addToScene()
            cache = CacheComponent(ns, item.components[ns], parent=item)
            cache.importCache()
        elif item.components[ns]['assembleMode'] == 'cache':
            cache = CacheComponent(ns, item.components[ns], parent=item)
            cache.addToScene()

    # update versions
    for ns, versions in toUpdate.iteritems():
        if item.components[ns]['assembleMode'] == 'camera':
            continue
        if item.components[ns]['assembleMode'] == 'reference':
            component = ReferenceComponent(ns, item.components[ns], parent=item)
            componentPath = component.getPublishPath()
            refOnSceneList[ns].replaceWith(componentPath)
        if item.components[ns]['assembleMode'] == 'xlo':
            if 'ver' in versions:
                component = XloComponent(ns, item.components[ns], parent=item)
                componentPath = component.getPublishPath()
                refOnSceneList[ns].replaceWith(componentPath)
            if 'cacheVer' in versions:
                # todo check if need to delete old cache node
                cache = CacheComponent(ns, item.components[ns], parent=item)
                cache.importCache()
        if item.components[ns]['assembleMode'] == 'cache':
            component = CacheComponent(ns, item.components[ns], parent=item)
            componentPath = component.getPublishPath()
            refOnSceneList[ns].replaceWith(componentPath)

    # Replace
    for ns in toReplace:
        if item.components[ns]['assembleMode'] == 'reference':
            oldProxyMode = item.components[ns]['task']
            item.components[ns]['task'] = item.components[ns]['proxyMode']
            component = ReferenceComponent(ns, item.components[ns], parent=item)
            componentItem = component.getItem()
            if componentItem.publishVer > 0:
                refOnSceneList[ns].replaceWith(component.getPublishPath())
            else:
                item.components[ns]['task'] = oldProxyMode

    item.putDataToDB()
    logger.info('done sceneChecking!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkVersions():\n item = Item(fromScene=True)\n\n for ns, componentMData in item.components.iteritems():\n if ns == 'cam':\n # todo tratar versoes da camera\n continue\n\n if componentMData['assembleMode'] == 'reference':\n refComponent = ReferenceComponent(ns, componentMData, parent=item)\n refComponent.checkDBForNewVersion()\n\n elif componentMData['assembleMode'] == 'xlo':\n xloComponent = XloComponent(ns, componentMData, parent=item)\n xloComponent.checkDBForNewVersion()\n xloComponent.checkDBForNewCacheVersion()\n\n elif componentMData['assembleMode'] == 'cache':\n cacheComponent = CacheComponent(ns, componentMData, parent=item)\n cacheComponent.checkDBForNewVersion()\n\n item.putDataToDB()", "def update_versions(self, reference_resolution):\n raise NotImplementedError(\"update_versions is not implemented\")", "def set_media_versions(self):\n\n # access to .multimedia_map is slow\n previous_version = self._get_version_comparison_build()\n prev_multimedia_map = previous_version.multimedia_map if previous_version else {}\n\n for path, map_item in self.multimedia_map.items():\n prev_map_item = prev_multimedia_map.get(path, None)\n if prev_map_item and prev_map_item.unique_id:\n # Re-use the id so CommCare knows it's the same resource\n map_item.unique_id = prev_map_item.unique_id\n if (prev_map_item and prev_map_item.version\n and prev_map_item.multimedia_id == map_item.multimedia_id):\n map_item.version = prev_map_item.version\n else:\n map_item.version = self.version", "def check_referenced_versions(self, pdm=None):\n if not pdm:\n pdm = ProgressManagerFactory.get_progress_manager()\n\n caller = pdm.register(\n 3, \"%s.check_referenced_versions() prepare data\" % self.__class__.__name__\n )\n\n # deeply get which file is referencing which other files\n self.deep_version_inputs_update()\n if caller:\n caller.step()\n\n from anima.dcc import empty_reference_resolution\n\n reference_resolution = empty_reference_resolution(\n root=self.get_referenced_versions()\n )\n\n if caller:\n caller.step()\n\n # reverse walk in DFS\n dfs_version_references = []\n\n version = self.get_current_version()\n if not version:\n return reference_resolution\n\n for v in version.walk_inputs():\n dfs_version_references.append(v)\n\n if caller:\n caller.step()\n\n # pop the first element which is the current scene\n dfs_version_references.pop(0)\n\n caller.end_progress()\n\n # register a new caller\n caller = pdm.register(\n len(dfs_version_references),\n \"%s.check_referenced_versions()\" % self.__class__.__name__,\n )\n\n # iterate back in the list\n for v in reversed(dfs_version_references):\n # check inputs first\n to_be_updated_list = []\n for ref_v in v.inputs:\n if not ref_v.is_latest_published_version():\n to_be_updated_list.append(ref_v)\n\n if to_be_updated_list:\n action = \"create\"\n # check if there is a new published version of this version\n # that is using all the updated versions of the references\n latest_published_version = v.latest_published_version\n if latest_published_version and not v.is_latest_published_version():\n # so there is a new published version\n # check if its children needs any update\n # and the updated child versions are already\n # referenced to the this published version\n if all(\n [\n ref_v.latest_published_version\n in latest_published_version.inputs\n for ref_v in to_be_updated_list\n ]\n ):\n # so all new versions are referenced to this published\n # version, just update to this latest published version\n action = \"update\"\n else:\n # not all references are 
in the inputs\n # so we need to create a new version as usual\n # and update the references to the latest versions\n action = \"create\"\n else:\n # nothing needs to be updated,\n # so check if this version has a new version,\n # also there could be no reference under this referenced\n # version\n if v.is_latest_published_version():\n # do nothing\n action = \"leave\"\n else:\n # update to latest published version\n action = \"update\"\n\n # before setting the action check all the inputs in\n # resolution_dictionary, if any of them are update, or create\n # then set this one to 'create'\n if any(\n rev_v in reference_resolution[\"update\"]\n or rev_v in reference_resolution[\"create\"]\n for rev_v in v.inputs\n ):\n action = \"create\"\n\n # so append this v to the related action list\n reference_resolution[action].append(v)\n\n # from stalker import Version\n # assert isinstance(v, Version)\n caller.step(message=v.nice_name)\n\n caller.end_progress()\n\n return reference_resolution", "def check_if_previous_version_references(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n from anima.dcc.mayaEnv import Maya\n\n m = Maya()\n ver = m.get_current_version()\n\n if ver is None:\n progress_controller.complete()\n return\n\n same_version_references = []\n all_references = pm.listReferences()\n progress_controller.maximum = len(all_references)\n for ref in all_references: # check only 1st level references\n ref_version = m.get_version_from_full_path(ref.path)\n if ref_version:\n if ref_version.task == ver.task and ref_version.take_name == ver.take_name:\n same_version_references.append(ref)\n progress_controller.increment()\n\n progress_controller.complete()\n if len(same_version_references):\n print(\"The following nodes are references to an older version of this \" \"scene\")\n print(\"\\n\".join(map(lambda x: x.refNode.name(), same_version_references)))\n raise PublishError(\n \"The current scene contains a <b>reference</b> to a<br>\"\n \"<b>previous version</b> of itself.<br><br>\"\n \"Please remove it!!!\"\n )", "def saveVersion(self, makeReference=True, versionNotes=\"\", sceneFormat=\"mb\", *args, **kwargs):\n logger.debug(\"Func: saveVersion\")\n\n\n\n now = datetime.datetime.now().strftime(\"%d/%m/%Y-%H:%M\")\n completeNote = \"[%s] on %s\\n%s\\n\" % (self.currentUser, now, versionNotes)\n\n sceneName = self.getSceneFile()\n if not sceneName:\n msg = \"This is not a base scene (Untitled)\"\n self._exception(360, msg)\n return -1, msg\n\n sceneInfo = self.getOpenSceneInfo()\n\n if sceneInfo: ## getCurrentJson returns None if the resolved json path is missing\n jsonFile = sceneInfo[\"jsonFile\"]\n jsonInfo = self._loadJson(jsonFile)\n\n currentVersion = len(jsonInfo[\"Versions\"]) + 1\n sceneName = \"{0}_{1}_{2}_v{3}\".format(jsonInfo[\"Name\"], jsonInfo[\"Category\"], self._usersDict[self.currentUser],\n str(currentVersion).zfill(3))\n relSceneFile = os.path.join(jsonInfo[\"Path\"], \"{0}.{1}\".format(sceneName, sceneFormat))\n\n sceneFile = os.path.join(sceneInfo[\"projectPath\"], relSceneFile)\n\n # killTurtle()\n # TODO // cmds?\n # pm.saveAs(sceneFile)\n\n nuke.scriptSaveAs(sceneFile)\n\n thumbPath = self.createThumbnail(dbPath=jsonFile, versionInt=currentVersion)\n\n jsonInfo[\"Versions\"].append(\n # PATH => Notes => User Initials => Machine ID => Playblast => Thumbnail\n # TODO : ref => Dict\n {\"RelativePath\": relSceneFile,\n \"Note\": completeNote,\n \"User\": self._usersDict[self.currentUser],\n \"Workstation\": 
socket.gethostname(),\n \"Preview\": {},\n \"Thumb\": thumbPath,\n \"Ranges\": self._getTimelineRanges()\n }\n )\n\n if makeReference:\n referenceName = \"{0}_{1}_forReference\".format(jsonInfo[\"Name\"], jsonInfo[\"Category\"])\n relReferenceFile = os.path.join(jsonInfo[\"Path\"], \"{0}.{1}\".format(referenceName, sceneFormat))\n referenceFile = os.path.join(sceneInfo[\"projectPath\"], relReferenceFile)\n\n shutil.copyfile(sceneFile, referenceFile)\n jsonInfo[\"ReferenceFile\"] = relReferenceFile\n jsonInfo[\"ReferencedVersion\"] = currentVersion\n self._dumpJson(jsonInfo, jsonFile)\n else:\n msg = \"This is not a base scene (Json file cannot be found)\"\n self._exception(360, msg)\n return -1, msg\n return jsonInfo", "def version_updater(logging_level=logging.WARNING):\n # connect to db\n do_db_setup()\n\n # set Qt lib\n set_qt_lib()\n\n from anima.ui import version_updater, models\n from anima.env import mayaEnv\n reload(mayaEnv)\n reload(version_updater)\n reload(models)\n m = Maya()\n import pymel\n m.name = \"Maya\" + str(pymel.versions.current())[0:4]\n\n logger.setLevel(logging_level)\n\n # generate a reference_resolution\n version_updater.UI(environment=m)", "def populateSceneRefs(*args):\n pi.referenceDictionary = {}\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, ra=True)\n\n #get reference paths\n refs = cmds.file(q=True, r=True)\n\n buff = []\n # loaded = []\n for ref in refs:\n #get the associated namespace\n ns = cmds.file(ref, q=True, ns=True)\n pi.referenceDictionary[ns] = ref\n\n # put files in buffer list to sort\n for g in pi.referenceDictionary.keys():\n buff.append(g)\n buff.sort()\n\n # now put the sorted namespaces in the list\n for b in buff:\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, append=b, dcc = selectRefs)\n\n # if ref is deferred(not loaded), change it's font\n for ref in refs:\n if cmds.file(ref, q=True, deferReference=True):\n ns = cmds.file(ref, q=True, ns=True) # get the namespace in order to get the item name\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, selectItem=ns) # sel the item in order to query it\n index = cmds.textScrollList(widgets[\"shotAssListTSL\"], q=True, selectIndexedItem=True)[0] # query the index of sel\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, lineFont = [index, \"obliqueLabelFont\"])\n cmds.textScrollList(widgets[\"shotAssListTSL\"], e=True, deselectAll=True)\n\n # if we're in a lgt file, look through current refs and for each one of type \"anm\", check the frame rates, etc. 
and give option to change\n curr = paths.PathManager(cmds.file(q=True, sn=True))\n if curr.shotType == \"lgt\":\n for ref in refs:\n p=paths.PathManager(ref)\n if p.shotType == \"anm\":\n dict = cFuncs.getFileFrameInfo(cFuncs.fixPath(ref))\n csi.compareSceneInfo(dict)", "def test_master_versions(self):\n m = self.d.master(4242)\n r = self.d.release(79)\n v = m.versions\n\n self.assertEqual(len(v), 2)\n self.assertTrue(r in v)\n self.assertEqual(r.master, m)\n\n r2 = self.d.release(3329867)\n self.assertTrue(r2.master is None)", "def compareVersions(self):\n logger.debug(\"Func: compareVersions\")\n\n cMajorV = nuke.NUKE_VERSION_MAJOR\n cMinorV = nuke.NUKE_VERSION_MINOR\n currentVersion = float(\"{0}.{1}\".format(cMajorV, cMinorV))\n\n dbMajorV = self._currentSceneInfo[\"NukeVersion\"][0]\n dbMinorV = self._currentSceneInfo[\"NukeVersion\"][1]\n databaseVersion = float(\"{0}.{1}\".format(dbMajorV, dbMinorV))\n\n messageList = []\n\n\n if currentVersion == databaseVersion:\n pass\n\n if currentVersion < databaseVersion: # version compare\n message = \"Base Scene is created with a HIGHER Nuke version ({0}). Are you sure you want to continue?\".format(databaseVersion)\n messageList.append(message)\n\n if currentVersion > databaseVersion:\n message = \"Base Scene is created with a LOWER Nuke version ({0}). Are you sure you want to continue?\".format(databaseVersion)\n messageList.append(message)\n\n message=\"\"\n for x in messageList:\n message = message + \"\\n\" + str(x)\n\n if messageList == []:\n return 0, message\n else:\n return -1, message", "def test_ls_returns_sorted_versions():\n with pipeline.fixture(assets=[\"Asset1\"], subsets=[\"animRig\"], versions=1):\n for asset in pipeline.ls():\n previous_version = 0\n for subset in asset[\"subsets\"]:\n for version in subset[\"versions\"]:\n version = version[\"version\"]\n assert version > previous_version\n previous_version = version", "def do_snapshot(\n self, force: bool = False\n ) -> Tuple[bool, Dict[str, MetaFile]]:\n\n # Snapshot update is needed if\n # * any targets files are not yet in snapshot or\n # * any targets version is incorrect\n update_version = force\n removed: Dict[str, MetaFile] = {}\n\n with self.edit_snapshot() as snapshot:\n for keyname, new_meta in self.targets_infos.items():\n if keyname not in snapshot.meta:\n update_version = True\n snapshot.meta[keyname] = deepcopy(new_meta)\n continue\n\n old_meta = snapshot.meta[keyname]\n if new_meta.version < old_meta.version:\n raise ValueError(f\"{keyname} version rollback\")\n if new_meta.version > old_meta.version:\n update_version = True\n snapshot.meta[keyname] = deepcopy(new_meta)\n removed[keyname] = old_meta\n\n if not update_version:\n # prevent edit_snapshot() from storing a new version\n raise AbortEdit(\"Skip snapshot: No targets version changes\")\n\n if not update_version:\n # this is reachable as edit_snapshot() handles AbortEdit\n logger.debug(\"Snapshot update not needed\") # type: ignore[unreachable]\n else:\n logger.debug(\"Snapshot v%d\", snapshot.version)\n\n return update_version, removed", "def _update_metadata_if_changed(self, metadata_role, referenced_metadata='release'):\n \n uncompressed_metadata_filename = metadata_role + '.txt'\n\n # Ensure the referenced metadata has been loaded. The 'root' role may be\n # updated without having 'release' available. 
\n if referenced_metadata not in self.metadata['current']:\n message = 'Cannot update '+repr(metadata_role)+' because ' \\\n +referenced_metadata+' is missing.'\n raise tuf.RepositoryError(message)\n # The referenced metadata has been loaded. Extract the new\n # fileinfo for 'metadata_role' from it. \n else:\n message = repr(metadata_role)+' referenced in '+\\\n repr(referenced_metadata)+'. '+repr(metadata_role)+' may be updated.'\n logger.debug(message)\n \n # There might be a compressed version of 'release.txt' or Targets\n # metadata available for download. Check the 'meta' field of\n # 'referenced_metadata' to see if it is listed when 'metadata_role'\n # is 'release'. The full rolename for delegated Targets metadata\n # must begin with 'targets/'. The Release role lists all the Targets\n # metadata available on the repository, including any that may be in\n # compressed form.\n compression = None\n\n # Extract the fileinfo of the uncompressed version of 'metadata_role'.\n uncompressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'] \\\n [uncompressed_metadata_filename]\n\n # Check for availability of compressed versions of 'release.txt',\n # 'targets.txt', and delegated Targets, which also start with 'targets'.\n # For 'targets.txt' and delegated metadata, 'referenced_metata'\n # should always be 'release'. 'release.txt' specifies all roles\n # provided by a repository, including their file sizes and hashes.\n if metadata_role == 'release' or metadata_role.startswith('targets'):\n gzip_metadata_filename = uncompressed_metadata_filename + '.gz'\n if gzip_metadata_filename in self.metadata['current'] \\\n [referenced_metadata]['meta']:\n compression = 'gzip'\n compressed_fileinfo = self.metadata['current'][referenced_metadata] \\\n ['meta'][gzip_metadata_filename]\n # NOTE: When we download the compressed file, we care about its\n # compressed length. However, we check the hash of the uncompressed\n # file; therefore we use the hashes of the uncompressed file.\n fileinfo = {'length': compressed_fileinfo['length'],\n 'hashes': uncompressed_fileinfo['hashes']}\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' is available at '+\\\n repr(gzip_metadata_filename)+'.')\n else:\n logger.debug('Compressed version of '+\\\n repr(uncompressed_metadata_filename)+' not available.')\n fileinfo = uncompressed_fileinfo\n else:\n fileinfo = uncompressed_fileinfo\n\n # Simply return if the file has not changed, according to the metadata\n # about the uncompressed file provided by the referenced metadata.\n if not self._fileinfo_has_changed(uncompressed_metadata_filename,\n uncompressed_fileinfo):\n return\n\n logger.debug('Metadata '+repr(uncompressed_metadata_filename)+\\\n ' has changed.')\n\n try:\n self._update_metadata(metadata_role, fileinfo=fileinfo,\n compression=compression)\n except:\n # The current metadata we have is not current but we couldn't\n # get new metadata. 
We shouldn't use the old metadata anymore.\n # This will get rid of in-memory knowledge of the role and\n # delegated roles, but will leave delegated metadata files as\n # current files on disk.\n # TODO: Should we get rid of the delegated metadata files?\n # We shouldn't need to, but we need to check the trust\n # implications of the current implementation.\n self._delete_metadata(metadata_role)\n logger.error('Metadata for '+str(metadata_role)+' could not be updated')\n raise\n else:\n # We need to remove delegated roles because the delegated roles\n # may not be trusted anymore.\n if metadata_role == 'targets' or metadata_role.startswith('targets/'):\n logger.debug('Removing delegated roles of '+repr(metadata_role)+'.')\n # TODO: Should we also remove the keys of the delegated roles?\n tuf.roledb.remove_delegated_roles(metadata_role)\n self._import_delegations(metadata_role)", "def _recalculate_versions(self):\n versions = self._get_local_resource_versions()\n for versions_dict in self._versions_by_consumer.values():\n for res_type, res_version in versions_dict.items():\n versions[res_type].add(res_version)\n self._versions = versions", "def load_referenced_versions(self):\n raise NotImplementedError(\"load_referenced_versions is not implemented\")", "def updateVersions(self):\r\n f = open('../versions.pckl', 'wb')\r\n pickle.dump(self.versions, f)\r\n f.close()", "def testMetacommunityReferencesStorage(self):\n community_dict1 = self.t2.get_community_parameters(1)\n community_dict2 = self.t2.get_community_parameters(2)\n community_dict3 = self.t2.get_community_parameters(3)\n community_dict4 = self.t2.get_community_parameters(4)\n comparison_dict1 = {\"speciation_rate\": 0.5, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 0}\n comparison_dict2 = {\"speciation_rate\": 0.5, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 1}\n comparison_dict3 = {\"speciation_rate\": 0.5, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 2}\n comparison_dict4 = {\"speciation_rate\": 0.5, \"time\": 0.0, \"fragments\": 0, \"metacommunity_reference\": 3}\n self.assertDictEqual(comparison_dict1, community_dict1)\n self.assertDictEqual(comparison_dict2, community_dict2)\n self.assertDictEqual(comparison_dict3, community_dict3)\n self.assertDictEqual(comparison_dict4, community_dict4)", "def addon_matrix(self):\n print(\"Checking riot-web version\")\n repo = self.github.get_repo('vector-im/riot-web')\n releases = list(repo.get_releases())\n index = 0\n while True:\n remote_version = releases[index].tag_name\n if 'b' in remote_version:\n index = index + 1\n else:\n break\n file = \"{}/Dockerfile\".format(self.name)\n remote_file = self.get_file_obj(file)\n masterfile = self.repoupdater.get_file_content(remote_file)\n file_version = masterfile.split('releases/download/')[1]\n file_version = file_version.split('/')[0]\n if self.verbose:\n print(\"Current version\", file_version)\n print(\"Available version\", remote_version)\n if remote_version != file_version:\n msg = COMMIT_MSG.format('riot-web', remote_version)\n new_content = self.repoupdater.get_file_content(remote_file)\n new_content = new_content.replace(file_version, remote_version)\n self.repoupdater.commit(file, msg, new_content, remote_file.sha)\n else:\n print(\"riot-web already have the newest version\", file_version)", "def makeReference(self):\n logger.debug(\"Func: makeReference\")\n\n if self._currentVersionIndex == -1:\n msg = \"Cursor is not on a Base Scene Version. 
Cancelling\"\n # logger.warning(msg)\n # raise Exception([101, msg])\n self._exception(101, msg)\n return\n # return\n\n absVersionFile = os.path.join(self.projectDir, self._currentSceneInfo[\"Versions\"][self._currentVersionIndex-1][\"RelativePath\"])\n name = os.path.split(absVersionFile)[1]\n filename, extension = os.path.splitext(name)\n referenceName = \"{0}_{1}_forReference\".format(self._currentSceneInfo[\"Name\"], self._currentSceneInfo[\"Category\"])\n relReferenceFile = os.path.join(self._currentSceneInfo[\"Path\"], \"{0}{1}\".format(referenceName, extension))\n absReferenceFile = os.path.join(self.projectDir, relReferenceFile)\n shutil.copyfile(absVersionFile, absReferenceFile)\n self._currentSceneInfo[\"ReferenceFile\"] = relReferenceFile\n # SET the referenced version as the 'VISUAL INDEX NUMBER' starting from 1\n self._currentSceneInfo[\"ReferencedVersion\"] = self._currentVersionIndex\n\n self._dumpJson(self._currentSceneInfo, self._baseScenesInCategory[self.currentBaseSceneName])", "def testMultipleOldRefs(self):\n self.mr2 = cdl_convert.MediaRef('hello')\n\n self.assertEqual(\n {'hello': [self.mr, self.mr2]},\n cdl_convert.MediaRef.members\n )\n\n self.mr._filename = 'goodbye'\n self.mr._change_membership(old_ref='hello')\n\n self.assertEqual(\n {'hello': [self.mr2],'goodbye': [self.mr]},\n cdl_convert.MediaRef.members\n )", "def updateMdrizVerHistory(self,build,versions):\n _plist = self.assoc.parlist[0]\n if build == True: _output = _plist['output']\n else: _output = _plist['outdata']\n \n fhdu = pyfits.open(_output,mode='update')\n prihdr = fhdu[0].header\n \n ver_str = \"MultiDrizzle product generated using: \"\n prihdr.add_history(ver_str)\n \n for key in versions:\n if versions[key].find('\\n') < 0:\n prihdr.add_history(key+versions[key])\n else:\n # This will accomodate multi-line comments\n _ver_str = versions[key].split('\\n')\n prihdr.add_history(key)\n for val in _ver_str:\n if val.strip() != '':\n prihdr.add_history(val)\n \n #ver_str = ' MultiDrizzle Version '+str(version)\n #prihdr.add_history(ver_str)\n \n fhdu.close()\n del fhdu", "def updateToLatest(self):\n # Determine the newest stamp in each dependency\n latest = {}\n for item in self.spec:\n if isinstance(item, CachedResource):\n latest[item] = item.getLatestStamp()\n if not latest:\n return\n\n # Our new stamp is the greatest out of all deps' stamps\n stamp = max(latest.itervalues())\n\n # Update only if we need to\n if self.getLatestStamp() >= stamp:\n return\n self.updateStamp(latest, stamp)\n\n # Clean old versions if that was successful\n self.cleanStamps(lambda s: s < stamp)", "def getVersion(self):\n self.getDocumentedObject().getVersion()", "def get_and_update_versions ():\n\n try:\n get_comp_versions (\"ACE\")\n get_comp_versions (\"TAO\")\n\n if opts.update:\n files = []\n files += update_version_files (\"ACE\")\n files += update_version_files (\"TAO\")\n files += create_changelog (\"ACE\")\n files += create_changelog (\"TAO\")\n files += update_spec_file ()\n files += update_debianbuild ()\n\n commit (files)\n\n except:\n print (\"Fatal error in get_and_update_versions.\")\n raise", "def compare_version_objects(version1, version2):\n if version1.epoch < version2.epoch:\n return -1\n if version1.epoch > version2.epoch:\n return 1\n result = compare_strings(version1.upstream, version2.upstream)\n if result != 0:\n return result\n if version1.revision or version2.revision:\n return compare_strings(version1.revision, version2.revision)\n return 0", "def 
test_changeAllProjectVersionsPreRelease(self):\n root = FilePath(self.mktemp())\n root.createDirectory()\n coreNews = (\"Twisted Core 1.0.0 (2009-12-25)\\n\"\n \"===============================\\n\"\n \"\\n\")\n webNews = (\"Twisted Web 1.0.0pre1 (2009-12-25)\\n\"\n \"==================================\\n\"\n \"\\n\")\n structure = {\n \"README\": \"Hi this is 1.0.0.\",\n \"NEWS\": coreNews + webNews,\n \"twisted\": {\n \"topfiles\": {\n \"README\": \"Hi this is 1.0.0\",\n \"NEWS\": coreNews},\n \"_version.py\":\n genVersion(\"twisted\", 1, 0, 0),\n \"web\": {\n \"topfiles\": {\n \"README\": \"Hi this is 1.0.0pre1\",\n \"NEWS\": webNews},\n \"_version.py\": genVersion(\"twisted.web\", 1, 0, 0, 1)\n }}}\n self.createStructure(root, structure)\n changeAllProjectVersions(root, Version(\"lol\", 1, 0, 2), '2010-01-01')\n coreNews = (\n \"Twisted Core 1.0.0 (2009-12-25)\\n\"\n \"===============================\\n\"\n \"\\n\")\n webNews = (\"Twisted Web 1.0.2 (2010-01-01)\\n\"\n \"==============================\\n\"\n \"\\n\")\n outStructure = {\n \"README\": \"Hi this is 1.0.2.\",\n \"NEWS\": coreNews + webNews,\n \"twisted\": {\n \"topfiles\": {\n \"README\": \"Hi this is 1.0.2\",\n \"NEWS\": coreNews},\n \"_version.py\":\n genVersion(\"twisted\", 1, 0, 2),\n \"web\": {\n \"topfiles\": {\n \"README\": \"Hi this is 1.0.2\",\n \"NEWS\": webNews},\n \"_version.py\": genVersion(\"twisted.web\", 1, 0, 2),\n }}}\n self.assertStructure(root, outStructure)", "def versions(self):\n raise Exception(\"mcapi.Datafile.versions is not implemented\")", "def test_ls():\n\n with pipeline.fixture(assets=[\"Asset1\"],\n subsets=[\"animRig\"],\n versions=1) as root:\n asset = next(pipeline.ls())\n\n reference = {\n \"schema\": \"pyblish-mindbender:asset-1.0\",\n \"name\": \"Asset1\",\n \"subsets\": [\n {\n \"schema\": \"pyblish-mindbender:subset-1.0\",\n \"name\": \"animRig\",\n \"versions\": [\n {\n \"schema\": \"pyblish-mindbender:version-1.0\",\n \"version\": 1,\n \"path\": os.path.join(\n root,\n \"Asset1\",\n \"publish\",\n \"animRig\",\n \"v001\"\n ),\n \"source\": os.path.join(\n \"{project}\",\n \"maya\",\n \"scenes\",\n \"scene.ma\"\n ),\n \"representations\": [\n {\n \"schema\": (\"pyblish-mindbender:\"\n \"representation-1.0\"),\n \"format\": \".ma\",\n \"path\": os.path.join(\n \"{dirname}\",\n \"Asset1{format}\"\n ),\n }\n ],\n \"time\": \"\",\n \"author\": \"mottosso\",\n },\n ]\n }\n ]\n }\n\n # Printed on error\n print(\"# Comparing result:\")\n print(json.dumps(asset, indent=4, sort_keys=True))\n print(\"# With reference:\")\n print(json.dumps(reference, indent=4, sort_keys=True))\n\n assert_equals(asset, reference)", "def saveCallback(self):\n\n ## TODO // TEST IT\n self._pathsDict[\"sceneFile\"] = self.getSceneFile()\n try:\n openSceneInfo = self.getOpenSceneInfo()\n if not openSceneInfo:\n return\n except TypeError:\n return\n if openSceneInfo[\"jsonFile\"]:\n jsonInfo = self._loadJson(openSceneInfo[\"jsonFile\"])\n if jsonInfo[\"ReferenceFile\"]:\n absRefFile = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"ReferenceFile\"])\n # TODO : ref => Dict\n absBaseSceneVersion = os.path.join(self._pathsDict[\"projectDir\"], jsonInfo[\"Versions\"][int(jsonInfo[\"ReferencedVersion\"]) - 1][\"RelativePath\"])\n # if the refererenced scene file is the saved file (saved or saved as)\n if self._pathsDict[\"sceneFile\"] == absBaseSceneVersion:\n # copy over the forReference file\n try:\n shutil.copyfile(self._pathsDict[\"sceneFile\"], absRefFile)\n print \"Scene Manager Update:\\nReference File 
Updated\"\n except:\n pass", "def set_form_versions(self):\n def _hash(val):\n return hashlib.md5(val).hexdigest()\n\n latest_build = self._get_version_comparison_build()\n if not latest_build:\n return\n force_new_version = self.build_profiles != latest_build.build_profiles\n for form_stuff in self.get_forms(bare=False):\n filename = 'files/%s' % self.get_form_filename(**form_stuff)\n current_form = form_stuff[\"form\"]\n if not force_new_version:\n try:\n previous_form = latest_build.get_form(current_form.unique_id)\n # take the previous version's compiled form as-is\n # (generation code may have changed since last build)\n previous_source = latest_build.fetch_attachment(filename)\n except (ResourceNotFound, FormNotFoundException):\n current_form.version = None\n else:\n previous_hash = _hash(previous_source)\n\n # set form version to previous version, and only update if content has changed\n current_form.version = previous_form.get_version()\n current_form = current_form.validate_form()\n current_hash = _hash(current_form.render_xform())\n if previous_hash != current_hash:\n current_form.version = None\n # clear cache since render_xform was called with a mutated form set to the previous version\n current_form.render_xform.reset_cache(current_form)\n else:\n current_form.version = None" ]
[ "0.6834863", "0.6241142", "0.6191124", "0.6186932", "0.6126859", "0.5808705", "0.57901436", "0.5686815", "0.5674683", "0.56705105", "0.5636066", "0.5609275", "0.55931807", "0.553265", "0.54316425", "0.5389392", "0.53655267", "0.52925307", "0.52915853", "0.52761626", "0.525988", "0.52414685", "0.52390707", "0.52130103", "0.52093095", "0.5209143", "0.5199475", "0.5194603", "0.5188073", "0.518789" ]
0.6645423
1
Read a .plist file from filepath. Return the unpacked root object (which usually is a dictionary).
def readPlist(filepath): plistData = NSData.dataWithContentsOfFile_(filepath) dataObject, plistFormat, error = NSPropertyListSerialization.propertyListFromData_mutabilityOption_format_errorDescription_(plistData, NSPropertyListMutableContainers, None, None) if error: errmsg = "%s in file %s" % (error, filepath) raise NSPropertyListSerializationException(errmsg) else: return dataObject
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_plist(path: str, format: plistlib.PlistFormat) -> dict:\n if not os.path.isfile(path):\n raise ValueError(f'File {path} does not exist')\n\n with open(path, 'rb') as file:\n return plistlib.load(file, fmt=format, dict_type=dict)", "def _read_plist(self, filename):\n file_path = self.get_file(filename)\n try:\n self._plist[filename] = readPlist(file_path)\n except:\n # Is binaryPlist?\n try:\n self._plist[filename] = readBinaryPlist(file_path)\n except:\n # What is it?\n pass", "def load_plist(path):\n dt = _np.dtype({\"names\": ['ridx', 'azidx'],\n \"formats\": ['>u4', '>u4']})\n plist = _np.fromfile(path, dtype=dt)\n return plist", "def read_plist(path: str) -> dict:\n return _read_plist(path, plistlib.FMT_XML)", "def read_file(self, path):\n # pylint: disable=unused-variable\n info, pformat, error = (\n NSPropertyListSerialization.propertyListWithData_options_format_error_(\n NSData.dataWithContentsOfFile_(os.path.expanduser(path)),\n NSPropertyListMutableContainersAndLeaves,\n None,\n None\n ))\n # pylint: enable=unused-variable\n if info is None:\n if error is None:\n error = \"Invalid plist file.\"\n raise PlistParseError(\"Can't read %s: %s\" % (path, error))\n\n return info", "def Read(self, file_object):\n try:\n self.root_key = plistlib.load(file_object)\n\n except plistlib.InvalidFileException as exception:\n raise IOError(exception)", "def load_dictionary(filepath):\r\n # context manager read binary\r\n with open(filepath, 'rb') as file:\r\n # pickle load\r\n return pickle.load(file)", "def parse_file(file_path):\n with open(file_path) as f:\n return XmlPropertyListParser().parse(f)", "def loadPickle(filepath):\n\tf = open(filepath, 'rb')\n\tobj = pickle.load(f)\n\tf.close()\n\treturn obj", "def load_file(self, filepath):\n filepath = self._yaml_extension(filepath)\n data = self._load_data_yaml(filepath)\n return data", "def load_pkl_file(p):\n pkl_file = open(p, 'rb')\n obj = pickle.load(pkl_file)\n pkl_file.close()\n return obj", "def _read_pkl(self, input_file):\n data = pickle.load(open(input_file, 'rb'))\n return data", "def load(filepath):\n with open(filepath) as f:\n return Config(json.load(f))", "def load_pkl(file_name):\n with open(file_name) as fp:\n data = pkl.load(fp)\n return data", "def load(file_path):\n\t# assert type(file_path) == str, 'File_path must be a string'\n\t\n\twith open(file_path, 'rb') as f:\n\t\treturn pickle.load(f)", "def pload(filename):\n return pickle.load(open(filename, 'rb'))", "def read_dictionary(filepath):\n with open(filepath, 'r') as dict_file:\n return dict_file.read().splitlines()", "def load_data(filepath):\n with open(filepath, 'r') as f:\n data = f.read()\n return data", "def pickle_read(file_path):\n\n with open(file_path, 'rb') as file:\n return pickle.load(file)", "def pklload(path:str):\n pkl = pickle.load(open(path, 'rb'))\n return pkl", "def load(self):\n data = None\n try:\n with open(self.__filepath, 'r') as file:\n text = file.read()\n data = jsonpickle.decode(text)\n except FileNotFoundError:\n data = None\n except IOError as e:\n print(e)\n return data", "def extract_from_pickle(filepath):\n\t\twith open(filepath, 'rb') as pfile:\n\t\t\tpy_obj = pickle.load(pfile)\n\t\t\treturn py_obj", "def read_yaml_file(filepath: str) -> Dict:\n return yaml.safe_load(read_file(filepath))", "def load_pkl(path):\r\n f = open(path, 'rb')\r\n try:\r\n rval = cPickle.load(f)\r\n finally:\r\n f.close()\r\n return rval", "def read_pickle(file_path):\n with open(file_path, 'rb') as file:\n return pickle.load(file)", "def 
fromFile(filename: unicode) -> ghidra.framework.ApplicationProperties:\n ...", "def load(filename):\n import pickle\n return pickle.load(open(filename, 'r'))", "def load_object(fpath):\r\n with open(fpath, 'rb') as i:\r\n return pickle.load(i)", "def load_pkl_file(path):\n with open(path, 'rb') as pkl_file:\n return pickle.load(pkl_file)", "def deserialize(file):\n global root_dir\n global wells_list\n global tops_list\n global project_file\n\n f = open(file, 'rb')\n\n current_project = pickle.load(f)\n root_dir = current_project.root_dir\n wells_list = current_project.wells_list\n tops_list = current_project.tops_list\n project_file = current_project.project_file" ]
[ "0.7377023", "0.7077552", "0.7020647", "0.70009124", "0.69380563", "0.6635612", "0.66037655", "0.6400661", "0.6191319", "0.60980314", "0.59968185", "0.5973855", "0.5956443", "0.5919474", "0.5916405", "0.59139293", "0.58797735", "0.58763933", "0.58301824", "0.5828497", "0.58160996", "0.5814658", "0.57757777", "0.5770311", "0.57604474", "0.57598037", "0.5734003", "0.57173073", "0.56958675", "0.5679054" ]
0.76269853
0
Print out the end status of the game when the server closes the connection.
def server_closed_connection(self): print("Game Over!") if self._winner: print("Player {} wins!".format(self._winner)) else: print("Draw!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_connection_end() -> None:\r\n print(\"Connection lost with G-Earth\")\r\n print()", "def endSession(self):\n if(self.verb >= DLS_VERB_HIGH):\n print \"--Ending session with %s (no action)\" % (self.server)", "def end(self):\n winners = mafia.str_player_list(self.game.winners())\n logging.info(\"Game over! Winners: %s\" % winners)\n\n subject = \"%s: The End\" % self.name\n body = \"Game over!\\n\\nCongratulations to %s for a well \" \\\n \"(or poorly; I can't tell) played game!\" % winners\n self.send_message(mafia.events.PUBLIC, subject, body)", "def endGame(self, message):\n print(self.board)\n print(\"Game over! \" + message)\n self.gameOver = True", "def endGame(self):\n pass", "def handle_close(self):\r\n if self.log_fh:\r\n self.log_fh.write(\r\n \"Server closed connection at %s. Shutting down.\\n\" %\r\n time())\r\n self.close()", "def handle_game_end(self, winner_color, win_reason): # possible GameHistory object...\n\n # TODO: implement this method\n print('\\--------------Game End--------------/')\n print(winner_color)\n print(win_reason)\n pass", "def game_exit(self):\n self.set_state(GameState.EXITING)\n self.game_stop()\n self.game_log_statistics()", "def on_client_exit(self, game) -> None:\n pass", "def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False", "def api_end_game(self):\n pass", "def server_exit():\n return", "def endMessage(self):", "def end(self):\n self.send_all(\"SHUTDOWN\") #On indique a tout le monde qu'on ferme\n self.socket.shutdown(socket.SHUT_RDWR)\n self.socket.close()", "def end(self):\n # Update all the things.\n end_font = pygame.font.SysFont(*END_FONT)\n final_score = self.player.nest.acorn_count\n message = \"Game over! Final score: {0}\".format(final_score)\n text_surf = end_font.render(message, True, FONT_COLOUR)\n text_rect = text_surf.get_rect()\n text_rect.center = (SCREEN.width // 2, SCREEN.height // 2)\n\n # Draw all the things.\n self.screen_surf.fill(BKGD_COLOUR)\n self.screen_surf.blit(text_surf, text_rect)\n\n # Render the screen.\n pygame.display.update()\n\n # The main game loop.\n while self.mode is WorldMode.end:\n self.handle_events()", "def handle_close(self):\r\n self.end_time = time.time()\r\n self.time_ran = self.end_time - self.start_time\r\n if self.status != 'PASS':\r\n server_log.info('Client {} aborted!'.format(self.client_id))\r\n self.status = 'ABORTED'\r\n self.close()", "def end():\n logging.info(\"Execution Ended\")", "def EndSession( self ):\r\n\r\n self._socket.write( 'X' ) \r\n # self._connection.write( 'X' ).flush() \r\n\r\n return self.GetServerResponse()", "def shutdown():\n shutdown_server()\n return \"Shutting down server\"", "def disconnect():\n logging.info('Client disconnected')", "def outConnectionLost(self):\n self.logger('stdout closed by process %d' % self._pid)", "def exit(self):\n self._status = \"\"\n self._sock.settimeout(1.0)\n self._sock.sendto(bytes(\"bla\", \"utf-8\"), (self._cfg.host, self._cfg.port))", "def _on_surface_disconnected(self):\n\n # Close the socket\n self._client_socket.close()\n\n # Inform that the connection has been closed\n print(\"Video stream from {} address closed successfully\".format(self._client_address))", "def handleClose(self):\n logging.info(\"%s %s\", self.address, \"closed\")\n self.logbook.clients_disconnected_count += 1", "def end_of_game(self):\n end_game = pyip.inputYesNo(f'\\nDo you want to play again?: ')\n\n if end_game == 'no':\n print('\\n-- GAME OVER --')\n sys.exit()\n elif end_game == 'yes':\n self.game_counter += 
1", "def server_close(self):\n\t\tpass", "def handle_close(self):\n print(self.addr, \"bye\")\n self.close()", "def on_session_destroyed(session_context):\n if data.AUTO_SHUTDOWN:\n import sys\n\n sys.exit(\n \"\\033[1;31mThe session has ended - tab closed or timeout. \\n\\n --- Terminating the Forest progam and relinquishing control of port. ---\\033[1;00m\"\n )", "def end(self, won, reason):\n pass\n # replace with your end logic", "async def end(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n host = tod_games[room]['host']\n await amor_manager.say(\"Game Over in {}! Thank you to {} for hosting this game!\".format(room, host))\n del tod_games[room]" ]
[ "0.7442786", "0.7357788", "0.70812464", "0.7068161", "0.6758912", "0.67495376", "0.6671296", "0.6662621", "0.66588444", "0.6629597", "0.6612989", "0.6597301", "0.658959", "0.6576882", "0.657107", "0.65640134", "0.65556294", "0.64894456", "0.6479372", "0.64617205", "0.64595443", "0.64184713", "0.6407799", "0.64009345", "0.6375154", "0.6372171", "0.63383776", "0.6331625", "0.63315636", "0.6331007" ]
0.78706425
0
Track a download in Piwik
def track_download_request(download_url, download_title): from indico_piwik.plugin import PiwikPlugin if not download_url: raise ValueError("download_url can't be empty") if not download_title: raise ValueError("download_title can't be empty") request = PiwikRequest(server_url=PiwikPlugin.settings.get('server_api_url'), site_id=PiwikPlugin.settings.get('site_id_events'), api_token=PiwikPlugin.settings.get('server_token'), query_script=PiwikPlugin.track_script) action_url = quote(download_url) dt = datetime.now() request.call(idsite=request.site_id, rec=1, action_name=quote(download_title), url=action_url, download=action_url, h=dt.hour, m=dt.minute, s=dt.second)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download(self):\n pass", "def download(self):\n pass", "def download(self, download_request):\n raise NotImplementedError", "def download_progress(self, cloud_file, size, downloaded):", "def dowload_vt():\n print get_date_time_now() + \" ==> Download VT Samples started!\"\n print get_date_time_now() + \" ==> Nothing downloaded\"", "def download():\n raise NotImplementedError", "def download(self, url_match):\n pass", "def log_download(self, download):\n with self._conn.begin():\n self._conn.execute(\n \"VALUES (log_download(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s))\",\n (\n sanitize(download.filename),\n download.host,\n download.timestamp.astimezone(UTC).replace(tzinfo=None),\n sanitize(download.arch),\n sanitize(download.distro_name),\n sanitize(download.distro_version),\n sanitize(download.os_name),\n sanitize(download.os_version),\n sanitize(download.py_name),\n sanitize(download.py_version),\n sanitize(download.installer_name),\n sanitize(download.installer_version),\n sanitize(download.setuptools_version),\n ))", "def download(self, download_path):\n return", "def download():\n if auth.has_membership(1):\n user = \"Admin\"\n elif auth.has_membership(2):\n user = \"Examiner\"\n elif auth.has_membership(3):\n user = \"student\"\n elif auth.has_membership(5):\n user = \"Managment\"\n\n db.activity_log.insert( Title_entry=\"Download assignment\", \n referance_id=auth.user.id,\n remarks=\"content downloaded by {}\".format(user))\n db.commit()\n return response.download(request, db)", "def _download_single(url, to, id):\n if os.path.exists(to):\n error_flags[id] = 1\n return\n\n try:\n request = rq.Request(url=url, headers=forge_agent_header)\n info = rq.urlopen(request).read()\n\n except urllib.error.URLError as e:\n print(url, 'urllib error')\n error_flags[id] = 2\n return\n\n except Exception as e:\n print(url, e)\n error_flags[id] = 2\n return\n\n with open(to, \"wb\") as file:\n print(url, 'writing')\n file.write(info)\n\n error_flags[id] = 1", "def report_download(self, data, token):\n requestcontent = json.loads(data)\n if len(requestcontent) > 2:\n request_context = requestcontent[2]\n if 'progress_code' in request_context:\n context = request.env.context.copy()\n context.update(request_context)\n request._env = request.env(context=context)\n request._context = context\n web_progress_obj = request.env['web.progress']\n web_progress_obj.web_progress_percent(0, 'Report')\n ret = super(WPReportController, self).report_download(data, token)\n web_progress_obj.web_progress_percent(100, 'Report done')\n return ret", "def download_files(self):", "def onContentDownload(self, fetcher, numBytes): #$NON-NLS-1$\r", "def download_track(trackpath):\n\n if trackpath.startswith('gs://'):\n localpath = path.basename(trackpath)\n if not any([path.exists(localpath), path.isfile(localpath)]):\n subprocess.run(['gsutil', '-m', 'cp', trackpath, './'], check=True)\n\n elif any(trackpath.startswith(prefix) for prefix in 'http:// https:// ftp://'.split()):\n localpath = path.basename(trackpath)\n if not any([path.exists(localpath), path.isfile(localpath)]):\n subprocess.run(['wget', '--no-check-certificate', trackpath], check=True)\n\n else:\n localpath = trackpath\n\n # Correct for URL convention of replacing spaces with \"%20\"\n localpath = localpath.replace('%20', ' ')\n\n return localpath", "def download(self, release: CrossrefEventsRelease, **kwargs):\n release.download()", "def onContentDownloadStart(self, fetcher, contentLength): #$NON-NLS-1$\r", "def download(self, release: 
DoabRelease, **kwargs):\n # Download release\n release.download()", "def test_download(self):\n pass", "def report(self, url):\n\n print(self.get(url))", "def download_track(self, track = None, url = None):\n # check that track doesn't exist\n if url == None or track == None:\n return\n\n print \"Retrieving the name of the track.\"\n filename = self.get_track_filename(url)\n\n print \"Filename found: \" + filename\n \n if (filename, track.user[\"username\"]) in self.past_songs_db_data or \\\n (filename, \"\") in self.past_songs_db_data or \\\n os.path.isfile(filename): \n print \"File exists\"\n else:\n print \"Downloading\"\n filename = wget.download(url)\n self.set_track_metadata(track, filename, url)\n mp3_name = filename[:-4] + \".mp3\"\n\n # Save filename for future reference\n self.past_songs_db.write(filename + \"\\n\")\n self.past_songs_db_data.append((filename, track.user[\"username\"]))\n \n if not filename.endswith(\".mp3\"):\n self.past_songs_db.write(mp3_name + \"\\n\")\n self.past_songs_db_data.append((mp3_name, track.user[\"username\"]))\n \n print", "def _download(item):\n\n filename = item.filename()\n filename = os.path.join(item.vdir(), filename)\n logger.info(\"Downloading '%s' to %s\" % (item.show, filename))\n\n f = open(filename, \"wb\")\n\n buf = net.tivoget(item.show.url)\n for chunk in buf:\n f.write(chunk)\n\n f.close()\n\n item.downloaded = True\n item.save()", "def tracking_url(self) -> str:\n return pulumi.get(self, \"tracking_url\")", "def download_report(self, response):\n \n if self.is_visited(response.url) == True:\n return None\n \n def get_filename_from_url(url):\n #http://www.gtja.com/f//lotus/201510/20151023%20Company%20Report%2001816%20HK_addStamper_addEncrypt.pdf\n import re\n pattern = re.compile(\"http://www.gtja.com/f//lotus/(\\d+)/(.*)\")\n result = pattern.match(url)\n if result is None:\n return str(datetime.date.today()), hashlib.md5(url).hexdigest() + \".pdf\"\n else:\n #return str(datetime.date.today()), hashlib.md5(url).hexdigest() + \".pdf\"\n return result.group(1), unquote(result.group(2))\n \n date, name = get_filename_from_url(response.url) #TODO Create date directory.\n\n file_path = settings[\"FILES_STORE_PATH\"] + date + \"/\"\n if os.path.exists(file_path) != True:\n os.mkdir(file_path)\n\n filename = file_path + name\n with open(filename.decode(\"utf-8\"), \"wb\") as f: #TODO what is the diffenrence between \"w+\" and \"wb\"\n f.write(response.body)\n \n item = ReportFileItem()\n item[\"url\"] = unquote(response.url)\n item[\"date\"] = date\n item[\"path\"] = \"/\" + date + \"/\" + name #Relative path\n item[\"link\"] = response.meta[\"link_url\"]\n item[\"create_date\"] = datetime.datetime.now()\n \n self.visit(response.url)\n \n return item", "def onContentDownloadComplete(self, fetcher, connectionResp): #$NON-NLS-1$\r", "def post_download(self, remote_files):\n pass", "def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')", "def start_download(url):\n return _add_download_to_deluge(url)", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def download_report(request):\n params = request.query_params\n provider_uuid = params.get(\"provider_uuid\")\n bill_date = params.get(\"bill_date\")\n async_download_result = 
check_report_updates.delay(provider_uuid=provider_uuid, bill_date=bill_date)\n return Response({\"Download Request Task ID\": str(async_download_result)})" ]
[ "0.62857395", "0.62857395", "0.6190119", "0.61862576", "0.60990036", "0.60830104", "0.59435177", "0.5895922", "0.58797216", "0.58775985", "0.58351547", "0.5832015", "0.5803117", "0.579149", "0.5750479", "0.5704647", "0.5701332", "0.5676492", "0.5647217", "0.5612867", "0.5608241", "0.5603173", "0.55964583", "0.5589744", "0.5561809", "0.55588883", "0.5555596", "0.5519754", "0.5517934", "0.55163425" ]
0.7677783
0
T.__new__(S, ...) -> a new object with type S, a subtype of T
def __new__(S, *more): # real signature unknown; restored from __doc__ pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __new__(cls):\n return object.__new__(cls)", "def __new__(cls):\n return object.__new__(cls)", "def __newobj__(cls, *args):\n return cls.__new__(cls, *args)", "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "def __new__(self, ???):", "def __new__(S, *more): # real signature unknown; restored from __doc__\r\n pass", "def __new__(S, *more): # real signature unknown; restored from __doc__\r\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\r\n pass", "def create(cls, **dictionary):\n new_inst = cls.__new__(cls)\n if cls.__name__ == \"Rectangle\":\n new_inst.__init__(42, 98)\n elif cls.__name__ == \"Square\":\n new_inst.__init__(42)\n new_inst.update(**dictionary)\n return new_inst", "def __new__(cls, name):\n for sub_cls in cls.__subclasses__():\n if sub_cls.__name__ == name:\n return super().__new__(sub_cls)", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass", "def __new__(*args, **kwargs): # real signature unknown\n pass" ]
[ "0.72864395", "0.72864395", "0.7262999", "0.7261351", "0.7261351", "0.7261351", "0.7261351", "0.7228152", "0.72100246", "0.72100246", "0.66210115", "0.66210115", "0.66210115", "0.66210115", "0.66210115", "0.66210115", "0.6530774", "0.6427577", "0.6424623", "0.64146465", "0.64146465", "0.64146465", "0.64146465", "0.64146465", "0.64146465", "0.64146465", "0.64146465", "0.64146465", "0.64146465", "0.64146465" ]
0.73813504
0
Returns the names of all qualities with existing expert knowledge.
def get_qualities_with_expert_knowledge(self) -> List[str]: return sorted(list(set([q for _, q in self.expert_knowledge])))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_expert_knowledge_for_qualities(self, qualities: List[str]) -> List[str]:\n expert_knowledge = reduce(\n lambda res, q: res | set(p for p, _q in self.expert_knowledge if _q == q),\n qualities,\n set()\n )\n return sorted(list(expert_knowledge))", "def get_qualification_list(self):\n try:\n qualifications = self.db_handler.get_qualifications_list()\n self.logger.write_to_log('qualifications got', 'model')\n return qualifications\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def list_qualifications() -> Dict[str, Any]:\n qualifications_schema = QualificationSchema(many=True)\n qualifications = Qualification.query.all()\n return cast(Dict[str, Any], jsonify(qualifications_schema.dump(qualifications)))", "def get_parameters_affecting_qualites(self, qualities: List[str]) -> List[str]:\n parameters = reduce(\n lambda res, q: res | set(self.get_parameters_affecting_quality(q)),\n qualities,\n set()\n )\n return sorted(list(parameters))", "def get_assessment_terms(self):\n return # osid.assessment.AssessmentQueryInspector", "def get_exercise_recording_full_names(self):\n full_names = set()\n for er in self.exercise_recordings:\n full_names.add(er.full_name)\n return full_names", "def ieqs(self):\n return self.inequalities()", "def qps(self):\n from admin.models.qualification_pack import QualificationPack\n return QualificationPack.objects.filter(\n occupation=self, is_draft=False\n )", "def earned_hw_scores(self):\r\n return [s.earned for s in self.get_grade_summary()['totaled_scores']['Homework']]", "def get_knowledge_category_terms(self):\n return # osid.grading.GradeQueryInspector", "def end_effectors(self) -> list:\n S = self.parents\n return [[x, f\"q{x[1:]}\"] for x in S if S.out_degree(x) == 0]", "def get_well_aliases(self):\n return self.info_wells['well'].unique()", "def all_present_experiments(self):\n return _yield_subdir_names(self.exp_configs)", "def qalist(self):\n return self._palist.qalist", "def display_injury_type(self):\n return ', '.join(type_of_injury.name for genre in self.type_of_injury.all()[:3])", "def find_gene_name(qualifiers):\n if not isinstance(qualifiers, dict):\n raise TypeError(\"Expected qualifier dictionary\")\n for tag in [\"protein_id\", \"locus_tag\", \"id\", \"gene\", \"name\", \"label\"]:\n if tag in qualifiers:\n return qualifiers[tag][0]\n return \"N.A.\"", "def inconsistent_entityName(self):\n a = [s for s in self.subjects if len([sa for sa in s.samples if sa.inconsistent_entityName]) > 0]\n if len(a) == 0:\n return None\n return a", "def get_assessment_terms(self):\n return # osid.repository.AssetQueryInspector", "def get_level_terms(self):\n return # osid.grading.GradeQueryInspector", "def endog_names(self):\n return self.data.ynames", "def exog_names(self):\n return self.data.xnames", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def get_cognitive_process_terms(self):\n return # osid.grading.GradeQueryInspector", "def all_tone_qualities():\n\n tone_qualities = crud.get_tone_qualities()\n\n return render_template('all_tone_qualities.html', tone_qualities=tone_qualities)", "def getStudyNames(self):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_study_names', [results])\n 
study_name_list = []\n for row in results:\n if row[0] is None:\n continue\n else:\n study_name_list.append(row)\n return study_name_list\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def find_qualifications(\n self, qualification_name: Optional[str] = None\n ) -> List[Qualification]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from qualifications\n WHERE (?1 IS NULL OR qualification_name = ?1)\n \"\"\",\n (qualification_name,),\n )\n rows = c.fetchall()\n return [\n Qualification(\n self, str(r[\"qualification_id\"]), row=r, _used_new_call=True\n )\n for r in rows\n ]", "def getQualitativeSpecies(self):\n return _libsbml.Output_getQualitativeSpecies(self)", "def IEs(self):\n return self._ies", "def getEMPStudyList(self):\n try:\n studies = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_emp_study_list', [results])\n for row in results:\n # study_id, sample_id, sample_name, project_name, study_title, email, sample_count, metadata_complete,\n # study_score, sample_score, s.number_samples_promised, s.number_samples_in_freezer, \n # s.principal_investigator\n studies.append((row[0], row[1], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[9], row[10], row[11], row[12]))\n return studies\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)", "def getOqiNames( self ):\n\n if self.oqiNames:\n return self.oqiNames.keys()\n\n n = self.adb.get( \"nSrss\" )\n for indx in xrange( n ):\n name = self.adb.get( \"srsName\", indx )\n self.oqiNames[ name ] = indx\n\n return self.oqiNames.keys()" ]
[ "0.6796296", "0.6399958", "0.5745922", "0.570581", "0.56431264", "0.5579781", "0.5524196", "0.5486619", "0.5414175", "0.5371931", "0.5336818", "0.5334099", "0.5322436", "0.53192633", "0.5316867", "0.5283304", "0.5277264", "0.527555", "0.52340245", "0.5215532", "0.5200703", "0.5166302", "0.5160945", "0.51546645", "0.5123504", "0.51158535", "0.5115845", "0.51147926", "0.5109323", "0.50758994" ]
0.68818367
0
Returns the names of all parameters with existing expert knowledge.
def get_parameters_with_expert_knowledge(self) -> List[str]: return sorted(list(set([p for p, _ in self.expert_knowledge])))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameter_names(self) -> List[str]:", "def _get_fitted_param_names(self):\n return self._fitted_param_names", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def parameters(self):\n return []", "def get_ext_param_names(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_name(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def get_param_names(self):\n return list(self.params.keys())", "def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n return a, b", "def _get_param_names(self):\r\n return sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])", "def parameters(self):\n return [term.parameter for term in self.terms]", "def parameters_names(cls):\n return cls._Parameters._fields", "def get_ext_param_descriptions(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_description(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def parameter_names(self):\n raise NotImplementedError(\"the parameter_names property should \"\n \"be defined in the Estimator sub-class\")", "def get_param_names(hf):\n parameters = get_params(hf)\n return [p.name for p in parameters]", "def potential_parameters(cls):\n return [\"k\", \"length\"]", "def getParameters(self): #$NON-NLS-1$\r", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def parameter_names(self):\n return [x for x in self.transformations.values() if isinstance(x, str)]", "def get_param_texts(self):\n return self.param_texts", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def parameters(self):\n ps = super().parameters()\n exclude = set(self.estimator.parameters())\n ps = (p for p in ps if not p in exclude)\n return ps", "def get_params(self):\n return []", "def get_layer_var_names(self):\n return(self.params)", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def param_unc_names(self) -> List[str]:\n return self._param_unc_names(self.model).decode(\"utf-8\").split(\",\")", "def param(self):\r\n\r\n return []", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def param(self):\n return []", "def param(self):\n return []", "def return_parameter_names():\n return list(titles), list(labels)" ]
[ "0.6681219", "0.65302336", "0.6523821", "0.6485351", "0.64587325", "0.64377654", "0.63435304", "0.63268703", "0.6247378", "0.62124157", "0.6184946", "0.61841184", "0.6173412", "0.6156799", "0.61424565", "0.6138973", "0.6092514", "0.6074142", "0.6068428", "0.60556334", "0.6051626", "0.6050011", "0.6045184", "0.6039994", "0.60339165", "0.60132223", "0.60107785", "0.601017", "0.601017", "0.60091674" ]
0.7096718
0
Returns the names of all qualities that are affected by the given parameter.
def get_qualities_affected_by_paramter(self, parameter: str) -> List[str]: return [q for p, q in self.correlating_pq_tuples if p == parameter]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_parameters_affecting_qualites(self, qualities: List[str]) -> List[str]:\n parameters = reduce(\n lambda res, q: res | set(self.get_parameters_affecting_quality(q)),\n qualities,\n set()\n )\n return sorted(list(parameters))", "def get_parameters_affecting_quality(self, quality: str) -> List[str]:\n return [p for p, q in self.correlating_pq_tuples if q == quality]", "def get_qualification_list(self):\n try:\n qualifications = self.db_handler.get_qualifications_list()\n self.logger.write_to_log('qualifications got', 'model')\n return qualifications\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def parameter_names(self) -> List[str]:", "def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True):\n if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)\n else: adjust = lambda x: x\n if recursive: names = [xi for x in self.parameters for xi in x.parameter_names(add_self=True, adjust_for_printing=adjust_for_printing)]\n else: names = [adjust(x.name) for x in self.parameters]\n if add_self: names = map(lambda x: adjust(self.name) + \".\" + x, names)\n return names", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def param_names(\n self, *, include_tp: bool = False, include_gq: bool = False\n ) -> List[str]:\n return (\n self._param_names(self.model, int(include_tp), int(include_gq))\n .decode(\"utf-8\")\n .split(\",\")\n )", "def parameters_names(cls):\n return cls._Parameters._fields", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! 
DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def parameters(self):\n return [term.parameter for term in self.terms]", "def parameter_names(self):\n raise NotImplementedError(\"the parameter_names property should \"\n \"be defined in the Estimator sub-class\")", "def getResRatioVarNames( self ):\n\n self.updateAdb( )\n\n return self.resNames.keys()", "def list_qualifications() -> Dict[str, Any]:\n qualifications_schema = QualificationSchema(many=True)\n qualifications = Qualification.query.all()\n return cast(Dict[str, Any], jsonify(qualifications_schema.dump(qualifications)))", "def getListOfQualitativeSpecies(self, *args):\n return _libsbml.QualModelPlugin_getListOfQualitativeSpecies(self, *args)", "def get_expert_knowledge_for_parameter(self, parameter: str) -> List[str]:\n expert_knowledge = [q for (p, q) in self.expert_knowledge if p == parameter]\n return sorted(list(set(expert_knowledge)))", "def get_param(self, obs_name):\n return self.datasets[obs_name].keys()", "def get_param_names(hf):\n parameters = get_params(hf)\n return [p.name for p in parameters]", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def get_paramnames_list(self):\n # TODO include syselem?\n\n query = \"SELECT NAME FROM %s\" % self.__schema\n with self.__connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n return [val['NAME'] for val in result]", "def get_str_param_names(self):\n # Exclude self.api and self.names from the command string\n return self.get_attribute_names(FormattedParameter)", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def getName(self):\n return _libsbml.QualitativeSpecies_getName(self)", "def _get_fitted_param_names(self):\n return self._fitted_param_names", "def get_qualities_with_expert_knowledge(self) -> List[str]:\n return sorted(list(set([q for _, q in self.expert_knowledge])))", "def GetResourceNames(self):\r\n return [x.name for x in self.resources]", "def getName(self):\n return _libsbml.QualExtension_getName(self)", "def parameters(self):\n return [o.parameters for o in self.obs]", "def find_qualifications(\n self, qualification_name: Optional[str] = None\n ) -> List[Qualification]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from qualifications\n WHERE (?1 IS NULL OR qualification_name = ?1)\n \"\"\",\n (qualification_name,),\n )\n rows = c.fetchall()\n return [\n Qualification(\n self, str(r[\"qualification_id\"]), row=r, _used_new_call=True\n )\n for r in rows\n ]", "def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n return a, b" ]
[ "0.7138901", "0.65434414", "0.60828495", "0.5970741", "0.5706705", "0.56861335", "0.5615085", "0.5553577", "0.5414541", "0.53965795", "0.5352581", "0.5300973", "0.52954173", "0.52879333", "0.5286698", "0.5282407", "0.5249492", "0.5248632", "0.52475625", "0.52351964", "0.5211867", "0.5166537", "0.5151962", "0.51517856", "0.51467603", "0.51409054", "0.5120053", "0.5111621", "0.5098414", "0.5093125" ]
0.7769609
0
Returns the names of all parameters affecting the given quality.
def get_parameters_affecting_quality(self, quality: str) -> List[str]: return [p for p, q in self.correlating_pq_tuples if q == quality]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_parameters(self, quality):\n if (quality.upper() == 'NAV') or (quality.upper() == 'NAVIGATION'):\n return self._sensor_param_dict['navigation'].copy()\n \n elif (quality.upper() == 'TAC') or (quality.upper() == 'TACTICAL'):\n return self._sensor_param_dict['tactical'].copy()\n \n elif (quality.upper() == 'CON') or (quality.upper() == 'CONSUMER'):\n return self._sensor_param_dict['consumer'].copy()", "def get_parameters_affecting_qualites(self, qualities: List[str]) -> List[str]:\n parameters = reduce(\n lambda res, q: res | set(self.get_parameters_affecting_quality(q)),\n qualities,\n set()\n )\n return sorted(list(parameters))", "def get_qualities_affected_by_paramter(self, parameter: str) -> List[str]:\n return [q for p, q in self.correlating_pq_tuples if p == parameter]", "def parameter_names(self) -> List[str]:", "def print_quality(self, quality):\n # Find the parameter dictionary for the desired quality level\n \n imu = self.get_parameters(quality)\n \n print '\\n%s Quality \\n-------------------------' % quality.upper()\n print ' %30s' % 'GYRO'\n print ' %22s \\t %g' % ('Wide Band Noise', imu['sigma_w_g'])\n print ' %22s \\t %g' % ('Constant Bias' , imu['sigma_n_g'])\n print ' %22s \\t %g' % ('Bias Stability' , imu['sigma_c_g'])\n print ' %22s \\t %g' % ('Bias Correlation Time', imu['tau_g'])\n print '\\n'\n \n print ' %35s' % 'ACCELEROMETER'\n print ' %22s \\t %g' % ('Wide Band Noise', imu['sigma_w_f'])\n print ' %22s \\t %g' % ('Constant Bias' , imu['sigma_n_f'])\n print ' %22s \\t %g' % ('Bias Stability' , imu['sigma_c_f'])\n print ' %22s \\t %g' % ('Bias Correlation Time', imu['tau_f'])\n print '\\n'", "def show_parameters(self):\n with np.printoptions(precision=3, suppress=True):\n print('number of wind phase = {}'.format(self.ncomp))\n print('galactic parameter = {}'.format(self.scaling_field))\n print('reference height = {}'.format(self.z0))\n for p in ['cool_params','hot_params','params','ref_params','scaling_params']:\n params = getattr(self,p)\n print(p)\n for k,v in params.items():\n print(' {} = {}'.format(k,v))", "def potential_parameters(cls):\n return [\"k\", \"length\"]", "def parameters_names(cls):\n return cls._Parameters._fields", "def quality(self):\n try:\n qid = int((self.tool_metadata or {}).get(\"quality\", 0))\n except:\n qid = 0\n\n # We might be able to get the quality strings from the item's tags\n internal_name, name = \"normal\", \"Normal\"\n if self.tags:\n tags = {x.get('category'): x for x in self.tags}\n if 'Quality' in tags:\n internal_name, name = tags['Quality'].get('internal_name'), tags['Quality'].get('name')\n\n return qid, internal_name, name", "def parameter_names(self):\n raise NotImplementedError(\"the parameter_names property should \"\n \"be defined in the Estimator sub-class\")", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def potential_parameters(cls):\n return [\"k\", \"angle\"]", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! 
DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def get_str_param_names(self):\n # Exclude self.api and self.names from the command string\n return self.get_attribute_names(FormattedParameter)", "def derived_parameters(cls):\n return ['cgg', 'cdd', 'css', 'cbb', 'vstar', 'gain', 'ft']", "def get_param_names(hf):\n parameters = get_params(hf)\n return [p.name for p in parameters]", "def supported_parameters(cls):\n return [\"smirks\", \"id\", \"k\", \"periodicity\", \"phase\", \"idivf\"]", "def set_quality(self):\n p = self.suitability + 1.15 * self.fono\n self.quality = np.exp(p) / (1 + np.exp(p))", "def derived_parameters(cls):\n return ['cgg', 'cdd', 'vstar', 'gain', 'ft']", "def potential_parameters(cls):\n return [\"length\", \"distance\"]", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def param_info():\n\n\tgizmo_names = syn.getGizmoNames()\n\n\tfor gizmo in gizmo_names:\n\t\tparams = syn.getParameterNames(gizmo)\n\t#doesnt get all parameters from gizmos i.e. WaveFreq\n\n\t# get all info on the 'WaveFreq' parameter\n\tGIZMO = 'aStim2'\n\tPARAMETER = 'WaveFreq'\n\n\t# info = syn.getParameterInfo(GIZMO, PARAMETER)\n\t#\n\t# # get the array size (should be 100)\n\t# sz = syn.getParameterSize(GIZMO, PARAMETER)\n\t#\n\t# # write values 1 to 50 in second half of buffer\n\t# result = syn.setParameterValues(GIZMO, PARAMETER, np.arange(1, 51), 50)\n\t#\n\t# # read all values from buffer\n\t# syn.getParameterValues(GIZMO, PARAMETER, sz)\n\t#\n\t# # get all info on the 'Go' parameter\n\t# PARAMETER = 'Go'\n\t# info = syn.getParameterInfo(GIZMO, PARAMETER)\n\t#\n\t# # flip the switch\n\t# result = syn.setParameterValue(GIZMO, PARAMETER, 1)\n\t#\n\t# # check the value\n\tfreq = syn.getParameterValue(GIZMO, PARAMETER)\n\tprint('value =', freq)\n\tfreq = [freq]\n\n\t# also verify visually that the switch slipped in the run\n\t# time interface. This state change will be logged just\n\t# like any other variable change and saved with the runtime\n\t# state.\n\n\tnumTrials = 5 #total number of trials across stimuli\n\tISI = [2.0, 3.0, 4.0, 5.0] # ISI in seconds\n\n\t# flash parameters\n\tflash_dur = [.001] # flash durs in seconds (100 ms, 200 ms)\n\tluminance = [[1, 1, 1], [.86, .86, .86], [0, .1, 1]] # white , grayish, purple just for testing\n\n\t# auditory parameters\n\tduration = [.005] # in seconds; pulseDur in TDT\n\tsound_levels = [20.0, 40.0, 60.0, 80.0] # dB; waveAmp in TDT\n\n\t# Auditory on (T/F? 
if T then A+V, if F then Visual only)\n\tstims = {0: \"auditory_only\",\n\t\t\t 1: \"visual_only\",\n\t\t\t 2: \"A+V\"\n\t\t\t }\n\n\texper = Experiment(numTrials=numTrials, ISI=ISI, flash_dur=flash_dur, luminance=luminance, wave_freq=freq,\n\t\t\t\t\t pulse_dur=duration, wave_amp=sound_levels, stimulus=stims)\n\texper.run_experiment()", "def parameters(self):\n return []", "def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n return a, b", "def get_parameter_estimation_parameters(self, friendly=True):\n #Get the sensitivities task:\n fitTask=self._getTask('parameterFitting')\n fitProblem = fitTask.find(xmlns + 'Problem')\n optimizationItems = fitProblem.find(xmlns + 'ParameterGroup')\n parameters = []\n for subGroup in optimizationItems:\n name = None\n lowerBound = None\n upperBound = None\n startValue = None\n \n for item in subGroup:\n if item.attrib['name'] == 'ObjectCN':\n name = item.attrib['value']\n elif item.attrib['name'] == 'UpperBound':\n upperBound = item.attrib['value']\n elif item.attrib['name'] == 'LowerBound':\n lowerBound = item.attrib['value']\n elif item.attrib['name'] == 'StartValue':\n startValue = item.attrib['value']\n assert name !=None\n assert lowerBound != None\n assert upperBound != None\n assert startValue != None\n \n if friendly:\n #Construct a user-friendly name for the parameter name using regexs\n #Look for a match for global parameters: Vector=Values[Test parameter],\n global_string = r'.*Vector=Values\\[(?P<name>.*)\\].*'\n global_string_re = re.compile(global_string)\n global_match = re.match(global_string_re, name)\n \n if global_match:\n name = global_match.group('name')\n \n #else check for a local match.\n #Vector=Reactions[Reaction] Parameter=k1\n local_string = r'.*Vector=Reactions\\[(?P<reaction>.*)\\].*Parameter=(?P<parameter>.*),Reference=Value.*'\n local_string_re = re.compile(local_string)\n local_match = re.match(local_string_re, name)\n \n if local_match:\n reaction = local_match.group('reaction')\n parameter = local_match.group('parameter')\n name = '(%s).%s'%(reaction, parameter)\n\n parameters.append((name, lowerBound, upperBound, startValue))\n\n return parameters" ]
[ "0.6934809", "0.67903876", "0.6167672", "0.61589646", "0.6091067", "0.6062602", "0.59788996", "0.59663045", "0.5930198", "0.5925212", "0.5914016", "0.587946", "0.57363737", "0.5702479", "0.57009995", "0.57009995", "0.5699666", "0.5673059", "0.5664768", "0.5636903", "0.5629475", "0.5623803", "0.5621273", "0.5620138", "0.56011814", "0.55965364", "0.5592449", "0.55895233", "0.5578792", "0.5577558" ]
0.8470386
0
Returns the names of all parameters affecting the given qualities.
def get_parameters_affecting_qualites(self, qualities: List[str]) -> List[str]: parameters = reduce( lambda res, q: res | set(self.get_parameters_affecting_quality(q)), qualities, set() ) return sorted(list(parameters))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_parameters_affecting_quality(self, quality: str) -> List[str]:\n return [p for p, q in self.correlating_pq_tuples if q == quality]", "def get_qualities_affected_by_paramter(self, parameter: str) -> List[str]:\n return [q for p, q in self.correlating_pq_tuples if p == parameter]", "def parameter_names(self) -> List[str]:", "def parameters_names(cls):\n return cls._Parameters._fields", "def param_names(\n self, *, include_tp: bool = False, include_gq: bool = False\n ) -> List[str]:\n return (\n self._param_names(self.model, int(include_tp), int(include_gq))\n .decode(\"utf-8\")\n .split(\",\")\n )", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def get_param_names(hf):\n parameters = get_params(hf)\n return [p.name for p in parameters]", "def parameter_names(self):\n raise NotImplementedError(\"the parameter_names property should \"\n \"be defined in the Estimator sub-class\")", "def _get_fitted_param_names(self):\n return self._fitted_param_names", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def get_str_param_names(self):\n # Exclude self.api and self.names from the command string\n return self.get_attribute_names(FormattedParameter)", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def get_param_names(self):\n return list(self.params.keys())", "def parameters(self):\n return [term.parameter for term in self.terms]", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def _get_param_names_transformed(self):\r\n n = self._get_param_names()\r\n\r\n # remove/concatenate the tied parameter names\r\n if len(self.tied_indices):\r\n for t in self.tied_indices:\r\n n[t[0]] = \"<tie>\".join([n[tt] for tt in t])\r\n remove = np.hstack([t[1:] for t in self.tied_indices])\r\n else:\r\n remove = np.empty(shape=(0,), dtype=np.int)\r\n\r\n # also remove the fixed params\r\n if len(self.fixed_indices):\r\n remove = np.hstack((remove, np.hstack(self.fixed_indices)))\r\n\r\n # add markers to show that some variables are constrained\r\n for i, t in zip(self.constrained_indices, self.constraints):\r\n for ii in i:\r\n n[ii] = n[ii] + t.__str__()\r\n\r\n n = [nn for i, nn in enumerate(n) if not i in remove]\r\n return n", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def parameters(self):\n return [i for i in self.variables if has_roles(i, Parameter)]", "def get_parameter_names(self, exclude_pop_model=False):\n if (self._population_model is None) or exclude_pop_model:\n names = self._mechanistic_model.parameters()\n for error_model in self._error_models:\n names += error_model.get_parameter_names()\n return names\n\n return self._population_model.get_parameter_names()", "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def potential_parameters(cls):\n return [\"k\", \"length\"]", "def _get_param_names(self):\r\n return 
sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])", "def potential_parameters(cls):\n return [\"length\", \"distance\"]", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def parameters(self):\n return [o.parameters for o in self.obs]", "def get_params_list():\n return common.QOL_PARAMS", "def potential_parameters(cls):\n return [\"k\", \"angle\"]" ]
[ "0.684624", "0.6697432", "0.64668196", "0.6275496", "0.62012315", "0.60170823", "0.5992412", "0.597497", "0.5971461", "0.5962293", "0.5953351", "0.5907317", "0.58813965", "0.5875759", "0.58324844", "0.57914823", "0.5765543", "0.57585037", "0.5748246", "0.5748246", "0.5661398", "0.5660464", "0.559595", "0.55902904", "0.55680096", "0.55584663", "0.5547049", "0.5524267", "0.55196404", "0.55166405" ]
0.82784075
0
Returns the names of all parameters that are expert knowledge of the given qualities.
def get_expert_knowledge_for_qualities(self, qualities: List[str]) -> List[str]:
    expert_knowledge = reduce(
        lambda res, q: res | set(p for p, _q in self.expert_knowledge if _q == q),
        qualities,
        set()
    )
    return sorted(list(expert_knowledge))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_parameters_affecting_qualites(self, qualities: List[str]) -> List[str]:\n parameters = reduce(\n lambda res, q: res | set(self.get_parameters_affecting_quality(q)),\n qualities,\n set()\n )\n return sorted(list(parameters))", "def get_parameters_with_expert_knowledge(self) -> List[str]:\n return sorted(list(set([p for p, _ in self.expert_knowledge])))", "def get_parameters_affecting_quality(self, quality: str) -> List[str]:\n return [p for p, q in self.correlating_pq_tuples if q == quality]", "def get_qualities_with_expert_knowledge(self) -> List[str]:\n return sorted(list(set([q for _, q in self.expert_knowledge])))", "def _get_fitted_param_names(self):\n return self._fitted_param_names", "def parameters(self):\n return [term.parameter for term in self.terms]", "def get_qualities_affected_by_paramter(self, parameter: str) -> List[str]:\n return [q for p, q in self.correlating_pq_tuples if p == parameter]", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def parameters_names(cls):\n return cls._Parameters._fields", "def potential_parameters(cls):\n return [\"k\", \"length\"]", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def parameter_names(self) -> List[str]:", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def parameter_names(self):\n raise NotImplementedError(\"the parameter_names property should \"\n \"be defined in the Estimator sub-class\")", "def get_ext_param_names(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_name(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def supported_qparams(self):\n import re\n return re.findall(r\"\\$\\$\\{(\\w+)\\}\", self.QTEMPLATE)", "def get_model_parameter_names():\n params = ['mu', 'rho']\n return params", "def get_param_names(hf):\n parameters = get_params(hf)\n return [p.name for p in parameters]", "def supported_parameters(cls):\n return [\"smirks\", \"id\", \"k\", \"periodicity\", \"phase\", \"idivf\"]", "def get_expert_knowledge_for_parameter(self, parameter: str) -> List[str]:\n expert_knowledge = [q for (p, q) in self.expert_knowledge if p == parameter]\n return sorted(list(set(expert_knowledge)))", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! 
DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def get_qualification_list(self):\n try:\n qualifications = self.db_handler.get_qualifications_list()\n self.logger.write_to_log('qualifications got', 'model')\n return qualifications\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def potential_parameters(cls):\n return [\"length\", \"distance\"]", "def potential_parameters(cls):\n return [\"k\", \"angle\"]", "def get_parameters(self, quality):\n if (quality.upper() == 'NAV') or (quality.upper() == 'NAVIGATION'):\n return self._sensor_param_dict['navigation'].copy()\n \n elif (quality.upper() == 'TAC') or (quality.upper() == 'TACTICAL'):\n return self._sensor_param_dict['tactical'].copy()\n \n elif (quality.upper() == 'CON') or (quality.upper() == 'CONSUMER'):\n return self._sensor_param_dict['consumer'].copy()", "def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }", "def get_param_names(self):\n return list(self.params.keys())", "def parameters(self):\n return []", "def return_all_parameter_names():\n a = list(titles)\n a.append(r\"$\\chi^{2}$ per degree of freedom\")\n b = list(labels)\n b.append(\"chi2_per_dof\")\n return a, b" ]
[ "0.7242944", "0.6727106", "0.6571987", "0.63019365", "0.6018897", "0.60036373", "0.57837", "0.57691544", "0.5760132", "0.5750516", "0.5735969", "0.5735225", "0.56694657", "0.56694657", "0.56474066", "0.56144816", "0.5601561", "0.55747986", "0.55496305", "0.5520627", "0.55122304", "0.5489969", "0.54870766", "0.5476984", "0.5465503", "0.5437874", "0.54369843", "0.5421734", "0.54205024", "0.5419641" ]
0.6919965
1
Returns the names of all qualities that are expert knowledge of the given parameter.
def get_expert_knowledge_for_parameter(self, parameter: str) -> List[str]:
    expert_knowledge = [q for (p, q) in self.expert_knowledge if p == parameter]
    return sorted(list(set(expert_knowledge)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_qualities_affected_by_paramter(self, parameter: str) -> List[str]:\n return [q for p, q in self.correlating_pq_tuples if p == parameter]", "def get_qualities_with_expert_knowledge(self) -> List[str]:\n return sorted(list(set([q for _, q in self.expert_knowledge])))", "def get_expert_knowledge_for_qualities(self, qualities: List[str]) -> List[str]:\n expert_knowledge = reduce(\n lambda res, q: res | set(p for p, _q in self.expert_knowledge if _q == q),\n qualities,\n set()\n )\n return sorted(list(expert_knowledge))", "def get_parameters_with_expert_knowledge(self) -> List[str]:\n return sorted(list(set([p for p, _ in self.expert_knowledge])))", "def get_parameters_affecting_quality(self, quality: str) -> List[str]:\n return [p for p, q in self.correlating_pq_tuples if q == quality]", "def get_parameters_affecting_qualites(self, qualities: List[str]) -> List[str]:\n parameters = reduce(\n lambda res, q: res | set(self.get_parameters_affecting_quality(q)),\n qualities,\n set()\n )\n return sorted(list(parameters))", "def get_qualification_list(self):\n try:\n qualifications = self.db_handler.get_qualifications_list()\n self.logger.write_to_log('qualifications got', 'model')\n return qualifications\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def list_qualifications() -> Dict[str, Any]:\n qualifications_schema = QualificationSchema(many=True)\n qualifications = Qualification.query.all()\n return cast(Dict[str, Any], jsonify(qualifications_schema.dump(qualifications)))", "def find(self):\n self._get_ids()\n G.debug_(F'Experiments found with matching cross-experiment key and algorithm: {len(self.experiment_ids)}')\n self._get_scored_params()\n self._filter_by_space()\n G.debug_(F'Experiments whose hyperparameters fit in the currently defined space: {len(self.hyperparameters_and_scores)}')\n\n if self.module_name == 'keras':\n if ('model_init_params', 'compile_params', 'optimizer') in self.hyperparameter_space.get_names():\n self._filter_by_guidelines_multi(('model_init_params', 'compile_params', 'optimizer'))\n else:\n self._filter_by_guidelines()\n else:\n self._filter_by_guidelines()\n G.debug_(F'Experiments whose hyperparameters match the current guidelines: {len(self.similar_experiments)}')", "def parameters(self):\n return [term.parameter for term in self.terms]", "def getQualitativeSpecies(self):\n return _libsbml.Input_getQualitativeSpecies(self)", "def measurements(self):\n exp_type = 'Q_MS_MEASUREMENT'\n path = \"/%s/%s\" % (space, project)\n search = self.transaction.getSearchService()\n exps = search.listExperiments(path)\n return [exp for exp in exps if exp.getExperimentType() == exp_type]", "def _get_fitted_param_names(self):\n return self._fitted_param_names", "def getQualitativeSpecies(self):\n return _libsbml.Output_getQualitativeSpecies(self)", "def describe_qual(df):\n\n categorical = df.dtypes[df.dtypes == \"object\"].index\n df[categorical].describe()", "def get_knowledge_category_terms(self):\n return # osid.grading.GradeQueryInspector", "def get_evaluation_analysis_types(self, parameters):\n eval_types =[]\n for evaluation_criteria_id in parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"]:\n# for subcriteria in parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"][evaluation_criteria_id]:\n# eval_types.append(subcriteria)\n 
eval_types.extend(parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"][evaluation_criteria_id].keys())\n return list(set(eval_types))", "def find_qualifications(\n self, qualification_name: Optional[str] = None\n ) -> List[Qualification]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from qualifications\n WHERE (?1 IS NULL OR qualification_name = ?1)\n \"\"\",\n (qualification_name,),\n )\n rows = c.fetchall()\n return [\n Qualification(\n self, str(r[\"qualification_id\"]), row=r, _used_new_call=True\n )\n for r in rows\n ]", "def getListOfQualitativeSpecies(self, *args):\n return _libsbml.QualModelPlugin_getListOfQualitativeSpecies(self, *args)", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def parameter_names(self) -> List[str]:", "def find_gene_name(qualifiers):\n if not isinstance(qualifiers, dict):\n raise TypeError(\"Expected qualifier dictionary\")\n for tag in [\"protein_id\", \"locus_tag\", \"id\", \"gene\", \"name\", \"label\"]:\n if tag in qualifiers:\n return qualifiers[tag][0]\n return \"N.A.\"", "def getElementName(self):\n return _libsbml.ListOfQualitativeSpecies_getElementName(self)", "def get_requisite_objective_terms(self):\n return # osid.learning.ObjectiveQueryInspector", "def get_hyperparameter_names():\n params = ['mu', 'nu', 'r', 's']\n return params", "def getName(self):\n return _libsbml.QualExtension_getName(self)", "def getName(self):\n return _libsbml.QualitativeSpecies_getName(self)", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def qps(self):\n from admin.models.qualification_pack import QualificationPack\n return QualificationPack.objects.filter(\n occupation=self, is_draft=False\n )", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames" ]
[ "0.69750583", "0.6972947", "0.6572471", "0.6532671", "0.6255699", "0.62491757", "0.5842264", "0.5311929", "0.52768415", "0.5177957", "0.5048739", "0.50431305", "0.502833", "0.49590343", "0.49437335", "0.4934118", "0.49310294", "0.49303398", "0.49248543", "0.4907331", "0.48799762", "0.48762646", "0.48758587", "0.48745614", "0.48654816", "0.4844379", "0.48338932", "0.48101285", "0.4807595", "0.48019952" ]
0.7195483
0
Method tests timetable records in the STATE_EMBRYO state.
def test_state_embryo(self):
    self.pipeline_real.insert_uow = then_return_uow
    pipeline = spy(self.pipeline_real)
    job_record = get_job_record(job.STATE_EMBRYO, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
    pipeline.manage_pipeline_for_process(job_record.process_name, job_record)
    verify(self.time_table_mocked). \
        update_job_record(any(str), any(Job), any(UnitOfWork), any(str))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_preset_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=0).\\\n # _process_state_final_run(any(str), any(Job))", "def test_future_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_raise\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_FUTURE_TIMEPERIOD, PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))", "def test_retrieve_instances_schedule_state(self):\n pass", "def test_time_type_state_is_noon(day):\n\n assert day_time_info(day.hours_0).is_noon is False\n assert day_time_info(day.hours_1).is_noon is False\n assert day_time_info(day.hours_2).is_noon is False\n assert day_time_info(day.hours_3).is_noon is False\n assert day_time_info(day.hours_4).is_noon is False\n assert day_time_info(day.hours_5).is_noon is False\n assert day_time_info(day.hours_6).is_noon is False\n assert day_time_info(day.hours_7).is_noon is False\n assert day_time_info(day.hours_8).is_noon is False\n assert day_time_info(day.hours_9).is_noon is False\n assert day_time_info(day.hours_10).is_noon is False\n assert day_time_info(day.hours_11).is_noon is False\n assert day_time_info(day.hours_12).is_noon is True\n assert day_time_info(day.hours_13).is_noon is False\n assert day_time_info(day.hours_14).is_noon is False\n assert day_time_info(day.hours_15).is_noon is False\n assert day_time_info(day.hours_16).is_noon is False\n assert day_time_info(day.hours_17).is_noon is False\n assert day_time_info(day.hours_18).is_noon is False\n assert day_time_info(day.hours_19).is_noon is False\n assert day_time_info(day.hours_20).is_noon is False\n assert day_time_info(day.hours_21).is_noon is False\n assert day_time_info(day.hours_22).is_noon is False\n assert day_time_info(day.hours_23).is_noon is False", "def test_table_false_positives(self):\n pass", "def test_state(self, api, state):\n stream = AdsInsights(api=api, start_date=datetime(2010, 1, 1), end_date=datetime(2011, 1, 1), insights_lookback_window=28)\n\n assert stream.state == {}\n\n stream.state = state\n actual_state = stream.state\n actual_state[\"slices\"] = sorted(actual_state.get(\"slices\", []))\n state[\"slices\"] = sorted(state.get(\"slices\", []))\n state[\"time_increment\"] = 1\n\n assert actual_state == state", "def test_time_type_state_types(day):\n\n assert 
day_time_info(day.hours_0).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_1).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_2).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_3).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_4).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_5).types == {TimeType.MORNING}\n assert day_time_info(day.hours_6).types == {TimeType.MORNING}\n assert day_time_info(day.hours_7).types == {TimeType.MORNING}\n assert day_time_info(day.hours_8).types == {TimeType.MORNING}\n assert day_time_info(day.hours_9).types == {TimeType.MORNING}\n assert day_time_info(day.hours_10).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_11).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_12).types == {TimeType.NOON}\n assert day_time_info(day.hours_13).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_14).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_15).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_16).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_17).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_18).types == {TimeType.EVENING}\n assert day_time_info(day.hours_19).types == {TimeType.EVENING}\n assert day_time_info(day.hours_20).types == {TimeType.EVENING}\n assert day_time_info(day.hours_21).types == {TimeType.EVENING}\n assert day_time_info(day.hours_22).types == {TimeType.EVENING}\n assert day_time_info(day.hours_23).types == {TimeType.NIGHT}", "def test_time_type_state_is_afternoon(day):\n\n assert day_time_info(day.hours_0).is_afternoon is False\n assert day_time_info(day.hours_1).is_afternoon is False\n assert day_time_info(day.hours_2).is_afternoon is False\n assert day_time_info(day.hours_3).is_afternoon is False\n assert day_time_info(day.hours_4).is_afternoon is False\n assert day_time_info(day.hours_5).is_afternoon is False\n assert day_time_info(day.hours_6).is_afternoon is False\n assert day_time_info(day.hours_7).is_afternoon is False\n assert day_time_info(day.hours_8).is_afternoon is False\n assert day_time_info(day.hours_9).is_afternoon is False\n assert day_time_info(day.hours_10).is_afternoon is False\n assert day_time_info(day.hours_11).is_afternoon is False\n assert day_time_info(day.hours_12).is_afternoon is False\n assert day_time_info(day.hours_13).is_afternoon is True\n assert day_time_info(day.hours_14).is_afternoon is True\n assert day_time_info(day.hours_15).is_afternoon is True\n assert day_time_info(day.hours_16).is_afternoon is True\n assert day_time_info(day.hours_17).is_afternoon is True\n assert day_time_info(day.hours_18).is_afternoon is False\n assert day_time_info(day.hours_19).is_afternoon is False\n assert day_time_info(day.hours_20).is_afternoon is False\n assert day_time_info(day.hours_21).is_afternoon is False\n assert day_time_info(day.hours_22).is_afternoon is False\n assert day_time_info(day.hours_23).is_afternoon is False", "def test_time_type_state_is_evening(day):\n\n assert day_time_info(day.hours_0).is_evening is False\n assert day_time_info(day.hours_1).is_evening is False\n assert day_time_info(day.hours_2).is_evening is False\n assert day_time_info(day.hours_3).is_evening is False\n assert day_time_info(day.hours_4).is_evening is False\n assert day_time_info(day.hours_5).is_evening is False\n assert day_time_info(day.hours_6).is_evening is False\n assert day_time_info(day.hours_7).is_evening is False\n assert day_time_info(day.hours_8).is_evening is False\n assert 
day_time_info(day.hours_9).is_evening is False\n assert day_time_info(day.hours_10).is_evening is False\n assert day_time_info(day.hours_11).is_evening is False\n assert day_time_info(day.hours_12).is_evening is False\n assert day_time_info(day.hours_13).is_evening is False\n assert day_time_info(day.hours_14).is_evening is False\n assert day_time_info(day.hours_15).is_evening is False\n assert day_time_info(day.hours_16).is_evening is False\n assert day_time_info(day.hours_17).is_evening is False\n assert day_time_info(day.hours_18).is_evening is True\n assert day_time_info(day.hours_19).is_evening is True\n assert day_time_info(day.hours_20).is_evening is True\n assert day_time_info(day.hours_21).is_evening is True\n assert day_time_info(day.hours_22).is_evening is True\n assert day_time_info(day.hours_23).is_evening is False", "def test_exp_by_states(self):\n api = my_mock.api_mock({\"items\": [{'state': 'Waiting', 'id': 10134},\n {'state': 'Waiting', 'id': 10135},\n {'state': 'Running', 'id': 10130}]})\n states_d = helpers.exps_by_states_dict(api, helpers.ACTIVE_STATES)\n self.assertEquals(\n {'Waiting': [10134, 10135], 'Running': [10130]}, states_d)\n my_mock.api_mock_stop()", "def test_transfer_to_final_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). 
\\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=1).\\\n # _process_state_final_run(any(str), any(Job))", "def test_get_state_comparison_stats_ailments(self):\n\n # AilmentType and Ailment breakdown per AilmentType should be in returned stats if present\n ailment_type_headache = AilmentTypeFactory(name='Headache')\n ailment_migraine_headache = AilmentFactory(name='Migraine Headache', type=ailment_type_headache)\n ailment_tension_headache = AilmentFactory(name='Tension Headache', type=ailment_type_headache)\n ailment_type_sprain_or_bruise = AilmentTypeFactory(name='Sprain or Bruise')\n ailment_sprain = AilmentFactory(name='Sprain', type=ailment_type_sprain_or_bruise)\n\n # Texas: 2 with sprain and 2 with nothing\n for _ in range(2):\n employee = EmployeeFactory()\n employee.ailments.add(ailment_sprain)\n employee.bureau_states.add(self.texas)\n for _ in range(2):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.texas)\n\n # Kentucky: 1 with migraine headache, 1 with sprain, 1 with tension headache, and 1 with nothing\n for _ in range(1):\n employee = EmployeeFactory()\n employee.ailments.add(ailment_migraine_headache)\n employee.bureau_states.add(self.kentucky)\n for _ in range(1):\n employee = EmployeeFactory()\n employee.ailments.add(ailment_sprain)\n employee.bureau_states.add(self.kentucky)\n for _ in range(1):\n employee = EmployeeFactory()\n employee.ailments.add(ailment_tension_headache)\n employee.bureau_states.add(self.kentucky)\n for _ in range(1):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.kentucky)\n\n # Mississippi: 2 with migraine headache, 2 with tension headache\n for _ in range(2):\n employee = EmployeeFactory()\n employee.ailments.add(ailment_migraine_headache)\n employee.bureau_states.add(self.mississippi)\n for _ in range(2):\n employee = EmployeeFactory()\n employee.ailments.add(ailment_tension_headache)\n employee.bureau_states.add(self.mississippi)\n\n stats = get_state_comparison_stats(number=2)\n\n # With ailment types, ailments should be grouped together\n key = '% With Headache'\n expected_output = [('Mississippi', 100), ('Kentucky', 50)]\n top_states = self.get_state_stats_for_key(stats, key)\n self.assertListEqual(top_states, expected_output,\n f\"'{key}' should contain states with the top x % of employees with headache\")\n\n key = '% With Sprain or Bruise'\n expected_output = [('Texas', 50), ('Kentucky', 25)]\n top_states = self.get_state_stats_for_key(stats, key)\n self.assertListEqual(\n top_states, expected_output,\n f\"'{key}' should contain states with the top x % of employees with sprain or bruise\"\n )\n\n # Ailments should be treated individually\n key = '% With Migraine Headache'\n expected_output = [('Mississippi', 50), ('Kentucky', 25)]\n top_states = self.get_state_stats_for_key(stats, key)\n self.assertListEqual(\n top_states, expected_output,\n f\"'{key}' should contain states with the top x % of employees with migraine headache\"\n )\n\n key = '% With Tension Headache'\n expected_output = [('Mississippi', 50), ('Kentucky', 25)]\n top_states = self.get_state_stats_for_key(stats, key)\n self.assertListEqual(\n top_states, expected_output,\n f\"'{key}' should contain states with the top x % of employees with tension headache\"\n )\n\n # There should be no breakdown for sprain, because it's the only ailment of that type\n key = '% With Sprain'\n 
self.assertEqual(len([item for item in stats if key in item]), 0,\n \"There should be no breakdown for an ailment if it's the only one of its type\")", "def test_time_type_state_is_midmorning(day):\n\n assert day_time_info(day.hours_0).is_midmorning is False\n assert day_time_info(day.hours_1).is_midmorning is False\n assert day_time_info(day.hours_2).is_midmorning is False\n assert day_time_info(day.hours_3).is_midmorning is False\n assert day_time_info(day.hours_4).is_midmorning is False\n assert day_time_info(day.hours_5).is_midmorning is False\n assert day_time_info(day.hours_6).is_midmorning is False\n assert day_time_info(day.hours_7).is_midmorning is False\n assert day_time_info(day.hours_8).is_midmorning is False\n assert day_time_info(day.hours_9).is_midmorning is False\n assert day_time_info(day.hours_10).is_midmorning is True\n assert day_time_info(day.hours_11).is_midmorning is True\n assert day_time_info(day.hours_12).is_midmorning is False\n assert day_time_info(day.hours_13).is_midmorning is False\n assert day_time_info(day.hours_14).is_midmorning is False\n assert day_time_info(day.hours_15).is_midmorning is False\n assert day_time_info(day.hours_16).is_midmorning is False\n assert day_time_info(day.hours_17).is_midmorning is False\n assert day_time_info(day.hours_18).is_midmorning is False\n assert day_time_info(day.hours_19).is_midmorning is False\n assert day_time_info(day.hours_20).is_midmorning is False\n assert day_time_info(day.hours_21).is_midmorning is False\n assert day_time_info(day.hours_22).is_midmorning is False\n assert day_time_info(day.hours_23).is_midmorning is False", "def test_issue_tracked_times(self):\n pass", "def test_episode_data(self):\n self.assertEquals(\n self.t['lost']['firstaired'],\n '2004-09-22'\n )", "def test_duplicatekeyerror_state_embryo(self):\n self.pipeline_real.insert_uow = then_raise\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_EMBRYO,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). 
\\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))", "def test_update_team_state(self):\n pass", "def test_update_instances_schedule_state(self):\n pass", "def test_time_type_state_is_morning(day):\n\n assert day_time_info(day.hours_0).is_morning is False\n assert day_time_info(day.hours_1).is_morning is False\n assert day_time_info(day.hours_2).is_morning is False\n assert day_time_info(day.hours_3).is_morning is False\n assert day_time_info(day.hours_4).is_morning is False\n assert day_time_info(day.hours_5).is_morning is True\n assert day_time_info(day.hours_6).is_morning is True\n assert day_time_info(day.hours_7).is_morning is True\n assert day_time_info(day.hours_8).is_morning is True\n assert day_time_info(day.hours_9).is_morning is True\n assert day_time_info(day.hours_10).is_morning is False\n assert day_time_info(day.hours_11).is_morning is False\n assert day_time_info(day.hours_12).is_morning is False\n assert day_time_info(day.hours_13).is_morning is False\n assert day_time_info(day.hours_14).is_morning is False\n assert day_time_info(day.hours_15).is_morning is False\n assert day_time_info(day.hours_16).is_morning is False\n assert day_time_info(day.hours_17).is_morning is False\n assert day_time_info(day.hours_18).is_morning is False\n assert day_time_info(day.hours_19).is_morning is False\n assert day_time_info(day.hours_20).is_morning is False\n assert day_time_info(day.hours_21).is_morning is False\n assert day_time_info(day.hours_22).is_morning is False\n assert day_time_info(day.hours_23).is_morning is False", "def test_time(self):\n M = simulation.StateMonitor(self.G, 'v')\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, self.dt)))", "def test_time_type_state_is_night(day):\n\n assert day_time_info(day.hours_0).is_night is True\n assert day_time_info(day.hours_1).is_night is True\n assert day_time_info(day.hours_2).is_night is True\n assert day_time_info(day.hours_3).is_night is True\n assert day_time_info(day.hours_4).is_night is True\n assert day_time_info(day.hours_5).is_night is False\n assert day_time_info(day.hours_6).is_night is False\n assert day_time_info(day.hours_7).is_night is False\n assert day_time_info(day.hours_8).is_night is False\n assert day_time_info(day.hours_9).is_night is False\n assert day_time_info(day.hours_10).is_night is False\n assert day_time_info(day.hours_11).is_night is False\n assert day_time_info(day.hours_12).is_night is False\n assert day_time_info(day.hours_13).is_night is False\n assert day_time_info(day.hours_14).is_night is False\n assert day_time_info(day.hours_15).is_night is False\n assert day_time_info(day.hours_16).is_night is False\n assert day_time_info(day.hours_17).is_night is False\n assert day_time_info(day.hours_18).is_night is False\n assert day_time_info(day.hours_19).is_night is False\n assert day_time_info(day.hours_20).is_night is False\n assert day_time_info(day.hours_21).is_night is False\n assert day_time_info(day.hours_22).is_night is False\n assert day_time_info(day.hours_23).is_night is True", "def test_get_field_state_comparisons_tiny(self):\r\n comparison_groupings = get_field_state_comparisons(\r\n self.tiny_dist_matrix_header, self.tiny_dist_matrix,\r\n self.tiny_mapping_header, self.tiny_mapping, self.tiny_field,\r\n ['SampleFieldState1'])\r\n self.assertEqual(comparison_groupings, {})", "def test_past_meeting_details(self):\n pass", "def test_ensure_not_ts_pass(self):\n 
self.assertEqual(ensure_not_ts(self.jobset1), 'completed')", "def _assert_state(self, state_dict):\n instances = db.instance_get_all(self.context)\n self.assertEqual(len(instances), 1)\n\n if 'vm_state' in state_dict:\n self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])\n if 'task_state' in state_dict:\n self.assertEqual(state_dict['task_state'],\n instances[0]['task_state'])\n if 'power_state' in state_dict:\n self.assertEqual(state_dict['power_state'],\n instances[0]['power_state'])", "def test_state_after_failure(self):\n pass", "def assert_history(self, rows):\r\n self.assertEqual(self.parse_rows(rows), self.read_history())", "def test_ensure_ts_ts(self):\n self.assertEqual(ensure_ts(self.jobset2), 'imaginary')", "def test_time_field():", "def test_get_state_comparison_stats_born_there(self):\n\n key = '% Employees born there'\n\n place_kentucky = PlaceFactory(region=self.kentucky)\n place_texas = PlaceFactory(region=self.texas)\n\n # Mississippi: 0 employees born in Mississippi, 3 born elsewhere\n for _ in range(3):\n employee = EmployeeFactory(place_of_birth=PlaceFactory())\n employee.bureau_states.add(self.mississippi)\n\n # Texas: 2 employees born in Texas, 2 born elsewhere\n for _ in range(2):\n employee = EmployeeFactory(place_of_birth=place_texas)\n employee.bureau_states.add(self.texas)\n for _ in range(2):\n employee = EmployeeFactory(place_of_birth=PlaceFactory())\n employee.bureau_states.add(self.texas)\n\n # Kentucky: 3 employees born in Kentucky, 1 employee elsewhere, 3 unknown\n for _ in range(3):\n employee = EmployeeFactory(place_of_birth=place_kentucky)\n employee.bureau_states.add(self.kentucky)\n for _ in range(1):\n employee = EmployeeFactory(place_of_birth=PlaceFactory())\n employee.bureau_states.add(self.kentucky)\n for _ in range(3):\n employee = EmployeeFactory()\n employee.bureau_states.add(self.kentucky)\n\n expected_output = [('Kentucky', 75), ('Texas', 50)]\n\n stats = get_state_comparison_stats(number=2)\n top_states = self.get_state_stats_for_key(stats, key)\n\n self.assertListEqual(top_states, expected_output,\n f\"'{key}' should contain states with the top x % employees born there\")" ]
[ "0.6052409", "0.60238993", "0.60199136", "0.5990552", "0.59654564", "0.59051305", "0.58771414", "0.587141", "0.5824163", "0.57776195", "0.575167", "0.57320946", "0.57115203", "0.57089746", "0.57080287", "0.56987756", "0.5684115", "0.5668643", "0.564905", "0.56462383", "0.5624186", "0.5575393", "0.55537695", "0.5505268", "0.54985476", "0.5489427", "0.54696864", "0.5461627", "0.54490864", "0.5433699" ]
0.68013006
0
Method tests timetable records in the STATE_IN_PROGRESS state.
def test_future_timeperiod_state_in_progress(self):
    when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)
    uow_dao_mock = mock(UnitOfWorkDao)
    when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))
    self.pipeline_real.uow_dao = uow_dao_mock
    self.pipeline_real.insert_uow = then_raise
    pipeline = spy(self.pipeline_real)
    job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_FUTURE_TIMEPERIOD, PROCESS_SITE_HOURLY)
    pipeline.manage_pipeline_for_process(job_record.process_name, job_record)
    verify(self.time_table_mocked, times=0). \
        update_job_record(any(str), any(Job), any(UnitOfWork), any(str))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_preset_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=0).\\\n # _process_state_final_run(any(str), any(Job))", "def test_retry_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_raise\n self.pipeline_real.recover_from_duplicatekeyerror = override_recover_function\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=1).\\\n # _process_state_final_run(any(str), any(Job))", "def test_transfer_to_final_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=1).\\\n # _process_state_final_run(any(str), any(Job))", "def test_state_skipped(self):\n pipeline = spy(self.pipeline_real)\n job_record = get_job_record(job.STATE_SKIPPED,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). 
\\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n verify(self.time_table_mocked, times=0).get_tree(any(str))", "def test_update_single_row_if_status_is_in_progress(self):\n first = generate_mock_result(status='IN_PROGRESS', success=False)\n self.db.insert_single_result(first)\n current = self.db.get_result_by_primary_key(first.get('id'))\n self.assertEqual(current.status, 'IN_PROGRESS')\n second = generate_mock_result(status='SUCCESS', success=True)\n self.db.insert_single_result(second)\n current = self.db.get_result_by_primary_key(first.get('id'))\n self.assertEqual(current.status, 'SUCCESS')", "def _wait_for_table_status(self, expected='ACTIVE'):\n\n achieved_state = False\n while not achieved_state:\n table_description = self.client.describe_table(TableName=self.table_name)\n logging.debug('Waiting for DynamoDB table %s to become %s.',self.table_name,expected)\n current_status = table_description['Table']['TableStatus']\n achieved_state = current_status == expected\n sleep(1)", "def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')", "def test_state_processed(self):\n pipeline = spy(self.pipeline_real)\n job_record = get_job_record(job.STATE_PROCESSED,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n verify(self.time_table_mocked, times=0).get_tree(any(str))", "def test_retrieve_instances_schedule_state(self):\n pass", "def insync_and_state_check(self):\n self.step('verifying tables are properly synced on all endpoints')\n is_ok = True\n limit, count = 10, 0\n while count < limit:\n try:\n state_check, rc = self.probe('/cluster/pyql/table/state/select')\n assert rc == 200, f\"something wrong happened when checking state table {rc}\"\n for state in state_check['data']:\n if not state['in_sync'] == True or not state['state'] == 'loaded':\n print(f\"found state which was not in_sync=True & 'loaded {state}, retrying\")\n is_ok = False\n self.sync_job_check()\n break\n if is_ok:\n break\n count+=1\n except Exception as e:\n print(f\"something wrong happened when checking state table\")\n break", "def _compute_queue_state(self):\n for record in self:\n if record.queue_line_total_records == record.queue_line_done_records + record.queue_line_cancel_records:\n record.state = \"completed\"\n elif record.queue_line_draft_records == record.queue_line_total_records:\n record.state = \"draft\"\n elif record.queue_line_total_records == record.queue_line_fail_records:\n record.state = \"failed\"\n else:\n record.state = \"partially_completed\"", "def test_state(self, api, state):\n stream = AdsInsights(api=api, start_date=datetime(2010, 1, 1), end_date=datetime(2011, 1, 1), insights_lookback_window=28)\n\n assert stream.state == {}\n\n stream.state = state\n actual_state = stream.state\n actual_state[\"slices\"] = sorted(actual_state.get(\"slices\", []))\n state[\"slices\"] = sorted(state.get(\"slices\", []))\n state[\"time_increment\"] = 1\n\n assert actual_state == state", "def test_get_unsuccessful_ta():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n list_failed, list_else = ta.get_unsuccessful_ta('ta_status_bool')\n\n assert list_else[0] == ta.source.data['ta_status_bool'][0]\n assert np.isnan(list_failed[0])", "def 
test_unfinished_activity(self):\n with open(os.path.dirname(os.path.abspath(__file__)) + \"/time_unfinished.log\", 'r') as f:\n results_list = analysis.parse_time_log(f)\n\n for x in range(len(expected_activity_list)):\n assert results_list[x] == expected_activity_list[x]\n\n # the last event is ongoing so I can't test an exact end time\n expected_ongoing_dt_begin = datetime(2016, 8, 25, 18, 50, 53)\n assert results_list[-1].dt_begin == expected_ongoing_dt_begin \\\n and results_list[-1].activity_type == 'p' \\\n and datetime.now() - results_list[-1].dt_end < timedelta(minutes=1)", "def test_previous_state(self):\n self.report_start(self.whoami())\n\n Device.objects.get(hostname='panda01').state_transition_to(Device.OFFLINE)\n\n # set a series of previous transitions for panda02\n Device.objects.get(hostname='panda02').state_transition_to(Device.OFFLINE)\n Device.objects.get(hostname='panda02').state_transition_to(Device.IDLE)\n Device.objects.get(hostname='panda02').state_transition_to(Device.RESERVED)\n Device.objects.get(hostname='panda02').state_transition_to(Device.RUNNING)\n Device.objects.get(hostname='panda02').state_transition_to(Device.IDLE)\n Device.objects.get(hostname='panda02').state_transition_to(Device.OFFLINE)\n Device.objects.get(hostname='panda02').state_transition_to(Device.IDLE)\n\n Device.objects.get(hostname='panda03').state_transition_to(Device.RUNNING)\n Device.objects.get(hostname='panda04').state_transition_to(Device.RESERVED)\n Device.objects.get(hostname='panda05').state_transition_to(Device.RETIRED)\n Device.objects.get(hostname='panda06').state_transition_to(Device.OFFLINING)\n\n self.panda_type.health_check_job = self.factory.make_job_json(health_check='true')\n self.panda_type.save()\n\n jobs = self.scheduler_tick()\n\n self.assertEqual(len(jobs), 1)\n job = TestJob.objects.get(id=jobs[0].id)\n job_id = job.id\n self.assertEqual(job.status, TestJob.RUNNING)\n\n for job in jobs:\n job_obj = TestJob.objects.get(pk=job.id) # reload\n job_obj.status = TestJob.COMPLETE\n self.job_finished(job_obj)\n\n self.assertEqual(len(jobs), 1)\n job = TestJob.objects.get(id=job_id)\n self.assertEqual(job.status, TestJob.COMPLETE)\n\n self.assertEqual(\n Device.objects.get(hostname='panda02').status,\n Device.IDLE\n )\n\n self.assertEqual(\n Device.objects.get(hostname='panda02').health_status,\n Device.HEALTH_PASS\n )\n\n self.assertEqual(\n Device.objects.get(hostname='panda01').health_status,\n Device.HEALTH_UNKNOWN\n )\n\n panda01 = Device.objects.get(hostname='panda01')\n panda01.status = Device.IDLE\n panda01.save()\n\n jobs = self.scheduler_tick()\n\n self.assertEqual(\n Device.objects.get(hostname='panda01').health_status,\n Device.HEALTH_UNKNOWN\n )\n\n self.assertEqual(Device.objects.get(hostname='panda01').status, Device.RUNNING)\n\n for job in jobs:\n job_obj = TestJob.objects.get(pk=job.id) # reload\n job_obj.status = TestJob.COMPLETE\n self.job_finished(job_obj)\n\n self.assertEqual(Device.objects.get(hostname='panda01').status, Device.IDLE)\n self.assertIsNone(Device.objects.get(hostname='panda01').current_job)\n self.assertEqual(\n Device.objects.get(hostname='panda01').health_status,\n Device.HEALTH_PASS\n )\n self.scheduler_tick()\n self.assertEqual(Device.objects.get(hostname='panda01').status, Device.IDLE)\n self.assertIsNone(Device.objects.get(hostname='panda01').current_job)\n self.assertEqual(\n Device.objects.get(hostname='panda01').health_status,\n Device.HEALTH_PASS\n )\n\n self.cleanup(self.whoami())", "def test_issue_tracked_times(self):\n pass", 
"def test_update_task_states(self):\r\n changed = self.combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n current_task = self.combinedoe.current_task\r\n current_task.change_state(CombinedOpenEndedV1Module.DONE)\r\n changed = self.combinedoe.update_task_states()\r\n\r\n self.assertTrue(changed)", "def test_update_instances_schedule_state(self):\n pass", "def test_state_after_failure(self):\n pass", "def test_get_refresh_job_status(self):\n pass", "def load_status_table():", "def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()", "def test_mark_incompleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=True, title=\"Test TODO2\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is True\n\n self.client.get(reverse('todo_mark_incompleted', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is False", "def _check_completed(self):\n current_rung_df = self.sieve_board.loc[\n self.sieve_board['status'].isin(\n [StatusType.WAITTING, StatusType.RUNNING])\n ]\n if current_rung_df.empty:\n return True\n else:\n return False", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def test_initial_timestamp_states(\n self, get_pipe_manager, retrospective, which_checkpoint_state\n ):\n\n # Create the manager and make the timestamp call.\n pm = get_pipe_manager(name=\"InitialTimestampState\")\n stage_name = \"quality_control\"\n pm.timestamp(checkpoint=stage_name, finished=retrospective)\n\n # Form expectations.\n if retrospective:\n prev_exp = stage_name\n curr_exp = None\n else:\n prev_exp = None\n curr_exp = stage_name\n\n # Make the assertion on the specified checkpoint state.\n if which_checkpoint_state == \"curr_checkpoint\":\n assert curr_exp == getattr(pm, \"curr_checkpoint\")\n else:\n assert prev_exp == getattr(pm, \"prev_checkpoint\")", "def test_time_successful(self):\n\n url = 
'/%s/jobs/%i/input_files/?started=%s&ended=%s&time_field=%s' % (self.api, self.job.id,\n '2016-01-10T00:00:00Z',\n '2016-01-13T00:00:00Z',\n 'source')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n results = result['results']\n self.assertEqual(len(results), 2)\n for result in results:\n self.assertTrue(result['id'] in [self.file3.id, self.file4.id])", "def test_pending_job(self):\n\n pending_job = json.loads(TREEHERDER_JOB % (\"unknown\", \"pending\"))\n self.assertEquals(self.query_api.get_job_status(pending_job), PENDING)", "def check_states(results):\n assert results[\"states\"][\"device0\"] == {\"counter\": 10, \"rts\": 0}\n assert results[\"states\"][\"device1\"] == {\"counter\": 10, \"rts\": 0}", "def test_running_job(self):\n\n running_job = json.loads(TREEHERDER_JOB % (\"unknown\", \"running\"))\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)" ]
[ "0.71193224", "0.6860334", "0.66941416", "0.628547", "0.6147146", "0.6099557", "0.60727316", "0.59782106", "0.59703326", "0.59561163", "0.58597606", "0.5858871", "0.5750034", "0.57215893", "0.57205486", "0.5681524", "0.5675636", "0.56590813", "0.5616347", "0.56143284", "0.558176", "0.5567624", "0.55467683", "0.5534509", "0.5526768", "0.5515089", "0.550762", "0.55065536", "0.55008715", "0.54807657" ]
0.7257185
0
Method tests timetable records in the STATE_IN_PROGRESS state.
def test_preset_timeperiod_state_in_progress(self):
    when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)
    uow_dao_mock = mock(UnitOfWorkDao)
    when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))
    self.pipeline_real.uow_dao = uow_dao_mock
    self.pipeline_real.insert_uow = then_return_uow
    pipeline = spy(self.pipeline_real)
    job_record = get_job_record(job.STATE_IN_PROGRESS,
                                TEST_PRESET_TIMEPERIOD,
                                PROCESS_SITE_HOURLY)
    pipeline.manage_pipeline_for_process(job_record.process_name, job_record)
    verify(self.time_table_mocked, times=0). \
        update_job_record(any(str), any(Job), any(UnitOfWork), any(str))
    # verify(pipeline, times=1).\
    #     _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))
    # verify(pipeline, times=0).\
    #     _process_state_final_run(any(str), any(Job))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_future_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_raise\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_FUTURE_TIMEPERIOD, PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))", "def test_retry_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_raise\n self.pipeline_real.recover_from_duplicatekeyerror = override_recover_function\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=1).\\\n # _process_state_final_run(any(str), any(Job))", "def test_transfer_to_final_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=1).\\\n # _process_state_final_run(any(str), any(Job))", "def test_state_skipped(self):\n pipeline = spy(self.pipeline_real)\n job_record = get_job_record(job.STATE_SKIPPED,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). 
\\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n verify(self.time_table_mocked, times=0).get_tree(any(str))", "def test_update_single_row_if_status_is_in_progress(self):\n first = generate_mock_result(status='IN_PROGRESS', success=False)\n self.db.insert_single_result(first)\n current = self.db.get_result_by_primary_key(first.get('id'))\n self.assertEqual(current.status, 'IN_PROGRESS')\n second = generate_mock_result(status='SUCCESS', success=True)\n self.db.insert_single_result(second)\n current = self.db.get_result_by_primary_key(first.get('id'))\n self.assertEqual(current.status, 'SUCCESS')", "def _wait_for_table_status(self, expected='ACTIVE'):\n\n achieved_state = False\n while not achieved_state:\n table_description = self.client.describe_table(TableName=self.table_name)\n logging.debug('Waiting for DynamoDB table %s to become %s.',self.table_name,expected)\n current_status = table_description['Table']['TableStatus']\n achieved_state = current_status == expected\n sleep(1)", "def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')", "def test_state_processed(self):\n pipeline = spy(self.pipeline_real)\n job_record = get_job_record(job.STATE_PROCESSED,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n verify(self.time_table_mocked, times=0).get_tree(any(str))", "def test_retrieve_instances_schedule_state(self):\n pass", "def insync_and_state_check(self):\n self.step('verifying tables are properly synced on all endpoints')\n is_ok = True\n limit, count = 10, 0\n while count < limit:\n try:\n state_check, rc = self.probe('/cluster/pyql/table/state/select')\n assert rc == 200, f\"something wrong happened when checking state table {rc}\"\n for state in state_check['data']:\n if not state['in_sync'] == True or not state['state'] == 'loaded':\n print(f\"found state which was not in_sync=True & 'loaded {state}, retrying\")\n is_ok = False\n self.sync_job_check()\n break\n if is_ok:\n break\n count+=1\n except Exception as e:\n print(f\"something wrong happened when checking state table\")\n break", "def _compute_queue_state(self):\n for record in self:\n if record.queue_line_total_records == record.queue_line_done_records + record.queue_line_cancel_records:\n record.state = \"completed\"\n elif record.queue_line_draft_records == record.queue_line_total_records:\n record.state = \"draft\"\n elif record.queue_line_total_records == record.queue_line_fail_records:\n record.state = \"failed\"\n else:\n record.state = \"partially_completed\"", "def test_state(self, api, state):\n stream = AdsInsights(api=api, start_date=datetime(2010, 1, 1), end_date=datetime(2011, 1, 1), insights_lookback_window=28)\n\n assert stream.state == {}\n\n stream.state = state\n actual_state = stream.state\n actual_state[\"slices\"] = sorted(actual_state.get(\"slices\", []))\n state[\"slices\"] = sorted(state.get(\"slices\", []))\n state[\"time_increment\"] = 1\n\n assert actual_state == state", "def test_get_unsuccessful_ta():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n list_failed, list_else = ta.get_unsuccessful_ta('ta_status_bool')\n\n assert list_else[0] == ta.source.data['ta_status_bool'][0]\n assert np.isnan(list_failed[0])", "def test_previous_state(self):\n 
self.report_start(self.whoami())\n\n Device.objects.get(hostname='panda01').state_transition_to(Device.OFFLINE)\n\n # set a series of previous transitions for panda02\n Device.objects.get(hostname='panda02').state_transition_to(Device.OFFLINE)\n Device.objects.get(hostname='panda02').state_transition_to(Device.IDLE)\n Device.objects.get(hostname='panda02').state_transition_to(Device.RESERVED)\n Device.objects.get(hostname='panda02').state_transition_to(Device.RUNNING)\n Device.objects.get(hostname='panda02').state_transition_to(Device.IDLE)\n Device.objects.get(hostname='panda02').state_transition_to(Device.OFFLINE)\n Device.objects.get(hostname='panda02').state_transition_to(Device.IDLE)\n\n Device.objects.get(hostname='panda03').state_transition_to(Device.RUNNING)\n Device.objects.get(hostname='panda04').state_transition_to(Device.RESERVED)\n Device.objects.get(hostname='panda05').state_transition_to(Device.RETIRED)\n Device.objects.get(hostname='panda06').state_transition_to(Device.OFFLINING)\n\n self.panda_type.health_check_job = self.factory.make_job_json(health_check='true')\n self.panda_type.save()\n\n jobs = self.scheduler_tick()\n\n self.assertEqual(len(jobs), 1)\n job = TestJob.objects.get(id=jobs[0].id)\n job_id = job.id\n self.assertEqual(job.status, TestJob.RUNNING)\n\n for job in jobs:\n job_obj = TestJob.objects.get(pk=job.id) # reload\n job_obj.status = TestJob.COMPLETE\n self.job_finished(job_obj)\n\n self.assertEqual(len(jobs), 1)\n job = TestJob.objects.get(id=job_id)\n self.assertEqual(job.status, TestJob.COMPLETE)\n\n self.assertEqual(\n Device.objects.get(hostname='panda02').status,\n Device.IDLE\n )\n\n self.assertEqual(\n Device.objects.get(hostname='panda02').health_status,\n Device.HEALTH_PASS\n )\n\n self.assertEqual(\n Device.objects.get(hostname='panda01').health_status,\n Device.HEALTH_UNKNOWN\n )\n\n panda01 = Device.objects.get(hostname='panda01')\n panda01.status = Device.IDLE\n panda01.save()\n\n jobs = self.scheduler_tick()\n\n self.assertEqual(\n Device.objects.get(hostname='panda01').health_status,\n Device.HEALTH_UNKNOWN\n )\n\n self.assertEqual(Device.objects.get(hostname='panda01').status, Device.RUNNING)\n\n for job in jobs:\n job_obj = TestJob.objects.get(pk=job.id) # reload\n job_obj.status = TestJob.COMPLETE\n self.job_finished(job_obj)\n\n self.assertEqual(Device.objects.get(hostname='panda01').status, Device.IDLE)\n self.assertIsNone(Device.objects.get(hostname='panda01').current_job)\n self.assertEqual(\n Device.objects.get(hostname='panda01').health_status,\n Device.HEALTH_PASS\n )\n self.scheduler_tick()\n self.assertEqual(Device.objects.get(hostname='panda01').status, Device.IDLE)\n self.assertIsNone(Device.objects.get(hostname='panda01').current_job)\n self.assertEqual(\n Device.objects.get(hostname='panda01').health_status,\n Device.HEALTH_PASS\n )\n\n self.cleanup(self.whoami())", "def test_unfinished_activity(self):\n with open(os.path.dirname(os.path.abspath(__file__)) + \"/time_unfinished.log\", 'r') as f:\n results_list = analysis.parse_time_log(f)\n\n for x in range(len(expected_activity_list)):\n assert results_list[x] == expected_activity_list[x]\n\n # the last event is ongoing so I can't test an exact end time\n expected_ongoing_dt_begin = datetime(2016, 8, 25, 18, 50, 53)\n assert results_list[-1].dt_begin == expected_ongoing_dt_begin \\\n and results_list[-1].activity_type == 'p' \\\n and datetime.now() - results_list[-1].dt_end < timedelta(minutes=1)", "def test_issue_tracked_times(self):\n pass", "def 
test_update_task_states(self):\r\n changed = self.combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n current_task = self.combinedoe.current_task\r\n current_task.change_state(CombinedOpenEndedV1Module.DONE)\r\n changed = self.combinedoe.update_task_states()\r\n\r\n self.assertTrue(changed)", "def test_update_instances_schedule_state(self):\n pass", "def test_state_after_failure(self):\n pass", "def test_get_refresh_job_status(self):\n pass", "def load_status_table():", "def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()", "def test_mark_incompleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=True, title=\"Test TODO2\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is True\n\n self.client.get(reverse('todo_mark_incompleted', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is False", "def _check_completed(self):\n current_rung_df = self.sieve_board.loc[\n self.sieve_board['status'].isin(\n [StatusType.WAITTING, StatusType.RUNNING])\n ]\n if current_rung_df.empty:\n return True\n else:\n return False", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def test_initial_timestamp_states(\n self, get_pipe_manager, retrospective, which_checkpoint_state\n ):\n\n # Create the manager and make the timestamp call.\n pm = get_pipe_manager(name=\"InitialTimestampState\")\n stage_name = \"quality_control\"\n pm.timestamp(checkpoint=stage_name, finished=retrospective)\n\n # Form expectations.\n if retrospective:\n prev_exp = stage_name\n curr_exp = None\n else:\n prev_exp = None\n curr_exp = stage_name\n\n # Make the assertion on the specified checkpoint state.\n if which_checkpoint_state == \"curr_checkpoint\":\n assert curr_exp == getattr(pm, \"curr_checkpoint\")\n else:\n assert prev_exp == getattr(pm, \"prev_checkpoint\")", "def test_pending_job(self):\n\n pending_job = 
json.loads(TREEHERDER_JOB % (\"unknown\", \"pending\"))\n self.assertEquals(self.query_api.get_job_status(pending_job), PENDING)", "def test_time_successful(self):\n\n url = '/%s/jobs/%i/input_files/?started=%s&ended=%s&time_field=%s' % (self.api, self.job.id,\n '2016-01-10T00:00:00Z',\n '2016-01-13T00:00:00Z',\n 'source')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n results = result['results']\n self.assertEqual(len(results), 2)\n for result in results:\n self.assertTrue(result['id'] in [self.file3.id, self.file4.id])", "def check_states(results):\n assert results[\"states\"][\"device0\"] == {\"counter\": 10, \"rts\": 0}\n assert results[\"states\"][\"device1\"] == {\"counter\": 10, \"rts\": 0}", "def test_running_job(self):\n\n running_job = json.loads(TREEHERDER_JOB % (\"unknown\", \"running\"))\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)" ]
[ "0.7257716", "0.6861403", "0.6694048", "0.6286829", "0.61476827", "0.6099142", "0.6072867", "0.59780765", "0.5971016", "0.595613", "0.5860175", "0.58586013", "0.5750365", "0.5722227", "0.57212484", "0.5681915", "0.56768733", "0.56603575", "0.5617645", "0.56150514", "0.5581628", "0.5568725", "0.5546942", "0.5533718", "0.55280554", "0.55153114", "0.55074936", "0.5506668", "0.5500458", "0.54812" ]
0.71204054
1
method tests timetable records in STATE_FINAL_RUN state
def test_processed_state_final_run(self):
    uow_dao_mock = mock(UnitOfWorkDao)
    when(uow_dao_mock).get_one(any()).thenReturn(
        create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))
    self.pipeline_real.uow_dao = uow_dao_mock

    pipeline = spy(self.pipeline_real)

    job_record = get_job_record(job.STATE_FINAL_RUN,
                                TEST_PRESET_TIMEPERIOD,
                                PROCESS_SITE_HOURLY)

    pipeline.manage_pipeline_for_process(job_record.process_name, job_record)
    verify(self.time_table_mocked, times=1). \
        update_job_record(any(str), any(Job), any(UnitOfWork), any(str))
    verify(self.time_table_mocked, times=1).get_tree(any(str))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_transfer_to_final_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=1).\\\n # _process_state_final_run(any(str), any(Job))", "def IsFinal(self):\n return self.state in FINAL_TEST_RUN_STATES", "def test_future_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_raise\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_FUTURE_TIMEPERIOD, PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))", "def test_preset_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=0).\\\n # _process_state_final_run(any(str), any(Job))", "def test_run_ended(self):", "def test_cancelled_state_final_run(self):\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_CANCELED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n pipeline = spy(self.pipeline_real)\n job_record = get_job_record(job.STATE_FINAL_RUN,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). 
\\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n verify(self.time_table_mocked, times=0).get_tree(any(str))", "def test_state_after_failure(self):\n pass", "def test_unfinished_activity(self):\n with open(os.path.dirname(os.path.abspath(__file__)) + \"/time_unfinished.log\", 'r') as f:\n results_list = analysis.parse_time_log(f)\n\n for x in range(len(expected_activity_list)):\n assert results_list[x] == expected_activity_list[x]\n\n # the last event is ongoing so I can't test an exact end time\n expected_ongoing_dt_begin = datetime(2016, 8, 25, 18, 50, 53)\n assert results_list[-1].dt_begin == expected_ongoing_dt_begin \\\n and results_list[-1].activity_type == 'p' \\\n and datetime.now() - results_list[-1].dt_end < timedelta(minutes=1)", "def test_run_now(curent_time,state):\n date = datetime(2020,5,5,12,0)\n duration_in_minutes = 65\n run = Run(date, duration_in_minutes/60)\n\n assert run.run_now(curent_time) == state", "def finished_tests(self):\n self.testing = 0", "def test_runner_with_db(dataset, time_start, time_diff):\n\n session = dataset\n\n end_date = datetime.datetime(2020, 5, 17, 13, 0, 5)\n replay_rate = 1.0 \n\n db_connector_test = DataBaseConnector(session=session, \n table_name='timeseries_dataset', \n time_column='timestamp', \n start_date=time_start,\n end_date=end_date)\n\n runner = CentralRunner(db_connection=db_connector_test, \n output_system='mock_output_systerm', \n start_time=time_start, \n end_time=end_date,\n replay_rate=replay_rate )\n\n\n results_test = [\n {'timestamp': datetime.datetime(2021, 1, 1, 10, 1, 0), 'text': 'bob', 'value': 10.0},\n {'timestamp': datetime.datetime(2021, 1, 1, 10, 1, 1), 'text': 'cat', 'value':-10.0},\n {'timestamp': datetime.datetime(2021, 1, 1, 10, 1, 1), 'text': 'eat', 'value': 12.1}\n ]\n \n # test that the trigger_release is working right\n # expect 1\n start = time.perf_counter()\n \n code_start = datetime.datetime.now()\n\n # we need to retink the way that we trigger this....\n runner._trigger_release(result_set=results_test, code_start=code_start, replay_start_time=time_start, \n batch=(datetime.datetime(2021, 1, 1, 10, 1, 0), datetime.datetime(2021, 1, 1, 10, 1, 1)), \n replay_rate=replay_rate)\n \n end = time.perf_counter()\n\n code_time = end - start\n assert int(code_time) == time_diff", "def test_issue_tracked_times(self):\n pass", "def test_update_instances_schedule_state(self):\n pass", "def test_retrieve_instances_schedule_state(self):\n pass", "def test_retry_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_raise\n self.pipeline_real.recover_from_duplicatekeyerror = override_recover_function\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). 
\\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=1).\\\n # _process_state_final_run(any(str), any(Job))", "def test_run_is_next_run(curent_time, state):\n date = datetime(2020,5,5,12,0)\n duration_in_minutes = 65\n run = Run(date, duration_in_minutes/60)\n\n assert run.is_next_run(curent_time) == state", "def test_state_processed(self):\n pipeline = spy(self.pipeline_real)\n job_record = get_job_record(job.STATE_PROCESSED,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n verify(self.time_table_mocked, times=0).get_tree(any(str))", "def test_clean_run(self):\n Historical_ROAs_Parsed_Table(clear=True)\n with Historical_ROAs_Table(clear=True) as t:\n Historical_ROAs_Parser().run()\n assert t.get_count() > 2000000", "def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')", "def test_is_finished(self):\n experiment = Experiment(TasksMock())\n self.assertEquals(False, experiment.is_finished())\n for _ in range(0, 17):\n experiment.press_b_down(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_down(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(True, experiment.is_finished())", "def test_output(self):\n # reproducible arbitrariness\n np.random.seed(123423)\n\n N = 20\n tmax = 30.0\n dt = 1.0\n\n n_steps = int_r(tmax/dt)\n\n table = np.random.randn(N, n_steps)\n G = TableLayer(table)\n\n M = simulation.StateMonitor(G, 'out')\n\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(tmax)\n\n self.assertLess(np.max(np.abs(M.out - table)), 1e-6)", "def test_time(self):\n M = simulation.StateMonitor(self.G, 'v')\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, self.dt)))", "def test_update_state4(self):\n pass", "def test_run(self):\n class MockProvider(BaseCoverageProvider):\n SERVICE_NAME = \"I do nothing\"\n was_run = False\n\n def run_once_and_update_timestamp(self):\n \"\"\"Set a variable.\"\"\"\n self.was_run = True\n return None\n\n provider = MockProvider(self._db)\n result = provider.run()\n\n # run_once_and_update_timestamp() was called.\n assert True == provider.was_run\n\n # run() returned a CoverageProviderProgress with basic\n # timing information, since run_once_and_update_timestamp()\n # didn't provide anything.\n assert isinstance(result, CoverageProviderProgress)\n now = utc_now()\n assert result.start < result.finish\n for time in (result.start, result.finish):\n assert (now - time).total_seconds() < 5", "def test_completed():\n assert complete == 1\n assert errorflag == 0", "def test_update_state3(self):\n pass", "def test_update_state(self):\n pass", "def test_log_last_completed_datetime(self):\n initial_count = CostUsageReportStatus.objects.count()\n saver = ReportStatsDBAccessor(\"myreport\", self.manifest_id)\n saver.log_last_completed_datetime()\n self.assertIsNotNone(saver.get_last_completed_datetime())\n saver.delete()\n self.assertEqual(CostUsageReportStatus.objects.count(), initial_count)", "def 
test_run_started(self):", "def test_ensure_ts_ts(self):\n self.assertEqual(ensure_ts(self.jobset2), 'imaginary')" ]
[ "0.66075", "0.6143854", "0.61161435", "0.61069274", "0.6051925", "0.59604484", "0.58838767", "0.58052", "0.5779052", "0.5769028", "0.57552785", "0.5731349", "0.5673748", "0.56650174", "0.56536967", "0.5641838", "0.56204814", "0.5584373", "0.5582211", "0.55599564", "0.55164605", "0.5514844", "0.5505435", "0.5491299", "0.5471248", "0.5445622", "0.5426274", "0.54235333", "0.5415118", "0.53671294" ]
0.65768546
1
method tests timetable records in STATE_SKIPPED state
def test_state_skipped(self):
    pipeline = spy(self.pipeline_real)
    job_record = get_job_record(job.STATE_SKIPPED,
                                TEST_PRESET_TIMEPERIOD,
                                PROCESS_SITE_HOURLY)

    pipeline.manage_pipeline_for_process(job_record.process_name, job_record)
    verify(self.time_table_mocked, times=0). \
        update_job_record(any(str), any(Job), any(UnitOfWork), any(str))
    verify(self.time_table_mocked, times=0).get_tree(any(str))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_skip(self, d_stmt, table): \n pass", "def skip_dti_tests():\n return True", "def IsSkipped(self):\n state = self.GetState()\n return state.status == TestState.SKIPPED", "def test_table_false_positives(self):\n pass", "def test_csv_skipped_status_report(self):\n skipped_rows = [row for row in csv_reader\n if row['Status'] == STATUS_SKIPPED and\n row['AVI Object'] == '']\n self.assertTrue(len(skipped_rows) == 0)", "def test_get_unsuccessful_ta():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n list_failed, list_else = ta.get_unsuccessful_ta('ta_status_bool')\n\n assert list_else[0] == ta.source.data['ta_status_bool'][0]\n assert np.isnan(list_failed[0])", "def test_no_timesteps_property(self):\n expected_values = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n test_rec = rt.Recording(\n np.zeros(\n [\n expected_values['no_channels'],\n expected_values['no_timesteps'],\n expected_values['no_sweeps'],\n ]\n ),\n dt=0.1,\n )\n self.assertEqual(\n test_rec.no_timesteps,\n expected_values['no_timesteps'],\n 'Expected {} for `no_timesteps` property; got {} instead.'.format(\n expected_values['no_timesteps'], test_rec.no_timesteps\n ),\n )", "def test_time_type_state_is_noon(day):\n\n assert day_time_info(day.hours_0).is_noon is False\n assert day_time_info(day.hours_1).is_noon is False\n assert day_time_info(day.hours_2).is_noon is False\n assert day_time_info(day.hours_3).is_noon is False\n assert day_time_info(day.hours_4).is_noon is False\n assert day_time_info(day.hours_5).is_noon is False\n assert day_time_info(day.hours_6).is_noon is False\n assert day_time_info(day.hours_7).is_noon is False\n assert day_time_info(day.hours_8).is_noon is False\n assert day_time_info(day.hours_9).is_noon is False\n assert day_time_info(day.hours_10).is_noon is False\n assert day_time_info(day.hours_11).is_noon is False\n assert day_time_info(day.hours_12).is_noon is True\n assert day_time_info(day.hours_13).is_noon is False\n assert day_time_info(day.hours_14).is_noon is False\n assert day_time_info(day.hours_15).is_noon is False\n assert day_time_info(day.hours_16).is_noon is False\n assert day_time_info(day.hours_17).is_noon is False\n assert day_time_info(day.hours_18).is_noon is False\n assert day_time_info(day.hours_19).is_noon is False\n assert day_time_info(day.hours_20).is_noon is False\n assert day_time_info(day.hours_21).is_noon is False\n assert day_time_info(day.hours_22).is_noon is False\n assert day_time_info(day.hours_23).is_noon is False", "def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')", "def tick_skipped(self):\n pass", "def test_noTimedEventsInsertion(self):\n self.test_noTimedEventsExtraction()\n self._testInsertion()", "def test_non_batch(self):\r\n uid = uuid4()\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n TestTimestampModel.get(id=uid).should.be.ok\r\n\r\n tmp.timestamp(timedelta(seconds=5)).delete()\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n TestTimestampModel.get(id=uid)\r\n\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n TestTimestampModel.get(id=uid)\r\n\r\n # calling .timestamp sets the TS on the model\r\n tmp.timestamp(timedelta(seconds=5))\r\n tmp._timestamp.should.be.ok\r\n\r\n # calling save clears the set timestamp\r\n tmp.save()\r\n tmp._timestamp.shouldnt.be.ok\r\n\r\n 
tmp.timestamp(timedelta(seconds=5))\r\n tmp.update()\r\n tmp._timestamp.shouldnt.be.ok", "def test_queryset_total_time_no_records(db):\n assert models.TimeRecord.objects.total_time() == datetime.timedelta(0)", "def test_ensure_not_ts_novib(self):\n self.assertEqual(ensure_not_ts(self.jobset2), 'error')", "def test_preset_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=0).\\\n # _process_state_final_run(any(str), any(Job))", "def test_ensure_not_ts_ts(self):\n self.assertEqual(ensure_not_ts(self.jobset3), 'error')", "def test_is_approved_no_approval(time_record_factory):\n record = time_record_factory()\n\n assert not record.is_approved", "def skipped(self):\n return self._skipped", "def test_no_spike_after_table(self):\n n = 5\n dt = 1.0\n t_max = 2*dt\n # make sure we have spikes at the end\n table = np.ones((1, n))\n\n G = TableSpikers(n)\n G.spike_table = table\n\n sim = simulation.Simulation(G, dt=dt)\n sim.run(t_max)\n \n self.assertFalse(np.any(G.spike))", "def test_skip_list_run_skip(self):\n mock_sqr = SequenceRun()\n mock_sqr.instrument_run_id = TestConstant.instrument_run_id.value\n\n mock_workflow = Workflow()\n mock_workflow.wfr_id = f\"wfr.{_rand(32)}\"\n mock_workflow.type_name = WorkflowType.BCL_CONVERT.value\n mock_workflow.end_status = WorkflowStatus.SUCCEEDED.value\n mock_workflow.sequence_run = mock_sqr\n mock_workflow.output = \"\"\n\n when(fastq_update_step).perform(...).thenReturn(\"FASTQ_UPDATE_STEP\")\n when(google_lims_update_step).perform(...).thenReturn('GOOGLE_LIMS_UPDATE_STEP')\n when(dragen_wgs_qc_step).perform(...).thenReturn('DRAGEN_WGS_QC_STEP')\n when(dragen_tso_ctdna_step).perform(...).thenReturn('DRAGEN_TSO_CTDNA_STEP')\n when(dragen_wts_step).perform(...).thenReturn('DRAGEN_WTS_STEP')\n\n run_id = TestConstant.instrument_run_id.value\n skiplist = {\n 'global': [],\n 'by_run': {\n run_id: [\n \"DRAGEN_WGS_QC_STEP\"\n ]\n }\n }\n\n results = orchestrator.next_step(mock_workflow, skiplist, None)\n logger.info(results)\n\n self.assertFalse('DRAGEN_WGS_QC_STEP' in results)\n self.assertTrue('DRAGEN_TSO_CTDNA_STEP' in results)\n self.assertTrue('DRAGEN_WTS_STEP' in results)\n\n skiplist = {\n 'global': [\"DRAGEN_WGS_QC_STEP\"],\n 'by_run': {\n run_id: [\n \"DRAGEN_TSO_CTDNA_STEP\",\n \"DRAGEN_WTS_STEP\"\n ]\n }\n }\n\n results = orchestrator.next_step(mock_workflow, skiplist, None)\n logger.info(results)\n\n self.assertFalse('DRAGEN_WGS_QC_STEP' in results)\n self.assertFalse('DRAGEN_TSO_CTDNA_STEP' in results)\n self.assertFalse('DRAGEN_WTS_STEP' in results)", "def test_skip_list_no_skip(self):\n mock_sqr = SequenceRun()\n mock_sqr.instrument_run_id = TestConstant.instrument_run_id.value\n\n mock_workflow = Workflow()\n mock_workflow.wfr_id = f\"wfr.{_rand(32)}\"\n mock_workflow.type_name = 
WorkflowType.BCL_CONVERT.value\n mock_workflow.end_status = WorkflowStatus.SUCCEEDED.value\n mock_workflow.sequence_run = mock_sqr\n mock_workflow.output = \"\"\n\n when(fastq_update_step).perform(...).thenReturn(\"FASTQ_UPDATE_STEP\")\n when(google_lims_update_step).perform(...).thenReturn('GOOGLE_LIMS_UPDATE_STEP')\n when(dragen_wgs_qc_step).perform(...).thenReturn('DRAGEN_WGS_QC_STEP')\n when(dragen_tso_ctdna_step).perform(...).thenReturn('DRAGEN_TSO_CTDNA_STEP')\n when(dragen_wts_step).perform(...).thenReturn('DRAGEN_WTS_STEP')\n\n skiplist = {\n 'global': [],\n 'by_run': {}\n }\n\n results = orchestrator.next_step(mock_workflow, skiplist, None)\n logger.info(results)\n\n self.assertTrue('DRAGEN_WGS_QC_STEP' in results)\n self.assertTrue('DRAGEN_TSO_CTDNA_STEP' in results)\n self.assertTrue('DRAGEN_WTS_STEP' in results)", "def test_issue_tracked_times(self):\n pass", "def isolate_self_reporting_cases(self, time: int):", "def test_shift_nothing(self):\n records = map(\n to_integer,\n simulate_records(\n self.df_casesrecord,\n self.df_knotdateset,\n self.df_modeldaterange,\n self.df_possibledateset,\n (None, None),\n ),\n )\n record_fra = select(\"FRA\", date(2020, 4, 11), records)\n self.assertDictEqual(\n {\n \"date\": date(2020, 4, 11),\n \"iso_code\": \"FRA\",\n \"area\": 547557,\n \"population\": 67059887,\n \"weekly_avg_cases\": 5670,\n \"summed_avg_cases\": 18970,\n },\n record_fra,\n )\n record_gbr = select(\"GBR\", date(2020, 4, 10), records)\n self.assertDictEqual(\n {\n \"date\": date(2020, 4, 10),\n \"iso_code\": \"GBR\",\n \"area\": 241930,\n \"population\": 66834405,\n \"weekly_avg_cases\": 4781,\n \"summed_avg_cases\": 9581,\n },\n record_gbr,\n )", "def get_skipped(self):\n return [result for result in self.values() if result.outcome == Result.SKIPPED]", "def test_invalid_flag_record(self):\n log.info(\"START QUAL TEST INVALID FLAG RECORD\")\n\n # Made-up data with all flags except the first set to True.\n # First flag is not a zero or one.\n self.clear_sample_data()\n self.event_subscribers.clear_events()\n self.assert_initialize()\n self.create_sample_data('invalid_A0000003.DEC', \"A1000003.DEC\")\n\n # Verify an event was raised and we are in our retry state.\n self.verify_queue_empty()\n self.assert_event_received(ResourceAgentErrorEvent, 10)\n self.assert_state_change(ResourceAgentState.STREAMING, 10)\n\n log.info(\"END QUAL TEST INVALID FLAG RECORD\")", "def test_unfinished_activity(self):\n with open(os.path.dirname(os.path.abspath(__file__)) + \"/time_unfinished.log\", 'r') as f:\n results_list = analysis.parse_time_log(f)\n\n for x in range(len(expected_activity_list)):\n assert results_list[x] == expected_activity_list[x]\n\n # the last event is ongoing so I can't test an exact end time\n expected_ongoing_dt_begin = datetime(2016, 8, 25, 18, 50, 53)\n assert results_list[-1].dt_begin == expected_ongoing_dt_begin \\\n and results_list[-1].activity_type == 'p' \\\n and datetime.now() - results_list[-1].dt_end < timedelta(minutes=1)", "def run_skip(self):\n pass", "def test_time_type_state_is_evening(day):\n\n assert day_time_info(day.hours_0).is_evening is False\n assert day_time_info(day.hours_1).is_evening is False\n assert day_time_info(day.hours_2).is_evening is False\n assert day_time_info(day.hours_3).is_evening is False\n assert day_time_info(day.hours_4).is_evening is False\n assert day_time_info(day.hours_5).is_evening is False\n assert day_time_info(day.hours_6).is_evening is False\n assert day_time_info(day.hours_7).is_evening is False\n assert 
day_time_info(day.hours_8).is_evening is False\n assert day_time_info(day.hours_9).is_evening is False\n assert day_time_info(day.hours_10).is_evening is False\n assert day_time_info(day.hours_11).is_evening is False\n assert day_time_info(day.hours_12).is_evening is False\n assert day_time_info(day.hours_13).is_evening is False\n assert day_time_info(day.hours_14).is_evening is False\n assert day_time_info(day.hours_15).is_evening is False\n assert day_time_info(day.hours_16).is_evening is False\n assert day_time_info(day.hours_17).is_evening is False\n assert day_time_info(day.hours_18).is_evening is True\n assert day_time_info(day.hours_19).is_evening is True\n assert day_time_info(day.hours_20).is_evening is True\n assert day_time_info(day.hours_21).is_evening is True\n assert day_time_info(day.hours_22).is_evening is True\n assert day_time_info(day.hours_23).is_evening is False", "def testNoFailureFlag(self):\n schema = lsst.afw.table.SourceTable.makeMinimalSchema()\n\n # This is a FlagDefinition structure like a plugin might have\n flagDefs = FlagDefinitionList()\n FIRST = flagDefs.add(\"1st error\", \"this is the first failure type\")\n SECOND = flagDefs.add(\"2nd error\", \"this is the second failure type\")\n fh = FlagHandler.addFields(schema, \"test\", flagDefs)\n # Check to be sure that the FlagHandler was correctly initialized\n for index in range(len(flagDefs)):\n self.assertEqual(flagDefs.getDefinition(index).name, fh.getFlagName(index))\n\n catalog = lsst.afw.table.SourceCatalog(schema)\n\n # Now check to be sure that all of the known failures set the bits correctly\n record = catalog.addNew()\n fh.handleFailure(record)\n self.assertFalse(fh.getValue(record, FIRST.number))\n self.assertFalse(fh.getValue(record, SECOND.number))\n record = catalog.addNew()\n\n record = catalog.addNew()\n error = MeasurementError(FIRST.doc, FIRST.number)\n fh.handleFailure(record, error.cpp)\n self.assertTrue(fh.getValue(record, FIRST.number))\n self.assertFalse(fh.getValue(record, SECOND.number))\n\n record = catalog.addNew()\n error = MeasurementError(SECOND.doc, SECOND.number)\n fh.handleFailure(record, error.cpp)\n self.assertFalse(fh.getValue(record, FIRST.number))\n self.assertTrue(fh.getValue(record, SECOND.number))" ]
[ "0.64526933", "0.62094676", "0.61737317", "0.6080123", "0.6063041", "0.59828675", "0.59114105", "0.5883286", "0.5864295", "0.5789228", "0.578475", "0.5716462", "0.56791854", "0.5611211", "0.5603914", "0.5570936", "0.5559213", "0.55535394", "0.5551212", "0.5548782", "0.5545937", "0.5541316", "0.5520128", "0.5500454", "0.54754937", "0.5475036", "0.54674447", "0.5443703", "0.5439752", "0.54363716" ]
0.6912011
0
method tests timetable records in STATE_PROCESSED state
def test_state_processed(self):
    pipeline = spy(self.pipeline_real)
    job_record = get_job_record(job.STATE_PROCESSED,
                                TEST_PRESET_TIMEPERIOD,
                                PROCESS_SITE_HOURLY)

    pipeline.manage_pipeline_for_process(job_record.process_name, job_record)
    verify(self.time_table_mocked, times=0). \
        update_job_record(any(str), any(Job), any(UnitOfWork), any(str))
    verify(self.time_table_mocked, times=0).get_tree(any(str))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_preset_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=0).\\\n # _process_state_final_run(any(str), any(Job))", "def test_future_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(create_unit_of_work(PROCESS_UNIT_TEST, 0, 1, None))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_raise\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_FUTURE_TIMEPERIOD, PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))", "def test_transfer_to_final_timeperiod_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=1).\\\n # _process_state_final_run(any(str), any(Job))", "def test_retry_state_in_progress(self):\n when(self.time_table_mocked).can_finalize_job_record(any(str), any(Job)).thenReturn(True)\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n self.pipeline_real.insert_uow = then_raise\n self.pipeline_real.recover_from_duplicatekeyerror = override_recover_function\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_IN_PROGRESS,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). 
\\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n # verify(pipeline, times=1).\\\n # _compute_and_transfer_to_final_run(any(str), any(str), any(str), any(Job))\n # verify(pipeline, times=1).\\\n # _process_state_final_run(any(str), any(Job))", "def test_processed_state_final_run(self):\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_PROCESSED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_FINAL_RUN,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n verify(self.time_table_mocked, times=1).get_tree(any(str))", "def test_state_skipped(self):\n pipeline = spy(self.pipeline_real)\n job_record = get_job_record(job.STATE_SKIPPED,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=0). \\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n verify(self.time_table_mocked, times=0).get_tree(any(str))", "def test_retrieve_instances_schedule_state(self):\n pass", "def test_update_instances_schedule_state(self):\n pass", "def test_issue_tracked_times(self):\n pass", "def test_timeout_processing(self):\n # setup\n self.transaction_behaviour.processing_time = None\n\n # operation\n self.transaction_behaviour._timeout_processing()\n\n # after\n self.assert_quantity_in_outbox(0)", "def test_state_embryo(self):\n self.pipeline_real.insert_uow = then_return_uow\n pipeline = spy(self.pipeline_real)\n\n job_record = get_job_record(job.STATE_EMBRYO,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked). 
\\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))", "def phone_timezones_have_been_processed():\n if settings.UNIT_TESTING:\n override = getattr(\n settings, 'PHONE_TIMEZONES_HAVE_BEEN_PROCESSED', None)\n if override is not None:\n return override\n return (_get_migration_status_from_threadlocals()\n == MigrationStatus.COMPLETE)", "def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')", "def saveNeventsByProcessingType(neventsByProcessingType, qtime):\n\n try:\n with transaction.atomic():\n for pt, data in neventsByProcessingType.items():\n row = ProdNeventsHistory(processingtype=pt,\n neventstotal=data['total'],\n neventsused=data['used'],\n neventswaiting=data['waiting'],\n neventsrunning=data['running'],\n timestamp=qtime)\n row.save()\n except DatabaseError as e:\n print (e.message)\n return False\n return True", "def test_reading_pk_timestamps_with_counters(self):\n self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE test_pk_timestamps_with_counters\n (columnname text, day timestamp,\n israndom boolean, columnvalue text, counter counter,\n PRIMARY KEY ((columnname, day, israndom), columnvalue)\n )\"\"\")\n\n records = ['origins|2016-10-01 00:00:00+0000|False|ACTUAL|6\\n',\n 'origins|2016-10-01 00:00:00+0000|False|ADGMOB|4\\n',\n 'origins|2016-10-01 00:00:00+0000|False|ANONPM|4\\n',\n 'origins|2016-10-01 00:00:00+0000|False|CSRT2L|76\\n',\n 'origins|2016-10-01 00:00:00+0000|False|DIAGOP|18\\n',\n 'origins|2016-10-01 00:00:00+0000|False|E-SOFT|17\\n',\n 'origins|2016-10-01 00:00:00+0000|False|E-TASK|10\\n']\n\n tempfile = self.get_temp_file()\n with open(tempfile.name, 'w') as f:\n f.writelines(records)\n\n logger.debug('Importing from csv file: {name}'.format(name=tempfile.name))\n cmds = \"COPY ks.test_pk_timestamps_with_counters FROM '{name}' WITH delimiter = '|'\".format(name=tempfile.name)\n self.run_cqlsh(cmds=cmds)\n\n res = rows_to_list(self.session.execute(\"SELECT COUNT(*) FROM ks.test_pk_timestamps_with_counters\"))[0][0]\n assert len(records) == res, \"Failed to import one or more rows, expected {} but got {}\".format(len(records), res)", "def test_ensure_ts_ts(self):\n self.assertEqual(ensure_ts(self.jobset2), 'imaginary')", "def _compute_queue_state(self):\n for record in self:\n if record.queue_line_total_records == record.queue_line_done_records + record.queue_line_cancel_records:\n record.state = \"completed\"\n elif record.queue_line_draft_records == record.queue_line_total_records:\n record.state = \"draft\"\n elif record.queue_line_total_records == record.queue_line_fail_records:\n record.state = \"failed\"\n else:\n record.state = \"partially_completed\"", "def test_unfinished_activity(self):\n with open(os.path.dirname(os.path.abspath(__file__)) + \"/time_unfinished.log\", 'r') as f:\n results_list = analysis.parse_time_log(f)\n\n for x in range(len(expected_activity_list)):\n assert results_list[x] == expected_activity_list[x]\n\n # the last event is ongoing so I can't test an exact end time\n expected_ongoing_dt_begin = datetime(2016, 8, 25, 18, 50, 53)\n assert results_list[-1].dt_begin == expected_ongoing_dt_begin \\\n and results_list[-1].activity_type == 'p' \\\n and datetime.now() - results_list[-1].dt_end < timedelta(minutes=1)", "def test_initial_timestamp_states(\n self, get_pipe_manager, retrospective, which_checkpoint_state\n ):\n\n # Create the manager and make the timestamp call.\n pm = get_pipe_manager(name=\"InitialTimestampState\")\n stage_name = \"quality_control\"\n 
pm.timestamp(checkpoint=stage_name, finished=retrospective)\n\n # Form expectations.\n if retrospective:\n prev_exp = stage_name\n curr_exp = None\n else:\n prev_exp = None\n curr_exp = stage_name\n\n # Make the assertion on the specified checkpoint state.\n if which_checkpoint_state == \"curr_checkpoint\":\n assert curr_exp == getattr(pm, \"curr_checkpoint\")\n else:\n assert prev_exp == getattr(pm, \"prev_checkpoint\")", "def test_run_now(curent_time,state):\n date = datetime(2020,5,5,12,0)\n duration_in_minutes = 65\n run = Run(date, duration_in_minutes/60)\n\n assert run.run_now(curent_time) == state", "def test_retrospective_the_prospective_checkpointed_timestamps(\n self, test_type, stage_pair, pm\n ):\n\n stage1, stage2 = stage_pair\n pm.timestamp(checkpoint=stage1, finished=True)\n assert stage1 == pm.prev_checkpoint\n assert pm.curr_checkpoint is None\n pm.timestamp(checkpoint=stage2, finished=False)\n\n if test_type == FILES_TEST:\n expected = [checkpoint_filepath(stage1, pm)]\n assert set(expected) == set(fetch_checkpoint_files(pm))\n else:\n assert pm.prev_checkpoint is None\n assert stage2 == pm.curr_checkpoint", "def __checkInited(self, date=None, tkey=None, ttype=None):\n if ttype == 'crontab':\n return False\n if date is None:\n date = self.ddate\n\n for doc in TaskHistory().search(task_day=date, task_key=tkey):\n if doc.get(\"status\") == \"waiting\":\n return True\n return False", "def test_setting_continuous_processing(processor):\n processor.continuous_processing = False\n assert not processor._state.test('continuous_processing')\n processor.continuous_processing = True\n assert processor._state.test('continuous_processing')", "def identify_exec_running(self, record):\n return [\"running\"]", "def test_prospective_then_retrospective_checkpointed_timestamps(\n self, test_type, stage_pair, pm\n ):\n\n stage1, stage2 = stage_pair\n pm.timestamp(checkpoint=stage1, finished=False)\n assert stage1 == pm.curr_checkpoint\n pm.timestamp(checkpoint=stage2, finished=True)\n\n if test_type == FILES_TEST:\n checkpoint_files = fetch_checkpoint_files(pm)\n expected = [checkpoint_filepath(stage2, pm)]\n assert set(expected) == set(checkpoint_files)\n else:\n # Current checkpoint will be reset by second (retrospective)\n # timestamp call.\n assert stage2 == pm.prev_checkpoint\n assert pm.curr_checkpoint is None", "def test_cancelled_state_final_run(self):\n uow_dao_mock = mock(UnitOfWorkDao)\n when(uow_dao_mock).get_one(any()).thenReturn(\n create_unit_of_work(PROCESS_UNIT_TEST, 1, 1, None, unit_of_work.STATE_CANCELED))\n self.pipeline_real.uow_dao = uow_dao_mock\n\n pipeline = spy(self.pipeline_real)\n job_record = get_job_record(job.STATE_FINAL_RUN,\n TEST_PRESET_TIMEPERIOD,\n PROCESS_SITE_HOURLY)\n\n pipeline.manage_pipeline_for_process(job_record.process_name, job_record)\n verify(self.time_table_mocked, times=1). 
\\\n update_job_record(any(str), any(Job), any(UnitOfWork), any(str))\n verify(self.time_table_mocked, times=0).get_tree(any(str))", "def load_status_table():", "def test_get_refresh_job_status(self):\n pass", "def test_running_job(self):\n\n running_job = json.loads(TREEHERDER_JOB % (\"unknown\", \"running\"))\n self.assertEquals(self.query_api.get_job_status(running_job), RUNNING)", "def process(rows):\n # current state\n current_guard = None\n sleeping = False\n start = None\n # handle time data as datetime\n # I saw the 23:58 begin time in test data and figured I wanted date classes\n from datetime import datetime\n dateformat = '[%Y-%m-%d %H:%M]'\n schedule = []\n for row in rows:\n parts = row.split()\n time = datetime.strptime(' '.join(parts[0:2]), dateformat)\n if parts[2] == 'Guard':\n if sleeping: # shouldn't happen\n print('Messy input?!')\n schedule.append({\n 'guard': current_guard,\n 'start': start,\n 'stop': time,\n })\n start = None\n current_guard = int(parts[3].strip('#'))\n elif parts[2] == 'falls':\n start = time\n sleeping = True\n elif parts[2] == 'wakes':\n if sleeping:\n schedule.append({\n 'guard': current_guard,\n 'start': start,\n 'stop': time,\n })\n sleeping = False\n else:\n print('Messy input!?')\n return schedule" ]
[ "0.67496896", "0.6644145", "0.6472953", "0.62294924", "0.6101738", "0.5977244", "0.59124064", "0.5691305", "0.55319977", "0.5511322", "0.55038923", "0.5429801", "0.54225284", "0.5324777", "0.5302315", "0.52370924", "0.5232825", "0.5190848", "0.5188738", "0.5178702", "0.51469487", "0.51411235", "0.51252335", "0.51232964", "0.50751364", "0.50745195", "0.5074006", "0.50698984", "0.50698435", "0.5052384" ]
0.72442085
0
function to create an employee manager.
def create_manager(self, name, pos, dept):
    self.manager[dept.upper()].append(
        {
            'name': name,
            'pos': pos,
            'dept': dept,
            'senior': [],
            'junior': [],
            'trainee': []
        }
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_emp(self, name, pos, dept):\n if pos.upper() == 'MANAGER':\n self.create_manager(name, pos, dept)\n elif pos.upper() == 'SENIOR':\n self.create_senior(name, pos, dept)\n elif pos.upper() == 'JUNIOR':\n self.create_junior(name, pos, dept)\n else:\n self.create_trainee(name, pos, dept)", "def create_manager(self, username, tenancy):\n raise NotImplementedError", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. \")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def createManager(firstName, lastName, ssn, salary, title, yearBonus):\n manager = Manager(firstName, lastName, ssn, salary, title, yearBonus)\n if firstName != manager.firstName or \\\n lastName != manager.lastName or \\\n ssn != manager.ssn or \\\n salary != manager.salary or \\\n title != manager.title or \\\n yearBonus != manager.yearBonus:\n raise ValueError(\"Failed to initialize Manager\")\n return manager", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def create(user):\n # if the user does not have an email manager yet, create it\n if not EmailManager.objects.filter(user=user).exists():\n\n # create a new email manager obj\n new = EmailManager(\n key=EmailManager.generate_key(),\n user=user\n )\n new.save()\n\n # Send confirmation email\n welcome_email(new)\n\n return user.emailmanager", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def test_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={\"path\": \"manager?project=ProjectTest\", \"action\": \"redirect\", \"status\": \"success\"},\n status=200\n )\n\n self.azk.create(self.project, self.description)", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "async def create_bot_manager(self, guild):\n role_settings = {\"name\": self.manager_role,\n \"permissions\": discord.Permissions.all(),\n \"hoist\": False,\n \"mentionable\": False,\n \"color\": discord.Colour.from_rgb(0, 0, 1)}\n await guild.create_role(**role_settings)", "def test_employee_creation(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=50000, department_id=1)\n\n # Returned result is an OrderedDict\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], helper.salary)\n self.assertEqual(result['departmentId'], helper.department_id)", "def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201", 
"def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return JsonResponse(status=200, data=data)", "def assign_store_manager(user_name: str, new_store_manager_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.ADD_MANAGER.value, store_name)\n permission_handler.assign_store_employee(action.MANAGER_INITIAL_PERMISSIONS,\n new_store_manager_name,\n store_name)\n user_handler.assign_store_employee(user_name, new_store_manager_name, store_name)", "def create_manager(\n pdb_hierarchy,\n geometry_restraints_manager,\n fmodel,\n wavelength,\n params,\n resolution_factor = 0.25,\n nproc = Auto,\n verbose = False,\n log = None,\n manager_class=None):\n connectivity = \\\n geometry_restraints_manager.shell_sym_tables[0].full_simple_connectivity()\n if (manager_class is None):\n manager_class = manager\n manager_obj = manager_class(\n fmodel = fmodel,\n pdb_hierarchy = pdb_hierarchy,\n xray_structure = fmodel.xray_structure,\n connectivity = connectivity,\n wavelength = wavelength,\n params = params,\n nproc = nproc,\n verbose = verbose,\n log = log)\n return manager_obj", "def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created", "def test_add_team_manager_to_team(self):\n pass", "def test_create(self):\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.create(TOOLNAME)", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": 
emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def create_user_using_manager(username,password):\n manager = UserManager()\n return manager.create_user(username=username, password=password)", "def create_podmanager(cls, values):\n return cls.dbdriver.create_podmanager(values)", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n employee, created = Employee.objects.update_or_create(user=user,\n employee_id=validated_data.pop('employee_id'),\n location=validated_data.pop('location'),\n avail_start_time= str(validated_data.pop('avail_start_time')),\n avail_end_time= str(validated_data.pop('avail_end_time')))\n return employee", "def setUp(self):\n self.employee = Employee('Lucas', 'Guerra', 45000)", "def create_leader_model(request, company_id):\n\n errors = {'noactions': []}\n company = Company.objects.get(pk=company_id)\n currentEmpl = Employee.objects.get(user__pk=request.user.pk)\n \"\"\":type : Employee \"\"\"\n\n if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:\n raise PermissionDenied()\n\n if currentEmpl.isCompanySuperUserOrHigher():\n employeeQS = Employee.objects.filter(\n company__pk=company_id\n )\n else:\n employeeQS = Employee.objects.filter(\n Q(manager=currentEmpl),\n company__pk=company_id\n )\n\n form = MultiLeaderModelForm(request.POST or None)\n form.fields['employees'].queryset = employeeQS\n\n if request.method == 'POST' and form.is_valid():\n\n employees = form.cleaned_data['employees']\n \"\"\":type : list[Employee] \"\"\"\n\n pdf_response = get_leader_model_pdf(currentEmpl, employees)\n\n if isinstance(pdf_response, HttpResponse):\n return pdf_response\n else:\n errors = pdf_response\n\n print(errors)\n\n return TemplateResponse(\n request,\n 'mus/create_leader_model.html', {\n 'form': form,\n 'company': company,\n 'errors': errors\n }\n )", "def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))", "def createMentor(self, org):\n self.createProfile()\n self.profile.mentor_for = [org.key()]\n self.profile.put()", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def add_manager(self, info):\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM managerpersonal WHERE phone=%s\"\"\", (int(info['phone']),))\n if not self.cursor.fetchone()[0]:\n self.cursor.execute(\"\"\"INSERT INTO managerpersonal VALUES (%s,%s)\"\"\", 
(int(info['phone']), info['address']))\n self.cursor.execute(\"\"\"INSERT INTO managercredentials (loginID, firstName, lastName, salt, pass_key, phone)\n VALUES (%s,%s,%s,%s,%s,%s)\"\"\", (info['loginID'], info['firstName'], info['lastName'], info['salt'],\n info['key'], int(info['phone'])))\n\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM customercredentials WHERE loginID=%s\"\"\", (info['loginID'],))\n result = self.cursor.fetchone()\n if result[0]:\n self.cursor.execute(\"\"\"DELETE FROM customerCredentials WHERE loginID=%s\"\"\", (info['loginID'],))\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM customerCredentials WHERE phone=%s\"\"\", (int(info['phone']),))\n phone_count = self.cursor.fetchone()\n if not phone_count[0]:\n self.cursor.execute(\"\"\"DELETE FROM customerPersonal WHERE phone=%s\"\"\", (int(info['phone']),))\n self.db.commit()\n self.update_book_scores()\n self.update_comment_usefulness()", "def create(self, request):\n serializer = data_serializers.CreateEmployeeSerializer(data=request.data)\n\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n print(F\"Request employee Data: {serializer.data}\")\n\n try:\n new_employee = self.controller.create_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.WorkArrangementPercentageNull\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def add_employee():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n form = SignUp_Form()\n \n\n if form.validate_on_submit():\n try: \n employee = Employee.register(\n username = form.username.data,\n password = form.password.data, \n email = form.email.data, \n first_name = form.first_name.data,\n last_name = form.last_name.data,\n hire_date = form.hire_date.data, \n is_admin = form.is_admin.data,\n )\n\n db.session.add(employee)\n\n db.session.commit()\n except IntegrityError:\n flash(\"Email already in use\", \"danger\")\n return render_template(\"/admin/add_user.html\", form = form)\n\n flash(\"Employee Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/add_user.html\", form = form)" ]
[ "0.70986205", "0.70202345", "0.6854372", "0.6521865", "0.63904655", "0.62510014", "0.6242744", "0.6056396", "0.60380864", "0.6034902", "0.6033909", "0.6022222", "0.60213274", "0.5988317", "0.59657365", "0.591008", "0.5900008", "0.58407825", "0.58102566", "0.57738036", "0.57492363", "0.5748451", "0.5742093", "0.57378864", "0.57374865", "0.57279265", "0.5721603", "0.57109344", "0.57106096", "0.5704365" ]
0.74205023
0
function to create employee senior.
def create_senior(self, name, pos, dept): self.senior[dept.upper()].append( { 'name': name, 'pos': pos, 'dept': dept, 'manager': self.manager[dept.upper()][0]['name'], 'junior': [], 'trainee': [] } ) self.manager[dept.upper()][0]['senior'].append( { 'name': name, 'pos': pos, 'dept': dept } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_emp(self, name, pos, dept):\n if pos.upper() == 'MANAGER':\n self.create_manager(name, pos, dept)\n elif pos.upper() == 'SENIOR':\n self.create_senior(name, pos, dept)\n elif pos.upper() == 'JUNIOR':\n self.create_junior(name, pos, dept)\n else:\n self.create_trainee(name, pos, dept)", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def create_person(self):", "def create_junior(self, name, pos, dept):\n self.junior[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'manager': self.manager[dept.upper()][0]['name'],\n 'senior': self.senior[dept.upper()][0]['name'],\n 'trainee': []\n }\n )\n self.manager[dept.upper()][0]['junior'].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept\n }\n )\n self.senior[dept.upper()][0]['junior'].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept\n }\n )", "def create_teacher(username, password, email, preferred_language,skype_id,name, phone_number, country,availability):\n person.create_person(username,password,email,preferred_language,skype_id,name,phone_number,country)\n teacher_account_id = person.get_last()\n query = 'INSERT INTO teacher VALUES( %s,%s );'\n args = (teacher_account_id, availability)\n database.connection.save_data(query, args)", "def create(self, values):\n if values.get('country_id', False):\n country = self.env['res.country'].browse(values['country_id'])\n if country.code == 'SA':\n values.update({'is_saudi': True})\n else:\n values.update({'is_saudi': False})\n\n res = super(HrEmployee, self).create(values)\n if values.get('user_id', False):\n self.user_id.write({'employee_id': res})\n return res", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. 
\")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def users_create():", "def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()", "def create_student(self, username):\r\n return self._create_user(username, is_staff=False)", "def createDeveloper(self):\n self.createUser()\n self.user.is_developer = True\n self.user.put()", "def create_member(org_id, group_id, target_group_ids, sex, first_name, last_name, title_name, email):\n user = get_user_by_email(email)\n # --- falls e-mail schon existiert wird nichts unternommen\n if user != None:\n if org_id > 0: # nur bei Schulen wird die Schulnummer vorangestellt\n prefix = '%i_' % org_id\n else:\n prefix = ''\n user = User()\n username = get_username(prefix, first_name, last_name)\n user.username = username\n user.sex = sex\n user.first_name = first_name\n user.last_name = last_name\n user.email = email\n user.title = title_name\n user.is_staff = False\n user.is_active = True\n user.is_superuser = False\n user.date_joined = datetime.datetime.now()\n password = generate_passwd()\n user.set_password(password)\n user.save()\n set_user_org(org_id, user)\n send_password(email, username, password)\n set_user_group(user, get_group_by_id(group_id))\n for group in target_group_ids:\n set_user_group(user, get_group_by_id(group))\n transaction.commit()", "def create_instructor(self, username):\r\n return self._create_user(username, is_staff=True)", "def create_user(schools_dictionnary, domains_to_skills_dictionnary, companies, places, skills_oh, places_oh, domains_oh, rng, _id):\n\n age = rng.randint(20,60)\n schools = rng.choice(list(schools_dictionnary.keys()), rng.choice([1, 2], p = [0.95, 0.05]), replace = False) \n\n available_skills = list(set([skill for school in schools \\\n for domain in schools_dictionnary[school].domains \\\n for skill in domains_to_skills_dictionnary[domain]]))\n\n expo = np.round(rng.exponential(0.3) * len(schools)) + age // 17 + 1\n\n nb_skills_to_choose = min(int(expo), 5 + (len(schools) - 1) * 3)\n\n _skills = rng.choice(available_skills, nb_skills_to_choose, replace = False)\n\n company = rng.choice(companies)\n place = rng.choice(places)\n\n user = User(skills_oh, places_oh, domains_oh, schools_dictionnary, skills = _skills, age = age, place = place, company = company,\n schools = schools, _id = _id)\n\n return user", "def perform_create(self, serializer):\n if self.request.data.get('user_type', None) == 'employee':\n serializer.save(is_staff=False)\n else:\n serializer.save()", "def _create_nsem_user():\n users = User.objects.filter(username=settings.CWWED_NSEM_USER)\n if users.exists():\n user = users[0]\n else:\n user = User.objects.create_user(settings.CWWED_NSEM_USER, password=settings.CWWED_NSEM_PASSWORD)\n group, _ = 
Group.objects.get_or_create(name=settings.CWWED_NSEM_GROUP)\n perm_names = [\n 'add_{}'.format(NsemPsa._meta.model_name),\n 'add_{}'.format(NamedStormCoveredDataSnapshot._meta.model_name),\n ]\n perms = Permission.objects.filter(codename__in=perm_names)\n # set permission\n user.user_permissions.set(list(perms))\n group.permissions.set(list(perms))\n # add user to group\n group.user_set.add(user)", "def create_individual(self):\n pass", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n employee, created = Employee.objects.update_or_create(user=user,\n employee_id=validated_data.pop('employee_id'),\n location=validated_data.pop('location'),\n avail_start_time= str(validated_data.pop('avail_start_time')),\n avail_end_time= str(validated_data.pop('avail_end_time')))\n return employee", "def create_educator(data):\n\n educator = Educator(\n name=data['name'],\n work_email=data['work_email'],\n organization_name=data['organization_name'],\n org_or_school=data['org_or_school'],\n address_line_1=data['address_line_1'],\n address_line_2=data['address_line_2'],\n city=data['city'],\n state=data['state'],\n zipcode=data['zipcode'],\n num_students=data['num_students']\n )\n educator.save()\n return educator", "def createMentor(self, org):\n self.createProfile()\n self.profile.mentor_for = [org.key()]\n self.profile.put()", "def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return JsonResponse(status=200, data=data)", "def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in lele:\n 
prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat Sesuai 
KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window", "def create_student(faculty: str) -> None:\r\n global usernames, pointer, student_file_info\r\n username = usernames[pointer]\r\n password = username[:6][::-1]\r\n student_file_info.append([username, password, faculty])\r\n pointer += 1", "def partner_create(self):\n try:\n mongo_module.mongo_insert(self.partner)\n output = 'sucesfully created'\n code = 201\n except Exception as err:\n output = str(err)\n code = 409\n return output, code", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "def create_user(email, password, f_name, l_name):\n pass", "def createEmployee(firstName, lastName, ssn, salary):\n employee = Employee(firstName, lastName, ssn, salary)\n # verify\n if firstName != employee.firstName or \\\n lastName != employee.lastName or \\\n ssn != employee.ssn or \\\n salary != employee.salary:\n raise ValueError(\"Failed to initialize Employee\")\n return employee", "def generateEmployees(self):\r\n\r\n # Name\r\n maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen']\r\n femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa 
Seman', 'Jeraldine Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse']\r\n\r\n for i in range(self.num_of_employees):\r\n\r\n # Clock in an hour before opening, 6 hours after, or 12 hours after\r\n clockIn = random.choice([7, 13, 19])\r\n\r\n # Clock out after 5 hours, 10 hours, or 15 hours\r\n clockOut = random.choice([13, 19, 23])\r\n while clockOut <= clockIn:\r\n clockOut = random.choice([13, 19, 23])\r\n\r\n # Hourly wage\r\n wage = random.choice([8, 9, 10, 12, 20])\r\n\r\n gender = random.choice(['M', 'F'])\r\n if gender == 'M':\r\n name = random.choice(maleNames)\r\n else:\r\n name = random.choice(femaleNames)\r\n\r\n self.c.execute(\"INSERT INTO Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)\", (name, clockIn, clockOut, wage))\r\n self.conn.commit()\r\n\r\n if self.print_employees:\r\n print(\"\\nName:\", name)\r\n print(\"Clock in:\", clockIn)\r\n print(\"Clock out:\", clockOut)\r\n print(\"Wage:\", wage)", "def createStudentWithProject(self):\n self.createStudentWithProposal()\n from soc.modules.gsoc.models.student_project import StudentProject\n properties = {'link_id': self.profile.link_id, 'scope': self.profile,\n 'student': self.profile, 'parent': self.profile}\n seeder_logic.seed(StudentProject, properties)" ]
[ "0.7395388", "0.6873678", "0.6652067", "0.6417113", "0.63125676", "0.6297487", "0.6267351", "0.62289935", "0.6222155", "0.6122551", "0.6114099", "0.6063726", "0.6037256", "0.60092777", "0.60071415", "0.59452486", "0.5924796", "0.5911677", "0.58706707", "0.58596087", "0.5855955", "0.5855521", "0.58510476", "0.58228815", "0.57597584", "0.57444173", "0.57441914", "0.5718857", "0.5710078", "0.56747264" ]
0.72263277
1
function to create employee junior.
def create_junior(self, name, pos, dept): self.junior[dept.upper()].append( { 'name': name, 'pos': pos, 'dept': dept, 'manager': self.manager[dept.upper()][0]['name'], 'senior': self.senior[dept.upper()][0]['name'], 'trainee': [] } ) self.manager[dept.upper()][0]['junior'].append( { 'name': name, 'pos': pos, 'dept': dept } ) self.senior[dept.upper()][0]['junior'].append( { 'name': name, 'pos': pos, 'dept': dept } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_emp(self, name, pos, dept):\n if pos.upper() == 'MANAGER':\n self.create_manager(name, pos, dept)\n elif pos.upper() == 'SENIOR':\n self.create_senior(name, pos, dept)\n elif pos.upper() == 'JUNIOR':\n self.create_junior(name, pos, dept)\n else:\n self.create_trainee(name, pos, dept)", "def create_senior(self, name, pos, dept):\n self.senior[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'manager': self.manager[dept.upper()][0]['name'],\n 'junior': [],\n 'trainee': []\n }\n )\n self.manager[dept.upper()][0]['senior'].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept\n }\n )", "def create_person(self):", "def create_user(schools_dictionnary, domains_to_skills_dictionnary, companies, places, skills_oh, places_oh, domains_oh, rng, _id):\n\n age = rng.randint(20,60)\n schools = rng.choice(list(schools_dictionnary.keys()), rng.choice([1, 2], p = [0.95, 0.05]), replace = False) \n\n available_skills = list(set([skill for school in schools \\\n for domain in schools_dictionnary[school].domains \\\n for skill in domains_to_skills_dictionnary[domain]]))\n\n expo = np.round(rng.exponential(0.3) * len(schools)) + age // 17 + 1\n\n nb_skills_to_choose = min(int(expo), 5 + (len(schools) - 1) * 3)\n\n _skills = rng.choice(available_skills, nb_skills_to_choose, replace = False)\n\n company = rng.choice(companies)\n place = rng.choice(places)\n\n user = User(skills_oh, places_oh, domains_oh, schools_dictionnary, skills = _skills, age = age, place = place, company = company,\n schools = schools, _id = _id)\n\n return user", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return JsonResponse(status=200, data=data)", "def generateEmployees(self):\r\n\r\n # Name\r\n maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 
'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen']\r\n femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa Seman', 'Jeraldine Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse']\r\n\r\n for i in range(self.num_of_employees):\r\n\r\n # Clock in an hour before opening, 6 hours after, or 12 hours after\r\n clockIn = random.choice([7, 13, 19])\r\n\r\n # Clock out after 5 hours, 10 hours, or 15 hours\r\n clockOut = random.choice([13, 19, 23])\r\n while clockOut <= clockIn:\r\n clockOut = random.choice([13, 19, 23])\r\n\r\n # Hourly wage\r\n wage = random.choice([8, 9, 10, 12, 20])\r\n\r\n gender = random.choice(['M', 'F'])\r\n if gender == 'M':\r\n name = random.choice(maleNames)\r\n else:\r\n name = random.choice(femaleNames)\r\n\r\n self.c.execute(\"INSERT INTO Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)\", (name, clockIn, clockOut, wage))\r\n self.conn.commit()\r\n\r\n if self.print_employees:\r\n print(\"\\nName:\", name)\r\n print(\"Clock in:\", clockIn)\r\n print(\"Clock out:\", clockOut)\r\n print(\"Wage:\", wage)", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. 
\")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n employee, created = Employee.objects.update_or_create(user=user,\n employee_id=validated_data.pop('employee_id'),\n location=validated_data.pop('location'),\n avail_start_time= str(validated_data.pop('avail_start_time')),\n avail_end_time= str(validated_data.pop('avail_end_time')))\n return employee", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def users_create():", "def create_member(org_id, group_id, target_group_ids, sex, first_name, last_name, title_name, email):\n user = get_user_by_email(email)\n # --- falls e-mail schon existiert wird nichts unternommen\n if user != None:\n if org_id > 0: # nur bei Schulen wird die Schulnummer vorangestellt\n prefix = '%i_' % org_id\n else:\n prefix = ''\n user = User()\n username = get_username(prefix, first_name, last_name)\n user.username = username\n user.sex = sex\n user.first_name = first_name\n user.last_name = last_name\n user.email = email\n user.title = title_name\n user.is_staff = False\n user.is_active = True\n user.is_superuser = False\n user.date_joined = datetime.datetime.now()\n password = generate_passwd()\n user.set_password(password)\n user.save()\n set_user_org(org_id, user)\n send_password(email, username, password)\n set_user_group(user, get_group_by_id(group_id))\n for group in target_group_ids:\n set_user_group(user, get_group_by_id(group))\n transaction.commit()", "def create(self, values):\n if values.get('country_id', False):\n country = self.env['res.country'].browse(values['country_id'])\n if country.code == 'SA':\n values.update({'is_saudi': True})\n else:\n values.update({'is_saudi': False})\n\n res = super(HrEmployee, self).create(values)\n if values.get('user_id', False):\n self.user_id.write({'employee_id': res})\n return res", "def test_create_owner(self):\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def is_employee():\n return _is_member('uw_employee')", "def add_employee():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n form = SignUp_Form()\n \n\n if form.validate_on_submit():\n try: \n employee = Employee.register(\n username = form.username.data,\n password = form.password.data, \n email = form.email.data, \n first_name = form.first_name.data,\n last_name = form.last_name.data,\n hire_date = form.hire_date.data, \n is_admin = form.is_admin.data,\n )\n\n db.session.add(employee)\n\n db.session.commit()\n except IntegrityError:\n flash(\"Email already in use\", \"danger\")\n return 
render_template(\"/admin/add_user.html\", form = form)\n\n flash(\"Employee Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/add_user.html\", form = form)", "def insert_employee(self,\n region_name,\n last_name,\n first_name,\n hire_date,\n mi=None):\n\n if self.check_input_type(region_name, \"Region\"):\n if self.check_input_type(hire_date, \"Date\"):\n region_info = self.query_region(region_name)\n region_id = region_info[0][0]\n\n if mi != \"\":\n query_format = \"insert into employee(Region_ID, \" \\\n \"Emp_Lname, Emp_Mi, Emp_Fname, Emp_Hiredate) \" \\\n \"values ((select region_id from region where \" \\\n \"region_id='{}'), '{}', '{}', '{}', '{}')\"\n query = query_format.format(\n region_id, last_name, mi, first_name, hire_date\n )\n else:\n query_format = \"insert into employee(Region_ID, \" \\\n \"Emp_Lname, Emp_Fname, Emp_Hiredate) \" \\\n \"values ((select region_id from region where \" \\\n \"region_id='{}'), '{}', '{}', '{}')\"\n query = query_format.format(\n region_id, last_name, first_name, hire_date\n )\n\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n ErrorMessageWindow(\"Date format not valid!\")\n else:\n ErrorMessageWindow(\"Region input not valid!\")", "def create_user(change):\n return change()", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "def create_leader_model(request, company_id):\n\n errors = {'noactions': []}\n company = Company.objects.get(pk=company_id)\n currentEmpl = Employee.objects.get(user__pk=request.user.pk)\n \"\"\":type : Employee \"\"\"\n\n if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:\n raise PermissionDenied()\n\n if currentEmpl.isCompanySuperUserOrHigher():\n employeeQS = Employee.objects.filter(\n company__pk=company_id\n )\n else:\n employeeQS = Employee.objects.filter(\n Q(manager=currentEmpl),\n company__pk=company_id\n )\n\n form = MultiLeaderModelForm(request.POST or None)\n form.fields['employees'].queryset = employeeQS\n\n if request.method == 'POST' and form.is_valid():\n\n employees = form.cleaned_data['employees']\n \"\"\":type : list[Employee] \"\"\"\n\n pdf_response = get_leader_model_pdf(currentEmpl, employees)\n\n if isinstance(pdf_response, HttpResponse):\n return pdf_response\n else:\n errors = pdf_response\n\n print(errors)\n\n return TemplateResponse(\n request,\n 'mus/create_leader_model.html', {\n 'form': form,\n 'company': company,\n 'errors': errors\n }\n )", "def create_manager(self, name, pos, dept):\n self.manager[dept.upper()].append(\n {\n 'name': name,\n 'pos': pos,\n 'dept': dept,\n 'senior': [],\n 'junior': [],\n 'trainee': []\n }\n )", "def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if 
applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in lele:\n prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 
'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat Sesuai KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window", "def setUp(self):\n\t\tfirst_name = 'Gerson'\n\t\tlast_name = 'Santos'\n\t\tannual_salary = 5000\n\t\tself.gerson = Employee(first_name, last_name, annual_salary)", "def setUp(self):\n self.salary = 40000\n self.custom_rise = 7500\n self.employee = Employee(\"Carlos\", \"Zapata\", self.salary)", "def main():\n name = input(\"Please enter in your name: \")\n\n \"\"\"Ask the user to enter a number if they are a Director, Manager or Staff.\"\"\"\n \"\"\"This will check and make sure the user only enters in 1,2, \n or 3 and a number greater than zero\"\"\"\n while True:\n try:\n designation_number = int(input(\"Please enter in \\n1 for Director \"\n \"\\n2 for Manager \\n3 for Staff\\n\"))\n if 0 < designation_number <= 3:\n break\n print(\"Invalid number entered.\")\n except Exception as e:\n print(e)\n \"\"\"Gets the user salary and makes sure is a number and greater than 0\"\"\"\n while True:\n try:\n salary = float(input(\"Please enter in your salary: \"))\n if salary <= 0:\n print(\"Your salary must be at least 1 dollar. Please enter a number greater than zero.\")\n else:\n break\n except ValueError:\n print(\"Oops! That was not a valid number. 
Try again...\")\n\n \"\"\"Create Employee\"\"\"\n employee1 = employee.Employee()\n employee1.set_name(name)\n employee1.set_designation(designation_number)\n employee1.set_salary(salary)\n print(employee1)", "def setUp(self):\n\tself.emp = Employee('Lin',10000)\n\tself.emp2 = Employee('Jun',20000)", "def createMentor(self, org):\n self.createProfile()\n self.profile.mentor_for = [org.key()]\n self.profile.put()", "def onUserCreation(event):\n\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n storage = getUtility(IPubSubStorage)\n principal = event.principal\n mtool = getToolByName(principal, 'portal_membership')\n\n principal_id = principal.getUserId()\n principal_jid = xmpp_users.getUserJID(principal_id)\n members_jids = [xmpp_users.getUserJID(member.getUserId())\n for member in mtool.listMembers()]\n pass_storage = getUtility(IXMPPPasswordStorage)\n principal_pass = pass_storage.set(principal_id)\n\n storage.leaf_nodes.append(principal_id)\n storage.node_items[principal_id] = []\n storage.collections['people'].append(principal_id)\n storage.publishers[principal_id] = [principal_id]\n\n d = setupPrincipal(client, principal_jid, principal_pass, members_jids)\n return d", "def createEmployee(firstName, lastName, ssn, salary):\n employee = Employee(firstName, lastName, ssn, salary)\n # verify\n if firstName != employee.firstName or \\\n lastName != employee.lastName or \\\n ssn != employee.ssn or \\\n salary != employee.salary:\n raise ValueError(\"Failed to initialize Employee\")\n return employee", "def make_commissioned(self,salary,commission,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"3\"\n print(\"{}{}\".format(name,\" was successfully changed to be a commissioned employee\"))\n self.emp_dict[id][7] = salary\n self.emp_dict[id][9] = commission\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()" ]
[ "0.6703623", "0.6629079", "0.58912194", "0.5825029", "0.57799065", "0.557808", "0.5561385", "0.55040497", "0.5481859", "0.54713845", "0.5368414", "0.5327975", "0.52699995", "0.52693903", "0.52427757", "0.52367204", "0.52185136", "0.52170134", "0.5204411", "0.5148115", "0.51351565", "0.5132368", "0.51252276", "0.512357", "0.5113103", "0.5106386", "0.5105611", "0.5081404", "0.5071633", "0.5069231" ]
0.6939935
0
function to create employee trainee.
def create_trainee(self, name, pos, dept): self.trainee[dept.upper()].append( { 'name': name, 'pos': pos, 'dept': dept, 'manager': self.manager[dept.upper()][0]['name'], 'senior': self.senior[dept.upper()][0]['name'], 'junior': self.junior[dept.upper()][0]['name'], } ) self.manager[dept.upper()][0]['trainee'].append( { 'name': name, 'pos': pos, 'dept': dept } ) self.senior[dept.upper()][0]['trainee'].append( { 'name': name, 'pos': pos, 'dept': dept } ) self.junior[dept.upper()][0]['trainee'].append( { 'name': name, 'pos': pos, 'dept': dept } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_emp(self, name, pos, dept):\n if pos.upper() == 'MANAGER':\n self.create_manager(name, pos, dept)\n elif pos.upper() == 'SENIOR':\n self.create_senior(name, pos, dept)\n elif pos.upper() == 'JUNIOR':\n self.create_junior(name, pos, dept)\n else:\n self.create_trainee(name, pos, dept)", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. \")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def test_employee_creation(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=50000, department_id=1)\n\n # Returned result is an OrderedDict\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], helper.salary)\n self.assertEqual(result['departmentId'], helper.department_id)", "def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def create_teacher(username, password, email, preferred_language,skype_id,name, phone_number, country,availability):\n person.create_person(username,password,email,preferred_language,skype_id,name,phone_number,country)\n teacher_account_id = person.get_last()\n query = 'INSERT INTO teacher VALUES( %s,%s );'\n args = (teacher_account_id, availability)\n database.connection.save_data(query, args)", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "def create_educator(data):\n\n educator = Educator(\n name=data['name'],\n work_email=data['work_email'],\n organization_name=data['organization_name'],\n org_or_school=data['org_or_school'],\n address_line_1=data['address_line_1'],\n address_line_2=data['address_line_2'],\n city=data['city'],\n state=data['state'],\n zipcode=data['zipcode'],\n num_students=data['num_students']\n )\n educator.save()\n return educator", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n employee, created = Employee.objects.update_or_create(user=user,\n 
employee_id=validated_data.pop('employee_id'),\n location=validated_data.pop('location'),\n avail_start_time= str(validated_data.pop('avail_start_time')),\n avail_end_time= str(validated_data.pop('avail_end_time')))\n return employee", "def create(self, request):\n serializer = data_serializers.CreateEmployeeSerializer(data=request.data)\n\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n print(F\"Request employee Data: {serializer.data}\")\n\n try:\n new_employee = self.controller.create_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.WorkArrangementPercentageNull\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def create_tea():\n # Bypass if user is logged in\n\n form = TeaAddForm()\n # Validate login attempt\n if form.validate_on_submit():\n tea = Tea(\n name=form.name.data,\n price_per_gram=form.price_per_gram.data,\n )\n db.session.add(tea)\n db.session.commit() # Create new tea\n return redirect(url_for(\"main_bp.dashboard\"))\n return render_template(\n \"add_tea.jinja2\",\n title=\"Add a Tea\",\n form=form,\n template=\"add_tea-page\",\n body=\"Add a Tea\",\n )", "def setUp(self):\n self.salary = 40000\n self.custom_rise = 7500\n self.employee = Employee(\"Carlos\", \"Zapata\", self.salary)", "def create_test_run(\n self, assign_user_id, project_id, suite_id, testrun_name, tr_keys):\n data = {\n 'suite_id': suite_id,\n 'name': testrun_name,\n 'assignedto_id': assign_user_id,\n 'include_all': False,\n 'case_ids': tr_keys,\n }\n\n response = self.client.send_post(\n ADD_TESTRUN_URL.format(project_id),\n data,\n cert_check=self.cert_check\n )\n for key, _ in response.items():\n if key == 'error':\n print('Failed to create testrun: {}'.format(response))\n else:\n self.testrun_id = response['id']", "def create_employee(attributes):\n neccessary_keys = [\"empid\", \"gender\", \"sales\", \"bmi\", \"salary\", \"birthday\",\n \"age\"]\n for key in neccessary_keys:\n if not key in attributes.keys():\n raise ValueError(\"employee could not be created: {} is missing\".format(key))\n return Employee(attributes[\"empid\"], attributes[\"gender\"],\n attributes[\"sales\"], attributes[\"bmi\"],\n attributes[\"salary\"], attributes[\"birthday\"],\n attributes[\"age\"])", "def create(self, request):\n serializer = data_serializers.TeamLeaderOrEmployeeRequestDataSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n respond_data = self.controller.add_team_employee(request_data=request_data)\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(respond_data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.EmployeeDoesNotExist,\n domain_exceptions.EmployeeIsATeamMember\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def setUp(self):\n self.my_employee = Employee('knight', 'lee', 10000)", "def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if 
context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in lele:\n prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### 
informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat Sesuai KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window", "def setUp(self):\n self.employee = Employee('Lucas', 'Guerra', 45000)", "def add_employee(schema, employee_json):\n employee = schema.load(employee_json, session=db.session)\n db.session.add(employee)\n db.session.commit()\n return employee", "def create():", "def create():", "def setUp(self):\n\tself.emp = Employee('Lin',10000)\n\tself.emp2 = Employee('Jun',20000)", "def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )", "def handle(self, *args, **kwargs):\n seeder = Seed.seeder()\n seeder.add_entity(User, 20)\n\n seeder.add_entity(EmployeeMptt, 20, {\n 'user': lambda x: User.objects.filter(employeemptt=None).first(),\n 'parent': lambda x: EmployeeMptt.objects.order_by(\"?\").first(),\n 'level': lambda x: random.randint(0, 4),\n })\n seeder.execute()", "def setUp(self):\n self.employee = 
Employee('John', 'Doe', 50000)\n self.raise_amount = 20000", "def create(tesserae, title):\n try:\n return tesserae.create(title)\n except TesseraError, e:\n sys.stderr.write(\"Error: %s\\n\" % str(e))\n return False", "def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created", "async def create(self):\n cur = self.sql.cur\n\n user = Client().get_server(self.server_id).get_member(self.user_id)\n\n self.nickname = user.nick if user.nick else user.name\n\n nickname = self.nickname\n trainer_id = str(uuid.uuid4())\n self.trainer_id = trainer_id\n now = datetime.datetime.now()\n user_id = self.user_id\n server_id = self.server_id\n\n self.current_zone_id = '86'\n self.current_building_id = None\n self.current_region_id = None\n\n cmd = \"\"\"INSERT INTO trainers\n (trainer_id,\n user_id,\n server_id,\n nickname,\n created_on)\n VALUES\n (:trainer_id,\n :user_id,\n :server_id,\n :nickname,\n :now)\"\"\"\n cur.execute(cmd, locals())\n\n cmd = \"\"\"INSERT INTO trainer_stats\n (trainer_id)\n VALUES\n (:trainer_id)\"\"\"\n cur.execute(cmd, locals())\n\n cmd = \"\"\"INSERT INTO trainer_data\n (trainer_id,\n current_region_id,\n current_zone_id,\n current_building_id)\n VALUES\n (:trainer_id,\n :current_region_id,\n :current_zone_id,\n :current_building_id)\"\"\"\n cur.execute(cmd, self.__dict__)\n\n cmd = \"\"\"INSERT INTO trainer_party\n (trainer_id)\n VALUES\n (:trainer_id)\"\"\"\n cur.execute(cmd, locals())\n\n await self.sql.commit(now=True)\n self.log.info(f\"New trainer has been born! Welcome {trainer_id}\")", "def test_admin_can_create_a_employee(self):\n\n account_data = {\n \"username\": \"Mike\",\n \"email\": \"[email protected]\",\n \"password\": \"1234567\",\n \"confirm_password\": \"1234567\"\n }\n response = self.client.post(\n reverse('accounts:create-user'),\n account_data,\n format=\"json\")\n \"\"\"Test the api has bucket creation capability.\"\"\"\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(\"data\" in json.loads(response.content))" ]
[ "0.71946114", "0.64039135", "0.6397927", "0.60852325", "0.60668916", "0.5989232", "0.59618133", "0.5926233", "0.59155416", "0.584691", "0.5840692", "0.58354217", "0.57622826", "0.5757782", "0.57444525", "0.57208085", "0.5719296", "0.56969446", "0.56930053", "0.56875426", "0.5683447", "0.5683447", "0.566679", "0.56505567", "0.5648289", "0.5639002", "0.5594797", "0.5589957", "0.55689746", "0.5568751" ]
0.696836
1
function to create employee based on position.
def create_emp(self, name, pos, dept):
    if pos.upper() == 'MANAGER':
        self.create_manager(name, pos, dept)
    elif pos.upper() == 'SENIOR':
        self.create_senior(name, pos, dept)
    elif pos.upper() == 'JUNIOR':
        self.create_junior(name, pos, dept)
    else:
        self.create_trainee(name, pos, dept)
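A quick sketch of a call site and an equivalent table-driven form (illustrative only; assumes the create_* helpers dispatched to above exist on the same class):

# Hypothetical call -- `roster` is any object exposing create_emp.
roster.create_emp('Asha', 'senior', 'Engineering')  # routes to create_senior

# Equivalent dispatch without the elif chain; unknown positions still
# fall through to create_trainee, matching the original behavior.
def create_emp(self, name, pos, dept):
    handlers = {'MANAGER': self.create_manager,
                'SENIOR': self.create_senior,
                'JUNIOR': self.create_junior}
    handlers.get(pos.upper(), self.create_trainee)(name, pos, dept)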
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. \")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def test_employee_creation(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=50000, department_id=1)\n\n # Returned result is an OrderedDict\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], helper.salary)\n self.assertEqual(result['departmentId'], helper.department_id)", "def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201", "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n employee, created = Employee.objects.update_or_create(user=user,\n employee_id=validated_data.pop('employee_id'),\n location=validated_data.pop('location'),\n avail_start_time= str(validated_data.pop('avail_start_time')),\n avail_end_time= str(validated_data.pop('avail_end_time')))\n return employee", "def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in 
lele:\n prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat 
Sesuai KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window", "def create(self, request):\n serializer = data_serializers.CreateEmployeeSerializer(data=request.data)\n\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n print(F\"Request employee Data: {serializer.data}\")\n\n try:\n new_employee = self.controller.create_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.WorkArrangementPercentageNull\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def create(self, vals):\n if not vals.get('nik_number'):\n vals['nik_number'] = self.generate_nik(vals)\n return super(Employee, self).create(vals)", "def createEmployee(firstName, lastName, ssn, salary):\n employee = Employee(firstName, lastName, ssn, salary)\n # verify\n if firstName != employee.firstName or \\\n lastName != employee.lastName or \\\n ssn != employee.ssn or \\\n salary != employee.salary:\n raise ValueError(\"Failed to initialize Employee\")\n return employee", "def get_create_employee_query(self):\n template = \"\"\"\n mutation createEmployee {{\n createEmployee(input: {{ {params} }}) {{\n employee {{\n name\n hiredOn\n salary\n departmentId\n }}\n }}\n }}\n \"\"\"\n # Add input parameters as needed\n input_params = 'name:\"{}\",'.format(self.name)\n\n if self.hired_on is not None:\n input_params += 'hiredOn: \"{}\", '.format(self.hired_on)\n\n if self.salary is not None:\n input_params += 'salary: {}, '.format(self.salary)\n\n if self.department_id is not None:\n input_params += 'departmentId: {}'.format(self.department_id)\n\n return template.format(params=input_params)", "def generateEmployees(self):\r\n\r\n # Name\r\n maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin 
Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen']\r\n femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa Seman', 'Jeraldine Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse']\r\n\r\n for i in range(self.num_of_employees):\r\n\r\n # Clock in an hour before opening, 6 hours after, or 12 hours after\r\n clockIn = random.choice([7, 13, 19])\r\n\r\n # Clock out after 5 hours, 10 hours, or 15 hours\r\n clockOut = random.choice([13, 19, 23])\r\n while clockOut <= clockIn:\r\n clockOut = random.choice([13, 19, 23])\r\n\r\n # Hourly wage\r\n wage = random.choice([8, 9, 10, 12, 20])\r\n\r\n gender = random.choice(['M', 'F'])\r\n if gender == 'M':\r\n name = random.choice(maleNames)\r\n else:\r\n name = random.choice(femaleNames)\r\n\r\n self.c.execute(\"INSERT INTO Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)\", (name, clockIn, clockOut, wage))\r\n self.conn.commit()\r\n\r\n if self.print_employees:\r\n print(\"\\nName:\", name)\r\n print(\"Clock in:\", clockIn)\r\n print(\"Clock out:\", clockOut)\r\n print(\"Wage:\", wage)", "def create_employee(attributes):\n neccessary_keys = [\"empid\", \"gender\", \"sales\", \"bmi\", \"salary\", \"birthday\",\n \"age\"]\n for key in neccessary_keys:\n if not key in attributes.keys():\n raise ValueError(\"employee could not be created: {} is missing\".format(key))\n return Employee(attributes[\"empid\"], attributes[\"gender\"],\n attributes[\"sales\"], attributes[\"bmi\"],\n attributes[\"salary\"], attributes[\"birthday\"],\n attributes[\"age\"])", "def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created", "def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n 
cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))", "def create(self, values):\n if values.get('country_id', False):\n country = self.env['res.country'].browse(values['country_id'])\n if country.code == 'SA':\n values.update({'is_saudi': True})\n else:\n values.update({'is_saudi': False})\n\n res = super(HrEmployee, self).create(values)\n if values.get('user_id', False):\n self.user_id.write({'employee_id': res})\n return res", "def create_person(self):", "def add_employee(schema, employee_json):\n employee = schema.load(employee_json, session=db.session)\n db.session.add(employee)\n db.session.commit()\n return employee", "def setUp(self):\n\n self.user = self.make_user()\n self.employee = Employee.objects.create(\n cpf=\"974.220.200-16\",\n user=self.user,\n departament=Employee.ADMINISTRATION\n )", "def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return JsonResponse(status=200, data=data)", "def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def createobj(self, firstname='', middlename='', lasttname='', email='', phno='', gender='', address='',\n peronjson={}, notes='', tenantid=''):\n personobj = {'firstname': firstname, 'middlename': middlename, 'lasttname': lasttname, 'email': email, 'phno':\n phno, 'gender': gender, 'address': address, 'peronjson': peronjson, 'notes': notes, 'tenantid': tenantid}\n self.persontdetailscoll.insert(tenantobj, safe=True)", "def create(self, request):\n serializer = data_serializers.TeamLeaderOrEmployeeRequestDataSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n respond_data = self.controller.add_team_employee(request_data=request_data)\n serializer = 
data_serializers.PresentTeamEmployeeDataSerializer(respond_data)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.EmployeeDoesNotExist,\n domain_exceptions.EmployeeIsATeamMember\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def create_individual(self):\n pass", "def main():\n name = input(\"Please enter in your name: \")\n\n \"\"\"Ask the user to enter a number if they are a Director, Manager or Staff.\"\"\"\n \"\"\"This will check and make sure the user only enters in 1,2, \n or 3 and a number greater than zero\"\"\"\n while True:\n try:\n designation_number = int(input(\"Please enter in \\n1 for Director \"\n \"\\n2 for Manager \\n3 for Staff\\n\"))\n if 0 < designation_number <= 3:\n break\n print(\"Invalid number entered.\")\n except Exception as e:\n print(e)\n \"\"\"Gets the user salary and makes sure is a number and greater than 0\"\"\"\n while True:\n try:\n salary = float(input(\"Please enter in your salary: \"))\n if salary <= 0:\n print(\"Your salary must be at least 1 dollar. Please enter a number greater than zero.\")\n else:\n break\n except ValueError:\n print(\"Oops! That was not a valid number. Try again...\")\n\n \"\"\"Create Employee\"\"\"\n employee1 = employee.Employee()\n employee1.set_name(name)\n employee1.set_designation(designation_number)\n employee1.set_salary(salary)\n print(employee1)", "def create():", "def create():", "def create_employee_structure(employees):\n employees_dict = {}\n for employee in position_sort(employees):\n if not employee.is_secretary:\n adder(employees_dict, employee.prosecutors_office, {'employees': [], 'departments': {}, 'divisions': {}})\n if employee.prosecutors_office and employee.department and employee.division:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'divisions', {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office and employee.department:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'employees', [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['employees'].append(employee)\n elif employee.prosecutors_office and employee.division:\n adder(employees_dict[employee.prosecutors_office]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office:\n employees_dict[employee.prosecutors_office]['employees'].append(employee)\n return employees_dict", "def make_employee_dict(names, ID_numbers, salaries, email_addresses):\r\n d = dict()\r\n for i in range(len(names)):\r\n d[ID_numbers[i]] = Employee(names[i], ID_numbers[i], salaries[i], email_addresses[i])\r\n return d" ]
[ "0.7340392", "0.7060474", "0.6856134", "0.66949964", "0.66828686", "0.6662708", "0.6607132", "0.66030127", "0.6332911", "0.6295497", "0.6276587", "0.62654424", "0.626206", "0.62513125", "0.62459964", "0.6240261", "0.6239734", "0.62260616", "0.6169861", "0.61182666", "0.60744625", "0.60650706", "0.60377353", "0.60375273", "0.60265577", "0.5954808", "0.59057224", "0.59057224", "0.5867052", "0.5863818" ]
0.7889773
0
test the attrs of City when set
def test_set_attrs(self):
    city2 = City()
    city2.name = "Hawaii"
    self.assertEqual(city2.name, "Hawaii")
    city2.state_id = "<3"
    self.assertEqual(city2.state_id, "<3")
    self.assertEqual(City.name, "")
    self.assertEqual(City.state_id, "")
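The last two assertions hold because assigning to an instance creates a shadowing instance attribute and leaves the class attribute alone. A standalone illustration of the same mechanism (assumes only the City model used in the test):

c = City()
c.name = "Hawaii"
assert c.name == "Hawaii"     # instance attribute shadows the class default
assert City.name == ""        # class-level default is untouched
assert "name" in c.__dict__   # the override lives on the instance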
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_attributes(self):\n self.assertTrue(hasattr(self.city, 'name'))\n self.assertTrue(hasattr(self.city, 'state_id'))", "def test_city(self):\n c = City()\n self.assertEqual(c.name, \"\")\n self.assertEqual(c.state_id, \"\")\n c.name = \"San Francisco\"\n c.state_id = \"98\"\n self.assertEqual(c.name, \"San Francisco\")\n self.assertEqual(c.state_id, \"98\")\n self.assertEqual(type(c.state_id), str)", "def test_city(self):\n my_city = City()\n self.assertTrue(hasattr(mycity, \"name\"))\n self.assertEqual(my_city.name, \"\")\n self.assertTrue(hasattr(mycity, \"state_id\"))\n self.assertEqual(my_city.state_id, \"\")", "def test_attr_type(self):\n self.my_city.state_id = \"1c5dd90a-a3df-4516-b1ac-32a8715e5539\"\n self.my_city.name = \"New York\"\n self.assertIsInstance(self.my_city.name, str)\n self.assertIsInstance(self.my_city.state_id, str)", "def test_set_attr(self):\n self.my_city.name = \"Denver\"\n self.assertEqual(self.my_city.name, \"Denver\")", "def test_attrs(self):\n city = City()\n self.assertEqual(city.name, \"\")\n self.assertEqual(City.name, \"\")\n self.assertEqual(city.state_id, \"\")\n self.assertEqual(City.state_id, \"\")\n self.assertIn(\"id\", city.__dict__)\n self.assertIn(\"created_at\", city.to_dict())\n self.assertIn(\"updated_at\", city.to_dict())", "def test_type_of_attributes(self):\n self.assertIsInstance(self.city.name, str)\n self.assertIsInstance(self.city.state_id, str)", "def test_city(self):\n self.assertIsInstance(self.address.city, str)\n self.assertEqual(self.address.city, \"Paris\")", "def testCityId(self):\n place = Place()\n self.assertTrue(hasattr(place, \"city_id\"))\n self.assertEqual(place.city_id, \"\")", "def test_city_id(self):\n place = Place()\n self.assertTrue(hasattr(place, \"city_id\"))\n self.assertEqual(type(place.city_id), str)\n self.assertEqual(place.city_id, \"\")", "def test_class_attributes(self):\n self.assertTrue('__tablename__' in City.__dict__)\n self.assertTrue('name' in City.__dict__)\n self.assertTrue('state_id' in City.__dict__)\n self.assertTrue('places' in City.__dict__)", "def test_state_id(self):\n c = City()\n self.assertTrue(hasattr(c, \"state_id\"))\n self.assertEqual(c.state_id, \"\")", "def set_City(self, value):\n super(AddressValidationInputSet, self)._set_input('City', value)", "def test_city(self):\n my_city = City()\n my_city.name = \"Medellin\"\n self.assertEqual(my_city.name, 'Medellin')", "def city(self, city):\n self._city = city", "def test_addr_city_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_city(input_val)\n self.assertEqual(output_val, self.line.addr_city)", "def setUp(self):\n self.my_city = City()", "def __init__(self, city):\r\n self.city = city", "def test_has_attr(self):\n\n self.assertTrue(hasattr(City, \"save\"))", "def test_required_city(self):\r\n self.url_params['city'] = 'New York'\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 200)\r\n obj = json.loads(response.content)\r\n self.assertTrue(obj['success'])", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def city(self, city):\n\n self._city = city", "def test_name(self):\n c = City()\n 
self.assertTrue(hasattr(c, \"name\"))\n self.assertEqual(c.name, \"\")" ]
[ "0.77947956", "0.74044555", "0.7307759", "0.7248982", "0.71909946", "0.71890724", "0.69839954", "0.68954945", "0.6838572", "0.6797316", "0.6596423", "0.6592114", "0.6484486", "0.64782625", "0.6418063", "0.6380564", "0.6375229", "0.62248665", "0.62208533", "0.61856556", "0.6180521", "0.6180521", "0.6180521", "0.6180521", "0.6180521", "0.6180521", "0.6180521", "0.6180521", "0.6180521", "0.6173024" ]
0.79602754
0
test the inheritance of City from BaseModel
def test_inheritance(self):
    city3 = City()
    self.assertIsInstance(city3, BaseModel)
    self.assertIsInstance(city3, City)
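isinstance walks the inheritance chain, so a single City instance satisfies both checks. A minimal equivalent outside the test class (assumes the same models):

city3 = City()
assert isinstance(city3, City)
assert isinstance(city3, BaseModel)   # True because City subclasses BaseModel
assert issubclass(City, BaseModel)    # class-level form of the same fact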
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inheritence(self):\n self.assertTrue(issubclass(City, BaseModel))", "def test_subclass(self):\n self.assertIsInstance(self.city, BaseModel)\n self.assertTrue(hasattr(self.city, \"id\"))\n self.assertTrue(hasattr(self.city, \"created_at\"))\n self.assertTrue(hasattr(self.city, \"updated_at\"))", "def test_is_subclass(self):\n c = City()\n self.assertIsInstance(c, BaseModel)\n self.assertTrue(hasattr(c, \"id\"))\n self.assertTrue(hasattr(c, \"created_at\"))\n self.assertTrue(hasattr(c, \"updated_at\"))", "def test_subclass(self):\n self.assertTrue(issubclass(self.place.__class__, BaseModel), True)", "def test_instance(self):\n self.assertIsInstance(self.my_city, City)", "def test_subclass_of_BaseModel(self):\n my_amenity = Amenity()\n self.assertIsInstance(my_amenity, BaseModel)\n self.assertTrue(hasattr(my_amenity, \"id\"))\n self.assertTrue(hasattr(my_amenity, \"created_at\"))\n self.assertTrue(hasattr(my_amenity, \"updated_at\"))", "def test_city(self):\n my_city = City()\n self.assertTrue(hasattr(mycity, \"name\"))\n self.assertEqual(my_city.name, \"\")\n self.assertTrue(hasattr(mycity, \"state_id\"))\n self.assertEqual(my_city.state_id, \"\")", "def test_is_subclass_place(self):\n place = Place()\n self.assertIsInstance(place, BaseModel)\n self.assertTrue(hasattr(place, \"id\"))\n self.assertTrue(hasattr(place, \"created_at\"))\n self.assertTrue(hasattr(place, \"updated_at\"))", "def test_init_without_kwargs(self):\n c = City()\n self.assertTrue('id' in c.__dict__)\n self.assertTrue('created_at' in c.__dict__)\n self.assertTrue('updated_at' in c.__dict__)\n self.assertIsInstance(c, BaseModel)", "def test_city(self):\n self.assertIsInstance(self.address.city, str)\n self.assertEqual(self.address.city, \"Paris\")", "def test_init(self, fixture_environment):\n\n # Generate city object\n city_object = cit.City(environment=fixture_environment)\n\n # Check inheritance from citydistrict object of pycity\n assert city_object._kind == 'citydistrict'", "def test_class(self):\n city1 = City()\n self.assertEqual(city1.__class__.__name__, \"City\")", "def test_subclass(self):\n inst = Amenity()\n self.assertIsInstance(inst, BaseModel)\n self.assertTrue(hasattr(inst, \"id\"))\n self.assertTrue(hasattr(inst, \"created_at\"))\n self.assertTrue(hasattr(inst, \"updated_at\"))", "def test_city(self):\n c = City()\n self.assertEqual(c.name, \"\")\n self.assertEqual(c.state_id, \"\")\n c.name = \"San Francisco\"\n c.state_id = \"98\"\n self.assertEqual(c.name, \"San Francisco\")\n self.assertEqual(c.state_id, \"98\")\n self.assertEqual(type(c.state_id), str)", "def test_type_of_attributes(self):\n self.assertIsInstance(self.city.name, str)\n self.assertIsInstance(self.city.state_id, str)", "def test_name(self):\n self.assertEqual(type(City.name), str)", "def test_attributes(self):\n self.assertTrue(hasattr(self.city, 'name'))\n self.assertTrue(hasattr(self.city, 'state_id'))", "def test_inheritance(self):\n self.assertTrue(issubclass(type(self.user_1), BaseModel))", "def test_city(self):\n my_city = City()\n my_city.name = \"Medellin\"\n self.assertEqual(my_city.name, 'Medellin')", "def test_class_attributes(self):\n self.assertTrue('__tablename__' in City.__dict__)\n self.assertTrue('name' in City.__dict__)\n self.assertTrue('state_id' in City.__dict__)\n self.assertTrue('places' in City.__dict__)", "def test_inherit(self):\n self.assertTrue(issubclass(User, BaseModel))", "def testCityId(self):\n place = Place()\n self.assertTrue(hasattr(place, \"city_id\"))\n self.assertEqual(place.city_id, 
\"\")", "def test_set_attrs(self):\n city2 = City()\n city2.name = \"Hawaii\"\n self.assertEqual(city2.name, \"Hawaii\")\n city2.state_id = \"<3\"\n self.assertEqual(city2.state_id, \"<3\")\n self.assertEqual(City.name, \"\")\n self.assertEqual(City.state_id, \"\")", "def test_attr_type(self):\n self.my_city.state_id = \"1c5dd90a-a3df-4516-b1ac-32a8715e5539\"\n self.my_city.name = \"New York\"\n self.assertIsInstance(self.my_city.name, str)\n self.assertIsInstance(self.my_city.state_id, str)", "def test_city_id(self):\n place = Place()\n self.assertTrue(hasattr(place, \"city_id\"))\n self.assertEqual(type(place.city_id), str)\n self.assertEqual(place.city_id, \"\")", "def test_type(self):\n self.assertEqual(type(self.base1), BaseModel)\n self.assertEqual(type(self.base2), BaseModel)", "def test_has_attr(self):\n\n self.assertTrue(hasattr(City, \"save\"))", "def test_has_attr(self):\n self.assertTrue(hasattr(City, \"save\"))", "def test_inherit(self):\n\n new_jawn = Amenity()\n self.assertIsInstance(new_jawn, BaseModel)", "def test_save(self):\n\n base_class = BaseModel()" ]
[ "0.8789952", "0.8498339", "0.8354064", "0.7497503", "0.7128531", "0.70219684", "0.700617", "0.698174", "0.6981573", "0.6797616", "0.6792511", "0.6769275", "0.6727983", "0.66833794", "0.6660511", "0.6656261", "0.65980506", "0.6585381", "0.6571345", "0.6492039", "0.64787066", "0.6450331", "0.64402336", "0.64303166", "0.6427531", "0.6416702", "0.64165545", "0.64051247", "0.6364319", "0.6359309" ]
0.86559975
1
check that fifo matches expected types and perms; catch security hole where it could be replaced with another file
def __checkFifo(path):
    pass  # FIXME implement
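One way the stub could be filled in -- a sketch under POSIX assumptions, not the project's actual check. lstat avoids following a symlink swapped in at the path, which is the replacement attack the query above describes:

import os
import stat

def __checkFifo(path):
    st = os.lstat(path)                      # do not follow symlinks
    if not stat.S_ISFIFO(st.st_mode):
        raise RuntimeError("%s is not a FIFO" % path)
    if st.st_uid != os.getuid():
        raise RuntimeError("%s is not owned by the current user" % path)
    if st.st_mode & (stat.S_IWGRP | stat.S_IWOTH):
        raise RuntimeError("%s is group/other writable: %o"
                           % (path, stat.S_IMODE(st.st_mode)))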
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vsys_fifo_exists(path):\n if not os.path.exists(path):\n collectd.error('File does not exist: %s' % path)\n return False\n if not stat.S_ISFIFO(os.stat(path).st_mode):\n collectd.error('File is not a fifo: %s' % path)\n return False\n return True", "def _check_fifo(self):\n if not os.path.exists(self.fifo_path):\n raise FifoIsNotAvailable(\"trying to load from session that it's fifo is deleted!\")", "def _verify_descriptors(self, msg):\n self.assertTrue(is_writable_file(msg.chlderr))\n self.assertTrue(is_writable_file(msg.chldout))\n self.assertTrue(is_writable_file(msg.chldnul))", "def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir,\"xxx\"), \"w\")\n except (OSError, IOError):\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEquals(f.tell(), 6)\n f.seek(0, 0)\n self.assertEquals(f.read(), \"abcdef\")\n log.close()", "def test_noPermission(self):\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n\n # change permissions so rotation would fail\n os.chmod(self.dir, 0o555)\n\n # if this succeeds, chmod doesn't restrict us, so we can't\n # do the test\n try:\n f = open(os.path.join(self.dir, \"xxx\"), \"w\")\n except OSError:\n pass\n else:\n f.close()\n return\n\n log.rotate() # this should not fail\n\n log.write(\"def\")\n log.flush()\n\n f = log._file\n self.assertEqual(f.tell(), 6)\n f.seek(0, 0)\n self.assertEqual(f.read(), b\"abcdef\")", "def test_provider_system_hook_file_chmod(change_dir, fix_file_perms):\n tackle(context_file='chmod.yaml', no_input=True)\n assert oct(os.stat('tackle.yaml').st_mode)[-3:] == \"600\"", "def _check_stream_writable(self, fe_commit):\n if not self._current_branch.stream_name:\n return\n prefix = self._current_branch.writable_stream_name + '/'\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n if depot_path.startswith(prefix):\n continue\n\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths not in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = self._current_branch.writable_stream_name\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)", "def test_files(*fns, test_nonzero=False, allow_pipes=False):\n\n for fn in fns:\n is_file = os.path.isfile(fn)\n is_pipe = pathlib.Path(fn).is_fifo()\n if allow_pipes:\n if not is_file or is_pipe:\n error('File \"{}\" does not exist.'.format(fn))\n else:\n if is_pipe:\n if not is_file:\n error('File \"{}\" is a process substitution or a device.'.format(fn))\n else:\n if not is_file:\n error('File \"{}\" does not exist.'.format(fn))\n\n if test_nonzero and not allow_pipes:\n if not file_sizes(fn)[0]:\n error('File \"{}\" has size 0.'.format(fn))", "def test_permissions(self):\n self.assertEqual(dir_perm, 0o2750)\n self.assertEqual(file_perm, 0o0440)", "def _unblock_open_fifo_operation(self) -> None:\n if os.path.exists(self._fifo_out_path):\n open(self._fifo_out_path, 'wb', buffering=0)\n if os.path.exists(self._fifo_in_path):\n open(self._fifo_in_path, 'rb', buffering=0)", "def any(self, 
fifo: int, /) -> bool:", "def test_provider_system_hook_file_remove(change_dir, fix_file_perms):\n o = tackle(context_file='remove.yaml', no_input=True)\n assert o['if_file']\n assert not o['not_file']\n assert o['if_files']\n assert not o['not_files']", "def test_is_special_file_socket(mocker: MockerFixture, tmp_path: Path) -> None:\n mocker.patch(\"stat.S_ISSOCK\", return_value=True)\n tmp_file = tmp_path / \"foo\"\n tmp_file.touch()\n assert is_special_file(tmp_file)", "def test_rotatePermissionFileNotOk(self):\n log = logfile.DailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n os.chmod(log.path, 0o444)\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)", "def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0o066)\n self.addCleanup(log1.close)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEqual(mode, 0o444)\n else:\n self.assertEqual(mode, 0o066)", "def _check_stream_in_classic(self, fe_commit):\n if self._current_branch.stream_name:\n return\n\n depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$')\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n m = depot_re.match(depot_path)\n if m:\n depot = m.group(1)\n if depot in self.stream_depots:\n stream = '//{}/{}'.format(m.group(1), m.group(2))\n human_msg = (\n _(\"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = stream\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)", "def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0066)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEquals(mode, 0444)\n else:\n self.assertEquals(mode, 0066)", "def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0", "def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0", "def test_modePreservation(self):\n open(self.path, \"w\").close()\n os.chmod(self.path, 0o707)\n mode = os.stat(self.path)[stat.ST_MODE]\n log = logfile.LogFile(self.name, self.dir)\n self.addCleanup(log.close)\n log.write(\"abc\")\n log.rotate()\n self.assertEqual(mode, os.stat(self.path)[stat.ST_MODE])", "def io_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, io.IOBase):\n name = type(var).__name__\n raise IOObjError(\n 'Function {} expected file-like object, {} got instead.'.format(func, name))", "def can_handle(file_io):\r\n raise NotImplementedError(\"Please implement this in your importer\")", "def test_provider_system_hook_file(change_dir, clean_files):\n tackle(no_input=True)\n assert 'thing.yaml' in os.listdir()\n assert 'stuff' in os.listdir()\n # If the file has been moved properly there should be only one file\n assert len(os.listdir('stuff')) == 3", "def test_file_unused(self):\n try:\n with get_temp_file() as (fd, name):\n pass\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n 
self.assertFalse(file_exists)", "def sort_permissions(fl):\n\n if oct(os.stat(fl).st_mode)[4:] != '666':\n os.chmod(fl, 0o666)", "def set_file_permissions(host, fqpath, perms):\n command = \"chmod %s %s\" % (perms, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chmod failed: %s' % rerr)\n return False", "def checkIfAllowedToModify(self):\n\n oldBytes = b''\n testFileName = self.MAPSTUDIO + self.inputFiles[0] + '.msb'\n\n with open(testFileName, 'rb') as oldf:\n oldBytes = oldf.read()\n\n # Try writing something to the file\n\n try:\n with open(testFileName, 'wb') as outf:\n outf.write(b'TESTINGIFICANWRITEINTOTHISFILE')\n except:\n return False\n\n # Because apparently for _some_ reason it doesn't throw an error sometimes(?) so we confirm if the file was actually modified\n\n newBytes = b''\n with open(testFileName, 'rb') as oldf:\n newBytes = oldf.read()\n\n if (oldBytes == newBytes):\n return False\n\n # Restore the file to normal\n\n with open(testFileName, 'wb') as outf:\n outf.write(oldBytes)\n\n oldBytes = b''\n newBytes = b''\n\n return True", "def test_command_edit_info_disk_type():\n # this is pathological, don't do this in real life\n def f(inputfile):\n with tempfile.NamedTemporaryFile() as tmp:\n shutil.copy(inputfile, tmp.name)\n\n # disk_type = 1 is ok\n wozardry.parse_args([\"edit\", \"-i\", \"disk_type:1\", tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[\"disk_type\"] == 1\n\n # disk_type = 2 is ok\n wozardry.parse_args([\"edit\", \"-i\", \"disk_type:2\", tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[\"disk_type\"] == 2\n\n # disk_type = 0 is not ok\n with pytest.raises(wozardry.WozINFOFormatError_BadDiskType):\n wozardry.parse_args([\"edit\", \"-i\", \"disk_type:0\", tmp.name])\n\n # disk_type = 3 is not ok\n with pytest.raises(wozardry.WozINFOFormatError_BadDiskType):\n wozardry.parse_args([\"edit\", \"-i\", \"disk_type:3\", tmp.name])\n f(kValid1)\n f(kValid2)", "def test_fifo_sync_random():\n pass", "def test_overwrite_corrupted_files(overwrite_on_tape_topology, core_config_mock, caches_mock):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=True, did2_corrupted=True)\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_receive(job_params):\n for job in (job_params if isinstance(job_params, list) else [job_params]):\n for file in job.get('files', []):\n if (file.get('file_metadata', {}).get('dst_type') == 'TAPE'\n and file.get('file_metadata', {}).get('dst_file', {}).get('file_on_tape') is not None):\n # Fake that dst_file metadata contains file_on_tape == True\n # As we don't really have tape RSEs in our tests, file_on_tape is always false\n file['file_metadata']['dst_file']['file_on_tape'] = True\n return job_params\n\n with patch('rucio.daemons.conveyor.poller.FTS3Transfertool', _FTSWrapper):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Both transfers must be marked as failed because the file size is incorrect\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit the failed requests. 
They must fail again, because overwrite_corrupted_files is False\n # 2 runs: for multihop, finisher works one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Set overwrite to True before running the poller or finisher\n core_config.set('transfers', 'overwrite_corrupted_files', True)\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit one more time. Now the destination file must be overwritten\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'" ]
[ "0.6131964", "0.60713536", "0.57345945", "0.56298536", "0.5624965", "0.56156325", "0.5565259", "0.5526661", "0.5506482", "0.5501304", "0.5494585", "0.5419358", "0.5417479", "0.5339467", "0.5335473", "0.5235633", "0.5192825", "0.51720446", "0.51720446", "0.51584595", "0.51447344", "0.5086432", "0.50669557", "0.5034095", "0.50339377", "0.5033747", "0.5032271", "0.5030001", "0.50275093", "0.5026844" ]
0.6538033
0
Returns all the grid positions that are currently available for scoring. YB is only available after YZ has been scored other than 0 or NB. NT and GT are never available for scoring. If 13 positions have been scored in the grid, no further positions can be filled.
def available_positions(self):
    if len([x for x in self.grid.values() if x[0] != None]) < 13:
        return [x for x in assignable_positions if self.grid[x][1] == "---"]
    else:
        return []
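The snippet filters on the "---" status but does not itself enforce the YB/YZ rule from the description. A sketch of how that gate could sit on top (the "YZ"/"YB" keys and the (score, status) pair layout are assumptions read off the snippet):

def available_positions(self):
    if len([x for x in self.grid.values() if x[0] != None]) >= 13:
        return []
    open_slots = [x for x in assignable_positions if self.grid[x][1] == "---"]
    # YB stays locked until YZ carries a real score (not 0, not "NB").
    if "YB" in open_slots and self.grid["YZ"][0] in (None, 0, "NB"):
        open_slots.remove("YB")
    return open_slots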
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def free_positions(self):\n positions = []\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n if self.grid[i][j] == 0:\n positions.append((i, j))\n if positions == []:\n raise GameException('Game Over. No free position left.')\n return positions", "def filled_positions(self):\n return [x for x in assignable_positions if self.grid[x][0]]", "def available_positions(self):\n available_positions = []\n for i in range(self.positions_count):\n if self.board[i] == 0:\n available_positions.append(i+1)\n return available_positions", "def collide_grid(self):\n topleft = self.absolute_collide_topleft\n bottomright = self.absolute_collide_bottomright\n tlx, tly = self.currentLevel.toGridCoord(topleft)\n brx, bry = self.currentLevel.toGridCoord(bottomright)\n collide_grid = []\n for x in range(tlx, brx+1):\n for y in range(tly, bry+1):\n collide_grid.append( (x,y) )\n if not collide_grid:\n collide_grid = [(tlx,tly)]\n return collide_grid", "def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)", "def get_allowed_positions(coordXY, grid):\n\n\tsurrounding_coord = []\n\ttesting_coord = []\n\n\t# Get the coordinates of the external square\n\tfor i in range(coordXY[0] - 1, coordXY[0] + 2, 2):\n\t\tfor j in range(coordXY[1] - 1, coordXY[1] +2, 1):\n\t\t\tif (i,j) == coordXY:\n\t\t\t\tpass\n\t\t\telif i < 0 or j < 0:\n\t\t\t\tsurrounding_coord.append('None')\n\t\t\telse:\n\t\t\t\tsurrounding_coord.append((i,j))\n\n\t# Get the coordinates of the internal square\n\tfor i in range(coordXY[0] - 2, coordXY[0] + 3, 4):\n\t\tfor j in range(coordXY[1] - 2, coordXY[1] + 3, 2):\n\t\t\tif i < 0 or j < 0 or i > 7 or j > 7:\n\t\t\t\ttesting_coord.append('None')\n\t\t\telse:\n\t\t\t\ttesting_coord.append((i,j))\n\n\t# Get the position of Bottom and Top of the 2 squares\n\tTC = [(coordXY[0], coordXY[1] + 2), (coordXY[0], coordXY[1] - 2)]\n\tfor elem in TC:\n\n\t\tif elem[0] not in range(8) or elem[1] not in range(8):\n\t\t\ttesting_coord.append('None')\n\t\telse:\n\t\t\ttesting_coord.append(elem)\n\n\n\tSC = [(coordXY[0], coordXY[1] + 1), (coordXY[0], coordXY[1] - 1)]\n\tfor elem in SC:\n\t\tif elem[0] not in range(8) or elem[1] not in range(8):\n\t\t\tsurrounding_coord.append('None')\n\t\telse:\n\t\t\tsurrounding_coord.append(elem)\n\n\treturn testing_coord, surrounding_coord", "def get_positions(self):\r\n null_pos, black_pos, white_pos = set(), set(), set()\r\n for pos in BOARD_POSITIONS:\r\n if self.state[pos[0]][pos[1]] == 0:\r\n null_pos.add(pos)\r\n elif self.state[pos[0]][pos[1]] == 1:\r\n black_pos.add(pos)\r\n else:\r\n white_pos.add(pos)\r\n return null_pos, black_pos, white_pos", "def available_moves(self):\n moves = []\n for x, y in self.available_boards:\n moves.extend([self.to_position(x, y, i, j) for (i, j)\n in self.boards[x][y].empty_squares])\n return moves", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def get_grid_locations(self, top_left, other_pos):\n cell_x = torch.floor(((other_pos[:, 0] - top_left[:, 0]) / self.neighborhood_size) *self.grid_size)\n\n # Added this part to implementation, otherwise the pooling is going to run into an indexing error\n cell_x[cell_x == 
self.grid_size] -= 1\n cell_y = torch.floor(((top_left[:, 1] - other_pos[:, 1]) / self.neighborhood_size) *self.grid_size)\n cell_y[cell_y == self.grid_size] -= 1\n grid_pos = cell_x + cell_y * self.grid_size\n\n return grid_pos", "def checked_positions():\n for base_position in chain([me.shipyard], me.get_dropoffs()):\n x_shipyard = base_position.position.x\n y_shipyard = base_position.position.y\n for x in range(-search_range, search_range):\n for y in range(-search_range, search_range):\n yield hlt.Position(\n x=x_shipyard + x,\n y=y_shipyard + y)", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def available_spots(self):\n occupied_tiles = self.board.keys()\n neighbors = lambda x, y: ((x+1, y), (x-1, y), (x, y+1), (y, y-1))\n tiles_near_occupied = set(neighbor for tile in occupied_tiles\n for neighbor in neighbors(*tile))\n unnoccupied_titles_near_occupied = tiles_near_occupied - set(occupied_tiles)\n return unnoccupied_titles_near_occupied", "def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")", "def get_grid(self, grid_idx):\n end_idx = self.sample_idx[grid_idx]\n start_idx = self.sample_idx[grid_idx-1] if grid_idx != 0 else 0\n grid = self.im_data[start_idx]\n label = self.label_data[start_idx:end_idx]\n state = self.state_data[start_idx:end_idx]\n goal = self.find_goal(grid[1])\n return grid, state, label, goal", "def get_grid_coords(self, count, boundry_x, boundry_y, grid_size):\n\n coords = []\n\n boundry_x = int(boundry_x/10)\n boundry_y = int(boundry_y/10)\n\n while len(coords) < count:\n seed()\n\n\n x = randint(-boundry_x, boundry_x)\n y = randint(-boundry_y, boundry_y)\n\n if len(coords) == 0:\n coords.append((x*grid_size, y*grid_size))\n else:\n for coord in coords:\n if (x not in range(coord[0]-buffer*grid_size, coord[0]+buffer*grid_size)) and (y not in range(coord[1]-buffer, coord[1]+buffer)):\n pass\n else:\n break", "def find_excited_locations(self):\n return np.asarray(np.where(self._grid == 8)).T", "def _get_gpos ( self ):\n bpos = mgrid[self.x_min:self.x_max:self.nxsteps*1j, \\\n self.y_min:self.y_max:self.nysteps*1j, \\\n self.z_min:self.z_max:self.nzsteps*1j]\n bpos.resize((3, self.size))\n return bpos", "def available_boards(self):\n if self.state != State.IN_PROGRESS:\n return []\n if self.last_move is None:\n return self.active_boards\n x, y = self.last_move[-2:]\n if self.boards[x][y].state == State.IN_PROGRESS:\n return [(x, y)]\n return self.active_boards", "def get_grid(loc=GRID_LOC):\n\n sref = ncepgrib2.Grib2Decode(loc, gribmsg=False)\n lats, lons = sref[0].grid()\n\n return lats, lons", "def get_agent_positions_in_grid(agent, grid):\n grid_position_list = []\n\n for position in agent.positions:\n for (grid_row, grid_row_index) in zip(grid, range(len(grid))):\n grid_col_index = _get_position_grid_column(position, grid_row)\n if grid_col_index or grid_col_index == 0:\n grid_position_list.append(Position(grid_col_index, grid_row_index))\n break\n\n return grid_position_list", "def getCoordinates(self):\n return 
list(self.gridVars.keys())", "def get_available_moves(self, board):\n available_moves = []\n for fieldx in range(len(board)):\n column = []\n for fieldy in range(len(board)):\n legit_move = board[self.posy][self.posx].is_valid_move(board, fieldx, fieldy)\n column.append(legit_move)\n available_moves.append(column)\n return available_moves", "def get_played_positions(board):\n return np.argwhere(board.state != -1)", "def _get_gpos ( self ):\n bpos = mgrid[self.x_min:self.x_max:self.nxsteps*1j, \\\n self.y_min:self.y_max:self.nysteps*1j, \\\n self.z:self.z+0.1]\n bpos.resize((3, self.size))\n return bpos", "def get_all_spawnable_cells(self):\n spawnable_positions = []\n\n for i in range(self.grid.width):\n for j in range(self.grid.height):\n n_list = self.grid.get_cell_list_contents([(i, j)])\n\n if len(n_list) <= 0:\n spawnable_positions.append((i, j))\n elif len(n_list) > 0:\n n = n_list[0]\n if not any(map(lambda t: isinstance(n, t), self.not_spawnable_objects)):\n spawnable_positions.append((i, j))\n\n return spawnable_positions", "def get_neighbors(self):\n return list(map(self.game.square, [self.position - self.game.rules[\"row_len\"], self.position + 1, self.position + self.game.rules[\"row_len\"], self.position - 1]))", "def getGridPoints(x, y, robot):\r\n roundedGrid = (round(x), round(y))\r\n total_radius = (robot.RADIUS + robot.BALL_RADIUS) / robot.grid.scale\r\n scanAmount = math.ceil(total_radius)\r\n scan = range(-scanAmount, scanAmount + 1)\r\n corners = ((0, 0), (0, 1), (1, 1), (1, 0))\r\n points = []\r\n for i in scan:\r\n for j in scan:\r\n for corner in corners:\r\n newX = roundedGrid[0] + i + corner[0]\r\n newY = roundedGrid[1] + j + corner[1]\r\n if grid_distance(newX, newY, x, y) < total_radius:\r\n points.append((newX, newY))\r\n\r\n return points", "def boarderPosition(self, gameState):\n if gameState.isOnRedTeam(self.index):\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i,j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not gameState.hasWall(i[0],i[1]):\n validPositions.append(i)\n return validPositions" ]
[ "0.6839561", "0.6820858", "0.6375092", "0.6179692", "0.6175604", "0.61393595", "0.6134617", "0.6129079", "0.61183804", "0.6112216", "0.6101577", "0.6083837", "0.5964636", "0.59512347", "0.58837503", "0.5817103", "0.5804341", "0.5783051", "0.5754948", "0.57523113", "0.5745287", "0.5742185", "0.5738482", "0.57205856", "0.5703979", "0.56788343", "0.56679934", "0.56586814", "0.5656796", "0.56427264" ]
0.72613794
0
Returns all the grid positions that have been filled in.
def filled_positions(self):
    return [x for x in assignable_positions if self.grid[x][0]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def free_positions(self):\n positions = []\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n if self.grid[i][j] == 0:\n positions.append((i, j))\n if positions == []:\n raise GameException('Game Over. No free position left.')\n return positions", "def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def get_empty_positions(self):\n\n empty_positions = []\n\n for i in range(self._dimension):\n for j in range(self._dimension):\n if self._board[i][j] == ' ':\n empty_positions.append((i, j))\n\n return empty_positions", "def available_positions(self):\n if len([x for x in self.grid.values() if x[0] != None]) < 13:\n return [x for x in assignable_positions if self.grid[x][1] == \"---\"]\n else:\n return []", "def available_positions(self):\n available_positions = []\n for i in range(self.positions_count):\n if self.board[i] == 0:\n available_positions.append(i+1)\n return available_positions", "def get_empty_board_indecies(self):\n empty_indecies = []\n for row_num in range(len(self.board)):\n for col_num in range(len(self.board)):\n if self.board[row_num][col_num] and self.board[row_num][col_num].state == PegState.EMPTY:\n empty_indecies.append((row_num, col_num))\n return empty_indecies", "def get_empty_squares(self):\n empty = []\n for row in range(self._dim):\n for col in range(self._dim):\n if self._board[row][col] == EMPTY:\n empty.append((row, col))\n return empty", "def get_empty_square(self) -> list:\n empty_square = []\n for line_index in range(len(self.grid)):\n for col_index in range(len(self.grid[line_index])):\n if self.grid[line_index][col_index].color is None:\n empty_square.append((line_index, col_index))\n\n return empty_square", "def get_all_pieces(self):\n occupied = []\n for pieces in self.piece_locs.values():\n occupied += pieces\n return occupied", "def findEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return [x,y]", "def get_positions(self):\r\n null_pos, black_pos, white_pos = set(), set(), set()\r\n for pos in BOARD_POSITIONS:\r\n if self.state[pos[0]][pos[1]] == 0:\r\n null_pos.add(pos)\r\n elif self.state[pos[0]][pos[1]] == 1:\r\n black_pos.add(pos)\r\n else:\r\n white_pos.add(pos)\r\n return null_pos, black_pos, white_pos", "def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")", "def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles", "def fill_grid_np(self):\n\n self.grid_np = [None for i in range(GRID_HEIGHT*GRID_HEIGHT*MAX_CELL_SIZE)]\n grid = self.grid_np\n # cell_size = self.cell_size\n for obj in self.levels[self.curient_level].objects:\n obj.position_grid[X], obj.position_grid[Y] = get_grid_xy(obj.position_np, ZOMBIE_SIZE)\n x, y = obj.position_grid[X], obj.position_grid[Y]\n 
grid[y*GRID_WIDTH + x] = obj\n # if cell_size[y*GRID_WIDTH + x] < MAX_CELL_SIZE:\n # cell_size[y*GRID_WIDTH + x] += 1", "def grid_coords(self):\n return [(x, y) for y in range(self.height) for x in range(self.width)]", "def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def get_start_grid(cols=4, rows=4):\n grid = [[0]*cols for i in range(rows)]\n for i in range(2):\n empties = get_empty_cells(grid)\n y,x = random.choice(empties)\n grid[y][x] = 2 if random.random() < 0.9 else 4\n return grid", "def get_empty_cells(grid):\n empty = []\n for j,row in enumerate(grid):\n for i,val in enumerate(row):\n if not val:\n empty.append((j,i))\n return empty", "def get_empty_cells(grid):\n\tempty = []\n\tfor j,row in enumerate(grid):\n\t\tfor i,val in enumerate(row):\n\t\t\tif not val:\n\t\t\t\tempty.append((j,i))\n\treturn empty", "def full_board(self):\n board = [[0] * self.rows for _ in range(self.cols)]\n for x in range(0, self.rows):\n for y in range(0, self.cols):\n if Location.objects.filter(board=self, col=x, row=y).exists():\n board[x][y-1] = '1'\n return board", "def getStartSpots(self):\n spots = []\n if self.index == 0:\n startRow = 1\n endRow = 4\n if self.index == 1:\n startRow = 6\n endRow = 9\n for row in range(startRow, endRow):\n for col in range(1,9):\n spots += [(col, row)]\n return spots", "def fill_grid(self):\n\n for row_margin, row in enumerate(range(self.rows)):\n self.grid.append([])\n\n for col_margin, col in enumerate(range(self.cols)):\n x = col*self.cell_size + col_margin\n y = row*self.cell_size + row_margin\n\n rect = pygame.Rect(x, y, self.cell_size, self.cell_size)\n\n cell = Cell(row, col, rect)\n\n if row == 7 and col == 3:\n cell.root = True\n self.root = cell\n elif row == 7 and col == 16:\n cell.goal = True\n self.goal = cell\n\n self.grid[row].append(cell)", "def __FreeTiles(self, grid, log=False):\n\n x_pos, _ = np.where(grid == 0)\n return len(x_pos)", "def get_visible_cells(self):\r\n ux, uy = self.GetScrollPixelsPerUnit()\r\n sx, sy = self.GetViewStart()\r\n w, h = self.GetGridWindow().GetClientSize().Get()\r\n sx *= ux\r\n sy *= uy\r\n start_col = self.XToCol(sx)\r\n start_row = self.YToRow(sy)\r\n end_col = self.XToCol(sx + w, True)\r\n end_row = self.YToRow(sy + h, True)\r\n return start_row, end_row, start_col, end_col", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def get_empty_cells(state):\n cells = []\n for row_index, row in enumerate(state.board):\n for col_index, cell in enumerate(row):\n if cell == 0:\n cells.append([row_index, col_index])\n return cells", "def find_empty(puzzle):\r\n empty_squares = []\r\n for y in range(len(puzzle.squares)):\r\n for x in range(len(puzzle.squares[0])):\r\n if puzzle.squares[y][x].is_editable() is True:\r\n empty_squares.append((x, y))\r\n return empty_squares", "def get_empty_cells(self):\n empty_cells = []\n for cell_row in self.board:\n for current_cell in cell_row:\n if current_cell is not None:\n if current_cell.get_cell_state() == 0:\n empty_cells.append(current_cell)\n return empty_cells", "def find_empty(self):\n num_rows = len(self.board)\n num_cols = len(self.board[0])\n\n for i in range(num_rows):\n for j in range(num_cols):\n if self.board[i][j] == 0:\n return (i, j)" ]
[ "0.77568364", "0.723596", "0.7119489", "0.70678014", "0.7052116", "0.69976157", "0.68781596", "0.68656486", "0.68310523", "0.6827777", "0.67618763", "0.66904217", "0.6631231", "0.65709645", "0.65524876", "0.65520054", "0.65291786", "0.65274036", "0.6515479", "0.6496653", "0.6454438", "0.6445513", "0.6435733", "0.64186096", "0.64111537", "0.64053166", "0.6393756", "0.63700914", "0.63662046", "0.63583726" ]
0.80966496
0
Assigns a tuple to a position in the grid, of the form (hand, score of this hand for this position)
def assign(self, hand, position):
    assert isinstance(hand, h.Hand)
    # print "POSITION:", position
    # print self
    try:
        assert self.grid[position][1] == "---"
    except AssertionError:
        raise FilledInError
    self.grid[position] = (hand, self.score(hand, position))
    self.update_totals()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score_tuple( hand ):\n m = matches(hand)\n #print( m )\n #royal_flush -- a special case of straight flush.\n if flush(hand) and straight(hand) and hand[4].rank == 14:\n return (8, hand[4].rank, 0)\n #straight_flush\n elif flush(hand) and straight(hand):\n return (8, hand[4].rank, 0)\n #four_of_a_kind\n elif len(m) == 2 and m[0].count == 4:\n return (7, m[0].card.rank, 0)\n #full_house\n elif len(m) == 2 and m[0].count == 3 and m[1].count == 2:\n return (6, m[0].card.rank, m[1].card.rank)\n #flush\n elif flush(hand):\n return (5, hand[4].rank, 0)\n #straight\n elif straight(hand):\n return (4, hand[4].rank, 0)\n #three_of_a_kind\n elif len(m) == 3 and m[0].count == 3:\n return (3, m[0].card.rank, 0)\n #two_pair\n elif len(m) == 3 and m[0].count == 2 and m[1].count == 2:\n return (2, m[0].card.rank, m[1].card.rank)\n #one_pair\n elif len(m) == 4 and m[0].count == 2 and m[1].count == 1:\n return (1, m[0].card.rank, m[1].card.rank)\n # Simple high card. Is this adequate? We'll know if we get ties.\n else:\n return (0, hand[4].rank, 0) # or (0, m[0].card.rank, 0)", "def set(self,row,col,value):\r\n self.puzzle[row][col] = value\r\n print(\"Entered value \",value)\r\n if self.puzzle[row][col] == self.rows[row][col]:\r\n self.score = self.score+5\r\n else:\r\n self.score = self.score-5", "def mark(board, player, row, col):\r\n pass", "def put(self, choice, token):\n x, y = choice\n self.grid[x][y] = token", "def make_move(board, picked_column, player):\n row = find_first_free_cell(board, picked_column)\n board[row][picked_column] = player\n return board, row", "def mark(board, player, row, col):\n pass", "def get_score(location, grid, shape):", "def place_marker(board, score, marker, position):\n board[position] = marker\n inc = 0\n if marker == \"X\":\n inc = 1\n else:\n inc = -1\n if position == 7:\n score[0] += inc\n score[3] += inc\n score[6] += inc\n elif position == 8:\n score[0] += inc\n score[4] += inc\n elif position == 9:\n score[0] += inc\n score[5] += inc\n score[7] += inc\n elif position == 4:\n score[1] += inc\n score[3] += inc\n elif position == 5:\n score[1] += inc\n score[4] += inc\n score[6] += inc\n score[7] += inc\n elif position == 6:\n score[1] += inc\n score[5] += inc\n elif position == 1:\n score[2] += inc\n score[3] += inc\n score[7] += inc\n elif position == 2:\n score[2] += inc\n score[4] += inc\n elif position == 3:\n score[2] += inc\n score[5] += inc\n score[6] += inc", "def mm_move(board, player):\r\n if board.check_win() == provided.PLAYERX:\r\n return SCORES[provided.PLAYERX],(-1,-1)\r\n elif board.check_win() == provided.PLAYERO:\r\n return SCORES[provided.PLAYERO],(-1,-1)\r\n elif board.check_win() == provided.DRAW:\r\n return SCORES[provided.DRAW],(-1,-1)\r\n else:\r\n empty_tuple_list = board.get_empty_squares()\r\n score_pos_tuple_list = []\r\n best_score = None\r\n best_pos = None\r\n for idx1 in range(len(empty_tuple_list)):\r\n empty_tuple = empty_tuple_list[idx1]\r\n board_clone = board.clone()\r\n board_clone.move(empty_tuple[0],empty_tuple[1],player)\r\n score_pos_tuple = mm_move(board_clone,provided.switch_player(player))\r\n score_pos_tuple_list.append(score_pos_tuple)\r\n\r\n #decide best score and pos fast!!!\r\n if score_pos_tuple[0]*SCORES[player] == 1:\r\n return (score_pos_tuple[0],empty_tuple)\r\n\r\n #decide best score and pos\r\n for idx2 in range(len(score_pos_tuple_list)):\r\n if idx2 == 0:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n else:\r\n if score_pos_tuple_list[idx2][0]*SCORES[player] 
> best_score*SCORES[player]:\r\n best_score = score_pos_tuple_list[idx2][0]\r\n best_pos = empty_tuple_list[idx2]\r\n\r\n return (best_score,best_pos)", "def guess(self, row, col) -> Tuple[int, Optional[ship.Ship]]:\n my_ship: ship.Ship = self._board_matrix[row][col]\n\n # if my_ship is None the guess is a miss, otherwise its a hit\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.guess, just copy the code over\n\n # --------- END YOUR CODE ----------", "def setPiece(self, column, rank, piece):\n try:\n self.values[int(int(rank)-1)*8+self.getColIdx(column)] = piece\n except:\n print column, rank\n rospy.loginfo(\"setPiece: invalid row/column\")", "def setPiece(self, column, rank, piece):\n try:\n self.values[int(int(rank)-1)*8+self.getColIdx(column)] = piece\n except:\n print column, rank\n rospy.loginfo(\"setPiece: invalid row/column\")", "def loc_from_tuple(self, coords):\n self.x, self.y = coords", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def change_position(board: Board, position: Position, character: str) -> Board:\n board = list(board)\n \n row = board[position[0]]\n new_row = row[:position[-1]] + character + row[position[-1] + 1:]\n board[position[0]] = new_row\n\n board = tuple(board) \n\n return board", "def set_position(self, position):\n self.position = tuple(position)", "def __init__(self, pos, score=0):\n self.__pos = pos\n self.__score = score", "def set_game_params(self, board):\n self.board = np.copy(board)\n self.board_min_len = np.min(len(board))\n\n tmp_player_pos = np.where(board == 1)\n self.player_pos = (tmp_player_pos[0][0], tmp_player_pos[1][0])\n tmp_rival_pos = np.where(board == 2)\n self.rival_pos = (tmp_rival_pos[0][0], tmp_rival_pos[1][0])", "def position(self, value):\n if type(value) is not tuple or len(value) != 2 or \\\n type(value[0]) is not int or value[0] < 0 or \\\n type(value[1]) is not int or value[1] < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value", "def swap_tile(grid: tuple[int, ...], move: int) -> tuple[int, ...]:\n tile_to_swap: int = grid.index(0) + move\n value_to_swap: int = grid[tile_to_swap]\n\n mutable_grid: list[int] = list(grid)\n mutable_grid[grid.index(0)] = value_to_swap\n mutable_grid[tile_to_swap] = 0\n swapped_grid = tuple(mutable_grid)\n\n return swapped_grid", "def __init__(self):\n self.played_pos = []\n self.grid = [['-', '-', '-'],\n ['-', '-', '-'],\n ['-', '-', '-']]\n self.player_played_pos = {'p1': set(), 'p2': set()}", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def getGameState(self):\n ### Student code goes here\n row1 = [-1, -1, -1]\n row2 = [-1, -1, -1]\n row3 = [-1, -1, -1]\n for i in self.kb.kb_ask(parse_input(\"fact: (pos ?t ?px ?py\")):\n if str(i.bindings_dict['?t'])=='empty':\n t = -1\n else:\n t = int(i.bindings_dict['?t'][4])\n xpx = int(i.bindings_dict['?px'][3])\n xpy = int(i.bindings_dict['?py'][3])\n if xpy == 1:\n row1[xpx-1] = t\n elif xpy == 2:\n row2[xpx-1] = t\n elif xpy == 3:\n row3[xpx-1] = t\n return tuple((tuple(row1),tuple(row2),tuple(row3)))", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;", "def position(self, value):\n if type(value) is not tuple or len(value) != 2 \\\n or type(value[0]) is not int or type(value[1]) is not int \\\n or value[0] < 
0 or value[1] < 0:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n else:\n self.__position = value", "def position(square):\n first = square[0]\n second = square[1]\n col = parseCol(first)\n row = parseRow(second)\n return (row, col)", "def score(self, hand, position):\n\n try:\n assert self.grid[position][1] == \"---\"\n except AssertionError:\n print self\n print position\n raise FilledInError\n except KeyError:\n print \"\\nCheck your code. This is not a valid position:\", position, \"\\n\"\n raise\n\n if position.startswith(\"n\"): # Return sum of relevant number\n n = int(position[1])\n return sum(d for d in hand.dice if d == n)\n\n elif position in [\"k3\", \"k4\", \"ch\"]: # Return total sum\n if position == \"k3\" and hand.max_tally()[0] < 3:\n return 0 # The is not a three of a kind\n elif position == \"k4\" and hand.max_tally()[0] < 4:\n return 0 # The is not a four of a kind\n return sum(hand.dice)\n\n elif position in [\"fh\", \"ss\", \"ls\", \"yz\", \"yb\"]: # Return fixed score\n if position == \"fh\":\n tallies = hand.get_dicedict().values()\n if 1 in tallies:\n return 0 # This is not a full house\n\n elif position in [\"ss\", \"ls\"]:\n ds = \"\".join(str(x) for x in hand.sort_by_value())\n if position == [\"ss\"]:\n if \"1234\" not in ds and \"2345\" not in ds and \"3456\" not in ds:\n return 0\n else:\n if \"12345\" not in ds and \"23456\" not in ds:\n return 0\n\n else:\n if hand.max_tally()[0] < 5:\n return 0 # This is not a yahtzee\n if position == \"yb\" and self.grid[\"yz\"] == \"---\":\n return 0 # YB only scores points if there already is a YZ\n\n return fixed_scores[position]\n\n else:\n raise InvalidPositionError", "def __getitem__(self, item):\n if type(item) != int:\n raise TypeError('Grid index must be int, not {}'.format(type(item)))\n if not 0 <= item < len(self):\n raise IndexError('Grid index out of range')\n point = []\n for i in range(len(self.sizes)):\n point.append(item % self.sizes[i])\n item //= self.sizes[i]\n return tuple(point)", "def _insert_piece(cls, board, coord, piece):\n rank = board[coord[0]][:coord[1]] + (piece,) + board[coord[0]][coord[1]+1:]\n return board[:coord[0]] + (rank,) + board[coord[0]+1:]" ]
[ "0.6446688", "0.60046214", "0.5860571", "0.5825233", "0.57931954", "0.5780548", "0.57786965", "0.5750296", "0.57402545", "0.57364994", "0.5714486", "0.5714486", "0.57086986", "0.5672831", "0.5660482", "0.5629688", "0.5591937", "0.5587683", "0.55868065", "0.55848455", "0.5583773", "0.5582667", "0.5582667", "0.55697054", "0.5556777", "0.55557", "0.5551011", "0.55397934", "0.5536432", "0.5532568" ]
0.7689742
0
Aggregate the statistics of a log dict
def aggregate_log_dict(agg_dict, new_dict) -> dict:
    for k in new_dict:
        # init new if not present
        if k not in agg_dict:
            agg_dict[k] = {
                'n': 0,
                'sum': 0.0,
                'max': new_dict[k],
                'min': new_dict[k],
            }
        # aggregate
        agg_dict[k]['n'] += 1
        agg_dict[k]['sum'] += new_dict[k]
        agg_dict[k]['max'] = max(new_dict[k], agg_dict[k]['max'])
        agg_dict[k]['min'] = min(new_dict[k], agg_dict[k]['min'])
    # TODO: add more stats (e.g. stdev, max, min in the future)
    return agg_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aggregate_logging_outputs(logging_outputs):\n cider_sum = sum(log.get('cider', 0) for log in logging_outputs)\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n sc_loss_sum = sum(log.get('sc_loss', 0) for log in logging_outputs)\n n_pos_sum = sum(log.get('n_positive', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n\n # print('| n_pos_sum: {} | nsentences: {}'.format(n_pos_sum, nsentences))\n agg_output = {\n 'cider': cider_sum / nsentences,\n 'loss': loss_sum / sample_size / math.log(2),\n 'sc_loss': sc_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.,\n 'n_positive_rate': n_pos_sum / nsentences,\n 'ntokens': ntokens,\n 'nsentences': nsentences,\n 'sample_size': sample_size,\n }\n return agg_output", "def aggregate_logging_outputs(logging_outputs):\n loss = sum(log.get('loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n acc = sum(log.get('acc', 0) for log in logging_outputs)\n\n agg_output = {\n 'loss': loss / sample_size / math.log(2),\n 'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / sample_size / math.log(\n 2) if ntokens > 0 else 0.,\n 'ntokens': ntokens,\n 'nsentences': nsentences,\n 'sample_size': sample_size,\n 'acc': acc / sample_size,\n }\n\n if 'mlm_loss' in logging_outputs[0]:\n mlm_loss = sum(log.get('mlm_loss', 0) for log in logging_outputs)\n mlm_acc = sum(log.get('mlm_acc', 0) for log in logging_outputs)\n agg_output['mlm_loss'] = mlm_loss / sample_size / math.log(2)\n agg_output['mlm_acc'] = mlm_acc / sample_size\n return agg_output", "def aggregate_logging_outputs(logging_outputs):\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n downstream_loss = sum(log.get('downstream_loss', 0) for log in logging_outputs)\n bleu_stats = reduce(utils.reduce_bleu_stats,\n [log.get('bleu', utils.get_zero_bleu_stats()) for log in logging_outputs])\n intermediate_loss = defaultdict(lambda: AverageMeter())\n all_loss_dicts = [log.get('intermediate_loss', defaultdict(lambda: AverageMeter())) for log in logging_outputs]\n # Aggregate\n for single_dict in all_loss_dicts:\n for key, value in single_dict.items():\n intermediate_loss[key] = utils.reduce_average_meter(intermediate_loss[key], value)\n # same for bleu\n intermediate_bleu = defaultdict(lambda: AverageMeter())\n all_bleu_dicts = [log.get('intermediate_bleu', defaultdict(lambda: AverageMeter()))\n for log in logging_outputs]\n # Aggregate\n for single_dict in all_bleu_dicts:\n for key, value in single_dict.items():\n intermediate_bleu[key] = utils.reduce_average_meter(intermediate_bleu[key], value)\n agg_output = {'loss': loss_sum / float(sample_size), 'sample_size': sample_size, 'ntokens': ntokens,\n 'nsentences': nsentences, 'downstream_loss': downstream_loss / float(sample_size),\n 'bleu': bleu_stats, **intermediate_loss, **intermediate_bleu}\n return agg_output", "def aggregate_logging_outputs(logging_outputs):\n loss_sum = sum(log.get('loss', 0) for log in 
logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n agg_output = {\n 'loss': loss_sum,\n 'ntokens': ntokens,\n 'nsentences': nsentences,\n 'sample_size': sample_size,\n }\n if sample_size != ntokens:\n agg_output['nll_loss'] = loss_sum / math.log(2)\n return agg_output", "def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]", "def aggregate_logging_outputs(logging_outputs):\n loss_sum = sum(log.get(\"loss\", 0) for log in logging_outputs)\n nll_loss_sum = sum(log.get(\"nll_loss\", 0) for log in logging_outputs)\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n nsentences = sum(log.get(\"nsentences\", 0) for log in logging_outputs)\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n return {\n \"loss\": loss_sum / sample_size if sample_size > 0 else 0.0,\n \"nll_loss\": nll_loss_sum / ntokens / math.log(2),\n \"ntokens\": ntokens,\n \"nsentences\": nsentences,\n \"sample_size\": sample_size,\n }", "def aggregate_logging_outputs(logging_outputs):\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n return {\n 'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2) if sample_size > 0 else 0.,\n 'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens / math.log(2) if ntokens > 0 else 0.,\n 'ntokens': ntokens,\n 'nsentences': nsentences,\n 'sample_size': sample_size,\n }", "def aggregate_logging_outputs(logging_outputs):\n # assert len(logging_outputs) == 1\n log = logging_outputs[0]\n loss = log.get('loss', 0)\n ntokens = log.get('ntokens', 0)\n batch_sizes = log.get('nsentences', 0)\n sample_size = log.get('sample_size', 0)\n agg_output = {\n 'loss': loss,\n 'ntokens': ntokens,\n 'nsentences': batch_sizes,\n 'sample_size': sample_size,\n }\n return agg_output", "def log_dict(self, source, agg=\"auto\", group=None):\n for key, val in source.items():\n if isinstance(val, dict):\n sub_group = key if group is None else group+\"->\"+key\n self.log_dict(val, agg=agg, group=sub_group)\n else:\n self.log(key, val, group=group, agg=agg)", "def means(self) -> dict:\n return {k: 0 if not vs else sum(vs) / len(vs) for k, vs in self.logs.items()}", "def _aggregate_traj_stats(traj_stats_a, traj_stats_b):\n merged_stats = {}\n for k in traj_stats_a:\n n_a, avg_a, M2_a = traj_stats_a[k][\"n\"], traj_stats_a[k][\"mean\"], traj_stats_a[k][\"sqdiff\"]\n n_b, avg_b, M2_b = traj_stats_b[k][\"n\"], traj_stats_b[k][\"mean\"], traj_stats_b[k][\"sqdiff\"]\n n = n_a + n_b\n mean = (n_a * avg_a + n_b * avg_b) / n\n delta = (avg_b - avg_a)\n M2 = M2_a + M2_b + (delta ** 2) * (n_a * n_b) / n\n merged_stats[k] = dict(n=n, mean=mean, sqdiff=M2)\n return merged_stats", "def aggregate_metrics(metrics):\n if len(metrics) == 1:\n return metrics[0]\n 
else:\n agg_metrics = metrics[0]\n for metric in agg_metrics.keys():\n vals = [x[metric] for x in metrics]\n agg_metrics[metric] = [np.mean(vals), np.std(vals)]\n return agg_metrics", "def aggregate_results(self, results):\n result = dict()\n result['MAE'] = self.average_dict_items(results, 'MAE')\n result['MdAE'] = self.average_dict_items(results, 'MdAE')\n result['RMSE'] = self.average_dict_items(results, 'RMSE')\n result['SMAPE'] = self.average_dict_items(results, 'SMAPE')\n result['num_values'] = self.average_dict_items(results, 'num_values')\n return result", "def get_total_counts(self):\n ret = {}\n all_loggers_count = 0\n for logger, name_map in self.acc_map.items():\n cur_logger_count = 0\n ret[logger.name] = {}\n for name, status_map in name_map.items():\n cur_name_count = 0\n ret[logger.name][name] = {}\n for status, acc in status_map.items():\n cur_count = acc.total_count\n ret[logger.name][name][status] = cur_count\n cur_name_count += cur_count\n cur_logger_count += cur_count\n all_loggers_count += cur_count\n ret[logger.name][name]['__all__'] = cur_name_count\n ret[logger.name]['__all__'] = cur_logger_count\n ret['__all__'] = all_loggers_count\n return ret", "def extract_test_log(self, log: dict) -> dict:\n test_log = self.extract_generic_log(log, 'test')\n test_log['adv_acc'] = (log['correct_adv'] / log['total']) * 100.0\n\n return test_log", "def sum_dstats(self, stats, smetrics):\n avg = {}\n\n for disk, metrics in stats.iteritems():\n for mname, metric in metrics.iteritems():\n if mname not in smetrics:\n continue\n if mname in avg:\n avg[mname] += metric\n else:\n avg[mname] = metric\n\n return avg", "def summaries(e_dict, m_dict):\n for key, value in m_dict.items():\n e_dict[key].append(np.mean(value))\n return e_dict", "def collect_log_output(activity_log, result):\n\n test_name = activity_log.get('identifier')\n if test_name:\n result.append(test_name['_value'])\n\n duration = activity_log.get('duration')\n if duration:\n output = str(\"{:.2f}\".format(float(duration['_value'])))\n result.append(output)\n\n performance_metrics = activity_log.get('performanceMetrics')\n if not performance_metrics is None:\n metrics = performance_metrics.get('_values')\n for metric in metrics:\n measurement = metric.get('measurements')\n values = measurement.get('_values')\n value_sum = 0\n for value in values:\n value_sum += float(value.get('_value'))\n output = str(value_sum / len(values))\n result.append(output)", "def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)", "def aggregate_statistics(self, stat_col, stat_agg):\n self.module.aggregate_statistics(stat_col, stat_agg)", "def apply(self):\n counter = {}\n for act in self.activities:\n freq = []\n for trace in self.log:\n freq.append(len(self.project_trace(trace, [act])))\n if not len(freq) == 0:\n counter[act] = {'sum': sum(freq), 'min': min(freq),\n 'max': max(freq)}\n return counter", "def score_aggregate(results):\n scores = []\n truth_count = detected_count = segment_count = 0\n\n for res in results:\n scores.append(res[\"scores\"])\n truth_count += len(res[\"labels\"])\n detected_count += len(res[\"detected\"])\n segment_count += 
len(res[\"scores\"][\"segments\"])\n\n ret = dict()\n ret[\"scores\"] = sum_scores(scores)\n ret[\"stats\"] = dict(truth_count=truth_count, detected_count=detected_count, segment_count=segment_count)\n return ret", "def summarize_rec_data(data):\n\n # Warning: not all collectible data has a summary stats implemented below!\n # See get_rec_stats() above!\n\n stats = {}\n\n if 'hc_ro' in data:\n # Entropy across HC units average over samples.\n hc_ro_arr = np.array(list(data['hc_ro'].values()))\n stats['H HC ro'] = utils.entropy(hc_ro_arr.T).mean()\n\n if 'vs_state' in data:\n # Sum of vS reward estimates change (from first to last sample).\n vs_state = data['vs_state']\n stats['d vS'] = sum(vs_state[max(vs_state.keys())] - vs_state[0])\n\n if 'co_occs' in data:\n # Mean entropy of real location and HC state co-occurance frequencies.\n co_occs = data['co_occs'][max(data['co_occs'].keys())]\n stats['H HC co'] = np.nanmean(get_hc_co_occ_entropy(co_occs))\n stats['H loc co'] = np.nanmean(get_loc_co_occ_entropy(co_occs))\n\n return stats", "def get_all_stat(self):\n all_stat=dict()\n for stat_type in self.log_book.keys():\n stat = self.get_stat(stat_type)\n all_stat[stat_type] = stat\n return all_stat", "def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)", "def get_errors_summary(statistics):\n all_errors = (report['errors'] for report in statistics) \n \n errors_summary = defaultdict(int)\n for doc in all_errors:\n for key, value in doc.items():\n errors_summary[key] += value\n\n return errors_summary", "def _aggregate_perf_data(perf_all_ordinals: List[str]):\n aggregate = {}\n\n pd = PerfData()\n for data in perf_all_ordinals:\n worker_pd = PerfData(**json.loads(data))\n if len(perf_all_ordinals) > 1:\n aggregate.setdefault(\"ordinals\", [])\n aggregate[\"ordinals\"].append(worker_pd.throughput_dict())\n\n pd.merge(worker_pd)\n\n aggregate.update(dataclasses.asdict(pd))\n return aggregate", "def collect_filterstats_from_log(fp):\n D = {}\n while (line := fp.readline()): # noqa: E203, E231\n if \"Filter results for\" in line:\n filt = line.split()[-1]\n fp.readline() # empty\n fp.readline() # \"Filtering\"\n fp.readline() # Array size\n fp.readline() # cumulative masked\n fp.readline() # previously masked\n filtered_line = fp.readline() # new filtered\n if \"New filtered\" not in filtered_line:\n raise ValueError(\"Expected 
'filtered' six lines after \"\n f\"'filter results', got {filtered_line:s}\")\n count = int(filtered_line.split()[-1])\n if filt in D:\n raise ValueError(f\"Found results for {filt:s} multiple times!\")\n D[filt] = count\n return D", "def parse_stats(stats: dict, res: dict):\n for k, v in stats.items():\n if k not in res.keys():\n res.update({k: {}})\n if isinstance(v, list):\n for element in v:\n for metric, value in element.items():\n res[k].update({metric: [value]})", "def get_logs_with_timestamp(res: List[Dict[str, Any]]) -> Dict[str, List[str]]:\n\n list_of_logs = (\n {log[\"result\"][\"timestamp\"]: log[\"result\"][\"logs\"]}\n for log in res\n if log[\"result\"][\"logs\"]\n )\n\n logs_with_timestamp = defaultdict(list)\n\n for result in list_of_logs:\n k, v = list(result.items())[0]\n logs_with_timestamp[parser.parse(k)] += v\n\n return logs_with_timestamp" ]
[ "0.72006446", "0.7089349", "0.7069627", "0.6966819", "0.6952808", "0.6841634", "0.683194", "0.6784331", "0.65914315", "0.64268196", "0.606027", "0.60507745", "0.6043965", "0.6035893", "0.60186166", "0.5961135", "0.59582734", "0.59497863", "0.5914297", "0.5878846", "0.5870959", "0.5861917", "0.57707447", "0.5761949", "0.57606405", "0.57302153", "0.57230663", "0.57104474", "0.5689965", "0.56839955" ]
0.7331248
0
Ansible module to verify IP reachability using Ping RPC over NETCONF.
def main():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', required=True),
            destination=dict(type='str', required=True),
            repeat_count=dict(type='int', default=5),
            vrf_name=dict(type='str'),
            min_success_rate=dict(type='int', default=100)
        ),
        supports_check_mode=True
    )
    if module.check_mode:
        module.exit_json(changed=False)
    try:
        retvals = ping(module.params['host'], module.params['destination'],
                       module.params['repeat_count'], module.params['vrf_name'])
    except Exception as exc:
        module.fail_json(msg='Reachability validation failed ({})'.format(exc))
    retvals['changed'] = False
    if retvals['success_rate'] >= module.params['min_success_rate']:
        module.exit_json(**retvals)
    else:
        module.fail_json(msg=('Success rate lower than expected ({}<{})')
                         .format(retvals['success_rate'],
                                 module.params['min_success_rate']))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ip_check():\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 '+host+' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)", "def test_ping(self):\n status, output = commands.getstatusoutput('ping -c 5 %s' % self.known_ip)\n assert status == 0", "def icmp_probe(self, ip):\n\n\t\tcmd = 'ping %s -n 10' % ip\n\t\tp = Popen(cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)\n\t\tres = p.stdout.read()\n\n\t\tres = res.decode()\n\t\tif len(p.stderr.read()) == 0:\n\t\t\tif 'Destination host unreachable' in res:\n\t\t\t\treturn False\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def ping():\n requestor = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n logger.info(f\"Health check requested by ip='{requestor}'\")\n return make_response(\n jsonify(status=\"Serving\",\n body=\"pong\"), 200)", "async def ping(self, ctx, ip):\n\n # Check for valid IP else do DNS lookup\n valid_ip = re.compile(\"[0-9]{,3}\\.[0-9]{,3}\\.[0-9]{,3}\")\n valid_hostname = re.compile(\".*\\.[a-zA-Z]{2,}\")\n valid = False\n\n if valid_ip.match(ip):\n valid = True\n elif valid_hostname.match(ip):\n valid = True\n try:\n await self.bot.say('Doing DNS lookup...')\n ip = socket.gethostbyname(ip)\n\n if valid == True:\n start = time.time()\n response = os.system(\"sudo ping -c 1 -w3 \" + ip)\n duration = time.time() - start\n duration = round(duration * 1000, 0)\n if response == 0:\n await self.bot.say(ip + ' is up and responding in ' +\n str(duration) + 'ms.')\n else:\n await self.bot.say(ip + ' is not reachable.')\n else:\n await self.bot.say(ip + ' is not a valid IP or Domain.')\n\n except socket.gaierror:\n await self.bot.say('Whoops! 
That Address cant be resolved!')", "def verify_dot1x(task):\n # run \"show dot1x all\" on each host\n sh_dot1x = task.run(task=netmiko_send_command, command_string=\"show dot1x all\")\n # TTP template for dot1x status\n dot1x_ttp_template = \"Sysauthcontrol {{ status }}\"\n # magic TTP parsing\n parser = ttp(data=sh_dot1x.result, template=dot1x_ttp_template)\n parser.parse()\n dot1x_status = json.loads(parser.result(format=\"json\")[0])\n\n # write dot1x verification report for each host\n with open(f\"output/{task.host}_dot1x_verified.txt\", \"w+\") as file:\n file.write(sh_dot1x.result)\n\n # print dot1x status\n c_print(f\"*** {task.host} dot1x status: {dot1x_status[0]['status']} ***\")", "def check_vm_connectivity(env, os_conn, vm_keypair=None, timeout=4 * 60):\n servers = os_conn.get_servers()\n for server1 in servers:\n ips_to_ping = [settings.PUBLIC_TEST_IP]\n for server2 in servers:\n if server1 == server2:\n continue\n ips_to_ping += os_conn.get_nova_instance_ips(\n server2).values()\n check_ping_from_vm(env, os_conn, server1, vm_keypair, ips_to_ping,\n timeout=timeout)", "def ping(self):\n raise AssertionError(\"Ping function is not implemented\")", "def ping_ip(ip_address, count):\n reply = subprocess.run(\n f\"ping -c {count} -n {ip_address}\",\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding=\"utf-8\",\n )\n if reply.returncode == 0:\n return True\n else:\n return False", "def check_vpn_interface():\n return validate_vpn_interface(call_command('netstat -i')[0].split('\\n'))", "def checkPingVMStatus(self, conn, vmHost, hostUser, hostPass, vmNames):\n #temp - do a simple ping test to each vm\n hypervisor = libclient.SfClient()\n\n try:\n hypervisor.Connect(vmHost, hostUser, hostPass)\n mylog.info(\"The connection to the hypervisor has been established\")\n except libclient.ClientError as e:\n mylog.error(\"There was an error connecting to the hypervisor. 
Message: \" + str(e))\n #return False\n\n #get the list of VM MAC and IP addresses\n retcode, stdout, stderr = hypervisor.ExecuteCommand(\"cat /var/lib/libvirt/dnsmasq/default.leases\")\n\n ip_list = []\n mac_list = []\n mac_ip_list = []\n full_info = []\n\n #split the info\n stdout = stdout.split()\n for element in stdout:\n if libsf.IsValidIpv4Address(element):\n ip_list.append(element)\n if self.isValidMACAddress(element):\n mac_list.append(element)\n\n #make sure there are the same number of MAC and IP addresses\n if len(mac_list) == len(ip_list):\n for x in xrange(0, len(mac_list)):\n temp_info = mac_list[x], ip_list[x]\n mac_ip_list.append(temp_info)\n\n\n #match VM name with mac address\n for name in vmNames:\n try:\n vm = conn.lookupByName(name)\n except libvirt.libvirtError as e:\n mylog.error(str(e))\n xml = ElementTree.fromstring(vm.XMLDesc(0))\n\n for elem in xml.iterfind('devices/interface/mac'):\n for addr in mac_ip_list:\n if elem.attrib.get('address') == addr[0]:\n temp = addr[0], addr[1], name\n full_info.append(temp)\n\n #temp - try to ping all the VMs \n for ip in full_info:\n recode, stdout, stderr = hypervisor.ExecuteCommand(\"ping -n -i 0.2 -c 3 -W 1 -q \" + ip[1])\n if recode == 0:\n mylog.info(\"Was able to ping \" + ip[2])\n else:\n mylog.error(\"Was not able to ping \" + ip[2])\n return False\n return True", "def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True", "def _checknet():\n exit_code = os.system('ping -c 1 www.baidu.com 1>/dev/null 2>&1')\n return exit_code", "def _get_ipaddress(node):\n if \"ipaddress\" not in node:\n with settings(hide('stdout'), warn_only=True):\n output = sudo('ohai ipaddress')\n if output.succeeded:\n node['ipaddress'] = json.loads(output)[0]\n return True\n return False", "def ping(self) -> None:\n ...", "def ping(host):\r\n \r\n # Ping command count option as function of OS\r\n param = '-n' if system_name().lower()=='windows' else '-c'\r\n\r\n # Building the command. 
Ex: \"ping -c 1 google.com\"\r\n command = ['ping', param, '1', host]\r\n\r\n process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) \r\n streamdata = process.communicate()[0]\r\n #print_util.print_std(streamdata)\r\n streamdata = string_util.decode_chs_str(streamdata)\r\n print_util.print_std(streamdata)\r\n if system_name().lower()=='windows':\r\n #On windows use this to check ping pass or fail.\r\n if 'TTL=' in str(streamdata):\r\n return True\r\n else:\r\n return False\r\n return process.returncode==0", "async def ping(self):\n uri = \"/fapi/v1/ping\"\n success, error = await self.request(\"GET\", uri)\n return success, error", "def ip_status(ip, rnge):\n\tactive = []\n\tdown = []\n\ttimed_out = []\n\t\n\t\"\"\" Get the first 3 fields of IP \"\"\"\n\taddres = ip.split(\".\")[:3]\n\tne_ip = \".\".join(addres)\n\t\n\t\"\"\" Get the last field of IP \"\"\"\n\tlst_field = ip.split(\".\")[3]\n\t\n\tif rnge == \"0\" or rnge == \"\":\n\t\trnge = 1\n\t\n\tif 0 <= int(rnge) <= 255 and ip_pattern.match(ip):\n\t\t\"\"\" Calculate the actual range \"\"\"\n\t\tactual_rnge = int(lst_field) + int(rnge)\n\n\t\t\"\"\" Range of IP address to be pinged \"\"\"\n\t\t\"\"\" Right now it starts from 0 through 10 but can be changed in the below line \"\"\"\n\t\n\t\tfor i in range(int(lst_field), actual_rnge):\t\t\n\t\t\n\t\t\tip_fin = ne_ip + \".\" + str(i)\n\t\t\tstatus = sp.check_output([\"ping \", ip_fin])\n\t\t\n\t\t\tif pattern1.search(status) :\n\t\t\t\tdown.append(ip_fin)\n\t\t\telif pattern2.search(status):\n\t\t\t\ttimed_out.append(ip_fin)\n\t\t\telse:\n\t\t\t\tactive.append(ip_fin)\n\t\n\t\tresult = \"\\nActive IP addresses are: \" + str(active) + \"\\n\\n\" + \"Unreachable IP: \" + str(down) + \"\\n\\n\" + \"Timed out IP's: \" + str(timed_out)\n\t\treturn result\n\t\n\telse:\n\t\treturn \"1) Range should be in between 0 and 255 inclusive. 
\\n2) Check the IP format.\"", "def rpc_ping(self):\n\t\treturn True", "def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "def check_ip_fwd(duthosts, all_cfg_facts, nbrhosts, tbinfo):\n for porttype in [\"ethernet\", \"portchannel\"]:\n for version in [4, 6]:\n\n ports = pick_ports(duthosts, all_cfg_facts, nbrhosts, tbinfo, port_type_a=porttype, version=version)\n\n for ttl, size in [(2, 64), (1, 1450)]:\n # local interfaces\n check_packet(sonic_ping, ports, 'portB', 'portA', size=size, ttl=ttl, ttl_change=0)\n\n # local neighbors\n check_packet(sonic_ping, ports, 'portA', 'portA',\n dst_ip_fld='nbr_ip', size=size, ttl=ttl, ttl_change=0)\n\n vm_host_to_A = nbrhosts[ports['portA']['nbr_vm']]['host']\n\n check_packet(eos_ping, ports, 'portD', 'portA', dst_ip_fld='my_lb4096_ip', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)\n\n # loopbacks\n check_packet(sonic_ping, ports, 'portA', 'portA', dst_ip_fld='nbr_lb', size=size, ttl=ttl, ttl_change=0)\n\n # inband\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='inband', size=size, ttl=ttl, ttl_change=0)\n\n # DUT loopback\n # these don't decrement ttl\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='my_lb_ip', dst_ip_fld='my_ip', size=size,\n ttl=ttl, ttl_change=0)\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='my_lb_ip', dst_ip_fld='nbr_ip', size=size,\n ttl=ttl, ttl_change=0)\n check_packet(sonic_ping, ports, 'portA', 'portA', src_ip_fld='my_lb_ip', dst_ip_fld='nbr_lb', size=size,\n ttl=ttl, ttl_change=0)\n\n vm_host_to_A = nbrhosts[ports['portA']['nbr_vm']]['host']\n check_packet(eos_ping, ports, 'portA', 'portA', dst_ip_fld='my_lb4096_ip', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl, ttl_change=0)\n\n # end to end\n vm_host_to_A = nbrhosts[ports['portA']['nbr_vm']]['host']\n check_packet(eos_ping, ports, 'portB', 'portA', dst_ip_fld='nbr_lb', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)\n check_packet(eos_ping, ports, 'portC', 'portA', dst_ip_fld='nbr_lb', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)\n check_packet(eos_ping, ports, 'portD', 'portA', dst_ip_fld='nbr_lb', src_ip_fld='nbr_lb',\n dev=vm_host_to_A, size=size, ttl=ttl)", "def test_ip_addresses_exists():\n load_ips()\n validate_names()", "def checklan(ipaddr, network):\n return True", "def Ping(self): # real signature unknown; restored from __doc__\n pass", "def ping():\n api_online = bool(check_url(\"https://rest.ensembl.org/info/ping?\"))\n vertebrate_url_online = bool(check_url(\"http://ftp.ensembl.org\"))\n other_url_online = bool(check_url(\"http://ftp.ensemblgenomes.org\"))\n return api_online and vertebrate_url_online and other_url_online", "def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255" ]
[ "0.6234672", "0.6234672", "0.6234672", "0.6234672", "0.6228038", "0.6227247", "0.616574", "0.60879576", "0.607467", "0.5944119", "0.5944096", "0.58443975", "0.57498163", "0.5747767", "0.57241446", "0.57212025", "0.57056487", "0.57054055", "0.5702251", "0.56238407", "0.561111", "0.56032324", "0.5597133", "0.5589898", "0.5584241", "0.5577437", "0.557345", "0.5567787", "0.55620044", "0.5549474" ]
0.65288395
0
Store the datapoint in the Database.
def store_datapoint(sql, parts):
    t = datetime.fromtimestamp(parts[0])
    humid = parts[1]
    temp_c = parts[2]
    temp_f = parts[3]
    heat_c = parts[4]
    heat_f = parts[5]
    c = sql.cursor()
    c.execute("INSERT INTO points VALUES (?,?,?,?,?,?)",
              (t, humid, temp_c, temp_f, heat_c, heat_f))
    sql.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_datapoint(self):\n # Insert\n if db_datapoint.idx_datapoint_exists(1) is False:\n record = Datapoint(\n id_datapoint=general.encode(self.reserved),\n agent_label=general.encode(self.reserved),\n agent_source=general.encode(self.reserved)\n )\n database = db.Database()\n database.add(record, 1047)", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def saveData(self):\n pass", "def _save (self, expires):\n\n pickled_data = pickle.dumps (self._data, self.pickle_protocol)\n\n self._delete ()\n self._exec (\n \"\"\"\\\n insert into table_name (id, expires, data)\n values (%(id)s, %(expires)s, %(data)s)\n \"\"\",\n data = pickled_data,\n expires = expires\n )", "def save_data(self, gauge_name, date_key, data):\n pass", "def savepoint(self, id):\n self.execute(\"SAVEPOINT {}\".format(id))", "def save_data(self):\n pass", "def save_data(self, record):\n self.dbm.addRecord(record)", "def dbWrite(dbPoint, formatedValue):\n raise NotImplementedError('dbWrite in simu mode')", "def save_event(self, data):\n rdb.table(self.rdb_table).insert(data)", "def store(self, dataFrame, filename):\n columns = [\"longitude\", \"latitude\", \"elevation\", \"noise_mean_day\", \"noise_mean_evening\", \"noise_mean_night\", \"noise_weighted_24h\", \"noise_mean_24h\"]\n self.store_in_csv(dataFrame, filename=filename, columns=columns)\n\n columns.insert(0, \"id\") # pandas adds a id in the front\n self.store_in_database(filename=filename, columns=columns)", "def save(self):\n store = datastore.DataStore()\n store.connect()\n store.setup()\n store.put(self.as_doc())", "def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })", "def _db_store(self, labels: Sequence[Tuple[int, np.ndarray]], table: str) -> None:\r\n # Labels are expected to be\r\n # [\r\n # (class, points),\r\n # (class, points)\r\n # .\r\n # .\r\n # .\r\n # ]\r\n # Where points are np.arrays\r\n # There should also always be one fish in the scene => len(labels) >= 1\r\n\r\n n_points = np.prod(labels[0][1].shape)\r\n\r\n gen = ((self.n, class_, *points.ravel().round(3)) for class_, points in labels)\r\n\r\n # First two \"?\" are for image id and class respectively, rest are for points\r\n sql_command = (\r\n f'INSERT INTO {table} VALUES {(\"?\",\"?\",*[\"?\" for i in range(n_points)])}'\r\n ).replace(\"'\", \"\")\r\n\r\n self.cursor.executemany(sql_command, gen)", "def save(self):\n if self._data is None and self._meta is None:\n w = \"No data/meta components found in the DataSet.\"\n warnings.warn(w)\n return None\n ds_clone = self.clone()\n self._cache['savepoint'] = ds_clone.split()\n return None", "def commitToDatabase(self, tiltseriesdata):\n\t\treturn", "def insert_data(self):\n\n pass", "def store(self, topic_id, start_date, end_date, date_axis, count_axis, parties_proportions):\n document = {'topic_id': topic_id,\n 'start_date': start_date,\n 'end_date': end_date,\n 'date_axis': date_axis,\n 'count_axis': count_axis,\n 'parties_proportions': parties_proportions}\n self.insert(document)", "def test_write_to_db(self):\n\n measurement, sensor_name = [\"testItem\", \"sensorA\"]\n query = f\"Select * from {measurement}\"\n\n def get_points(_query):\n \"\"\"Get measurement points from db given a query\"\"\"\n\n try:\n return self.test_client.query(\n _query).raw[\"series\"][0][\"values\"]\n except KeyError:\n return []\n\n test_point = main.generate_a_measurement_point(\n measurement,\n sensor_name=sensor_name,\n sensor_output_file_dir=\"tests/datafiles/\",\n 
output_filename=\"sensor_out_valid.txt\")\n\n original_points_in_db = get_points(query)\n main.write_data_to_db(self.test_client, [test_point])\n new_points_in_db = get_points(query)\n\n assert len(new_points_in_db) > len(original_points_in_db)\n assert sensor_name in str(new_points_in_db[0])\n assert str(self.expected_temp_val) in str(new_points_in_db[0])", "def store(self):\n\n pass", "def add_to_db_single(self, element):\r\n def quot(string):\r\n \"\"\" Replace \" with ' in text strings that goes into the\r\n db, right now it is only done on the name, but it should\r\n be done on all fields that might contain such characters\r\n \"\"\"\r\n return string.replace('\"', \"'\")\r\n\r\n # Make entry i measurements table\r\n query = ('INSERT INTO {table} SET '\r\n 'time=FROM_UNIXTIME({time}), '\r\n 'type=2, '\r\n 'timestep={timestep}, '\r\n 'comment=\"{comment}\", '\r\n 'pass_energy={pass_energy}, '\r\n 'excitation_energy={excitation_energy}, '\r\n 'number_of_scans={number_of_scans}, '\r\n 'project=\"{project}\", '\r\n 'file_name=\"{file_name}\", '\r\n 'name=\"{name}\";').format(\r\n table=self.tables['measurements'],\r\n time=element[0]['date'],\r\n timestep=element[0]['dwell_time'],\r\n comment=element[0]['unique_name'],\r\n pass_energy=element[0]['pass_energy'],\r\n excitation_energy=element[0]['excitation_energy'],\r\n number_of_scans=element[0]['num_scans'],\r\n project=element[0]['project'],\r\n file_name=element[0]['unique_name'].replace('\\\\', '\\\\\\\\'),\r\n name=quot(element[0]['name']))\r\n\r\n # Christian, comment this in to see a list of metadata\r\n #print element[0]\r\n self.cursor.execute(query) # COMMENT\r\n\r\n # Get the id of it\r\n query = ('select id from {table} where type=2 '\r\n 'order by id desc limit 1;').\\\r\n format(table=self.tables['measurements'])\r\n self.cursor.execute(query)\r\n id_ = self.cursor.fetchall()[0][0]\r\n\r\n # Add the data to xy_values table in chunks of 100 data points\r\n counter = 0\r\n query_reset = 'INSERT INTO {table} (measurement, x, y) VALUES'.format(\r\n table=self.tables['xy'])\r\n query = query_reset\r\n # element[1] is tuple of data: (Array(x0, x1, x2), Array(y0, y1, y2)).\r\n # The zip statement (where * pulls out both value) turns it into:\r\n # [(x0, y0), (x1, y1), (x2, y2)]\r\n for x_value, y_value in zip(*element[1]):\r\n counter += 1\r\n query += '({0},{1},{2})'.format(id_, x_value, y_value)\r\n if counter < 100:\r\n query += ','\r\n else:\r\n query += ';'\r\n self.cursor.execute(query)\r\n counter = 0\r\n query = query_reset\r\n # Remember to write the last less than 100 points\r\n if query != query_reset:\r\n # Remove the last , and add a ;\r\n query = query[0: -1] + ';'\r\n self.cursor.execute(query)", "def write_sensor_data_to_db(self, sensor_data):\n df = pd.DataFrame(sensor_data)\n df.to_sql('sensor_data', self.conn, if_exists='append', index = False)", "def _storeData(self, data, table, query=None):\n print ('Storing data')\n conn = dbo.getConnection()\n\n if query == None:\n num_cols = len(data[0])\n cols = ','.join(['%s ' for i in range(0, num_cols)])\n query = \"INSERT INTO \" + table + \" VALUES (\" + cols + \")\"\n\n dbo.execute_query(conn, query, data, multiple=True)\n dbo.closeConnection(conn)\n return", "def insetData(jsonData, ibHost, ibPort):\n inclient = InfluxDBClient(\n host=ibHost,\n port=ibPort,\n username='admin',\n password='password'\n )\n inclient.switch_database('efergy')\n inclient.write_points(jsonData)", "def save(joined_data, gps_collection_file, pollutant_collection_file, pollutant):\n for _, 
row in joined_data.iterrows():\n time_geo = models.TimeGeo(\n collection_file=gps_collection_file,\n time=row[\"time\"],\n location=geos.Point(row[\"lon\"], row[\"lat\"]),\n )\n time_geo.save()\n pollutant_value = models.PollutantValue(\n collection_file=pollutant_collection_file,\n time_geo=time_geo,\n pollutant=pollutant,\n value=row[\"measurement\"],\n )\n pollutant_value.save()", "def save(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.insert(self.__class__.__name__, data)\n\n self.__dict__.update(saved_data)", "def save(self):\n db = DBStorage()\n p = self.createPatient()\n db.add_prescription(p)", "def persist(self, values):\n pass", "def save(self):\n if not self.id:\n self.id = uuid4()\n DataStore.add_instance(self)", "def save(self, key, value):\n # deepcopy so that later modifications to value aren't reflected in the db\n self.data[key] = copy.deepcopy(value)" ]
[ "0.7809628", "0.6474018", "0.6452069", "0.6451785", "0.6422399", "0.64067936", "0.63725233", "0.6366098", "0.63641155", "0.6357713", "0.6354461", "0.6286375", "0.6228851", "0.6175777", "0.61217165", "0.60342234", "0.60303307", "0.6002851", "0.5999837", "0.5990769", "0.59858", "0.5980798", "0.59717935", "0.59626037", "0.59540576", "0.5943122", "0.5933539", "0.5923552", "0.59230214", "0.59149796" ]
0.75592405
1
Gets the connection with the Amazon S3 server. Raises an error if conn cannot be established
def get_conn():
    global S3Conn
    S3Conn = tinys3.Connection(plug.options['aws_access_key'],
                               plug.options['aws_secret_key'],
                               default_bucket=plug.options['bucket'],
                               tls=True)
    # Check that the given bucket exists by doing a HEAD request
    try:
        S3Conn.head_bucket()
    except requests.HTTPError as httpe:
        err = u"Cannot reach Onitu bucket {}".format(plug.options['bucket'])
        if httpe.response.status_code == 404:
            err += u": The bucket doesn't exist."
        if httpe.response.status_code == 403:
            err += u": Invalid credentials."
        err += u" Please check your Amazon S3 configuration - {}".format(httpe)
        raise DriverError(err)
    plug.logger.debug("Connection with Amazon S3 account successful")
    return S3Conn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_s3_connection(self):\n return connection.S3Connection(\n config.get('nereid_s3', 'access_key'),\n config.get('nereid_s3', 'secret_key')\n )", "def _get_aws_s3_connection(cls, access_key, secret_access_key):\n return boto.connect_s3(access_key, secret_access_key)", "def connect():\n # Reduce the number of retries to 1 if it's not set already so requests\n # fail quickly rather than delaying the downloading of photos\n if not boto.config.has_option('Boto', 'num_retries'):\n if not boto.config.has_section('Boto'):\n boto.config.add_section('Boto')\n boto.config.set('Boto', 'num_retries', '1')\n cfg = settings.config()\n try:\n aws_access_key = cfg.get('s3', 'access_key')\n aws_secret_key = cfg.get('s3', 'secret_key')\n aws_s3_bucket = cfg.get('s3', 'bucket')\n except NoOptionError as e:\n l.error(\"Error reading a setting from the config.cfg file: %s\", e)\n raise\n conn = S3Connection(aws_access_key, aws_secret_key)\n bucket = conn.get_bucket(aws_s3_bucket, validate=False)\n return bucket", "def s3_data_conn ( self ) :\n if not self.s3_data :\n self.s3_data = boto.s3.connection.S3Connection( self.access_key, self.access_key_secret )\n return self.s3_data", "def _connect_to_s3(self, credentials):\n connection = s3.S3Connection(credentials['token'], credentials['secret'])\n bucket = connection.get_bucket(credentials['bucket'])\n return connection, bucket", "def create_connection(bucket_name):\n conn = boto.connect_s3()\n bucket = conn.get_bucket(bucket_name)\n return conn, bucket", "def connect(self):\n try:\n self.session = Session(aws_access_key_id=access_key, aws_secret_access_key=secret_access_key)\n self.s3_resource = self.session.resource('s3')\n self.bucket = self.s3_resource.Bucket(self.bucket_name)\n except Exception as e:\n raise Exception('Some Error occurred while connecting to the cloud storage')\n return", "def s3_infrastructure_conn ( self ) :\n if not self.s3_infra :\n self.s3_infra = boto.s3.connection.S3Connection( aws_access_key_id = esp_nonprod[ 'access_key' ],\n aws_secret_access_key = decrypt_data( self.decryption_key,\n esp_nonprod[ 'access_key_secret' ] ) )\n return self.s3_infra", "async def connect(self):\n # if the connection returns None then either there isn't a connection to\n # the server in the pool, or there is no connection that is available\n self._conn_obj = s3aioFileObject._connection_pool.get(self._server)\n if self._conn_obj is None:\n try:\n session = aiobotocore.get_session()\n config = botocore.config.Config(\n connect_timeout=self._connect_timeout,\n read_timeout=self._read_timeout\n )\n s3c = session.create_client(\n \"s3\",\n endpoint_url=self._server,\n aws_access_key_id=self._credentials[\"accessKey\"],\n aws_secret_access_key=self._credentials[\"secretKey\"],\n config=config\n )\n # call await s3c.__aenter__ : this is needed for newer versions\n # of aiobotocore\n s3c = await s3c.__aenter__()\n # add the connection to the connection pool\n self._conn_obj = s3aioFileObject._connection_pool.add(\n s3c, self._server\n )\n except ClientError as e:\n raise IOException(\n \"Could not connect to S3 endpoint {} {}\".format(\n self._server, e)\n )\n\n if ('r' in self._mode and '*' not in self._path and\n '?' 
not in self._path):\n # if this is a read method then check the file exists\n response = await self._conn_obj.conn.list_objects_v2(\n Bucket=self._bucket,\n Prefix=self._path\n )\n exists = False\n for obj in response.get('Contents', []):\n if obj['Key'] == self._path:\n exists = True\n if not exists:\n raise IOException(\n \"Object does not exist: {}/{}/{}\".format(\n self._server, self._bucket, self._path\n )\n )\n if 'w' in self._mode:\n # if this is a write method then create a bytes array\n self._current_part = 1\n if 'a' in self._mode or '+' in self._mode:\n raise APIException(\n \"Appending to files is not supported {}\".format(self._path)\n )\n return True", "def _connect_s3(self):\n if self.sts:\n self.s3 = boto.connect_s3(aws_access_key_id=self.sts[\"access_key\"],\n aws_secret_access_key=self.sts[\"secret_key\"],\n security_token=self.sts[\"session_token\"])\n elif self.config.aws_profile:\n self.s3 = boto.connect_s3(profile_name=self.config.aws_profile)\n else:\n self.s3 = boto.connect_s3()", "def __get_s3_client(self):\n if self.AWS_ACCESS_KEY:\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=self.AWS_ACCESS_KEY,\n aws_secret_access_key=self.AWS_SECRET_ACCESS_KEY,\n )\n else:\n s3_client = boto3.client(\"s3\")\n return s3_client", "def get_s3_client():\n return boto3.resource('s3')", "def s3client(self):\n return self._s3client", "def s3_client(self):\n return boto3.client('s3', \n aws_access_key_id=os.environ.get(\"MINIO_ACCESS_KEY\"),\n aws_secret_access_key=os.environ.get(\"MINIO_SECRET_KEY\"),\n endpoint_url=f'http://{os.environ.get(\"MINIO_SERVER\")}',\n config=Config(signature_version='s3v4')\n )", "def make_connection(path=\"esisan.db\"):\n return s3.connect(path)", "def access_s3():\n try:\n s3helper = S3Helper()\n bucket = s3helper.get_bucket(get_archive_bucket())\n LOG.info('Access S3 bucket name: {0}'.format(bucket.name))\n except Exception:\n LOG.exception('check_database_connection')\n return False\n\n return True", "def test_connect_to_aws_s3(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n conn.create_bucket(Bucket='foobucket')\n\n s3_connector = S3Connector()\n\n try:\n s3_connector.connect(\"default\")\n except:\n self.fail(\"Could not connect to aws using mock aws s3\")", "def get_boto_client(self) -> S3Client:\n if self._boto_client is None:\n config = Config(signature_version=botocore.UNSIGNED)\n self._boto_client = self.session.client(\n \"s3\",\n region_name=settings.S3_REGION,\n endpoint_url=settings.S3_ENDPOINT_URL,\n config=config,\n )\n return self._boto_client", "def boto_init_s3(bucket_name):\n c = boto.connect_s3(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)\n b = c.get_bucket(bucket_name)\n\n return b", "def _get_connection(rse, endpoint):\n\n key = \"connection:%s_%s\" % (rse, endpoint)\n result = REGION.get(key)\n if type(result) is NoValue:\n try:\n logging.debug(\"Creating connection object\")\n result = None\n credentials = _get_credentials(rse, endpoint)\n if 'access_key' in credentials and credentials['access_key'] and \\\n 'secret_key' in credentials and credentials['secret_key'] and \\\n 'is_secure' in credentials and credentials['is_secure'] is not None:\n\n parsed = urlparse.urlparse(endpoint)\n hostname = parsed.netloc.partition(':')[0]\n port = parsed.netloc.partition(':')[2]\n\n result = boto.connect_s3(aws_access_key_id=credentials['access_key'],\n 
aws_secret_access_key=credentials['secret_key'],\n host=hostname,\n port=int(port),\n is_secure=credentials['is_secure'],\n calling_format=boto.s3.connection.OrdinaryCallingFormat())\n\n REGION.set(key, result)\n logging.debug(\"Created connection object\")\n else:\n raise exception.CannotAuthenticate(\"Either access_key, secret_key or is_secure is not defined for RSE %s endpoint %s\" % (rse, endpoint))\n except exception.RucioException as e:\n raise e\n except:\n raise exception.RucioException(\"Failed to get connection for RSE(%s) endpoint(%s), error: %s\" % (rse, endpoint, traceback.format_exc()))\n return result", "def _get_connection(self):\n return boto3.client('sns', region_name=self.region)", "def connect(**kwargs) -> Minio:\n global client\n client = Minio(\n SETTINGS.s3.endpoint,\n access_key=SETTINGS.s3.access_key,\n secret_key=SETTINGS.s3.secret_key,\n secure=SETTINGS.s3.secure,\n **kwargs,\n )\n logger.debug(\n f\"Successfully connected to S3 server on endpoint: {SETTINGS.s3.endpoint}\"\n )", "def s3resource(self):\n return self._s3resource", "def connect(self, access_key_id=None, secret_access_key=None, **kwargs):\r\n\r\n connection_args = dict(self.connection_args or ())\r\n # Use OrdinaryCallingFormat instead of boto-default\r\n # SubdomainCallingFormat because the latter changes the hostname\r\n # that's checked during cert validation for HTTPS connections,\r\n # which will fail cert validation (when cert validation is enabled).\r\n # Note: the following import can't be moved up to the start of\r\n # this file else it causes a config import failure when run from\r\n # the resumable upload/download tests.\r\n from boto.s3.connection import OrdinaryCallingFormat\r\n connection_args['calling_format'] = OrdinaryCallingFormat()\r\n connection_args.update(kwargs)\r\n if not self.connection:\r\n if self.scheme == 's3':\r\n from boto.s3.connection import S3Connection\r\n self.connection = S3Connection(access_key_id,\r\n secret_access_key,\r\n **connection_args)\r\n elif self.scheme == 'gs':\r\n from boto.gs.connection import GSConnection\r\n self.connection = GSConnection(access_key_id,\r\n secret_access_key,\r\n **connection_args)\r\n elif self.scheme == 'file':\r\n from boto.file.connection import FileConnection\r\n self.connection = FileConnection(self)\r\n else:\r\n raise InvalidUriError('Unrecognized scheme \"%s\"' %\r\n self.scheme)\r\n self.connection.debug = self.debug\r\n return self.connection", "def s3_resource(self):\n return boto3.resource('s3', \n aws_access_key_id=os.environ.get(\"MINIO_ACCESS_KEY\"),\n aws_secret_access_key=os.environ.get(\"MINIO_SECRET_KEY\"),\n endpoint_url=f'http://{os.environ.get(\"MINIO_SERVER\")}',\n config=Config(signature_version='s3v4')\n )", "def boto_connection(self):\n import boto.ec2\n region = boto.ec2.get_region(self._availability_zone())\n ec2_access_id = self.access_id()\n ec2_secret_key = self.secret_key()\n return region.connect(aws_access_key_id=ec2_access_id, aws_secret_access_key=ec2_secret_key)", "def connection(self):\n ctx = stack.top\n if ctx is not None:\n if not hasattr(ctx, 'simple_connection'):\n ctx.simple_connection = connect_to_region(\n self.app.config['AWS_REGION'],\n aws_access_key_id = self.app.config['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key = self.app.config['AWS_SECRET_ACCESS_KEY'],\n )\n\n return ctx.simple_connection", "def get_s3_client(profile_name):\n try:\n session = boto3.session.Session(profile_name=profile_name)\n except ProfileNotFound as e:\n print(e, file=sys.stderr)\n raise FailureException 
from e\n return session.resource('s3')", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_S3_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('s3', endpoint_url=endpoint_url)\n return _client", "def __set_amazon_s3_service__(self, access_key, secret_key):\n self.s3_conn = S3Connection(access_key, secret_key)" ]
[ "0.82608676", "0.81773007", "0.78756386", "0.7746647", "0.7277447", "0.7145592", "0.7025781", "0.69264907", "0.6921318", "0.6873866", "0.68642884", "0.6810215", "0.6697892", "0.6688311", "0.6616437", "0.6614548", "0.6512589", "0.6480621", "0.6454841", "0.6413643", "0.63492787", "0.6322943", "0.62836355", "0.6200543", "0.6194717", "0.6194247", "0.61890334", "0.6177589", "0.61552906", "0.6109542" ]
0.8209997
1
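The record above validates the S3 connection by issuing a HEAD request on the bucket and mapping 404/403 responses to human-readable errors. A minimal sketch of the same reachability check written against boto3 instead of tinys3 (the function name, bucket argument, and error messages here are illustrative assumptions, not part of the Onitu driver):

import boto3
from botocore.exceptions import ClientError

def check_bucket_reachable(bucket_name):
    # HEAD the bucket: succeeds silently if it exists and we may access it
    s3 = boto3.client('s3')
    try:
        s3.head_bucket(Bucket=bucket_name)
    except ClientError as e:
        code = e.response['Error']['Code']
        if code == '404':
            raise RuntimeError("Bucket {} doesn't exist".format(bucket_name))
        if code == '403':
            raise RuntimeError("Invalid credentials for bucket {}".format(bucket_name))
        raise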
Returns a float timestamp parsed from the date-format timestamp stored by Amazon. Prefixes the given filename with Onitu's root.
def get_file_timestamp(filename):
    plug.logger.debug(u"Getting timestamp of {}", filename)
    metadata = S3Conn.head_object(u(filename))
    timestamp = metadata.headers['last-modified']
    # convert timestamp to timestruct...
    timestamp = time.strptime(timestamp, HEAD_TIMESTAMP_FMT)
    # ...timestruct to float
    timestamp = time.mktime(timestamp)
    return timestamp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTimestamp(self, filename=None):\n if filename == None:\n return float(self.filename[:-4])\n else:\n return float(filename[:-4])", "def get_timestamp_from_path(file_path):\n return int(file_path.split('_')[1].split('.')[0])", "def timestamp(filename, source='auto'):\n if source == 'auto':\n try:\n return exif_timestamp(filename)\n except exceptions.ICExifReadError:\n return stat_timestamp(filename)\n elif source == 'stat':\n return stat_timestamp(filename)\n elif source == 'exif':\n return exif_timestamp(filename)\n else:\n raise ValueError(\"source not in ['stat', 'exif', 'auto']\")", "def get_file_timestamp(file_name):\n result = subprocess.check_output([osmconvert,\n \"--out-timestamp\", file_name])\n file_timestamp = strtodatetime(result)\n if not file_timestamp:\n # try to get the timestamp from the file's statistics\n logging.info(\"file %s has no file timestamp.\" % file_name)\n logging.info(\"Running statistics to get the timestamp.\")\n result = subprocess.check_output([osmconvert,\n \"--out-statistics\", file_name])\n p = result.find(\"timestamp max: \")\n if p:\n file_timestamp = strtodatetime(result[p + 15:p + 35])\n logging.info(\"Aging the timestamp by 4 hours for safety reasons.\")\n file_timestamp = file_timestamp - timedelta(hours=4)\n if not file_timestamp:\n logging.info(\"(no timestamp)\")\n else:\n logging.info(\"timestamp of %s: %s\" % (file_name,\n file_timestamp.isoformat()))\n\n return file_timestamp", "def get_previous_upload_timestamp(nightscout_data_file_name):\r\n return int(nightscout_data_file_name.split('_')[0])", "def determine_timestamp(year, file_date):\n timestamp = datetime.strptime(file_date, '%Y-%m-%d')\n\n if timestamp.year != int(year):\n timestamp = datetime.strptime(year, '%Y')\n\n return timestamp.strftime('%Y-%m-%d 00:00:00')", "def datetimestamp(cinefilename):\n\n fname = os.path.basename(filename)\n\n cleanf = os.path.splitext(fname)[0]\n\n # all our experiments are on Wednesdays!!!\n datestring, usec, nsec = cleanf.split('Wed')[-1].strip().split('.')\n\n return time.strptime(datestring, '%b %d %Y %H %M %S')", "def date_from_filename(filename):\n date_string = filename.split('.')[0] + '+0000'\n return datetime.datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S%z')", "def to_filetime(self):\n try:\n dt_obj = duparser.parse(timestamp)\n self.out_filetime = str(int((dt_obj - self.epoch_1970).total_seconds() * self.hundreds_nano + self.epoch_as_filetime))\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_filetime = False\n return self.out_filetime", "def GetPersistTime(ar_filename):\n try:\n with open(ar_filename) as f:\n return float(f.read())\n except (IOError, ValueError):\n return 0.0", "def get_timestamp(file_path):\n mtime = os.stat(file_path).st_mtime\n return datetime.datetime.fromtimestamp(mtime).isoformat()", "def get_file_date(self, file: str) -> date:", "def timestamped_filename(\n filename, fmt='%Y-%m-%d-%H-%M-%S',\n timestruct=None, sep='_'):\n head, ext = _os.path.splitext(filename)\n timestr = timestamp(fmt, timestruct)\n return '%s%s%s%s' % (head, sep, timestr, ext)", "def update_timestamp():\n\n with open(\"timestamp.data\", \"r+\") as file:\n old = float(file.read())\n now = time.mktime(datetime.datetime.now().timetuple())\n file.seek(0)\n file.write(str(now))\n file.truncate()\n return old", "def filename_to_timestamp(file_path: str, target_path: str) -> None:\n # Extract the date and time from filenames\n # We assume the name convention 
local_path/proj_root/path_to_data/lander_planet_date_time.csv\n dt = ''.join(file_path.split('.')[-2].split('_')[-2:])\n new_target_path = get_project_root() + '/' + target_path + file_path.split('/')[-1]\n with open(file_path, 'r') as infile, open(new_target_path, 'w') as outfile:\n reader = csv.reader(infile)\n writer = csv.writer(outfile)\n writer.writerow(next(reader) + ['timestamp'])\n for row in reader:\n writer.writerow(row + [str(datetime.strptime(dt, '%Y%m%d%H%M%S'))])", "def stat_timestamp(filename):\n return os.stat(filename).st_mtime", "def file_timestamp(binblob):\n import pdb;pdb.set_trace()\n try:\n dt = datetime.datetime(1601,1,1,0,0,0) + datetime.timedelta(microseconds=binblob/10)\n except:\n dt = \"This field is incorrectly identified as a file timestamp in the template\"\n return dt", "def _get_timestamp():\n return '{}Z'.format(\n datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]\n )", "def datetime_to_file_name(timestamp):\n # Example datetime object.\n # datetime(year, month, day, hour, min, tzinfo=timezone.utc)\n assert isinstance(timestamp, datetime)\n\n return \"netatmo_%s%s%s_%s%s.json.gz\" % (\n str(timestamp.year).zfill(4),\n str(timestamp.month).zfill(2),\n str(timestamp.day).zfill(2),\n str(timestamp.hour).zfill(2),\n str(timestamp.minute).zfill(2)\n )", "def creation_date(filename):\n try:\n if filename.endswith(JPEG_EXTENSIONS):\n timestamp = creation_date_from_exif(filename)\n else:\n timestamp = creation_date_from_hachoir(filename)\n except ValueError:\n timestamp = None\n\n try:\n path_date = creation_date_from_path(filename)\n except ValueError:\n return timestamp\n\n if not timestamp:\n return path_date\n\n # Preserve path timestamp if incompatible with image timestamp\n if timestamp < path_date - timedelta(days=2):\n return path_date\n if timestamp > path_date + timedelta(days=1):\n return path_date\n\n return timestamp", "def datetime_filename(prefix='output_',extension='.txt'):\n outputname = prefix + '{:%Y%m%d%H%M%S}utc{}'.format(\n datetime.datetime.utcnow(),extension)\n return outputname", "def fileTime(ft):\n return datetime(1601, 1, 1) + timedelta(microseconds=ft / 10)", "def _logstamp(self,fname):\n\t\ttry:\n\t\t\tst = stat(fname)\n\t\t\tif st:\n\t\t\t\tMODULE.info(\" >> log file stamp = '%s'\" % st[9])\n\t\t\t\treturn st[9]\n\t\t\treturn 0\n\t\texcept:\n\t\t\treturn 0", "def time_stamping(file):\n time_stamp = datetime.now().date()\n\n # 1st remove path like /home/\n path_file = file.split(\"/\")\n # 2nd removes file formats\n file_ = path_file[len(path_file)-1].split(\".\", 1)\n path_file.pop()\n # 3rd add time_stamp\n file_[0] = str(file_[0])+\"_\"+str(time_stamp)\n # 4th all is back together\n file = '.'.join(map(str, file_))\n\n path_file.append(file)\n file = '/'.join(map(str, path_file))\n print(file)\n return file", "def systime_to(timestamp_file_path: str) -> None:\n cmd = f\"date +%s 1> {timestamp_file_path:s}\"\n proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)\n (_, err) = proc.communicate()\n return err.decode(\"utf-8\").strip()", "def get_gps_timestamp(file, time_offset):\n reference_date = get_reference_datetime(file)\n absolute_date = get_absolute_datetime(reference_date, time_offset)\n timestamp, nanosecond = datetime_to_gpstimestamp_nanoseconds(absolute_date)\n\n return timestamp, nanosecond", "def get_changefile_timestamp(changefile_type, file_sequence_number):\n url = get_url(changefile_type) + \"/\"\n url = url + (\"%03i/%03i/%03i\" % (file_sequence_number / 1000000,\n file_sequence_number / 
1000 % 1000,\n file_sequence_number % 1000))\n url = url + \".state.txt\"\n changefile_timestamp = None\n for result in urllib.urlopen(url):\n # get timestamp\n timestamp_p = result.find(\"timestamp=\")\n if timestamp_p != -1:\n # found timestamp line\n timestamp_p += 10 # jump over text\n result = result[timestamp_p:].replace(\"\\\\\", \"\").strip()\n changefile_timestamp = strtodatetime(result)\n\n if not changefile_timestamp:\n logging.info(\"(no timestamp)\")\n if file_sequence_number == 0:\n changefile_timestamp = datetime(1900, 1, 1)\n else:\n AssertionError(\"no timestamp for %s changefile %i.\" %\n (changefile_type, file_sequence_number))\n else:\n logging.info(\"%s, id: %i, timestamp: %s\" %\n (changefile_type, file_sequence_number,\n changefile_timestamp.isoformat()))\n return changefile_timestamp", "def parseTimeFromFilename(name, dataset):\r\n if dataset.lower() in ['zandmotor']:\r\n date = map(int, name[name.rfind('/')+1:-4].split('_'))\r\n return reader.daySinceEpoch(date[0], date[1], date[2])\r\n elif dataset.lower() in ['coastline']:\r\n return int(name[name.rfind('/')+1:name.rfind('/')+5])", "def date_from_filename(filename: str) -> datetime.datetime:\n\n if not filename.startswith((\"st\", \"tr\")) or not filename.endswith(\".hld.root.root\"):\n raise Exception(\"Filename must be like tryydoyhhmmss.hld.root.root \"\n \"or styydoyhhmmss.hld.root.root\")\n\n yy = int(f\"20{filename[2:4]}\")\n doy = int(filename[4:7])\n hh = int(filename[7:9])\n mm = int(filename[9:11])\n ss = int(filename[11:13])\n\n return datetime.datetime.combine(\n datetime.date(yy, 1, 1) + datetime.timedelta(doy + 1),\n datetime.time(hour=hh, minute=mm, second=ss)\n )", "def get_seviri_file_time(file):\n if hasattr(file, '__iter__'):\n filenames = [f.split('/')[-1] for f in file]\n date = [datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50])) for f in filenames]\n else:\n f = file.split('/')[-1]\n date = datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50]))\n return date" ]
[ "0.78132105", "0.6773026", "0.6582669", "0.6577208", "0.65521187", "0.6412246", "0.63317764", "0.6314977", "0.6306793", "0.6304807", "0.6289728", "0.62649685", "0.6254887", "0.6239393", "0.62276626", "0.62186366", "0.61868083", "0.6158499", "0.6081906", "0.6031934", "0.6020739", "0.60159224", "0.6011052", "0.6003094", "0.5989348", "0.5971012", "0.593947", "0.5917722", "0.5896242", "0.58697027" ]
0.7196581
1
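Note that the 'last-modified' header is an RFC 1123 date in GMT, so time.mktime (which interprets the struct as local time) can skew the result by the local UTC offset. A timezone-correct sketch using only the standard library (the function name is my own, not from the driver):

from email.utils import parsedate_to_datetime

def last_modified_to_epoch(header_value):
    # e.g. "Wed, 12 Oct 2005 07:28:00 GMT" -> 1129102080.0
    # parsedate_to_datetime returns an aware datetime, so .timestamp()
    # converts to the epoch without local-timezone skew
    return parsedate_to_datetime(header_value).timestamp()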
Caches a multipart upload. Checks that the cache isn't growing past MAX_CACHE_SIZE and that it isn't in the cache yet.
def add_to_cache(multipart_upload):
    if len(cache) < MAX_CACHE_SIZE:
        if multipart_upload.uploadId not in cache:
            cache[multipart_upload.uploadId] = multipart_upload
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_from_cache(multipart_upload):\n if multipart_upload.uploadId in cache:\n del cache[multipart_upload.uploadId]", "def StoreOrUpdateInCache(self, filename, data):\n try:\n if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):\n memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)\n except (ValueError), err:\n logging.warning('Data size too large to cache\\n%s' % err)", "def StoreOrUpdateInCache(self, filename, data):\n try:\n if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):\n memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)\n except (ValueError), err:\n logging.warning('Data size too large to cache\\n%s' % err)", "def updateCache(self):\n for root, dirs, files in os.walk(cachedFilesPath):\n for file in files:\n if file.endswith(cachedFileExtensionSuffix):\n path = os.getcwd()+'/'+cachedFilesPath+file\n with open(path, mode='r') as f:\n payload_json = f.read()\n payload_obj=jsonpickle.decode(payload_json)\n r= self.upload(payload_obj)\n if isinstance(r, types.NoneType):\n #do nothing\n print(\"\")\n else:\n if r.status_code == 200 :\n #uploaded!\n if cacheArhive:\n #move it to archive\n dst=os.getcwd()+'/'+cachedArchivePath+file\n shutil.move(path, dst)\n print(\"archived log: \", file)\n else:\n #delete it\n os.remove(path)", "def copy_to_cache(cls, target_filename):\n is_cached = cls.is_remote_cached(target_filename)\n if not is_cached:\n cache = cls.CACHE_BACKEND()\n cache.upload(target_filename)\n logger.debug('File %r was uploaded to %r', target_filename, cls.CACHE_BACKEND)", "def test_upload_no_overwrite(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = []\n name, version, filename = \"a\", \"1\", \"a-1.tar.gz\"\n cache.upload(filename, BytesIO(b\"test1234\"), name, version)\n with self.assertRaises(ValueError):\n cache.upload(filename, BytesIO(b\"test1234\"), name, version)", "def __handle_cache_size(self):\n if self.__maxSize is not None:\n self.__handle_cache_size_bytes()\n if self.__maxItemSize is not None:\n self.__handle_cache_size_items()", "def test_upload_overwrite(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = [\"everyone\"]\n name, filename, content = \"a\", \"a-1.tar.gz\", BytesIO(b\"new\")\n cache.upload(filename, BytesIO(b\"old\"), name)\n cache.upload(filename, content, name)\n\n all_versions = cache.all(name)\n self.assertEqual(len(all_versions), 1)\n data = cache.storage.open(all_versions[0]).read()\n self.assertEqual(data, b\"new\")\n\n stored_pkgs = list(cache.storage.list(cache.new_package))\n self.assertEqual(len(stored_pkgs), 1)", "def ensure_space(self,\n context: context.RequestContext,\n volume: objects.Volume) -> bool:\n\n # Check to see if the cache is actually limited.\n if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0:\n return True\n\n # Make sure that we can potentially fit the image in the cache\n # and bail out before evicting everything else to try and make\n # room for it.\n if (self.max_cache_size_gb != 0 and\n volume.size > self.max_cache_size_gb):\n return False\n\n # Assume the entries are ordered by most recently used to least used.\n entries = self.db.image_volume_cache_get_all(\n context,\n **self._get_query_filters(volume))\n\n current_count = len(entries)\n\n current_size = 0\n for entry in entries:\n current_size += entry['size']\n\n # Add values for the entry we intend to 
create.\n current_size += volume.size\n current_count += 1\n\n LOG.debug('Image-volume cache for %(service)s current_size (GB) = '\n '%(size_gb)s (max = %(max_gb)s), current count = %(count)s '\n '(max = %(max_count)s).',\n {'service': volume.service_topic_queue,\n 'size_gb': current_size,\n 'max_gb': self.max_cache_size_gb,\n 'count': current_count,\n 'max_count': self.max_cache_size_count})\n\n while (((current_size > self.max_cache_size_gb and\n self.max_cache_size_gb > 0)\n or (current_count > self.max_cache_size_count and\n self.max_cache_size_count > 0))\n and len(entries)):\n entry = entries.pop()\n LOG.debug('Reclaiming image-volume cache space; removing cache '\n 'entry %(entry)s.', {'entry': self._entry_to_str(entry)})\n self._delete_image_volume(context, entry)\n current_size -= entry['size']\n current_count -= 1\n LOG.debug('Image-volume cache for %(service)s new size (GB) = '\n '%(size_gb)s, new count = %(count)s.',\n {'service': volume.service_topic_queue,\n 'size_gb': current_size,\n 'count': current_count})\n\n # It is only possible to not free up enough gb, we will always be able\n # to free enough count. This is because 0 means unlimited which means\n # it is guaranteed to be >0 if limited, and we can always delete down\n # to 0.\n if self.max_cache_size_gb > 0:\n if current_size > self.max_cache_size_gb > 0:\n LOG.warning('Image-volume cache for %(service)s does '\n 'not have enough space (GB).',\n {'service': volume.service_topic_queue})\n return False\n\n return True", "def copy_to_cache(cls, file_name):\n random.shuffle(cls.CACHE_BACKENDS)\n for cb in cls.CACHE_BACKENDS:\n if not cb.health_check():\n continue\n # attempt upload\n cb.copy_to_cache(file_name)\n # confirm presence\n if cls.get_from_cache(file_name):\n break", "def reaper(self):\n if not self.superuser_request:\n self.abort(402, 'uploads must be from an authorized drone')\n with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:\n try:\n file_store = files.FileStore(self.request, tempdir_path)\n except files.FileStoreException as e:\n self.abort(400, str(e))\n now = datetime.datetime.utcnow()\n fileinfo = dict(\n name=file_store.filename,\n created=now,\n modified=now,\n size=file_store.size,\n hash=file_store.hash,\n tags=file_store.tags,\n metadata=file_store.metadata\n )\n container = reaperutil.create_container_hierarchy(file_store.metadata)\n f = container.find(file_store.filename)\n target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))\n if not f:\n file_store.move_file(target_path)\n container.add_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n elif not file_store.identical(util.path_from_hash(fileinfo['hash']), f['hash']):\n file_store.move_file(target_path)\n container.update_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n throughput = file_store.size / file_store.duration.total_seconds()\n log.info('Received %s [%s, %s/s] from %s' % (file_store.filename, util.hrsize(file_store.size), util.hrsize(throughput), self.request.client_addr))", "def cacheFull(webhook_cache):\n \n return webhook_cache.__len__() > 0", "def use_cached_files(self, cache_key):\r\n pass", "def _update_blob_cache(self, tbuf, tid):\n\n # Not sure why _update_cache() would be called on a closed storage.\n if self._cache is None:\n return\n\n if self.fshelper is not None:\n blobs = tbuf.blobs\n had_blobs = False\n while 
blobs:\n oid, blobfilename = blobs.pop()\n self._blob_data_bytes_loaded += os.stat(blobfilename).st_size\n self.fshelper.getPathForOID(oid, create=True)\n target_blob_file_name = self.fshelper.getBlobFilename(oid, tid)\n lock = _lock_blob(target_blob_file_name)\n try:\n ZODB.blob.rename_or_copy_blob(\n blobfilename,\n target_blob_file_name,\n )\n finally:\n lock.close()\n had_blobs = True\n\n if had_blobs:\n self._check_blob_size(self._blob_data_bytes_loaded)", "def cache(self):\n\n if self.url and not self.photo:\n result = urllib.urlretrieve(self.url)\n self.photo.save(\n os.path.basename(self.url),\n File(open(result[0]))\n )\n self.save()", "def cache_image(self):\n img_temp = NamedTemporaryFile()\n # Header required for HTTPS connections\n request = Request(self.url, headers={'User-Agent': ''})\n response = urlopen(request)\n type_file = dict(response.info()._headers)['Content-Type']\n if 'image' not in type_file:\n raise ValidationError(\"The URL does not contains any image. (Content-Type: {0}) (URL: {1})\".format(type, self.url))\n # Store the filename with extension\n url_image = urlparse(self.url)\n filename, file_ext = splitext(basename(url_image.path))\n # If the file doesn't have a extension, find it out from the header\n if file_ext == '':\n file_ext = type_file.replace('image/', '')\n self.filename = \"{0}.{1}\".format(filename, file_ext)\n source_data = response.read()\n # Compress the image\n source_data = optimize(source_data)\n img_temp.write(source_data)\n img_temp.flush()\n # Save the image in the server\n self.image .save(self.url, File(img_temp))", "def update(self, key_path, content, t_mserver):\n#\t\tif key not in self.cache and len(self.cache) >= self.max_cache_size:\n#\t\t\tself.remove_oldest()\n\t\t\n\t\tcurrent_time = int(time.time())\n\t\tif key_path not in self.cache:\n\t\t\tself.add_cache(key_path, content, t_mserver)\n\t\t\t\t\n\t\telif current_time >= self.cache[key_path]['time_validated']:\n\t\t\tself.cache[key_path] = {'time_validated': int(time.time()),\n\t\t\t\t\t\t\t\t\t't_mclient': int(t_mserver),\n\t\t\t\t\t\t\t\t\t'content': content}\n\t\telse:\n\t\t\tprint(\"Content is not updates OR time_accessed went wrong!\")", "def _update_cache(self, clean=False):\n with self.connection as db:\n update = []\n commit = False\n for n in db.execute(\"SELECT path FROM cache WHERE size IS NULL\"):\n try:\n path = n[0]\n if os.path.isdir(path):\n kind = \"directory\"\n size = 0\n for root, _, files in os.walk(path):\n for f in files:\n size += os.path.getsize(os.path.join(root, f))\n else:\n kind = \"file\"\n size = os.path.getsize(path)\n update.append((size, kind, path))\n except Exception:\n if clean:\n db.execute(\"DELETE from cache WHERE path=?\", (path,))\n commit = True\n\n if update:\n db.executemany(\"UPDATE cache SET size=?, type=? 
WHERE path=?\", update)\n\n if update or commit:\n db.commit()", "def _cache_image(self, instance):\n\n image_name = '%s.tar.gz' % instance['image_id']\n full_image_path = '%s/%s' % (FLAGS.ovz_image_template_dir, image_name)\n\n if not os.path.exists(full_image_path):\n # These objects are required to retrieve images from the object store.\n # This is known only to work with glance so far but as I understand it\n # glance's interface matches that of the other object stores.\n user = manager.AuthManager().get_user(instance['user_id'])\n project = manager.AuthManager().get_project(instance['project_id'])\n\n # Grab image and place it in the image cache\n images.fetch(instance['image_id'], full_image_path, user, project)\n return True\n else:\n return False", "def refresh_cache_file(form, model, is_created):\n common.save_serialized_file()\n app.global_content = common.load_cached()", "def check_av_cache(self):\n self.set_file_md5()\n request = copy.deepcopy(self.request_template)\n request['request'][0]['md5'] = self.md5\n print(\"file {} md5: {}\".format(self.file_name, self.md5))\n data = json.dumps(request)\n print(\"Sending AV Query request before upload in order to check AV cache for file {}\".format(self.file_name))\n response = requests.post(url=self.url + \"query\", data=data, verify=False)\n response_j = response.json()\n return response_j", "def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,\r\n num_cb):\r\n (server_start, server_end) = self.SERVER_HAS_NOTHING\r\n conn = key.bucket.connection\r\n if self.tracker_uri:\r\n # Try to resume existing resumable upload.\r\n try:\r\n (server_start, server_end) = (\r\n self._query_server_pos(conn, file_length))\r\n self.server_has_bytes = server_start\r\n key=key\r\n if conn.debug >= 1:\r\n print 'Resuming transfer.'\r\n except ResumableUploadException, e:\r\n if conn.debug >= 1:\r\n print 'Unable to resume transfer (%s).' % e.message\r\n self._start_new_resumable_upload(key, headers)\r\n else:\r\n self._start_new_resumable_upload(key, headers)\r\n\r\n # upload_start_point allows the code that instantiated the\r\n # ResumableUploadHandler to find out the point from which it started\r\n # uploading (e.g., so it can correctly compute throughput).\r\n if self.upload_start_point is None:\r\n self.upload_start_point = server_end\r\n\r\n if server_end == file_length:\r\n # Boundary condition: complete file was already uploaded (e.g.,\r\n # user interrupted a previous upload attempt after the upload\r\n # completed but before the gsutil tracker file was deleted). Set\r\n # total_bytes_uploaded to server_end so we'll attempt to upload\r\n # no more bytes but will still make final HTTP request and get\r\n # back the response (which contains the etag we need to compare\r\n # at the end).\r\n total_bytes_uploaded = server_end\r\n else:\r\n total_bytes_uploaded = server_end + 1\r\n fp.seek(total_bytes_uploaded)\r\n conn = key.bucket.connection\r\n\r\n # Get a new HTTP connection (vs conn.get_http_connection(), which reuses\r\n # pool connections) because httplib requires a new HTTP connection per\r\n # transaction. 
(Without this, calling http_conn.getresponse() would get\r\n # \"ResponseNotReady\".)\r\n http_conn = conn.new_http_connection(self.tracker_uri_host,\r\n conn.is_secure)\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n # Make sure to close http_conn at end so if a local file read\r\n # failure occurs partway through server will terminate current upload\r\n # and can report that progress on next attempt.\r\n try:\r\n return self._upload_file_bytes(conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb)\r\n except (ResumableUploadException, socket.error):\r\n resp = self._query_server_state(conn, file_length)\r\n if resp.status == 400:\r\n raise ResumableUploadException('Got 400 response from server '\r\n 'state query after failed resumable upload attempt. This '\r\n 'can happen if the file size changed between upload '\r\n 'attempts', ResumableTransferDisposition.ABORT)\r\n else:\r\n raise\r\n finally:\r\n http_conn.close()", "def cache_size(self):\n return 0", "def insert_file_to_cache(self, file_path: Path):\n new_file_size = FileUtils.get_file_size_in_bytes(file_path)\n\n if new_file_size > self.capacity:\n raise FileTooBigException(f'{file_path} is bigger than cache, action cancelled')\n\n if not self.check_if_file_exist_in_cache(file_path):\n # File isn't already in cache\n\n if not self.is_there_enough_free_space_for_file(new_file_size):\n # There's enough free space - insert file\n self.delete_files_to_insert_new_file(new_file_size)\n\n self._insert_file_to_cache_storage(file_path, new_file_size)", "def test_index_files_cache():\n index_file_cache = ReadIndexFilesCache()\n index_file_cache.add(\"file_1\", None)\n index_file_cache.add(\"file_1\", None)\n assert len(index_file_cache.lookup_set) == 1\n assert index_file_cache.has_not_read(\"file_1\") is False\n assert index_file_cache.has_not_read(\"file_2\") is True\n index_file_cache.add(\"file_2\", None)\n index_file_cache.add(\"file_3\", None)\n index_file_cache.add(\"file_4\", None)\n assert len(index_file_cache.lookup_set) == 4\n\n # Test cache eviction logic\n\n index_file_cache.cache_limit = 2 # override cache limit\n index_file_cache.add(\"file_5\", \"file_1\")\n assert len(index_file_cache.lookup_set) == 5 # No elements evicted\n index_file_cache.add(\"file_6\", \"file_4\")\n assert (\n len(index_file_cache.lookup_set) == 3\n ) # Elements in the cache will be file_4, file_5, file_6", "def fill_request_cache():\n if not request_cache.cache.get(\"bingo_request_cache_filled\"):\n\n # Assume that we're going to grab both BingoCache and\n # BingoIdentityCache from memcache\n memcache_keys = [\n BingoCache.CACHE_KEY,\n BingoIdentityCache.key_for_identity(identity())\n ]\n\n # Try to grab BingoCache from instance cache\n bingo_instance = instance_cache.get(BingoCache.CACHE_KEY)\n if bingo_instance:\n # If successful, use instance cached version...\n request_cache.cache[BingoCache.CACHE_KEY] = bingo_instance\n # ...and don't load BingoCache from memcache\n memcache_keys.remove(BingoCache.CACHE_KEY)\n\n # Load necessary caches from memcache\n dict_memcache = memcache.get_multi(memcache_keys)\n\n # Decompress BingoCache if we loaded it from memcache\n if BingoCache.CACHE_KEY in dict_memcache:\n dict_memcache[BingoCache.CACHE_KEY] = CacheLayers.decompress(\n dict_memcache[BingoCache.CACHE_KEY])\n\n # Update request cache with values loaded from memcache\n request_cache.cache.update(dict_memcache)\n\n if not bingo_instance:\n # And if BingoCache wasn't in the instance cache already, store\n # it with a 1-minute expiry\n 
instance_cache.set(BingoCache.CACHE_KEY,\n request_cache.cache.get(BingoCache.CACHE_KEY),\n expiry=CacheLayers.INSTANCE_SECONDS)\n\n request_cache.cache[\"bingo_request_cache_filled\"] = True", "def is_cache_valid(self):\n if os.path.isfile(self.cache_filename):\n mod_time = os.path.getmtime(self.cache_filename)\n current_time = time()\n if (mod_time + self.cache_max_age) > current_time:\n return True\n return False", "def put(self, url, localfile):\n\n cachedir = self._cachedir(url)\n filename = localfile.name\n\n logger.debug(f\"Storing {localfile} in cache for {url}\")\n shutil.copy2(localfile, cachedir / filename)\n self._writefilename(cachedir, filename)", "def is_cache_valid(self):\n if os.path.isfile(self.cache_path_cache):\n mod_time = os.path.getmtime(self.cache_path_cache)\n current_time = time()\n if (mod_time + self.cache_max_age) > current_time:\n if os.path.isfile(self.cache_path_index):\n return True\n return False", "async def _upload(self) -> None:\n\n # filename given?\n filename = str(uuid.uuid4()) if self.filename is None else self.filename\n\n # check\n if self._upload_path is None:\n raise ValueError(\"No upload URL given.\")\n\n # send data and return image ID\n async with aiohttp.ClientSession() as session:\n data = aiohttp.FormData()\n data.add_field(\"file\", self._buffer, filename=self.filename)\n async with session.post(self._upload_path, auth=self._auth, data=data, timeout=self._timeout) as response:\n if response.status == 401:\n log.error(\"Wrong credentials for uploading file.\")\n raise FileNotFoundError\n elif response.status != 200:\n log.error(f\"Could not upload file to filecache: {response.status} {response.reason}\")\n raise FileNotFoundError" ]
[ "0.6604833", "0.5890147", "0.5890147", "0.5878093", "0.58533037", "0.5794422", "0.55992955", "0.55967", "0.5541061", "0.5466576", "0.54418707", "0.5400634", "0.53629166", "0.5326438", "0.5253554", "0.52396715", "0.52342", "0.5201345", "0.51884687", "0.51225877", "0.50966024", "0.5070047", "0.5059834", "0.5049938", "0.5027695", "0.5011617", "0.50025666", "0.49873498", "0.49828354", "0.49763888" ]
0.77208346
0
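The caching policy above simply stops admitting new uploads once MAX_CACHE_SIZE is reached. A common alternative is to evict the least recently used entry instead, so the cache keeps accepting fresh uploads; a self-contained sketch with collections.OrderedDict (names are illustrative, not the driver's own):

from collections import OrderedDict

MAX_CACHE_SIZE = 128
cache = OrderedDict()

def add_to_cache_lru(multipart_upload):
    upload_id = multipart_upload.uploadId
    if upload_id in cache:
        cache.move_to_end(upload_id)  # mark as most recently used
        return
    if len(cache) >= MAX_CACHE_SIZE:
        cache.popitem(last=False)  # drop the least recently used entry
    cache[upload_id] = multipart_upload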
Removes the given MultipartUpload from the cache, if in it.
def remove_from_cache(multipart_upload):
    if multipart_upload.uploadId in cache:
        del cache[multipart_upload.uploadId]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_file_from_cache(self, md5_hash):\n self.used_space -= len(self.storage[md5_hash])\n self.storage.pop(md5_hash)\n self.remove_from_usage_queue(md5_hash)", "def add_to_cache(multipart_upload):\n if len(cache) < MAX_CACHE_SIZE:\n if multipart_upload.uploadId not in cache:\n cache[multipart_upload.uploadId] = multipart_upload", "def remove(self, hashlock):\n self._cached_lock_hashes.remove(sha3(self.get(hashlock).lock.as_bytes))\n self._cached_root = None\n del self.locked[hashlock]", "def cancel_upload(self):\r\n self.bucket.cancel_multipart_upload(self.key_name, self.id)", "def removeFromDownloadQueue(self, _src):\n for dl in self.downloadQueue:\n if _src in dl['src']:\n self.downloadQueue.pop(self.downloadQueue.index(dl))\n return", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def rem_file(self, key):\n del self.fileList[key]\n\n path = os.path.join(self.file_path, '%s.xoj' % key)\n try:\n os.remove( path )\n except:\n print \"Unable to remove\", path\n self.save()", "def _forget_file(self, snapshot: Bug, filepath: str) -> None:\n try:\n cache_key = (snapshot.name, filepath)\n del self.__cache_offsets[cache_key]\n del self.__cache_file_contents[cache_key]\n except KeyError:\n pass", "def delete(self, path):\n \n try:\n self._client.remove(self._getEncodedUri(path), force=True)\n except ClientError, error:\n raise SubversionError(error)\n else:\n self._sharedState.removeFromCache(path)", "def remove(self, key):\n with self._lock:\n self._check_expire()\n\n if key in self._obj_cache:\n self._log.debug(\"removing entry '%s' (type=%s)\",\n key, type(self._obj_cache[key]))\n del self._obj_cache[key]\n del self._obj_last_access[key]\n del self._obj_timeouts[key]", "def remove(self, key):\n \n # If there used to be a key, there must exist an old value blob somewhere in the database. 
It should be deallocated after a successful commit to disk.\n if key in self.keys:\n if self.keys[key] is not None:\n punchat,punchlen = self.keys[key]\n self.awaitingpunch.append((punchat, punchlen))\n\n self.keys.pop(key, None)\n self.buffered.pop(key, None)\n self.cache.pop(key, None)\n\n if self.autocommit:\n commit()", "def remove_cached_item(self, path):\n item_path = '%s/%s' % (\n self.cache_folder,\n path.strip('/')\n )\n\n self.container.delete_object(item_path)\n\n try:\n while True:\n self.container.get_object(item_path)\n time.sleep(0.5)\n except NoSuchObject:\n return True", "def remove(self):\n self.remove_file()", "def teardown_upload(self, upload, filesystem_only=True):\n # This is like \"rm -rf path\"\n shutil.rmtree(upload.path, ignore_errors=True)\n if filesystem_only:\n return\n for input in upload.input_set.all():\n input.delete()\n upload.delete()", "def remove_image_file(sender, instance, **kwargs):\n # Pass false so ImageField doesn't save the model.\n instance.image.delete(False)", "def remote_abortUpload(self, upload_id):\n self.transfers_register.deallocate_upload_slot(upload_id)\n\n # Also remove the file if the upload is already completed\n completed = settings.completed_root.child(upload_id)\n if completed.exists():\n completed.remove()", "def delete(self, *args, **kwargs):\n self.file.delete(save=False)\n self.thumbnail.delete(save=False)\n\n super(File, self).delete(*args, **kwargs)", "def remove(self, name):\n path = '%s/%s' % (self.path, name)\n lock = '%s%s' % (path, LOCKED_SUFFIX)\n os.unlink(path)\n os.unlink(lock)", "def _cache_remove(self, objId, methodname):\n \n self._lock.acquire()\n key = (objId, methodname)\n if self.cache.has_key(key):\n del self.cache[key] \n self._lock.release()", "def delete(self):\r\n if self.provider.readonly:\r\n raise DAVError(HTTP_FORBIDDEN)\r\n\r\n self.provider.cache_fs.remove(self.path)\r\n if self.nibbler.find(self.path):\r\n self.nibbler.remove_file(self.path)\r\n\r\n self.removeAllProperties(True)\r\n self.removeAllLocks(True)", "def remove(self):\n path = os.path.abspath(path)\n if path in self.files:\n del self.files[path]\n return True\n return False", "def cache_remove(item: str) -> None:\n\titem = str(item)\n\tcache = \"Cached/\" + item\n\n\tif os.path.exists(cache):\n\t\tdelete_file(cache)", "def remove_file(self, key=None):\n if self.file_is_present(key):\n os.remove(self.file_path(key))", "def delete(self, cache_key):\r\n pass", "def remove_from_cache(self, query):\n return", "def delete_thumbnail(self, thumbnail_name):", "def delete_upload(arn=None):\n pass", "def remove(self, tile_index: int) -> None:\n octree_chunk = self._tiles[tile_index].octree_chunk\n self._chunks.remove(octree_chunk)\n del self._tiles[tile_index]", "def remove(self, member):\n with self.lock:\n try:\n self.pool.remove(member)\n except KeyError:\n pass", "def _delete_image_volume(self,\n context: context.RequestContext,\n cache_entry: dict) -> None:\n volume = objects.Volume.get_by_id(context, cache_entry['volume_id'])\n\n # Delete will evict the cache entry.\n self.volume_api.delete(context, volume)" ]
[ "0.6391853", "0.6139608", "0.57140166", "0.5713958", "0.56205344", "0.5557041", "0.5550686", "0.5528383", "0.5524926", "0.552387", "0.551779", "0.5497478", "0.54907894", "0.54887533", "0.544679", "0.54364336", "0.54243356", "0.5414434", "0.5392673", "0.5388353", "0.53837955", "0.5383707", "0.53606313", "0.5337674", "0.5337445", "0.5318374", "0.53139514", "0.5311959", "0.52752507", "0.5274969" ]
0.8447665
0
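The membership test plus del above is equivalent to a single dict.pop with a default, which also avoids a rare race if the cache could be mutated between the check and the delete; a one-line sketch:

def remove_from_cache(multipart_upload):
    # pop() with a default is a no-op when the ID is absent
    cache.pop(multipart_upload.uploadId, None)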
Returns the multipart upload whose ID is stored in the metadata. As Amazon allows several simultaneous multipart uploads for the same file, the ID is the only unique, reliable descriptor.
def get_multipart_upload(metadata):
    multipart_upload = None
    metadata_mp_id = None
    filename = metadata.path
    if filename.startswith(u"/"):
        filename = filename[1:]
    plug.logger.debug(u"Getting multipart upload of {}", filename)
    # Retrieve the stored multipart upload ID
    try:
        metadata_mp_id = metadata.extra['mp_id']
    except KeyError:
        # No multipart upload ID
        # Raising now is faster (doesn't go through all the MP uploads)
        raise DriverError("Unable to retrieve multipart upload ID")
    if metadata_mp_id not in cache:
        # Try to only request multipart uploads of this file
        for mp in S3Conn.list_multipart_uploads(prefix=filename):
            # Go through all the multipart uploads
            # to find the one of this transfer
            if mp.uploadId == metadata_mp_id:
                multipart_upload = mp
                add_to_cache(mp)
                break
    else:
        multipart_upload = cache[metadata_mp_id]
    # At this point it shouldn't be None in any case
    if multipart_upload is None:
        raise DriverError("Cannot find upload for file '{}'"
                          .format(filename))
    plug.logger.debug(u"Found multipart upload of {} - ID {}",
                      filename, multipart_upload.uploadId)
    return multipart_upload
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initiate_multipart_upload(self):\n request = self.s3.create_request(\"OBJECT_POST\", uri = self.uri, headers = self.headers_baseline, extra = \"?uploads\")\n response = self.s3.send_request(request)\n data = response[\"data\"]\n self.upload_id = getTextFromXml(data, \"UploadId\")\n return self.upload_id", "def multipart_upload_id(self, multipart_upload_id):\n\n self._multipart_upload_id = multipart_upload_id", "def media_entry_id(self):\n return self.getattr('media_entry_id')", "def complete_multipart_upload(self):\n debug(\"MultiPart: Completing upload: %s\" % self.upload_id)\n\n parts_xml = []\n part_xml = \"<Part><PartNumber>%i</PartNumber><ETag>%s</ETag></Part>\"\n for seq, etag in self.parts.items():\n parts_xml.append(part_xml % (seq, etag))\n body = \"<CompleteMultipartUpload>%s</CompleteMultipartUpload>\" % (\"\".join(parts_xml))\n\n headers = { \"content-length\": len(body) }\n request = self.s3.create_request(\"OBJECT_POST\", uri = self.uri, headers = headers, extra = \"?uploadId=%s\" % (self.upload_id))\n response = self.s3.send_request(request, body = body)\n\n return response", "def __get_image_id(self):\n return self.__get_multi_images_ids(1)", "def get_picture_id(path):\n\t\tif path is None:\n\t\t\treturn\n\t\tcon = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')\n\t\twith con:\n\t\t\tquery = \"SELECT id from fileuploader_picture WHERE file=%s\" % (path)\n\t\t\tcur = con.cursor()\n\t\t\tcur.execute(query)\n\t\t\tdata = cur.fetchall()\n\t\t\tprint \"len(data)\"\n\t\t\tprint data\n\t\t\tif len(data) > 0:\n\t\t\t\treturn data[0]\n\t\t\treturn None", "def get_multipart_by_legacy_recid(recid):\n search = SeriesSearch().query(\n 'bool',\n filter=[\n Q('term', mode_of_issuance='MULTIPART_MONOGRAPH'),\n Q('term', legacy_recid=recid),\n ]\n )\n result = search.execute()\n hits_total = result.hits.total if lt_es7 else result.hits.total.value\n if not result.hits or hits_total < 1:\n click.secho('no multipart found with legacy recid {}'.format(recid),\n fg='red')\n # TODO uncomment with cleaner data\n # raise MultipartMigrationError(\n # 'no multipart found with legacy recid {}'.format(recid))\n elif hits_total > 1:\n raise MultipartMigrationError(\n 'found more than one multipart with recid {}'.format(recid))\n else:\n return Series.get_record_by_pid(result.hits[0].pid)", "def get_upload(arn=None):\n pass", "def split(self, f):\n x = os.path.split(f)\n subjectid = os.path.split(x[-2])[-1]\n imagefile = x[-1]\n return (subjectid, imagefile)", "def id(self) -> FileID:\n _args: list[Arg] = []\n _ctx = self._select(\"id\", _args)\n return _ctx.execute_sync(FileID)", "def upload_all_parts(self):\n if not self.upload_id:\n raise RuntimeError(\"Attempting to use a multipart upload that has not been initiated.\")\n\n if self.file.name != \"<stdin>\":\n size_left = file_size = os.stat(self.file.name)[ST_SIZE]\n nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)\n debug(\"MultiPart: Uploading %s in %d parts\" % (self.file.name, nr_parts))\n else:\n debug(\"MultiPart: Uploading from %s\" % (self.file.name))\n\n\tself.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024\n\n seq = 1\n\tif self.file.name != \"<stdin>\":\n while size_left > 0:\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = min(file_size - offset, self.chunk_size)\n size_left -= current_chunk_size\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d of %d, %s]\" % (seq, nr_parts, \"%d%sB\" 
% formatSize(current_chunk_size, human_readable = True))\n }\n try:\n self.upload_part(seq, offset, current_chunk_size, labels)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n else:\n while True:\n buffer = self.file.read(self.chunk_size)\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = len(buffer)\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d, %s]\" % (seq, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n if len(buffer) == 0: # EOF\n break\n try:\n self.upload_part(seq, offset, current_chunk_size, labels, buffer)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n\n debug(\"MultiPart: Upload finished: %d parts\", seq - 1)", "def partid(self):\n return self._partid", "def partid(self):\n return self._part_id", "def complete_multipart(self, multipart_id):\n return h3lib.complete_multipart(self._handle, multipart_id, self._user_id)", "def fetch_sample_mid(self, sample_id: str) -> str or None:\n if not self.sample_exists(sample_id):\n return None\n file_grp = [f for f in self.fcs_files if f.primary_id == sample_id][0]\n return file_grp.id.__str__()", "def media_id(self):\n try:\n return Html.toId(self.content)\n except:\n Mp3Error(1)", "def get_upload_key(self):\n\n if not hasattr(self, '_upload_key'):\n self._upload_key = self.get_storage().bucket.get_key(\n self.cleaned_data['key_name'])\n return self._upload_key", "def multipart_push(self, upload_id, url, part_number, chunk_size, data, md5=None):\n path = self.base_path / url\n assert path.is_file(), f\"{self}: multipart upload file {path} does not exist.\"\n with path.open(\"r+b\") as stream:\n stream.seek((part_number - 1) * chunk_size)\n shutil.copyfileobj(data, stream, 1024 * 1024)\n return dict()", "def _get_id(mf, url=None):\n\n\tprops = mf['properties']\n\n\tif 'uid' in props:\n\t\treturn props['uid'][0]\n\telif 'url' in props:\n\t\treturn props['url'][0]\n\telse:\n\t\treturn None", "def media_content_id(self):\n return self._media_uri_final", "def complete_multipart_upload(Bucket=None, Key=None, MultipartUpload=None, UploadId=None, RequestPayer=None):\n pass", "def id(self):\n return self.metadata[\"id\"]", "def get_upload_ticket(self):\n r = HTTPClient().fetch(self.config['apiroot'] + self.ticket_path, method=\"POST\",\n body=urlencode({'type': 'streaming'}), headers = self.standard_headers,\n validate_cert=not self.config['dev'])\n response = json.loads(r.body)\n return response['ticket_id'], response['upload_link_secure'], response['complete_uri']", "def content_metadata_id(self):\n return self._content_metadata_id", "def get_id(self):\n #return self.__str__().__hash__()\n object_type = self['object_type']\n shortname = self.get_description()\n object_name = self['name']\n filename = self['filename']\n id = \"%s-%s-%s-%s\" % ( object_type, shortname, object_name, filename)\n import md5\n return md5.new(id).hexdigest()\n return id", "def complete_upload(self):\r\n xml = self.to_xml()\r\n return self.bucket.complete_multipart_upload(self.key_name,\r\n self.id, xml)", "def get_file_name(self):\n return self.upload.name[6:]", "def _get_upload_part(self, upload_buffer):\n if upload_buffer.intent_count() == 1 and upload_buffer.get_intent(0).is_upload():\n intent = upload_buffer.get_intent(0)\n relative_offset = 
upload_buffer.start_offset - intent.destination_offset\n length = upload_buffer.length\n definition = UploadEmergePartDefinition(intent.outbound_source, relative_offset, length)\n else:\n subparts = []\n fragment_start = upload_buffer.start_offset\n for intent, fragment_end in upload_buffer.iter_items():\n relative_offset = fragment_start - intent.destination_offset\n length = fragment_end - fragment_start\n if intent.is_upload():\n subpart_class = LocalSourceUploadSubpart\n elif intent.is_copy():\n subpart_class = RemoteSourceUploadSubpart\n else:\n raise RuntimeError('This cannot happen!!!')\n subparts.append(subpart_class(intent.outbound_source, relative_offset, length))\n fragment_start = fragment_end\n definition = UploadSubpartsEmergePartDefinition(subparts)\n return EmergePart(definition)", "def media_content_id(self):\n return int(self._gallery_status[\"current_item\"])", "def upload_chunk(self, file_obj, length, offset=0, upload_id=None):\n\n params = dict()\n\n if upload_id:\n params['upload_id'] = upload_id\n params['offset'] = offset\n\n url, ignored_params, headers = self.request(\"/chunked_upload\", params,\n method='PUT', content_server=True)\n\n try:\n reply = self.rest_client.PUT(url, file_obj, headers)\n return reply['offset'], reply['upload_id']\n except ErrorResponse as e:\n raise e" ]
[ "0.7365177", "0.59189004", "0.56275564", "0.56237596", "0.5594857", "0.5565518", "0.5528612", "0.54932034", "0.54911405", "0.5483178", "0.5480892", "0.54717666", "0.5466336", "0.54513574", "0.5451055", "0.53193146", "0.5319063", "0.53091484", "0.5307384", "0.52786773", "0.527505", "0.5270689", "0.52556497", "0.5240762", "0.52363926", "0.5206081", "0.5188882", "0.51762134", "0.51590884", "0.51302636" ]
0.74951166
0
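For reference, the same "find the upload whose ID we stored" lookup expressed with boto3's client API (the bucket and key arguments are illustrative assumptions; tinys3's list_multipart_uploads is what the driver itself uses):

import boto3

def find_multipart_upload(bucket, key, upload_id):
    s3 = boto3.client('s3')
    # Prefix narrows the listing to uploads of this key only
    resp = s3.list_multipart_uploads(Bucket=bucket, Prefix=key)
    for upload in resp.get('Uploads', []):
        if upload['UploadId'] == upload_id:
            return upload
    raise LookupError("Cannot find upload for file '{}'".format(key))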
Get the points to estimate head pose sideways
def head_pose_points(image, rotation_vector, translation_vector, camera_matrix):
    rear_size = 1
    rear_depth = 0
    front_size = image.shape[1]
    front_depth = front_size * 2
    val = [rear_size, rear_depth, front_size, front_depth]
    point_2d = get_2d_points(image, rotation_vector, translation_vector,
                             camera_matrix, val)
    y = (point_2d[5] + point_2d[8]) // 2
    x = point_2d[2]
    return (x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_oriented_points(self):\n return g.points_from_probe(self)", "def pose_2d_pts(self,image):\n '''\n image- rgb image \n return:-\n pts - list of 2d pose landmarks as img coords\n image- rgb image on which the 2d pose landmarks are drawn\n ''' \n pts=[]\n imgRGB=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n results=pose.process(imgRGB)\n if results.pose_landmarks:\n mpDraw.draw_landmarks(image,results.pose_landmarks,mpPose.POSE_CONNECTIONS)\n for id,lm in enumerate(results.pose_landmarks.landmark):\n h,w,c=image.shape\n imgx,imgy=int(lm.x*w),int(lm.y*h)\n \n pts.append((imgx,imgy)) \n return pts,image", "def extract_poses(self):\n if len(self.path) == 0:\n raise ValueError(\"Path Empty\")\n return\n \n path = np.array(self.readLeaderPath())\n # Path distance at target position\n target_dist = path[-1,3] - self.distance\n # Filter by distance\n far_pose_ids = np.where(path[:,3] < target_dist) #path[path[:,3] < target_dist]\n\n if len(far_pose_ids[0]) != 0:\n target_pose_id = far_pose_ids[0][-1]\n target_pose = path[target_pose_id,:3]\n\n # Delete Path history\n self.thread_lock.acquire()\n self.path = path[target_pose_id:,:].tolist()\n self.thread_lock.release()\n else:\n # No target\n raise ValueError(\"No target\")\n return\n\n current_pose = np.array(self.readPose())\n return current_pose, target_pose", "def test_estimate_head_pose_by_bounding_box(self):\n angles = TestHeadPose.headPoseEstimator.estimateByBoundingBox(self.detection.boundingBox, self.image)\n self.assertHeadPose(angles)", "def waypoint_coordinate_extractor(waypoint):\n return [waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]", "def getSkeletonPoints(self):\n self.SkeletonPoints = []\n for s in self.Intersections:\n self.SkeletonPoints.append(s.centroid)", "def get_pose_map(path) -> np.ndarray:\n hands = mp_hands.Hands(\n static_image_mode=True,\n max_num_hands=2,\n min_detection_confidence=0.5\n )\n # Read an image, flip it around y-axis for correct handedness output (see\n # above).\n image = cv2.imread(path) # range 0 - 255. 
3d\n image = cv2.resize(image, IMG_SHAPE[:2][::-1],fx=1, fy=1, interpolation=cv2.INTER_CUBIC)\n # show_img(image)\n # Convert the BGR image to RGB before processing.\n results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n\n # Print handedness and draw hand landmarks on the image.\n print('Handedness:', results.multi_handedness)\n \n image_height, image_width, channels = image.shape\n annotated_image = image.copy()\n empty_image = np.zeros((image_height, image_width, channels))\n if not results.multi_hand_landmarks:\n return annotated_image\n for hand_landmarks in results.multi_hand_landmarks:\n # print('hand_landmarks:', hand_landmarks)\n # print(\n # f'Index finger tip coordinates: (',\n # f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * image_width}, '\n # f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * image_hight})'\n # )\n mp_drawing.draw_landmarks(\n annotated_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)\n mp_drawing.draw_landmarks(\n empty_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)\n # cv2.imwrite(\n # '/tmp/annotated_image' + str(idx) + '.png', cv2.flip(annotated_image, 1))\n hands.close()\n # return preprocess_image(np.asarray(annotated_image))\n return annotated_image, empty_image", "def _get_init_pose(self):\n return self.init_pose_R, self.init_pose_t", "def head_position_estimator_process(self, frame):\n frame = self.frame_pre_process(frame)\n\n # Clean Head Position detection from previous frame\n self.head_estimator.clear()\n\n # Predict and return head position[Yaw, Pitch, Roll]\n self.head_estimator.start_async(frame, self.rois)\n headPoseAngles = self.head_estimator.get_headposition()\n\n return (headPoseAngles)", "def test_estimate_head_pose_hight_level_with_use_orientation_mode(self):\n\n faceEngine = VLFaceEngine()\n faceEngine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1\n detector = VLFaceDetector(DetectorType.FACE_DET_V3, faceEngine)\n\n angles0 = detector.detectOne(VLImage.load(filename=ROTATED0)).headPose\n angles90 = detector.detectOne(VLImage.load(filename=ROTATED90)).headPose\n\n assert pytest.approx(angles90.pitch, abs=2) == angles0.pitch\n assert pytest.approx(angles90.roll, abs=2) == angles0.roll\n assert pytest.approx(angles90.yaw, abs=2) == angles0.yaw", "def get_robot_poses(self): \n x_poses = np.array([])\n y_poses = np.array([])\n for i in range(self.no_robots):\n odom_topic = '/robot_' + str(i) + '/odom'\n msg = rospy.wait_for_message(odom_topic, Odometry)\n x_pos = msg.pose.pose.position.x\n y_pos = msg.pose.pose.position.y\n x_poses = np.append(x_poses, x_pos)\n y_poses = np.append(y_poses, y_pos)\n return x_poses, y_poses", "def get_goal_ee_pose(self):\n #self.target_endpoint = #magic tf call that I can add ie the pose of the palm from camera aruco detection\n while True:\n try:\n translation, rotation = self.listener.lookupTransform('world_frame', 'palm_frame_camera', rospy.Time()) # ee_frame_camera_flipped\n break # once the transform is obtained move on\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue # if it fails try again\n point = [translation[0], translation[1], translation[2]]\n self.target_endpoint = np.array(point)\n # rospy.logerr(self.target_endpoint)", "def get_poses_in_frame(self, frame: int) -> np.ndarray:\n return self._get_objects_in_frame(frame, np.array(['poseAIK']), 'pid', self.persons)", "def test_estimate_head_pose_with_use_orientation_mode(self):\n\n faceEngine = VLFaceEngine()\n 
faceEngine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n images = [VLImage.load(filename=ROTATED0), VLImage.load(filename=ROTATED90)]\n detections = detector.detect(images, detect68Landmarks=True)\n angles0 = TestHeadPose.headPoseEstimator.estimate(detections[0][0].landmarks68)\n angles90 = TestHeadPose.headPoseEstimator.estimate(detections[1][0].landmarks68)\n\n assert pytest.approx(angles90.pitch, abs=2) == angles0.pitch\n assert pytest.approx(angles90.roll, abs=2) == angles0.roll\n assert pytest.approx(angles90.yaw, abs=2) == angles0.yaw", "def _get_halluc_points(_, halluc_pts):\n if len(halluc_pts) > 0:\n return halluc_pts\n else:\n return halluc_pts", "def test_estimate_head_pose_by_bounding_box_from_other_image(self):\n image = VLImage.load(filename=NO_FACES)\n angles = TestHeadPose.headPoseEstimator.estimateByBoundingBox(self.detection.boundingBox, image)\n self.assertHeadPose(angles)", "def get_relative_poses(self):\n if self.backend is not None:\n return self.backend.relative_odometry_poses()\n return self.odometry.get_relative_poses()", "def get_points(self):\r\n return self.nx*self.ny*self.nz", "def getPose(self):\n\t\treturn self.__subs['pose'].getData()", "def get_points(self):\n\t\treturn self.points", "def batch_get_pose_obj(th_pose_3d, smpl, init_pose=False):\r\n batch_size = len(th_pose_3d)\r\n verts, _, _, _ = smpl.forward()\r\n J, face, hands = smpl.get_landmarks()\r\n\r\n J_observed = torch.stack([th_pose_3d[i]['pose_keypoints_3d'] for i in range(batch_size)]).cuda()\r\n face_observed = torch.stack([th_pose_3d[i]['face_keypoints_3d'] for i in range(batch_size)]).cuda()\r\n\r\n # Bharat: Why do we need to loop? Shouldn't we structure th_pose_3d as [key][batch, ...] 
as opposed to current [batch][key]?\r\n # This would allow us to remove the loop here.\r\n hands_observed = torch.stack(\r\n [torch.cat((th_pose_3d[i]['hand_left_keypoints_3d'], th_pose_3d[i]['hand_right_keypoints_3d']), dim=0) for i in\r\n range(batch_size)]).cuda()\r\n\r\n idx_mask = hands_observed[:, :, 3] < HAND_VISIBLE\r\n hands_observed[:, :, :3][idx_mask] = 0.\r\n\r\n if init_pose:\r\n pose_init_idx = torch.LongTensor([0, 2, 5, 8, 11])\r\n return (((J[:, pose_init_idx, : ] - J_observed[:, pose_init_idx, : 3])\r\n *J_observed[:, pose_init_idx, 3].unsqueeze(-1)) ** 2).mean()\r\n else:\r\n return ( (((J - J_observed[:, :, :3]) *J_observed[:, :, 3].unsqueeze(-1))**2).mean() +\\\r\n (((face - face_observed[:, :,: 3]) *face_observed[:, :, 3].unsqueeze(-1))**2).mean() +\\\r\n (((hands - hands_observed[:, :, :3]) *hands_observed[:, :, 3].unsqueeze(-1))**2).mean() ).unsqueeze(0)/3\r\n # return (((J - J_observed[:, :, :3]) *J_observed[:, :, 3].unsqueeze(-1))**2).mean().unsqueeze(0) #only joints\r", "def test_head_pose_as_dict(self):\n angles = TestHeadPose.headPoseEstimator.estimateBy68Landmarks(self.detection.landmarks68)\n self.assertHeadPose(angles)\n assert {\"pitch\": angles.pitch, \"roll\": angles.roll, \"yaw\": angles.yaw} == angles.asDict()", "def get_pose_data(self, raw_output):\n outputs = self._convert_raw_outputs(raw_output)\n personwise_keypoints = self.decode_results(outputs)\n\n # If the frame got scaled and padded before sending the frame to the NN\n # (in get_input_frame in case of sending a local file) we need to\n # remove this padding and scaling from the keypoints\n if personwise_keypoints.shape[0] > 0:\n if self._pad_top is not None:\n keypoint_ids = np.nonzero(personwise_keypoints[:, :, -1])\n personwise_keypoints[keypoint_ids[0], keypoint_ids[1], :2] -= [\n self._pad_left, self._pad_top\n ]\n if self._scale_factor is not None and self._scale_factor != 1:\n personwise_keypoints[:, :, :2] /= self._scale_factor\n\n return personwise_keypoints", "def task_two(points):\n assert len(points) == 4, \"Given too much point to calculate\"\n assert len(points[0][0]) == len(points[0][1]) == 2,\\\n \"Inappropriate stucture of points list\"\n # P` = H * P where\n # P - initial points\n # P` - homography translated points\n # P` = H * U * S * VT\n # H ~ P` * V * S^(-2) * UT\n P = np.array([[x[0, 0], x[0, 1], 1] for x in points]).T\n P_tilda = np.array([[x[1, 0], x[1, 1], 1] for x in points]).T\n W, U, VT = cv.SVDecomp(P)\n W_rev = np.diag(1 / W.flatten())\n H = P_tilda.dot(VT.T).dot(W_rev).dot(U.T)\n print(\"Homography matrix:\\n\", H)", "def preprocess_output(self, outputs, head_pose_estimate):\n\n roll = head_pose_estimate[2]\n outputs = outputs[self.output_name][0]\n cos_theta = math.cos(roll * math.pi / 180)\n sin_theta = math.sin(roll * math.pi / 180)\n x = outputs[0] * cos_theta + outputs[1] * sin_theta\n y = outputs[1] * cos_theta - outputs[0] * sin_theta\n\n return (x, y), outputs", "def joint_pairs(self):\n return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], #17 body keypoints\n [20-3, 23-3], [21-3, 24-3], [22-3, 25-3], [26-3, 42-3], [27-3, 41-3], [28-3, 40-3], [29-3, 39-3], [30-3, 38-3], \n [31-3, 37-3], [32-3, 36-3], [33-3, 35-3], [43-3, 52-3], [44-3, 51-3], [45-3, 50-3], [46-3, 49-3], [47-3, 48-3], \n [62-3, 71-3], [63-3, 70-3], [64-3, 69-3], [65-3, 68-3], [66-3, 73-3], [67-3, 72-3], [57-3, 61-3], [58-3, 60-3],\n [74-3, 80-3], [75-3, 79-3], [76-3, 78-3], [87-3, 89-3], [93-3, 91-3], [86-3, 90-3], [85-3, 81-3], [84-3, 82-3],\n [94-3, 115-3], [95-3, 
116-3], [96-3, 117-3], [97-3, 118-3], [98-3, 119-3], [99-3, 120-3], [100-3, 121-3],\n [101-3, 122-3], [102-3, 123-3], [103-3, 124-3], [104-3, 125-3], [105-3, 126-3], [106-3, 127-3], [107-3, 128-3],\n [108-3, 129-3], [109-3, 130-3], [110-3, 131-3], [111-3, 132-3], [112-3, 133-3], [113-3, 134-3], [114-3, 135-3]]", "def find_start_pose(self):\n\n # Find start position\n y,x = [k for k,v in self.mp.items() if v == 94 or v == 60 \\\n or v == 62 or v == 118][0]\n\n\n # Assign orientation\n dy,dx, theta = 0,0, 0\n if self.mp[y,x] == ord('^'): theta = np.pi/2\n elif mp[y,x] == ord('<'): theta = -np.pi\n elif mp[y,x] == ord('>'): theta = 0\n else: theta = -np.pi/2\n\n return y, x, theta", "def get_pos(self,angles=None):\n if angles != self.angles and angles != None:\n self.forward_kinematics()\n return [self.final_T[0][2],self.final_T[1][2],0]\n else:\n self.forward_kinematics()\n return [self.final_T[0][2],self.final_T[1][2],0]", "def gripper_pose(self):\n return self._limb.endpoint_pose()", "def computeHomography(src_pnt: np.ndarray, dst_pnt: np.ndarray) -> (np.ndarray, float):\r\n\r\n A = []\r\n for i in range(0, len(src_pnt)):\r\n x, y = src_pnt[i][0], src_pnt[i][1]\r\n u, v = dst_pnt[i][0], dst_pnt[i][1]\r\n A.append([x, y, 1, 0, 0, 0, -u * x, -u * y, -u])# like we saw in class append for evey point two rows\r\n A.append([0, 0, 0, x, y, 1, -v * x, -v * y, -v])\r\n\r\n A = np.asarray(A)\r\n U, S, Vh = np.linalg.svd(A) # use SVD to find the values of the variables in the matrix\r\n L = Vh[-1, :] / Vh[-1, -1] # divided by the last row like we see in the exercise\r\n H = L.reshape(3, 3) # reshaping to 3 by 3\r\n print(H) # print our Homography\r\n #print openCv homography\r\n M, mask = cv2.findHomography(src_pnt, dst_pnt)\r\n print(\"=======================\")\r\n print(M)\r\n return H" ]
[ "0.6658804", "0.6222457", "0.6192936", "0.615454", "0.61042935", "0.60609454", "0.6035024", "0.6020878", "0.60114807", "0.59733033", "0.5970817", "0.59246963", "0.58942825", "0.58829874", "0.5882882", "0.5877781", "0.58656156", "0.58611846", "0.58332133", "0.5820129", "0.5814287", "0.580828", "0.5800305", "0.57914704", "0.57899624", "0.57769334", "0.5776908", "0.5765584", "0.57620937", "0.5760401" ]
0.6316923
1
Return a list of capabilities for the given user.
def getCapabilities4User(session_key, user=None): roles = [] capabilities = [] # Get user info if user is not None: logger.debug('Retrieving role(s) for current user: %s', user) userEntities = entity.getEntities('authentication/users/%s' % user, count=-1, sessionKey=session_key) for stanza, settings in userEntities.items(): if stanza == user: for key, val in settings.items(): if key == 'roles': logger.debug('Successfully retrieved role(s) for user: %s', user) roles = val # Get capabilities for role in roles: logger.debug('Retrieving capabilities for current user: %s', user) roleEntities = entity.getEntities('authorization/roles/%s' % role, count=-1, sessionKey=session_key) for stanza, settings in roleEntities.items(): if stanza == role: for key, val in settings.items(): if key == 'capabilities' or key == 'imported_capabilities': logger.debug('Successfully retrieved %s for user: %s', key, user) capabilities.extend(val) return capabilities
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def capabilities(self):\n return []", "def capabilities(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"capabilities\")", "def capabilities(self) -> Sequence['outputs.SkuCapabilityResponse']:\n return pulumi.get(self, \"capabilities\")", "def capabilities(self):\n pass", "def available_modules(self, user):\n return [sitecomp for sitecomp in self.enabled_modules() if sitecomp.has_perm(user)]", "def _listAllowedRolesAndUsers(self, user):\n result = list(user.getRoles())\n if hasattr(aq_base(user), 'getGroups'):\n result = result + ['user:%s' % x for x in user.getGroups()]\n result.append('Anonymous')\n return result", "def get_capabilities(params,defaults):\n cap = CapabilitiesController (params,defaults)\n return cap.get_capabilities()", "def get_caps(self):\n return ObjectCapabilities.get_capabilities(self)", "def get_capabilities(self):\n\n service = self.__get_service()\n capability = self.__get_capability()\n contents = {\"service\" : service, \"capability\" : capability}\n return contents, self.params['format']", "def get_capabilities(self):\n return Capabilities(javabridge.call(self.jobject, \"getCapabilities\", \"()Lweka/core/Capabilities;\"))", "def capability(self):\n code, data, capabilities = (\n self.__send_command(\"CAPABILITY\", withcontent=True))\n if code == \"OK\":\n return capabilities\n return None", "def default_capabilities(self):\n return CAPABILITIES", "def capabilities(self) -> dto.Capabilities:\n raise NotImplementedError", "def get_permissions(cls, user):\n try:\n user_perm = UserPermission.objects.get(user_id=user.id)\n return user_perm.permission_list.split(',')\n except UserPermission.DoesNotExist:\n return []", "def capabilities(self):\n return None", "def extended_capabilities(self):\n buf = (ctypes.c_uint8 * 32)()\n self._dll.JLINKARM_GetEmuCapsEx(buf, 32)\n return list(buf)", "def list_caps():\n global _CAPABILITIES_MAP\n\n try:\n return tuple(sorted(_CAPABILITIES_MAP.keys()))\n\n except NameError:\n pass # We can remedy this.\n\n loop = get_loop()\n\n controller_connection = CioRoot(loop)\n\n _CAPABILITIES_MAP = {}\n\n for capability_id in controller_connection.init():\n _CAPABILITIES_MAP[capability_id] = {\n 'acquire': controller_connection.acquire,\n 'release': controller_connection.release,\n }\n\n return tuple(sorted(_CAPABILITIES_MAP.keys()))", "def capabilities(self):\n return Capabilities(\n immutable = True,\n deferred = False,\n persistent = True,\n appendable = False,\n remote=True,\n )", "def capabilities(self):\n return Capabilities(\n immutable = True,\n deferred = False,\n persistent = True,\n appendable = False,\n remote=True,\n )", "def getcapabilities(self):\n reader = WFSCapabilitiesReader(self.version, auth=self.auth)\n return openURL(\n reader.capabilities_url(self.url), timeout=self.timeout,\n headers=self.headers, auth=self.auth\n )", "def capabilities(self):\n\n class Capabilities(ct.Structure):\n _fields_ = [(\"Size\", ct.c_ulong),\n (\"AcqModes\", ct.c_ulong),\n (\"ReadModes\", ct.c_ulong),\n (\"FTReadModes\", ct.c_ulong),\n (\"TriggerModes\", ct.c_ulong),\n (\"CameraType\", ct.c_ulong),\n (\"PixelModes\", ct.c_ulong),\n (\"SetFunctions\", ct.c_ulong),\n (\"GetFunctions\", ct.c_ulong),\n (\"Features\", ct.c_ulong),\n (\"PCICard\", ct.c_ulong),\n (\"EMGainCapability\", ct.c_ulong)]\n\n stru = Capabilities()\n stru.Size = ct.sizeof(stru)\n self.lib.GetCapabilities(ct.pointer(stru))\n\n return stru", "def get_capabilities(self, method='get'):\n self.client.getcapabilities()\n\n self._has_capabilities = True", "def 
detect_supported_caps():\n result = []\n # generate list of supported capabilities\n\n # Intel RDT L3 CAT\n if common.PQOS_API.is_l3_cat_supported():\n result.append(common.CAT_L3_CAP)\n\n # Intel RDT L2 CAT\n if common.PQOS_API.is_l2_cat_supported():\n result.append(common.CAT_L2_CAP)\n\n # Intel RDT MBA\n if common.PQOS_API.is_mba_supported():\n result.append(common.MBA_CAP)\n\n if sstbf.is_sstbf_enabled():\n result.append(common.SSTBF_CAP)\n\n if power.is_sstcp_enabled():\n result.append(common.POWER_CAP)\n\n return result", "def to_capabilities(self):", "async def capabilities(self, abilities):\n capabilities = []\n for ability in abilities:\n if self.privileged_to_run(ability) and ability.find_executors(self.executors, self.platform):\n capabilities.append(ability)\n return capabilities", "def supported_capabilities(self) -> Optional['outputs.SupportedCapabilitiesResponse']:\n return pulumi.get(self, \"supported_capabilities\")", "def capabilities(self):\n return self._dll.JLINKARM_GetEmuCaps()", "def list_user_access(self, user):\n return self._user_manager.list_user_access(user)", "def allowed_capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_capabilities\")", "def allowed_capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_capabilities\")" ]
[ "0.6409061", "0.6338583", "0.6190463", "0.59944904", "0.5973943", "0.59410274", "0.5928636", "0.58201957", "0.581861", "0.57884574", "0.577865", "0.57566994", "0.5692723", "0.5676897", "0.5660047", "0.56488127", "0.56191415", "0.5557506", "0.5557506", "0.55458957", "0.5538102", "0.55139184", "0.54618025", "0.54553366", "0.5422826", "0.5417152", "0.5416631", "0.53897846", "0.5378154", "0.5378154" ]
0.80392367
0
Return the review statuses as a dictionary with the key set to the label (or the stanza, if the label is undefined).
def refreshStatusLabelMap(self, session_key, force_refresh=False): if force_refresh or self.status_label_map is None: logger.debug("Reloading the review statuses list") reviewStatusesEntities = entity.getEntities('alerts/reviewstatuses', count=-1, sessionKey=session_key) self.status_label_map = {stanza: settings.get("label", stanza) for stanza, settings in reviewStatusesEntities.iteritems()} logger.debug("%s review statuses loaded", len(self.status_label_map))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_labels(pr_id):\n label_json = get_status_json(pr_id, 'labels')\n current_labels = [l['name'] for l in label_json]\n return current_labels", "def labels_list(issue):\n return [x['name'] for x in issue['labels']]", "def get_review_status(pr_id):\n reviews = get_status_json(pr_id, 'reviews')\n requests = get_status_json(pr_id, 'reviewRequests')\n\n requested_authors = [r[\"login\"] for r in requests]\n\n review_status = {}\n for r in reviews:\n author = r['author']['login']\n date = datetime.fromisoformat(r['submittedAt'].strip('Z'))\n state = r['state']\n if author not in review_status:\n review_status[author] = ReviewComment(state, date, author)\n elif state != 'COMMENTED' and review_status[author].date < date:\n review_status[author] = ReviewComment(state, date, author)\n for a in review_status:\n if a in requested_authors:\n review_status[a] = ReviewComment('REVIEW_REQUESTED', review_status[a].date, a)\n for a in requested_authors:\n if a not in review_status:\n review_status[a] = ReviewComment('UNRESPONSIVE', None, a)\n return review_status, requested_authors", "def award_status_populator():\n award_status_list = funding_data[\"Project Status:\"].unique()\n return [{'label': i, 'value': i} for i in award_status_list]", "def extract_labels_scope(sentence_dicts, config):\n labels = []\n for sent in sentence_dicts:\n if not sent['neg']:\n continue\n for cue_i, (cue, cue_position, cue_type) in enumerate(sent['cues']):\n prev_label = 1\n for key, value in sent.items():\n if isinstance(key, int):\n scope = sent['scopes'][cue_i]\n if any(key in s for s in scope):\n if prev_label == 1:\n labels.append(2)\n prev_label = 2\n else:\n labels.append(0)\n prev_label = 0\n elif key == cue_position:\n labels.append(3)\n prev_label = 3\n else:\n labels.append(1)\n prev_label = 1\n return labels", "def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}", "def process_statuses(statuses):\n status_dicts = list()\n\n for status in statuses:\n status_dict = dict()\n\n text = status.text\n splittext = text.split('\\n')\n\n if is_changed(splittext[0]): # adjust if text's first line is \"changed\"\n b = 1\n else:\n b = 0\n\n try:\n homeaway = get_teams(splittext[0 + b])\n status_dict['HOME'] = homeaway[0]\n status_dict['AWAY'] = homeaway[1]\n\n split_line1 = splittext[1+b].split(' / ')\n status_dict['EXPECTED GOAL DIFFERENCE'] = split_line1[0]\n status_dict['EXPECTED GOAL DIFFERENCE NEUTRAL'] = split_line1[1]\n\n split_line2 = splittext[2+b].split()\n status_dict['HOME TEAM CHANCE'] = split_line2[1]\n status_dict['DRAW CHANCE'] = split_line2[3]\n status_dict['AWAY TEAM CHANCE'] = split_line2[5]\n\n split_line3 = splittext[3+b].split()\n status_dict['HOME TEAM CHANCE NEUTRAL'] = split_line3[1]\n status_dict['DRAW CHANCE NEUTRAL'] = split_line3[3]\n status_dict['AWAY TEAM CHANCE NEUTRAL'] = split_line3[5]\n\n for key, val in status_dict.items():\n status_dict[key] = unidecode(val) # transliterate unicode characters\n\n except IndexError as e:\n print(e)\n continue\n\n status_dict['DATE'] = status.created_at.strftime('%d/%m/%Y')\n\n status_dicts.append(status_dict)\n\n return status_dicts", "def list_labels(self, repository):\n data = self._get_all_data('/repos/{}/labels'.format(repository))\n return {l['name']: str(l['color']) for l in data}", "def get_status(pos, neg, names):\n status = {}\n for i in names:\n #print str(i) +'\\n'+ str(pos) +'\\n'+ str(neg)+'\\n'+'\\n'\n if i in pos:\n status[i] = \"1\"\n elif i in neg:\n status[i] = \"0\"\n else:\n status[i] = \"NA\"\n 
return status", "def get_book_statuses() -> list:\n return data.get_book_statuses()", "def list_straten_adapter(obj, request):\n naam = obj.label\n for name, language in obj.namen:\n if language == 'nl' and name:\n naam = name\n break\n return {\n 'id': obj.id,\n 'label': obj.label,\n 'naam': naam,\n 'status': {\n 'id': obj.status.id,\n 'naam': obj.status.naam,\n 'definitie': obj.status.definitie\n },\n }", "def get_labels_vector(reviews, ref='label'):\n labels = [-1 if review.polarities[ref].is_negative() else\n 0 if review.polarities[ref].is_objective() else\n +1 if review.polarities[ref].is_positive() else None\n for review in reviews]\n\n return labels", "def by_label(self, value):\n return {k: v for k, v in self.items() if k == value}", "def get_statuses():\n statuses = list()\n\n for status in tweepy.Cursor(api.user_timeline, id=836104384366936066).items():\n if is_prediction(status):\n statuses.append(status)\n else:\n continue\n\n return statuses", "def status():\n statuses = get_all_statuses()\n return json.dumps(statuses, indent=4)", "def get_labels():\n json_request = request.json # get the json from the server\n keys = sort_keys(json_request.keys()) # sort the keys (i.e. the token ids)\n labels = []\n for k in keys:\n # get the labels that the user input to the UI\n val = (json_request[k]['text'], json_request[k]['value'])\n labels.append(val)\n return labels", "def labels_fixed(annotation, label_keys):\n labels = dict(annotation.labels.order_by('key').values_list('key', 'title'))\n return [labels.get(key, '') for key in label_keys]", "def _get_loadbalancer_statuses(self, lb_id):\n resource_path = \"%s/%s/%s/statuses\" % (RESOURCE_PREFIX,\n LBS_RESOURCE,\n lb_id)\n try:\n statuses = self.client.retrieve_resource(\n \"GLOBAL\", resource_path)[1]['dict']\n except ncc_client.NCCException as e:\n if e.is_not_found_exception():\n return {\"lb_statuses\": None}\n else:\n return None\n statuses = statuses[\"statuses\"]\n return {\"lb_statuses\": statuses}", "def status_games(game=None):\n if game is None:\n aux = {}\n for i in range(len(games)):\n aux[\"Game \" + str(i)] = games.games_status[i]['Status']\n return aux\n else:\n game = int(game)\n return {\"Game \" + str(game): games.games_status[game]['Status']}", "def stat_review(self, id):\n stats = dict()\n review = self.get_review(id)\n stats['status'] = 'Closed' if review['closed'] else 'Open'\n stats['created'] = review['created']\n stats['reviewers'] = len(review['reviewers'])\n stats['messages'] = len(review['messages'])\n stats['patchsets'] = len(review['patchsets'])\n return stats", "def parse_labels(labels: [{}]) -> {str: str}:\n labels_dict = {}\n for label in labels:\n match = re.search(\"{([^=]+)=(.+)}\", label['name'])\n if match:\n key = match.group(1).strip().lower().title()\n value = match.group(2).strip()\n labels_dict[key] = value\n return labels_dict", "def _get_labels(self, label_vector):\n return () if label_vector is None else \\\n list(OrderedDict.fromkeys([label for term in label_vector \\\n for label, power in term if power != 0]))", "def status() -> Dict[str, Any]:", "def status_get(): # noqa: E501\n db = get_db()\n return [{'id': sample, 'status': db['samples'][sample]['status']} for sample in db['samples'].keys()]", "def label_summaries(self):\n labels = defaultdict(list)\n for task in self.tasks:\n labels[task.label].append(task)\n labels = {label: LabelSummary(label, tasks) for label, tasks in labels.items()}\n return labels", "def _map_status_fields(self, tweet):\n data = {\n # status\n \"date\": 
tweet.created_at.strftime('%Y-%m-%d %H:%M:%S'),\n \"id\": tweet.id_str,\n \"text\": tweet.text,\n \"truncated\": tweet.truncated,\n \"lang\": tweet.lang,\n # user\n \"user_id\": tweet.user.id_str,\n \"user_screen_name\": tweet.user.screen_name,\n \"user_verified\": tweet.user.verified,\n \"user_lang\": tweet.user.lang,\n # reply\n \"reply_to_id\": tweet.in_reply_to_status_id_str,\n # quote\n \"quoted_id\": None,\n \"quoted_text\": None,\n # retweet\n \"retweeted_id\": None,\n \"retweeted_text\": None\n }\n # full text\n try:\n data.update({\n \"text\": tweet.extended_tweet['full_text']\n })\n except AttributeError:\n pass\n # quote\n if hasattr(tweet, \"quoted_status\"):\n data.update({\"quoted_id\": tweet.quoted_status.id_str})\n try:\n data.update({\n \"quoted_text\":\n tweet.quoted_status.extended_tweet['full_text']\n })\n except AttributeError:\n data.update({\n \"quoted_text\":\n tweet.quoted_status.text\n })\n # retweet\n if hasattr(tweet, \"retweeted_status\"):\n data.update({\"retweeted_id\": tweet.retweeted_status.id_str})\n try:\n data.update({\n \"retweeted_text\":\n tweet.retweeted_status.extended_tweet['full_text']\n })\n except AttributeError:\n data.update({\n \"retweeted_text\":\n tweet.retweeted_status.text\n })\n data.update({\n \"tweet_url\":\n \"https://twitter.com/%s/status/%s\" %\n (tweet.user.screen_name, tweet.id_str)\n })\n return(data)", "def build_expected_user_labels_response(self):\n labels = [\n {\n \"key\": \"key1\",\n \"value\": \"value1\"\n },\n {\n \"key\": \"key2\",\n \"value\": \"value2\"\n }\n ]\n return labels", "def _build_label(self):\n counter = Counter()\n _, labels = self.read_json()\n counter.update(labels)\n dictionary = dict()\n for i, word in enumerate(counter.most_common()):\n dictionary[word[0]] = i\n return dictionary", "def labels(self) -> Dict[str, str]:\n return self.attrs.get(\"Labels\", {})", "def get_status_num_keys():\n\n result = []\n for curr_status in SAMPLE_STATUSES:\n status_w_underscores = curr_status.replace(\"-\", \"_\")\n result.append(\"num_{0}\".format(status_w_underscores))\n\n return result" ]
[ "0.561644", "0.5421235", "0.5323489", "0.5313752", "0.52700627", "0.5267757", "0.5250117", "0.52430445", "0.5184294", "0.518364", "0.5169982", "0.50796485", "0.50787926", "0.5018987", "0.5011362", "0.4985156", "0.49694437", "0.49398953", "0.49062294", "0.48961285", "0.48911056", "0.48621064", "0.48559636", "0.48449916", "0.4842341", "0.48325315", "0.48032045", "0.47867754", "0.4763537", "0.47625783" ]
0.5855251
0
Return the urgency override state.
def isUrgencyOverrideAllowed(session_key): notable_en = entity.getEntity(NotableEventUpdate.LOG_REVIEW_REST_URL, 'notable_editing', namespace=NotableEventUpdate.DEFAULT_NAMESPACE, owner=NotableEventUpdate.DEFAULT_OWNER, count=-1, sessionKey=session_key) if 'allow_urgency_override' in notable_en: return splunk.util.normalizeBoolean(notable_en['allow_urgency_override']) else: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urgency(self):\n\n try:\n tts = self.raw_time_to_resolve\n except:\n tts = 666\n\n defcon = \"normal\"\n\n if tts < 1:\n defcon = \"critical\"\n elif tts < 2:\n defcon = \"warning\"\n\n return defcon", "def get_level(self) -> int:\n return self.rstate.level()", "def get_closed_state(self):\n return 1 if self.pull_up_down == GPIO.PUD_DOWN else 0", "def enablement_state(self):\n return self.__enablement_state", "def get_priority(self):\n priorities = dict(PRIORITY_CHOICES)\n return priorities.get(self.priority, \"N/A\")", "def UpgradeState(self):\n if self.force_auto_sync:\n self.get('UpgradeState')\n return self._UpgradeState", "def get_human_state(self):\n return ReferralState(self.state).label", "def _get_state(self):\n fw_wp_en = (self._interface.get('fw_wp_en') == 'on')\n fw_wp = (self._interface.get('fw_wp') == 'on')\n if fw_wp_en:\n return self._STATE_FORCE_ON if fw_wp else self._STATE_FORCE_OFF\n else:\n return self._STATE_ON if fw_wp else self._STATE_OFF", "def level(self) -> int:\n return self.__state.level()", "def state(self):\n return self.roller.battery", "def state(self):\n return STATE_ON if self._state == 1.0 else STATE_OFF", "def state(self) -> str | None:\n if self.zone.Power is True:\n state = self.coordinator.data.nowplaying[self.zone.SourceID].Status\n return STATUS_TO_STATES.get(state, None)\n else:\n return STATE_OFF", "def getSeverityOverride(self):\n return _libsbml.XMLErrorLog_getSeverityOverride(self)", "def state(self):\n if self._key in self._product.get_data_states():\n return self._product.get_data_states()[self._key]\n return \"UNAVAILABLE\"", "def state(self):\n return self._battery", "def on_state_change(self, state):\n return state", "def change_status(self):\n if self.status == 'in progress':\n self.status = 'done'\n return self.status\n elif self.status == 'done':\n self.status = 'in progress'\n self.eisenhower_priority()\n return self.status", "def state(self):\n return self.status", "def get_priority(self):\n return False", "def state(self) -> Union[None, str, int, float]:\n if self._away_zones == self._armed & self._away_zones:\n self._state = STATE_ALARM_ARMED_AWAY\n\n if self._home_zones == self._armed & self._home_zones:\n self._state = STATE_ALARM_ARMED_HOME\n\n if self._alarm:\n self._state = STATE_ALARM_TRIGGERED\n\n if self._exittime or self._exittime10:\n self._state = STATE_ALARM_ARMING\n\n if self._entrytime:\n self._state = STATE_ALARM_PENDING\n\n if not any(\n (\n self._armed,\n self._alarm,\n self._exittime,\n self._exittime10,\n self._entrytime,\n )\n ):\n self._state = STATE_ALARM_DISARMED\n\n return self._state", "def get_state(self):\r\n alarm = self._alarm()\r\n return alarm.state", "def state(self):\n return self._attributes['status']", "def tax_override(self):\n return self._tax_override", "def get_gripper_state(self):\n if self.gripper.get_open_ammount() is 1:\n return 0\n elif self.gripper.get_open_ammount() is 0:\n return 3\n else:\n return 2", "def state(self):\n if self._is_standby:\n return STATE_OFF\n else:\n return STATE_PLAYING", "def urgency_explanation(self):\n return (\n factory.Faker(\"text\", max_nb_chars=500).generate()\n if self.urgency_level.requires_justification\n else \"\"\n )", "def reliability(self):\n return self._reliability", "def state(self):\n return STATE_ON if self.is_on else STATE_OFF", "def state(self):\n return STATE_ON if self.is_on else STATE_OFF", "def state(self):\n return STATE_ON if self.is_on else STATE_OFF" ]
[ "0.7169567", "0.555879", "0.5551994", "0.5436678", "0.5436553", "0.5412196", "0.5387082", "0.5367364", "0.5339496", "0.5331872", "0.53277886", "0.5325559", "0.5320186", "0.5314916", "0.5301885", "0.52887714", "0.52858543", "0.5284677", "0.5282586", "0.5275696", "0.5273975", "0.5264074", "0.5256718", "0.5249086", "0.5245061", "0.522486", "0.52142584", "0.5213564", "0.5213564", "0.5213564" ]
0.588646
1
Returns the length of the comment required.
def commentLengthRequired(session_key): comment_en = entity.getEntity(NotableEventUpdate.LOG_REVIEW_REST_URL, 'comment', namespace=NotableEventUpdate.DEFAULT_NAMESPACE, owner=NotableEventUpdate.DEFAULT_OWNER, sessionKey=session_key, count=-1) # Determine if a comment is required is_required = splunk.util.normalizeBoolean(comment_en['is_required']) # If a comment is not required then return 0 if is_required is None or not is_required: return 0 # Determine what length of a comment is required if comment_en['minimum_length'] is None: return 0 else: minimum_length = comment_en['minimum_length'] # Convert the length to an integer try: return int(minimum_length) except ValueError: # The minimum length is invalid, print an error message logger.warn("The value for the minimum length is invalid: %s", minimum_length) return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sent_count(comment):\n return comment.__len__()", "def comments_count(self) -> int:\n return pulumi.get(self, \"comments_count\")", "def get_comments_num(self): \n num=len(self.comments_set.all())\n return num", "def count_comments(self):\n return self.run_query(f\"count({self.r}/comment)\")", "def length(self):\n return self._info.length # pylint: disable=E1101", "def getCommentCount(self, source):\n commentStart = source.find('item?id=')\n commentCountStart = source.find('>', commentStart) + 1\n commentEnd = source.find('</a>', commentStart)\n commentCountString = source[commentCountStart:commentEnd]\n if commentCountString == \"discuss\":\n return 0\n elif commentCountString == \"\":\n return 0\n else:\n commentCountString = commentCountString.split(' ')[0]\n return int(commentCountString)", "def getLength(self):\n return None", "def getLength(self):\n return None", "def line_length(self, dLine = 0):\n return self.buffer.line_length(self.line + dLine)", "def _get_length(self):\n return self._length", "def getNumberOfComments(node, catalog=None):\n if catalog is None:\n catalog = getToolByName(node, 'portal_catalog')\n return len(catalog(\n object_provides=IComment.__identifier__,\n path='/'.join(node.getPhysicalPath())))", "def Length(self) -> int:", "def Length(self) -> int:", "def _getOldCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"+\"):\n nb_lines += 1\n return nb_lines", "def get_number_of_comments(self):\n return self._performed_actions[WRITE_COMMENT]", "def _getNewCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines", "def get_length(self):\n return self._length", "def get_length(self):\n return self._length", "def get_length(self):\n\n return self.length", "def length(self):\n return len(self.text)", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def getLength(self):\n return self.n", "def length(self):\n\t\treturn self.n", "def len(self):\n\t\t\n\t\treturn len(self.line)", "def getLength(self):\n return self.length", "def length(self) -> int:\n pass", "def length(self) -> 'int':\n return self._info.len", "def length(self):\n return self.length", "def get_length(self):\n\n return self._length", "def test_get_comments_len(self):\n comments = self.story.get_comments()\n soup = utils.get_item_soup(7324236)\n for anchor in soup.find_all('a'):\n if 'More' in anchor.text:\n more_button_present = True\n\n # Note: Hacker News is not consistent about the number of comments per\n # page. On multiple comment page stories, the number of comments on a\n # page is never less than 90. On single comment page stories, the\n # number of comments on the sole page is always less than 110.\n if more_button_present:\n self.assertTrue(len(comments) > 90)\n else:\n self.assertTrue(len(comments) < 110)" ]
[ "0.73293304", "0.705401", "0.6813874", "0.67454207", "0.6679172", "0.6659644", "0.65434384", "0.65434384", "0.64944047", "0.648698", "0.64770037", "0.64671075", "0.64671075", "0.64651716", "0.6432444", "0.6423683", "0.64124656", "0.64124656", "0.63907695", "0.6389172", "0.6384101", "0.6376061", "0.63737833", "0.63683975", "0.6328435", "0.632168", "0.63093704", "0.62941337", "0.6272926", "0.6272445" ]
0.78513294
0
Returns the status ID of the default systemwide review status.
def getDefaultStatus(session_key): # Get the list of statuses logger.debug("Getting the default status") statuses_list = entity.getEntities(NotableEventUpdate.REVIEW_STATUSES_REST_URL, namespace=NotableEventUpdate.DEFAULT_NAMESPACE, owner=NotableEventUpdate.DEFAULT_OWNER, sessionKey=session_key, count=-1) # Get the first status defined a default (there should be only one) for status_id in statuses_list: # Get the status as a dictionary notable_status = statuses_list[status_id] # Get the disabled if 'disabled' in notable_status: disabled = splunk.util.normalizeBoolean(notable_status['disabled']) else: disabled = False # Get the default status if 'default' in notable_status: default = splunk.util.normalizeBoolean(notable_status['default']) else: default = False # If the status is both enabled and default then return it as the default if disabled is False and default: return status_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_default_status(self):\n return self.bot_data_file[\"bot_status\"][\"defaultStatus\"]", "def get_review_status(self):\n if not hasattr(self, 'credential_review'):\n status = 'Awaiting review'\n elif self.credential_review.status <= 20:\n status = 'Awaiting review'\n elif self.credential_review.status == 30:\n status = 'Awaiting a response from reference'\n elif self.credential_review.status >= 40:\n status = 'Awaiting final approval'\n\n return status", "def Status(self, default=None):\n return self.data.get('status', default)", "def default_snat_status(self) -> Optional[pulumi.Input['DefaultSnatStatusArgs']]:\n return pulumi.get(self, \"default_snat_status\")", "def defaultStatus(self):\n raise NotImplementedError", "def status(self) -> Optional[int]:\n return pulumi.get(self, \"status\")", "def get_status(self):\n statuses = dict(ACTIVITY_STATUS_CHOICES)\n return statuses.get(self.status, \"N/A\")", "def id_status_impressao(self):\n return self._id_status_impressao", "def defaultStatus(self, value=None):\n raise NotImplementedError", "def get_receipt_id_status(self):\n return self.get_document_status_choice(self.receipt_id_status)", "def _get_status(trial: dict) -> int:\n if trial['overall_status'] in {'Not yet recruiting', 'Active, not recruiting'}:\n return 0\n elif trial['overall_status'] in {'Enrolling by invitation', 'Recruiting', 'Available'}:\n return 1\n elif trial['overall_status'] in {'Approved for marketing'}:\n return 2\n else:\n return 3", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status_id(self) -> \"str\":\n return self._attrs.get(\"statusId\")", "def id(self):\n return self.status.id", "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "def default_resource_discovery_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_resource_discovery_id\")", "def status(self):\n return STATUS[self.fields['status']]", "def get_review_status(pr_id):\n reviews = get_status_json(pr_id, 'reviews')\n requests = get_status_json(pr_id, 'reviewRequests')\n\n requested_authors = [r[\"login\"] for r in requests]\n\n review_status = {}\n for r in reviews:\n author = r['author']['login']\n date = datetime.fromisoformat(r['submittedAt'].strip('Z'))\n state = r['state']\n if author not in review_status:\n review_status[author] = ReviewComment(state, date, author)\n elif state != 'COMMENTED' and review_status[author].date < date:\n review_status[author] = ReviewComment(state, date, author)\n for a in review_status:\n if a in requested_authors:\n review_status[a] = ReviewComment('REVIEW_REQUESTED', review_status[a].date, a)\n for a in requested_authors:\n if a not in review_status:\n review_status[a] = ReviewComment('UNRESPONSIVE', None, a)\n return review_status, requested_authors", "def Default():\n return _DEFAULT", "def get_status_student():\n if (self.status == self.CH_GRADED or \n (self.status == 
self.CH_AUTOGRADED and self.autograderresult.visible)):\n return \"Graded\"\n \n elif self.status == self.CH_PREVIOUS:\n return \"Previous Sub.\"\n \n return \"Submitted\"", "def _get_status(self):\n return u'%s' % (self.get_status_display())", "def get_status(self):\n statuses = dict(PRODUCTHISTORY_CHOICES)\n return statuses.get(self.status, \"N/A\")" ]
[ "0.68067926", "0.6296829", "0.60826635", "0.60303974", "0.5814972", "0.5710746", "0.5666026", "0.56644005", "0.5596166", "0.5588872", "0.5538899", "0.5532661", "0.5532661", "0.5532661", "0.5532661", "0.5532661", "0.5532661", "0.5532661", "0.5532661", "0.5532661", "0.5516928", "0.5494112", "0.54116666", "0.53945524", "0.5375695", "0.5322904", "0.5281309", "0.525562", "0.52514714", "0.52477825" ]
0.7114933
0
Refresh the list of correlation searches from splunkd via REST.
def refreshCorrelationSearches(self, session_key): logger.debug("Reloading the correlation searches") self.correlation_searches = entity.getEntities('alerts/correlationsearches', count=-1, sessionKey=session_key) self.correlation_search_info = {k: {'rule_name': v['rule_name'], 'default_status': v['default_status']} for k, v in self.correlation_searches.iteritems()} logger.debug("%s correlation searches loaded", len(self.correlation_searches))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self):\n self.dto = self.res.get()\n log.debug(f\"Refreshed {self.url}\")", "def repopall(ctx):\n c = ctx.obj['client']\n if not c.login:\n return False\n\n r = requests.request(\"GET\", urljoin(c.BASE_URL, '/apiproxy/JobService.js'), params={'accesskey': c.login, 'method': 'PopulateAllSearches'})\n print(r.status_code, r.text)\n\n if r.status_code == 200:\n return True\n else:\n return False", "def refresh_from_api(self):\n self.populate_from_api(self.get_from_api())", "def getRefreshList(self, startIndex=0, force=False):", "def refresh(self, url, args, cancellationSignal):\n pass", "def refresh(dataset, client):\n pass", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n if self.is_server_process and self.cache_manager.is_refreshing():\n raise RefreshInProgressError()\n catalogs = MetadataManager(schemaspace=ComponentCatalogs.COMPONENT_CATALOGS_SCHEMASPACE_ID).get_all()\n for catalog in catalogs:\n self._insert_request(self.refresh_queue, catalog, \"modify\")", "def refresh_list(self):\n if self._dominfo_lock.acquire(False):\n try:\n return self._refresh_list()\n finally:\n self._dominfo_lock.release()\n else:\n # wait until the refresh done by the other party is complete\n with self._dominfo_lock:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self):\n self.__dict__ = self._api.get_customers(id=self.id).__dict__", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def refresh(self, unused_http):\n self._metadata_service.refresh()", "def refresh(cls):\n # Flip the order of the links so that the first URL listed is the\n # highest priority and will take precedence\n for url in current_app.config['MATLAB_DOC_LINKS'][::-1]:\n resp = requests.get(url)\n soup = BeautifulSoup(resp.text, 'html.parser')\n\n terms = soup.findAll('td', {'class': 'term'})\n links = [term.find('a') for term in terms]\n\n for link in links:\n\n function = link.text.rstrip()\n\n doc = cls.query.filter_by(name=function).first()\n doc_url = urljoin(url, link['href'])\n\n # Create an entry if one doesn't already exist\n if doc is None:\n doc = cls(name=function)\n\n doc.link = doc_url\n doc.save()\n\n # Make sure to remove i and j entries\n toremove = cls.query.filter(or_(cls.name == 'i', cls.name == 'j')).all()\n for item in toremove:\n item.delete()\n\n return cls.query.all()", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def refreshTable(self):\n ds = []\n for id in self.protocol.getRefreshIDs():\n node = Node(id)\n nearest = self.protocol.router.findNeighbors(node, self.alpha)\n spider = NodeSpiderCrawl(self.protocol, node, nearest)\n ds.append(spider.find())\n\n def republishKeys(_):\n ds = []\n # Republish keys older than one hour\n for key, value in self.storage.iteritemsOlderThan(3600):\n ds.append(self.set(key, value))\n return defer.gatherResults(ds)\n\n d = defer.gatherResults(ds)\n d.addCallback(republishKeys)\n d.addErrback(self.onError)\n return d", "def refresh(self, parameters = {}):\n\n self.__enforce_connected()\n self.collection.refresh(self, parameters = parameters)", "def _refresh(self, unused_http):\n # Refreshing can also be done by directly calling this 
method, instead of just through\n # refresh() above!\n self._metadata_service.refresh()", "def refresh(self):\n self.proxies = self._init_proxies(self.proxy_providers)", "def Refresh(self):\n pass", "def gears_refresh_cluster(self, **kwargs) -> ResponseT:\n return self.execute_command(\"REDISGEARS_2.REFRESHCLUSTER\", **kwargs)", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def _refresh_query(session, query_id):\n resp = session.post('{}/api/queries/{}/refresh'.format(REDASH_HOST, query_id))\n return resp", "def update_table():\n global detections_results\n data = []\n ricoh_data = []\n # get ricoh state\n # Smell from WiFi implementation.. cleanup if thats the approach\n # append to data as separate json 'list'\n data.append(ricoh_data)\n\n detection_data = []\n\n for detect in reversed(detections_results):\n detection_data.append(detect.toJSON())\n\n if len(detection_data) == 25:\n break\n\n data.append(detection_data)\n\n response = make_response(json.dumps(data))\n response.content_type = 'application/json'\n return response", "def refresh(self):\n self.__refresh()" ]
[ "0.59978175", "0.575787", "0.55191344", "0.54884845", "0.54615736", "0.5445839", "0.5445396", "0.5445396", "0.53740287", "0.53508", "0.5328238", "0.5328238", "0.5328238", "0.53184426", "0.53174514", "0.53174514", "0.531563", "0.52705693", "0.5265398", "0.5265398", "0.52478623", "0.5237739", "0.5229892", "0.5225111", "0.522063", "0.5206027", "0.5193088", "0.51482296", "0.513868", "0.51107323" ]
0.7061147
0
Create an audit record for a list of updated events.
def create_audit_records(self, status_records, session_key): uri = '/services/receivers/simple' getargs = {'index': '_audit', 'sourcetype': 'incident_review', 'output_mode': 'json'} # Double list-comprehension: # a. Comma-separate the fields in each record, replacing "None" with the # empty string # b. Newline-separate the records so that the incident_review sourcetype # can pick up the individual audit records via SHOULD_LINEMERGE=false. data = '\n'.join([','.join([str(getattr(r, k)) if getattr(r, k) is not None else '' for k in self.DEFAULT_AUDIT_FIELD_ORDER]) for r in status_records]) response, content = splunk.rest.simpleRequest(uri, sessionKey=session_key, method='POST', getargs=getargs, jsonargs=data) if response['status'] != str(httplib.OK): logger.error('HTTP error when auditing notable events: response="%s"', response) return False else: parsed_content = json.loads(content) if len(data) != parsed_content['bytes']: # Some audit data was not received. logger.error('Audit records could not be created for some notable event updates: content="%s"', content) return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateEvents(self, status_records, session_key, existing_statuses=None):\n\n for status_record in status_records:\n status_record.update_from_existing(existing_statuses.get(status_record.rule_id))\n\n # Update.\n unused_response, content = self.kv.batch_create([\n vars(i) for i in status_records], session_key, self.DEFAULT_OPTIONS)\n\n # Audit.\n audited_bool = self.create_audit_records(status_records, session_key)\n\n # Note: we DO NOT abort or raise an exception for failure to audit events,\n # to preserve the previous behavior of incident review's former CSV-backed\n # implementation.\n if not audited_bool:\n logger.error('Could not create some audit record for notable event status changes: changed_records=\"%s\"', content)\n\n # The content object contains a JSON list of the records that were updated,\n # in the format [ <rule_id>_<timestamp>, ... ]\n parsed_content = json.loads(content)\n return len(parsed_content)", "def update_events_in_database(self):\n for i in range(0, len(self._event_id_list), 1):\n e_id = self._event_id_list[i] # DB ID\n e_ind = self._event_index_list[i] # Index of the event\n e_db = Event.objects.get(id=e_id) # Event as stored in the DB\n e_db = e_db[0] # list to Event object\n e_new = self._event_list[i] # new Event object\n\n # Compare the old and the new event, detect which fields have\n # changed and update them in the old.\n change = e_db.compare(e_new)\n for (name, val) in change:\n setattr(e_db, name, val)\n\n # Save the updated old event, saving of only the fields which\n # have changed\n e_db.save(update_fields=[name for (name, val) in change])\n\n # Mark the event status as not requiring an update anymore\n self.gdc.write_update_status_nth_event(e_ind, False)", "def create_test_audit(context, **kw):\n audit = get_test_audit(context, **kw)\n audit.create()\n return audit", "def test_otoroshi_controllers_adminapi_events_controller_audit_events(self):\n pass", "def test_update(self, init_db, audit):\n params = {\n \"resource_type\": \"Category\",\n \"action\": \"Updated\",\n \"activity\": \"changed name\"\n }\n audit.update(**params)\n assert audit.resource_type == params['resource_type']\n assert audit.action == params['action']\n assert audit.activity == params['activity']", "def load_updated_events_list(self):\n self._event_index_list, self._event_id_list = \\\n zip(*self.gdc.updated_events_indices_and_ids)\n self.populate_event_list_from_index_list()", "def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):", "def add_elasticsearch_records(self, data_list):\n actions = [self.create_data_record(data_dict) for data_dict in data_list]\n self.actions_buffer.extend(actions)", "async def addAudit(self, name, description, status, type, data, userid) -> CreateAuditResponse:\n return await self.stub.CreateAudit(\n CreateAuditRequest(name=name,\n description=description, type=type, status=status, data=data, created_by=userid\n ))", "def audit(self, icon, message, only_if=False, **updates):\n\n changes = {}\n dirty = not only_if\n\n def diff(a, b):\n if isinstance(a, (dict, list)) or isinstance(b, (dict, list)):\n return ujson.dumps(a, sort_keys=True) != ujson.dumps(b, sort_keys=True)\n return str(a) != str(b)\n\n for key, values in updates.items():\n if isinstance(values, tuple):\n if len(values) == 2:\n if diff(values[0], values[1]):\n dirty = True\n changes[key] = {\n \"old\": values[0],\n \"new\": values[1]\n }\n else:\n changes[key] = values\n\n if not dirty:\n return\n\n self.audit_log = {\n \"icon\": icon,\n 
\"message\": message,\n \"payload\": {\n \"changes\": changes,\n \"context\": self.context\n }\n }", "def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file", "def update_event_escalated_at_to_now(self, records: List[EventRecord]) -> None: # pylint: disable=invalid-name\n self.update_timestamp_column_to_now(records, \"escalated_at\")", "def update_logs(items: List[Dict[str, Any]]):\n dynamodb = boto3.resource(\"dynamodb\")\n\n table = dynamodb.Table(TABLE_NAME)\n\n for item in items:\n ids = item.get(\"id\")\n title = item.get(\"title\")\n iine = item.get(\"likes_count\")\n\n try:\n response = table.update_item(\n Key={\"ids\": ids},\n UpdateExpression=\"set iine = :newiine, title = :title\",\n ConditionExpression=\"attribute_not_exists(ids) or iine <> :newiine\",\n ExpressionAttributeValues={\":newiine\": iine, \":title\": title},\n )\n except ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"ConditionalCheckFailedException\":\n print(e.response[\"Error\"][\"Message\"])\n else:\n raise", "def update(self, log_ids: list, dest='logs'):\n self.logs_updated = []\n for i in range(len(log_ids)):\n self.logs_updated.append(dict(\n filename=self.logs[i].get('filename'),\n data=log_ids[i],\n filesize=len(self.logs[i].get('data')) if self.logs[i].get('data') else 0,\n ))\n\n for item in self.items:\n item[dest] = self.logs_updated", "def add_events_to_database(self):\n # Adding events sequentially deals with the case where duplicate\n # events exist inside the _event_list field.\n for i in range(0, len(self._event_index_list), 1):\n e = self._event_list[i]\n e_ind = self._event_index_list[i]\n if not(SimpleDeduplicator.is_duplicate(e)):\n e.save()\n self.gdc.write_id_nth_event(e_ind, e.id)\n self._event_id_list.append(e.id)\n # Add categories whether it is a duplicate or not.\n # ManyToMany relationships work like sets, so there won't be a\n # problem with categories appearing more than once if added twice.\n c_cat_list = self.gdc.get_categories_nth_element(e_ind)\n for cat in c_cat_list:\n assert isinstance(cat, Category)\n e.category.add(cat)", "def _update_event(klass, event, *data_dicts):\n # Create d\n d = {}\n for data_dict in data_dicts:\n d.update(data_dict)\n d = klass._remove_none_fields(d)\n d = dict((\"set__\" + k, v) for k, v in d.iteritems())\n\n # Update and save.\n event.update(**d)\n event.save()", "async def updateAudit(self, auditid, name, description, status, type, data, userid) -> UpdateAuditResponse:\n return await self.stub.UpdateAudit(\n UpdateAuditRequest(_id=auditid, name=name,\n description=description, status=status, type=type, created_by=userid\n ))", "def test_updateEvent(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n eventprev = dict(start = '2015-08-21T01:23:00.000Z',\n end = '2015-08-21T01:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n eventcurr = dict(start = '2015-08-21T02:23:00.000Z',\n end = '2015-08-21T02:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n eventnext = dict(start = '2015-08-21T03:23:00.000Z',\n end = '2015-08-21T03:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n i=0\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + eventprev['start'] + eventprev['end'])\n uidcurr = str('alex_' + eventcurr['start'] + eventcurr['end'])\n uidnext = str('alex_' + eventnext['start'] + 
eventnext['end'])\n invuid = '00000000000000000000000'\n\n rv = self.json_post('/createEvent/alex', eventprev)\n assert uid in str(rv.data)\n\n rv = self.json_post('/createEvent/alex', eventnext)\n assert uidnext in str(rv.data)\n\n rv = self.json_post('/createEvent/alex', eventcurr)\n assert uidcurr in str(rv.data)\n\n rv = self.json_post('/updateEvent/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n rv = self.json_get('/getSuggestions/bbbb', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getSuggestions/bbbb', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n assert 'Invalid username' in str(rv.data)\n\n # Set prev event\n rv = self.json_get('/getSuggestions/alex', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n sugId = json.loads(rv.data)['uid']\n placeId = json.loads(rv.data)['business'][1]['id']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uid,\n 'choice': '1',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n rv = self.json_post('/ratePlace/alex', {'uid': placeId,\n 'rating': 5})\n assert 'ratings' in str(rv.data)\n\n rv = self.json_post('/ratePlace/alex', {'uid': placeId,\n 'rating': 4})\n assert 'ratings' in str(rv.data)\n\n # Reset prev event\n rv = self.json_get('/getSuggestions/alex', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n sugId = json.loads(rv.data)['uid']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uid,\n 'choice': '0',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n # Set next event\n rv = self.json_get('/getSuggestions/alex', {'uid': uidnext,\n 'query': 'Homewood Campus, Baltimore'})\n sugId = json.loads(rv.data)['uid']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uidnext,\n 'choice': '2',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n # Set curr event\n rv = self.json_get('/getSuggestions/alex', {'uid': uidcurr,\n 'query': 'Towson, MD'})\n print(rv.data)\n sugId = json.loads(rv.data)['uid']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uidnext,\n 'choice': '0',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n rv = self.json_get('/getEventFromId/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n rv = self.json_get('/getSuggestions/alex', {'uid': invuid,\n 'query': 'Homewood Campus, Baltimore'})\n assert 'Event not found' in str(rv.data)", "def cli_update_record(field_list, record_data):\n api.update_record(field_list, record_data)", "def test_audit_log_view(self):\n initial_datetime = now()\n with reversion.create_revision():\n company = CompanyFactory(\n description='Initial desc',\n )\n\n reversion.set_comment('Initial')\n reversion.set_date_created(initial_datetime)\n reversion.set_user(self.user)\n\n changed_datetime = now()\n with reversion.create_revision():\n company.description = 'New desc'\n company.save()\n\n reversion.set_comment('Changed')\n reversion.set_date_created(changed_datetime)\n reversion.set_user(self.user)\n\n versions = Version.objects.get_for_object(company)\n version_id = versions[0].id\n url = reverse('api-v4:company:audit-item', kwargs={'pk': company.pk})\n\n response = self.api_client.get(url)\n response_data = response.json()['results']\n\n # No need to test the whole response\n assert len(response_data) == 1\n entry = response_data[0]\n\n assert entry['id'] == version_id\n assert entry['user']['name'] == self.user.name\n assert entry['comment'] == 'Changed'\n assert entry['timestamp'] == format_date_or_datetime(changed_datetime)\n assert entry['changes']['description'] == ['Initial desc', 'New desc']\n assert not set(EXCLUDED_BASE_MODEL_FIELDS) & entry['changes'].keys()", "def __create_audit_alerts():\n\n # Create a log-based metric to count all calls to SetIamPolicy:\n metric1_name = \"iam-policy-change\"\n run_command('gcloud logging metrics create {} --description=\"Count of IAM policy changes.\" --project={} --log-filter=\"\\\n resource.type=project AND \\\n protoPayload.serviceName=cloudresourcemanager.googleapis.com AND \\\n protoPayload.methodName=SetIamPolicy\"'.format(metric1_name, PROJECT_ID))\n\n # Create a log-based metric to count all calls to setIamPermissions or storage.objects.update on GCS buckets:\n metric2_name = \"bucket-permission-change\"\n run_command('gcloud logging metrics create {} --description=\"Count of GCS permission changes.\" --project={} --log-filter=\"\\\n resource.type=gcs_bucket AND \\\n protoPayload.serviceName=storage.googleapis.com AND \\\n (protoPayload.methodName=storage.setIamPermissions OR protoPayload.methodName=storage.objects.update)\"'\n .format(metric2_name, PROJECT_ID))\n\n # Create a log-based metric to count unexpected accesses to the data bucket:\n metric3_name = \"unexpected-bucket-access-{}\".format(DATA_BUCKET_ID)\n logFilter = 'resource.type=gcs_bucket AND \\\n logName=projects/{}/logs/cloudaudit.googleapis.com%2Fdata_access AND \\\n protoPayload.resourceName=projects/_/buckets/{} AND \\\n protoPayload.authenticationInfo.principalEmail!=({})'\\\n .format(PROJECT_ID, DATA_BUCKET_ID, WHITELIST_USERS)\n\n run_command('gcloud logging metrics create {} \\\n --description=\\\"Count of unexpected data access to {}.\\\" \\\n --project={} --log-filter=\\\"{}\\\"'.format(metric3_name, DATA_BUCKET_ID, PROJECT_ID, logFilter))\n\n # Create an email notification channel. Refer to https://cloud.google.com/monitoring/support/notification-options\n notification_channel_name = __create_notification_channel()\n\n # There is a lag between when log-based metrics are created and when they become available in Stackdriver.\n # 30 seconds should work, but you may have to adjust it.\n time.sleep(30)\n\n # Create an alert based on metric 1:\n __create_alert_policy (\"global\", metric1_name, notification_channel_name, \"IAM Policy Change Alert\",\n \"This policy ensures the designated user/group is notified when IAM policies are altered.\")\n\n # Create an alert based on metric 2:\n __create_alert_policy(\"gcs_bucket\", metric2_name, notification_channel_name, \"Bucket Permission Change Alert\",\n \"This policy ensures the designated user/group is notified when bucket/object permissions are altered.\")\n\n # Create an alert based on metric 3:\n __create_alert_policy (\"gcs_bucket\", metric3_name, notification_channel_name, \"Unexpected Bucket Access Alert\",\n \"This policy ensures the designated user/group is notified when data bucket is \\\n accessed by an unexpected user.\")", "def audit(self, key=None, record=None, start=None, end=None, **kwargs):\n start = start or find_in_kwargs_by_alias('timestamp', kwargs)\n startstr = isinstance(start, str)\n endstr = isinstance(end, str)\n if isinstance(key, int):\n record = key\n key = None\n if key and record and start and not startstr and end and not endstr:\n data = self.client.auditKeyRecordStartEnd(key, record, start, end, self.creds, self.transaction,\n self.environment)\n elif key and record and start and startstr and end and endstr:\n data = self.client.auditKeyRecordStartstrEndstr(key, record, start, end, self.creds, self.transaction,\n self.environment)\n elif key and record and start and not startstr:\n data = self.client.auditKeyRecordStart(key, record, start, self.creds, self.transaction, self.environment)\n elif key and record and start and startstr:\n data = self.client.auditKeyRecordStartstr(key, record, start, self.creds, self.transaction, self.environment)\n elif key and record:\n data = self.client.auditKeyRecord(key, record, self.creds, self.transaction, self.environment)\n elif record and start and not startstr and end and not endstr:\n data = self.client.auditRecordStartEnd(record, start, end, self.creds, self.transaction,\n self.environment)\n elif record and start and startstr and end and endstr:\n data = self.client.auditRecordStartstrEndstr(record, start, end, self.creds, self.transaction,\n self.environment)\n elif record and start and not startstr:\n data = self.client.auditRecordStart(record, start, self.creds, self.transaction, self.environment)\n elif record and start and startstr:\n data = self.client.auditRecordStartstr(record, start, self.creds, self.transaction, self.environment)\n elif record:\n data = self.client.auditRecord(record, self.creds, self.transaction, self.environment)\n else:\n require_kwarg('record')\n data = pythonify(data)\n data = OrderedDict(sorted(data.items()))\n return data", "def update_cloud_watch_obj_list(old_list, new_list):\n\n # Add new.\n for new_item in new_list:\n if new_item not in old_list:\n new_item.added = True\n old_list.append(new_item)\n\n # Remove deleted.\n for old_item in old_list:\n if old_item not in new_list:\n old_list.remove(old_item)\n\n return old_list", "def record_updater(records, observations):\n for record in records:\n try:\n record = ast.literal_eval(records[record])\n except Exception:\n record = record\n try:\n if type(records[record]) is dict:\n records[record] = Common.record_updater(\n records[record], observations\n )\n elif type(records[record]) is list:\n list_records = []\n for list_record in records[record]:\n for observation in observations:\n if observation != \"_id\":\n try:\n if re.search(observation, f\"{list_record}\"):\n if not re.search(\n observations[observation],\n f\"{records[record]}\",\n ):\n if not re.search(\"-->\", f\"{list_record}\"):\n list_records.append(\n f\"{list_record}\"\n + \" --> \"\n + observations[observation]\n )\n else:\n list_records.append(list_record)\n else:\n list_records.append(list_record)\n else:\n list_records.append(list_record)\n except Exception as ex:\n Common.logger.warning(\n f\"Exception happened in observation comparison {ex}\"\n )\n records[record] = list_records\n else:\n records = Common.data_comparison(observations, records, record)\n except Exception:\n records = Common.data_comparison(observations, records, record)\n return records", "def newDataEntryAUTOR(crime):\n entry = { 'lstevents': None}\n \n entry['lstevents'] = lt.newList('ARRAY_LIST')\n return entry", "def update_audit_info(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n progress_controller.maximum = 2\n\n from stalker.db.session import DBSession\n from stalker import LocalSession\n\n with DBSession.no_autoflush:\n local_session = LocalSession()\n logged_in_user = local_session.logged_in_user\n progress_controller.increment()\n\n if logged_in_user:\n # update the version updated_by\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n if v:\n v.updated_by = logged_in_user\n\n from stalker.db.session import DBSession\n\n DBSession.commit()\n progress_controller.increment()\n progress_controller.complete()", "def getTenantAttributeUpdateAuditTrail(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_multiple_updates(self):\n response = self.api.put(self.assessment, {\"test_plan\": \"steps\"})\n self.assert200(response)\n\n response = self.api.put(self.assessment, {\"title\": \"new title\"})\n self.assert200(response)\n\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"[email protected]\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"TITLE\"],\n (\"new title\", \"Assessment1\")\n )\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"ASSESSMENT PROCEDURE\"],\n (\"steps\", \"\")\n )", "def view_update(self, context):\n\n for collection in self._watch_list:\n collection_name = get_collection_name(collection)\n collection_set = set(collection)\n tracking_set = self._tracking_sets[collection_name]\n\n # Check for new items\n add_set = collection_set - tracking_set\n self.add_delta[collection_name] = add_set\n tracking_set |= add_set\n\n # Check for removed items\n remove_set = tracking_set - collection_set\n self.remove_delta[collection_name] = remove_set\n tracking_set -= remove_set\n\n # Check for updates\n update_set = {item for item in collection if item.is_updated}\n self.update_delta[collection_name] = update_set", "def create_audit(selenium, program, **kwargs):\n audit = entities_factory.AuditsFactory().create(**kwargs)\n audits_service = webui_service.AuditsService(selenium)\n audits_service.create_obj_via_tree_view(program, audit)\n audit.url = audits_service.open_widget_of_mapped_objs(\n program).tree_view.tree_view_items()[0].url()\n return audit" ]
[ "0.63237685", "0.59012693", "0.5659949", "0.5493767", "0.54725164", "0.5468641", "0.5305744", "0.5234044", "0.5232979", "0.522035", "0.5220307", "0.52176887", "0.5212649", "0.51756454", "0.5148049", "0.51328814", "0.5120397", "0.51175773", "0.5109951", "0.50947446", "0.5041445", "0.50199777", "0.49995118", "0.49955124", "0.4975167", "0.496274", "0.49581417", "0.49518937", "0.49471268", "0.4932079" ]
0.6022617
1
Get the search results for the given search ID.
def getSearchResults(searchID, session_key):
    job = splunk.search.getJob(searchID, sessionKey=session_key)

    if not job.isDone:
        raise SearchNotDoneException("Search is not done; the search must be completed before results can be processed")

    if job.reportSearch:
        logger.warn("The search ID %s is not an event search but one that provides processed results; only an event search can be used for editing notable events", searchID)
        raise NotEventSearchException("Search must be an event search that provides raw events (not results)")

    # Reset the fetch options for faster retrieval.
    job.clearFetchOptions()
    job.setFetchOptions(field_list=['rule_id', 'source'],
                        max_lines=1,
                        output_mode='xml',
                        show_empty_fields=False,
                        time_format='%Y-%m-%dT%H:%M:%S%z')

    return getattr(job, 'events')
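A minimal caller sketch for the function above, assuming the legacy splunk Python bindings (splunk.auth.getSessionKey) and that SearchNotDoneException / NotEventSearchException are importable from the same module; the credentials and the counting helper are hypothetical, not part of the record:

import splunk.auth

def count_editable_events(search_id):
    # Hypothetical helper: authenticate, fetch the job's events, and count
    # them. The credentials below are placeholders, not real values.
    session_key = splunk.auth.getSessionKey('admin', 'changeme')
    try:
        events = getSearchResults(search_id, session_key)
    except SearchNotDoneException:
        return None  # the job is still running; poll and retry later
    except NotEventSearchException:
        raise  # stats/report searches can never be edited
    return sum(1 for _ in events)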
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_search_results(search_string: str):\n database = get_db()\n result = []\n search_string = search_string.lower()\n search_strings = search_utils.preprocess_search_string(\n search_string[:150]\n )\n query_search = database.AQLQuery(\n query=search_queries.QUERY_SEARCH,\n bindVars={\n \"search_string_tib\": search_strings['tib'],\n \"search_string_chn\": search_strings['chn'],\n \"search_string_skt\": search_strings['skt'],\n \"search_string_pli\": search_strings['pli'],\n \"search_string_skt_fuzzy\": search_strings['skt_fuzzy']\n },\n batchSize=300,\n rawResults=True,\n )\n query_result = query_search.result[0]\n result = search_utils.postprocess_results(search_string, query_result)\n return {\"searchResults\": result}", "def get_results(self, scan_id):\n\n if not isinstance(scan_id, basestring):\n raise TypeError(\"Expected string, got %r instead\" % type(scan_id))\n\n m_response = None\n try:\n m_response = self.__manager.make_xml_request('<get_results task_id=\"%s\"/>' % scan_id, xml_result=True)\n except ServerError, e:\n raise VulnscanServerError(\"Can't get the results for the task %s. Error: %s\" % (scan_id, e.message))\n\n return self.transform(m_response)", "def load_search(self, search_id: Hashable) -> dict:\n job_ids = self.load_all_job_ids(search_id)\n with self._redis.pipeline() as pipe:\n for job_id in job_ids:\n pipe.json().get(f\"job:{job_id}\", \".\")\n data = pipe.execute()\n for i, job_id in enumerate(job_ids):\n data[i][\"job_id\"] = job_id\n return data", "def get_results_for(t_client, search_q):\n results = t_client.search(q=\"#\"+search_q)\n\n # This can be refactored\n return [\n {\n \"author\": \"@%s\" % t.from_user,\n \"text\": t.text,\n \"id\": t.id,\n \"date_h\": t.created_at.strftime(\"%H:%M:%S %d/%m/%Y\"),\n \"date\": time.mktime(t.created_at.timetuple()),\n } for t in results\n ]", "def results(self):\n\n return self._search_resut", "def search_results(self):\r\n route_name = self.request.matched_route.name\r\n mdict = self.matchdict\r\n rdict = self.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n # Always search the fulltext content\r\n with_content = True\r\n\r\n conn_str = self.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n params = self.params\r\n page = params.get('page', 0)\r\n count = params.get('count', 50)\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif self.request.user and self.request.user.username:\r\n username = self.request.user.username\r\n\r\n res_list = searcher.search(\r\n phrase,\r\n content=with_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page,\r\n )\r\n\r\n # if the route name is search_ajax we want a json response\r\n # else we just want to return the payload data to the mako template\r\n if 'ajax' in route_name or 'api' in route_name:\r\n return {\r\n 'success': True,\r\n 'message': \"\",\r\n 'payload': {\r\n 'search_results': [dict(res) for res in res_list],\r\n 'result_count': len(res_list),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }\r\n }\r\n else:\r\n return {\r\n 'search_results': res_list,\r\n 'count': len(res_list),\r\n 'max_count': 50,\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n 
}", "def get_results(self, session_id):\n if session_id is None:\n raise ValueError('session_id is required and was not provided')\n\n response = self._client.request('GET', 'sessions/%s/results' % session_id)\n return SessionResult(response)", "def get_results(self, task_id=None):\n\n m_query = None\n if task_id:\n m_query = '<get_results task_id=\"%s\"/>' % scan_id\n else:\n m_query = '<get_results/>'\n\n return self.__manager.xml(m_query, xml_result=True)", "def get_results(self, job_id):\n ujs = self.__ujs_client()\n res = ujs.get_results(job_id)\n return res", "def results(self, scanid=None):\n params = {}\n if scanid is not None:\n params['scanId'] = scanid\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/results/', params)))", "def fetch_search_results (self, search_str, list_from=0, list_to=10):\n # properly encode the search string\n encoded_search_string = quote(search_str)\n\n paths = [\n ['search', encoded_search_string, 'titles', {'from': list_from, 'to': list_to}, ['summary', 'title']],\n ['search', encoded_search_string, 'titles', {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['search', encoded_search_string, 'titles', ['id', 'length', 'name', 'trackIds', 'requestId']],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', {'from': list_from, 'to': list_to}, ['summary', 'title']],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', ['id', 'length', 'name', 'trackIds', 'requestId']]\n ]\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='Search results')", "def get_search_results(self):\n return self.get_list_of_names(self.SEARCH_RESULTS)", "def search(self, issue_id=None):\n query = self._get_search_query()\n\n try:\n results, stats = self._get_results(self._get_search_query(), issue_id=issue_id)\n except Exception as e:\n raise WBCApiError('Error while searching: {} {}'.format(e.__class__, str(e)))\n\n return jsonify({\n 'query': query,\n 'results': results,\n 'stats': stats\n })", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def search(self, **kwargs):\n\n return self.api_list_request(self._get_method_fullname(\"search\"), kwargs)", "def __ui_search_student_by_id(self, search):\n try:\n result = self.__student_controller.search_by_id(search)\n for student in result:\n print(str(student))\n\n except RepositoryException as re:\n print(re)\n return", "def search_documents():\n req_body = request.get_json(force=True)\n search_query = req_body.get('query')\n\n results = app.search_flow.search(\n inputs=Document(text=search_query),\n return_results=True\n )\n\n res = {\n 'matches': [match.id for match in results[0].docs[0].matches]\n }\n return res", "def get_results(self, ids):\n 
self.join()\n return [self.results[id] for id in ids]", "def search(self, id):\n\n db = self.connection(\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"SELECT * FROM film WHERE id = %s;\"\n cur.execute(sql, (id,))\n return cur.fetchall()\n except:\n print(\"Cannot find the film!\")\n\n db.close()", "def lookup(self, search_string):\n url = self.create_search_url(search_string)\n self.logger.debug(\"lookup: using search url: %s\" % url)\n search_results = self.get_search_results(url)\n results = []\n # Search results is an XML string with basic top level info about\n # all the entities that matched our search string..\n #\n dom = parseString(search_results).firstChild\n entity = first_child(dom, \"entity\")\n while entity:\n if self.parser.content == \"movies\":\n results.append(Movie(entity, self))\n else:\n results.append(Series(entity, self))\n entity = next_sibling(entity, \"entity\")\n return results", "def search_results(request):\r\n mdict = request.matchdict\r\n rdict = request.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif request.user and request.user.username:\r\n username = request.user.username\r\n\r\n # with content is always in the get string\r\n search_content = asbool(rdict.get('with_content', False))\r\n\r\n conn_str = request.registry.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n page = rdict.get('page', 0)\r\n count = rdict.get('count', 10)\r\n\r\n try:\r\n res_list = searcher.search(\r\n phrase,\r\n content=search_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page\r\n )\r\n except ValueError:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bad Request: Page number out of bound\"}\r\n return _api_response(request, ret)\r\n\r\n constructed_results = []\r\n for res in res_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # the hashed object is there as well, we need to pull the url and\r\n # clicks from it as total_clicks\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n constructed_results.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'search_results': constructed_results,\r\n 'result_count': len(constructed_results),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'with_content': search_content,\r\n 'username': username,\r\n })", "def get_search_results(self, url):\n src_url = ScrapeURL(url, cache = self.cache)\n self.logger.debug(\"get_search_results: downloading %s\" % src_url.url)\n url_data = src_url.get()\n\n # We pass the page we got from the url, and the url itself into\n # our scaper parser as buffer parameters 1 & 2.\n #\n self.parser.set_buffer(1, url_data)\n self.parser.set_buffer(2, src_url.url)\n\n # Parse the <GetSearchResults> tag from our XML definition.\n #\n search_results = self.parser.parse(FN_GET_SEARCH_RESULTS, self.settings)\n return search_results", "def full_results(self, scanid):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/fullResults/', {'scanId': scanid})))", "def perform_search(search: str, max_records: int) -> List[str]:\n results = []\n url = 
\"%s?format=json&action=query&list=search&srlimit=%d&srsearch=%s\" % (WIKIDATA_URL, max_records, quote(search))\n # Perform request\n print_debug(\"Sending GET %s\" % url)\n response = requests.get(url)\n data = response.json()\n print_debug(\"%s -> %d\" % (url, response.status_code))\n print_debug(\"%s\" % response.text)\n # Get search results\n records = data[\"query\"][\"search\"]\n # Iterate over records\n for record in records:\n results.append(record[\"title\"])\n return results", "def search_by_fileid():\n file_id = request.form[\"file-id\"]\n tags = [t.lower() for t in request.form[\"keywords\"].split(\";\")]\n search_result = scraper.search(file_id, tags)\n return jsonify(search_result)" ]
[ "0.6724352", "0.65330744", "0.64275545", "0.6412478", "0.63617796", "0.6330999", "0.6315197", "0.6310618", "0.630068", "0.6298764", "0.629138", "0.62622744", "0.6177091", "0.6141086", "0.6141086", "0.6141086", "0.6141086", "0.6141086", "0.6141086", "0.6141086", "0.6118462", "0.61030066", "0.6091952", "0.60915834", "0.60176194", "0.60160214", "0.6014639", "0.59727454", "0.59554356", "0.5904172" ]
0.71253186
0
Set the status of the events that match a search with the given ID.
def setStatusBySearchID(self, searchID, urgency, status, comment, newOwner, reviewTime, capabilities, session_key, currentUser=None, force_refresh=False, rule_ids_to_change=None, existing_statuses=None):
    # This class instance will record the number of events successfully changed
    status_change_meta = LogReviewStatusChanges()

    # Get the search job (this will throw a splunk.ResourceNotFound exception if the search cannot be found)
    try:
        dataset = self.getSearchResults(searchID, session_key)
    except splunk.ResourceNotFound:
        logger.warn("The search ID %s is no longer accessible, please refresh and try editing the events again", searchID)
        status_change_meta.incrementFailureCountEx(["The search is no longer accessible, please refresh and try editing the events again"])
        return status_change_meta
    except NotEventSearchException:
        status_change_meta.incrementFailureCountEx(["The search is not an event search; searches returning results (instead of events) cannot be used"])
        return status_change_meta
    except SearchNotDoneException:
        status_change_meta.incrementFailureCountEx(["The search is not done; the search must be completed before results can be processed"])
        return status_change_meta

    # Get the existing statuses so that the entries can inherit items as necessary
    if existing_statuses is None:
        existing_statuses = self.getCurrentValues(session_key, rule_ids_to_change)

    # Make sure the comment is the minimum length (if defined)
    minimum_length = self.commentLengthRequired(session_key)

    if len(comment.strip()) < minimum_length:
        status_change_meta.incrementFailureCountEx(["comment length does not meet minimum requirement (must be %d characters long or more)" % (minimum_length)])
        return status_change_meta

    # Determine if urgency changes are allowed
    allowUrgencyChanges = self.isUrgencyOverrideAllowed(session_key)

    # If we are not allowed to change the urgency, then set it to none to indicate that it ought not be changed
    if allowUrgencyChanges is False:
        urgency = None

    # Make a copy of the rule IDs that we are planning to change so that we can exit early from looping through
    # the search results once we are done editing the entries
    rule_ids_to_change_left = None

    if rule_ids_to_change is not None:
        rule_ids_to_change_left = rule_ids_to_change[:]  # Make a copy, we don't want to edit the original

    # Counters
    evaluated = 0

    # Notable events to be edited
    status_records = []

    # Create a status entry for each event
    for event in dataset:
        evaluated += 1

        # Stop processing the events if we have already handled all of the events we expected to handle
        if rule_ids_to_change_left is not None and len(rule_ids_to_change_left) == 0:
            break

        if 'rule_id' in event:
            rule_id = str(event['rule_id'])

            # Only change the given event if it is in the list to change
            if rule_ids_to_change is not None and rule_id not in rule_ids_to_change:
                continue

            if 'source' in event:
                correlation_search = str(event['source'])
            else:
                correlation_search = None

            rule_name = self.correlation_search_info.get(correlation_search, {}).get('rule_name')

            # Make sure that the user has the capability
            capability_issues = self.checkTransition(rule_id, correlation_search, status, capabilities, session_key, existing_statuses, force_refresh)

            # Stop if the permission check failed
            if capability_issues is not None and len(capability_issues) > 0:
                status_change_meta.incrementFailureCountEx(capability_issues)
            else:
                # Add the record to the list of records to be saved.
                status_records.append(LogReviewStatus(reviewTime, rule_id, newOwner, urgency, status, comment, currentUser, rule_name, rule_id + '_' + str(reviewTime)))

                if rule_ids_to_change_left is not None:
                    rule_ids_to_change_left.remove(rule_id)
        else:
            status_change_meta.incrementFailureCount("rule_id field not found in the event")

    logger.debug("Evaluated %i events for editing", evaluated)

    success_count = 0

    # Perform the save in chunks and return status.
    for chunk in [status_records[i:i + self.BATCH_SAVE_LIMIT] for i in range(0, len(status_records), self.BATCH_SAVE_LIMIT)]:
        try:
            success_count += self.updateEvents(chunk, session_key, existing_statuses)
        except Exception as e:
            logger.exception('Exception when updating notable events: %s', e)

    # Update status change metadata.
    # Case 1: updating all events in the search
    # Case 2: updating only selected events
    if (not rule_ids_to_change and success_count == evaluated) or (rule_ids_to_change and len(rule_ids_to_change) == success_count):
        # All successful.
        status_change_meta.incrementSuccessCount(success_count)
    else:
        # Some failures.
        status_change_meta.incrementSuccessCount(success_count)
        status_change_meta.incrementFailureCount('some notable event(s) could not be updated', evaluated - success_count)

    return status_change_meta
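One plausible invocation of this method, sketched under the assumption that `reviewer` is an instance of the (unnamed) owning class; every literal value below (SID, status ID, capability string, rule ID, usernames) is an invented placeholder rather than a value from the record above:

import time

# Hypothetical call; session_key would come from an earlier authentication step.
changes = reviewer.setStatusBySearchID(
    searchID='1700000000.42',              # SID of a completed event search (placeholder)
    urgency='high',
    status='2',                            # target review-status ID (placeholder)
    comment='Triaged and assigned for follow-up investigation',
    newOwner='analyst2',
    reviewTime=int(time.time()),
    capabilities=['transition_reviewstatus-1_to_2'],
    session_key=session_key,
    currentUser='analyst1',
    rule_ids_to_change=['ABCD1234-5678-90EF@@notable@@cafef00d'])

# changes is a LogReviewStatusChanges holding per-event success/failure counts.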
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def id_status(self, id_status):\n self._id_status = id_status", "def update_status(self, id, status):\n sql = f\"UPDATE incidences SET status = \\'{status}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def status(self, id):", "def set_match_id(match_id):\n conn = get_connect()\n conn.execute(\"UPDATE match SET isSearched = 1 WHERE matchId = \" + str(match_id))\n conn.commit()\n conn.close()\n print(\"matchId \" + str(match_id) + \" has been searched\")\n return", "def update_status(self, id, status):\n\n records = self.db.get_table()\n index = -1\n\n for i in range(0, len(records)):\n if str(records[i][\"id\"]) == str(id):\n index = i\n\n if index == -1:\n return False\n\n records[index][\"status\"] = status\n self.db.update_cell(index, 'status', status)\n\n return records[index]", "def updatestatus(id, status):\n username = os.getlogin()\n res = requests.put('{}update/{}/'.format(base_url, id),\n data={\"keyword_fetching_status\": status, \"user_fetched\": username})\n res = res.json()\n return res", "def status_id(self, status_id):\n\n self._status_id = status_id", "def set_account_id(account_id):\n conn = get_connect()\n conn.execute(\"UPDATE account SET isSearched = 1 WHERE accountId = \" + str(account_id))\n conn.commit()\n conn.close()\n print(\"accountId \" + str(account_id) + \" has been searched\")\n return", "def query_event_by_id():\n try:\n event_id = request.args['event_id']\n response = requests.put(app.config['EVENTS_ENDPOINT'] + event_id)\n if response.status_code == 200:\n return render_template(\n 'search_results.html',\n auth=is_organizer(get_user()),\n events=parse_events(response.json()),\n app_config=app.config\n )\n else:\n return 'Unable to retrieve events', 500\n except BadRequestKeyError as error:\n return f'Error: {error}.', 400", "def setStatuses(self, urgency, status, comment, newOwner, currentUser, ruleUIDs, searchID, reviewTime, existing_statuses, capabilities, session_key):\n\n # Print a log message noting that an operation is about to happen\n if ruleUIDs is not None and searchID is not None:\n logger.info(\"About to edit events matching search %s (though only %d events are to be modified)\", searchID, len(ruleUIDs))\n if searchID is None and (ruleUIDs is not None and len(ruleUIDs) > 0):\n logger.info(\"About to edit events by ID (%d events are to be modified)\", searchID, len(ruleUIDs))\n else:\n logger.info(\"About to edit events matching all events matching search %s\", searchID)\n\n # Refresh the correlation searches list so we don't have to later\n self.refreshCorrelationSearches(session_key)\n\n # Perform the changes\n if searchID is None:\n result = self.setStatusByIDs(ruleUIDs, urgency, status, comment, newOwner, reviewTime, session_key, currentUser, existing_statuses=existing_statuses)\n logger.info(\"Done editing events\")\n return result\n else:\n result = self.setStatusBySearchID(searchID, urgency, status, comment, newOwner, reviewTime, capabilities, session_key, currentUser, force_refresh=False, rule_ids_to_change=ruleUIDs, existing_statuses=existing_statuses)\n logger.info(\"Done editing events matching search %s\", searchID)\n return result", "def check_status(self, id):\n raise NotImplementedError()", "def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)", "def put(self, id):\n taskroom_service.change_status(id)\n return {'Message': \"Room status changed to Active\"}", "def 
update_status(request_id, status):\n pass", "def set_event_status(self, new_event_status):\n self.event_status = new_event_status", "def set_status(self, status: Status) -> None:\n if status.status_code == StatusCode.ERROR:\n self.elastic_span.outcome = constants.OUTCOME.FAILURE\n elif status.status_code == StatusCode.OK:\n self.elastic_span.outcome = constants.OUTCOME.SUCCESS\n else:\n self.elastic_span.outcome = constants.OUTCOME.UNKNOWN", "def update_event(id):\n oEvent, error = Event.get_by_id(id)\n if error:\n return make_response(jsonify({\"error\" : error }), 400)\n json_data = request.get_json()\n data, error = EventSchema().load(json_data)\n if error:\n return make_response(jsonify({\"error\": error}), 400)\n oEvent = oEvent.update(data)\n return make_response(jsonify(oEvent.as_dict()))", "def id_status_conta(self, id_status_conta):\n self._id_status_conta = id_status_conta", "def change_status(self, status, application_id):", "def response_status_id(self, response_status_id):\n\n self._response_status_id = response_status_id", "def set_statement_status_for_search(self, status_list):\n self.multiple_items_selection_from_kendo_dropdown(self.statement_status_dropdown_locator, status_list)\n self.wait_for_ajax_spinner_load()", "def update_status(self, value, incident_id):\n payload = {\"incident\":{\"state\": value}}\n response = self.session.put(\n \"{0}/incidents/{1}.json\".format(self.uri, incident_id),\n json=payload\n )\n return response.status_code", "def search_among_logs(self, log_ids):\n self._id_list = log_ids", "def setLclPatientStatus(self, condition, patientId, status=True):\n logger.debug('setLclPatientStatus %s %s %s', condition, patientId, status)\n if status:\n self.setDict[condition].add(patientId)\n elif patientId in self.setDict[condition]:\n self.setDict[condition].remove(patientId)", "def set_status(self, scenario_id, status):\n self.cur.execute(\n \"UPDATE execute_list SET status = %s WHERE id = %s\",\n (status, scenario_id),\n )", "def SetStatus(self, status):\r\n self.status = status", "def update_target_status(self, target_id: str, status: str) -> dict:\n status_data = {\n \"data\": {\"status\": status},\n \"q\": {\n \"condition\": \"OR\",\n \"rules\": [\n {\n \"id\": \"table.id\",\n \"field\": \"table.id\",\n \"type\": \"object\",\n \"input\": \"text\",\n \"operator\": \"equal\",\n \"value\": target_id\n }\n ]\n }\n }\n status_string = json.dumps(status_data)\n\n response = self.rc.execute(\"PATCH\",\n self._get_uri(PATCH_TARGET_URI),\n data=status_string,\n headers=self.header,\n verify=self.verify)\n return response.json()", "def on_searchin_changed(self):\r\n\r\n self.check_searchin()", "def set_status(self, p_id, status):\n try:\n cursor = self.conn.cursor()\n command = '''\n UPDATE Player\n SET Status = ?\n WHERE P_ID = ?\n '''\n cursor.execute(command, (status, p_id))\n self.conn.commit()\n except BaseException as e:\n self.log.log_error('Fehler beim setzen des Status', e)\n raise e", "def set_message_status(self, message_id, status):\n\t\tself.c.execute(\"UPDATE messages SET status = ? WHERE message_id = ?\", (status, message_id))\n\t\tself.save()" ]
[ "0.6252509", "0.5942144", "0.59207493", "0.5787411", "0.5685566", "0.56427914", "0.5544263", "0.5534634", "0.5514875", "0.54568875", "0.5452327", "0.5425086", "0.531803", "0.529227", "0.5255102", "0.52439374", "0.5222686", "0.52150995", "0.52004564", "0.51656383", "0.51502025", "0.51424927", "0.51418686", "0.5130135", "0.5117764", "0.50833684", "0.5081036", "0.50786847", "0.50662667", "0.50610495" ]
0.6068505
1