Dataset columns:
    query            string (length 9 to 9.05k)
    document         string (length 10 to 222k)
    metadata         dict
    negatives        list (length 30)
    negative_scores  list (length 30)
    document_score   string (length 4 to 10)
    document_rank    string (2 distinct values)
Get the hosted zone by looking up the account from the session and using that to determine which zone to return.
def get_hosted_zone(session):
    account = get_account_id_from_session(session)
    if account == hosts.PROD_ACCOUNT:
        return hosts.PROD_DOMAIN
    elif account == hosts.DEV_ACCOUNT:
        return hosts.DEV_DOMAIN
    else:
        return None
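A minimal usage sketch for the function above. It assumes a boto3 session plus the hosts module and the get_account_id_from_session helper that the snippet references (the profile name and printed messages are illustrative assumptions, not part of the original):

import boto3

# Any boto3 session works; the profile name here is an assumption for this sketch.
session = boto3.session.Session(profile_name="default")

zone = get_hosted_zone(session)
if zone is None:
    print("Session does not belong to the production or development account")
else:
    print("Hosted zone for this account:", zone)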
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_zone(cls, name):\n\n def get_closest(n):\n \"\"\"\n Return closest matching zone\n \"\"\"\n while n:\n try:\n return DNSZone.objects.get(name=n)\n except DNSZone.DoesNotExist:\n pass\n n = \".\".join(n.split(\".\")[1:])\n return None\n\n if not name:\n return None\n if is_ipv4(name):\n # IPv4 zone\n n = name.split(\".\")\n n.reverse()\n return get_closest(\"%s.in-addr.arpa\" % (\".\".join(n[1:])))\n elif is_ipv6(name):\n # IPv6 zone\n d = IPv6(name).digits\n d.reverse()\n c = \".\".join(d)\n return get_closest(\"%s.ip6.arpa\" % c) or get_closest(\"%s.ip6.int\" % c)\n else:\n return get_closest(name)", "def get_zone(self, conn, host):\n fl = 'name=\"%s\"' % host\n request = conn.instances().aggregatedList(project=PROJECT, filter=fl)\n \twhile request is not None:\n \t\tresponse = request.execute()\n \t\tzones = response.get('items', {})\n \t\tfor zone in zones.values():\n \t\t\tfor inst in zone.get('instances', []):\n \t\t\t\tif inst['name'] == host:\n \t\t\t\t\treturn inst['zone'].split(\"/\")[-1]\n \t\trequest = conn.instances().aggregatedList_next(previous_request=request, previous_response=response)\n \traise Exception(\"Unable to determin the zone for instance %s\" % (host))", "def _get_zone():\n r = get('http://metadata.google.internal/'\n 'computeMetadata/v1/instance/zone',\n headers={'Metadata-Flavor': 'Google'})\n if r.status_code == 200:\n return sub(r'.+zones/(.+)', r'\\1', r.text)\n else:\n return ''", "def get_zone(self, kwargs):\n dns_zone = kwargs[\"dns_zone\"]\n try:\n results = self.engine.query(\n self.engine.ZONE_FILTER(),\n base=','.join([f\"DC={dns_zone}\", \"CN=MicrosoftDNS,DC=DomainDNSZones\", self.engine.base_dn])\n )\n except LdapActiveDirectoryView.ActiveDirectoryLdapException as e:\n error(e)\n else:\n self.display(results)", "def hosted_zone_by_id(client: boto3.client, zone_id: str) -> AWSResponse:\n try:\n return client.get_hosted_zone(Id=zone_id)\n except ClientError as e:\n logger.exception(e.response['Error']['Message'])\n return {'HostedZone': []}", "def test_get_zone_from_account(self):\n account = Account('test-account')\n zone = Zone('test.example.com')\n account.add_zone(zone)\n self.assertEqual(account.get_zone('test.example.com'), zone)", "def find_zone_by_name(self, zone_name):\n zones = self.client.collections.zones\n return next((z.id for z in zones if z.name == zone_name), None)", "def get_zone_id(ctx, param, zone_name):\n del ctx #unused\n del param #unused\n cf = CloudFlare.CloudFlare()\n zones = cf.zones.get(params={'name': zone_name})\n if len(zones) != 1:\n raise click.ClickException('Invalid zone name: {}'.format(zone_name))\n return (zones[0]['id'], zones[0]['name'])", "def get_zone(zone_id: int, allow_unloaded_zones: bool=False) -> Zone:\n return services.get_zone(zone_id, allow_uninstantiated_zones=allow_unloaded_zones)", "def _find_managed_zone(self, domain, record_name):\n\n zone_dns_name_guesses = [record_name] + dns_common.base_domain_name_guesses(domain)\n\n logger.debug(\"Guesses: \")\n for zone_name in zone_dns_name_guesses:\n logger.debug(\" - %s\", zone_name)\n\n for zone_name in zone_dns_name_guesses:\n # get the zone id\n try:\n logger.debug(\"looking for zone: %s\", zone_name)\n try:\n response = self.dns_client.get_zone(zone_name)\n if response.status == 200:\n logger.debug(\"Response data %s\", response.data)\n logger.debug(\"Found zone: %s\", zone_name)\n logger.debug(\"OCID: %s\", response.data.id)\n logger.debug(\"Compartment: %s\", response.data.compartment_id)\n return response.data.id, zone_name\n except 
oci.exceptions.ServiceError as e:\n logger.debug(\"Zone '%s' not found\", zone_name)\n except errors.PluginError as e:\n pass\n return None, None", "def get_zone(self, zone_id):\n data = self.connection.request(\"/v2/domains/%s\" % (zone_id)).object[\"domain\"]\n\n return self._to_zone(data)", "def get_current_zone() -> Zone:\n return services.current_zone()", "def get_hosted_zone_id(session, hosted_zone):\n if session is None:\n return None\n\n client = session.client('route53')\n response = client.list_hosted_zones_by_name(\n DNSName=hosted_zone,\n MaxItems='1'\n )\n if len(response['HostedZones']) >= 1:\n full_id = response['HostedZones'][0]['Id']\n id_parts = full_id.split('/')\n return id_parts.pop()\n else:\n return None", "def get_hosted_zone(self, hosted_zone_id):\r\n uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)\r\n response = self.make_request('GET', uri)\r\n body = response.read()\r\n boto.log.debug(body)\r\n if response.status >= 300:\r\n raise exception.DNSServerError(response.status,\r\n response.reason,\r\n body)\r\n e = boto.jsonresponse.Element(list_marker='NameServers',\r\n item_marker=('NameServer',))\r\n h = boto.jsonresponse.XmlHandler(e, None)\r\n h.parse(body)\r\n return e", "def get_zone(self, zone_id):\n zones = self.list_zones()\n zone = [zone for zone in zones if zone.id == zone_id]\n if len(zone) == 0:\n raise ZoneDoesNotExistError(\n driver=self, value=\"The zone doesn't exists\", zone_id=zone_id\n )\n return zone[0]", "def find_availability_zone(self, name_or_id, ignore_missing=False):\n return self._find(_availability_zone.AvailabilityZone, name_or_id,\n ignore_missing=ignore_missing)", "def get_zone(self):\n to_return = None\n try:\n to_return = self.ns1.loadZone(self.module.params.get('zone'))\n except ResourceException as re:\n if re.response.code == 404:\n if (\n self.module.params.get('ignore_missing_zone')\n and self.module.params.get('state') == \"absent\"\n ):\n # zone not found but we are in the absent state\n # and the user doesn't care that the zone doesn't exist\n # nothing to do and no change\n self.module.exit_json(changed=False)\n else:\n # generic error or user cares about missing zone\n self.module.fail_json(\n msg=\"error code %s - %s \" % (re.response.code, re.message)\n )\n return to_return", "def infoDnsZone(self, domainname: str) -> DNSZone:\n response = self._send(self.nc_request(action=\"infoDnsZone\", parameters={\"domainname\": domainname}))\n\n # build zone\n zone = DNSZone(name=domainname,\n ttl=int(response[\"ttl\"]),\n serial=response[\"serial\"],\n refresh=int(response[\"refresh\"]),\n retry=int(response[\"retry\"]),\n expire=int(response[\"expire\"]),\n dnssecstatus=response[\"dnssecstatus\"])\n\n return zone", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def zone(self) -> str:\n return pulumi.get(self, \"zone\")", "def get_zone(self, zone_id, records=True):\r\n mask = None\r\n if records:\r\n mask = 'resourceRecords'\r\n return self.service.getObject(id=zone_id, mask=mask)", "def hosted_zone_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"hosted_zone_id\")", "def get_zone(self, zone_id):\n\n if zone_id not in self._zones:\n raise ZoneDoesNotExistError(driver=self, value=None, zone_id=zone_id)\n\n return self._zones[zone_id][\"zone\"]", "def get_zone(name: Optional[str] = None,\n private_zone: Optional[bool] = None,\n resource_record_set_count: Optional[int] = None,\n tags: Optional[Mapping[str, str]] = None,\n vpc_id: Optional[str] = None,\n zone_id: Optional[str] = None,\n opts: 
Optional[pulumi.InvokeOptions] = None) -> AwaitableGetZoneResult:\n __args__ = dict()\n __args__['name'] = name\n __args__['privateZone'] = private_zone\n __args__['resourceRecordSetCount'] = resource_record_set_count\n __args__['tags'] = tags\n __args__['vpcId'] = vpc_id\n __args__['zoneId'] = zone_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws:route53/getZone:getZone', __args__, opts=opts, typ=GetZoneResult).value\n\n return AwaitableGetZoneResult(\n arn=pulumi.get(__ret__, 'arn'),\n caller_reference=pulumi.get(__ret__, 'caller_reference'),\n comment=pulumi.get(__ret__, 'comment'),\n id=pulumi.get(__ret__, 'id'),\n linked_service_description=pulumi.get(__ret__, 'linked_service_description'),\n linked_service_principal=pulumi.get(__ret__, 'linked_service_principal'),\n name=pulumi.get(__ret__, 'name'),\n name_servers=pulumi.get(__ret__, 'name_servers'),\n primary_name_server=pulumi.get(__ret__, 'primary_name_server'),\n private_zone=pulumi.get(__ret__, 'private_zone'),\n resource_record_set_count=pulumi.get(__ret__, 'resource_record_set_count'),\n tags=pulumi.get(__ret__, 'tags'),\n vpc_id=pulumi.get(__ret__, 'vpc_id'),\n zone_id=pulumi.get(__ret__, 'zone_id'))", "def get_account_by_name(self, name):\n return next((account for account in self.accounts\n if account.ynab_account_name.lower() == name.lower()), None)", "def get_zone(self):\n return self.project.get_flow().get_zone_of_object(self)", "def get_account(self, name):\n return self._accounts[name]", "def zone_name(self):\n return self._zone_name", "def get_account_by_name(self, name):\n return next((account for account in self.accounts\n if account.name.lower() == name.lower()), None)", "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")" ]
[ "0.7249278", "0.70573413", "0.6600804", "0.6556853", "0.65454966", "0.6509461", "0.6491115", "0.64886916", "0.64364696", "0.6349082", "0.6179819", "0.61753106", "0.61310756", "0.61241627", "0.6072795", "0.59662193", "0.5963174", "0.5910606", "0.58478206", "0.58478206", "0.58464926", "0.58451945", "0.5782923", "0.5763719", "0.57537585", "0.56903", "0.5684234", "0.56837535", "0.5678423", "0.5670928" ]
0.7208443
1
Updates or creates a domain name record pointing at the given FQDN resource.
def set_domain_to_dns_name(session, domain_name, dns_resource, hosted_zone):
    if session is None:
        return None

    client = session.client('route53')

    hosted_zone_id = get_hosted_zone_id(session, hosted_zone)
    if hosted_zone_id is None:
        print("Error: Unable to find Route 53 Hosted Zone, " + hosted_zone +
              ", Cannot set resource record for: " + dns_resource)
        return None

    response = client.change_resource_record_sets(
        HostedZoneId=hosted_zone_id,
        ChangeBatch={
            'Changes': [
                {
                    'Action': 'UPSERT',
                    'ResourceRecordSet': {
                        'Name': domain_name,
                        'Type': 'CNAME',
                        'ResourceRecords': [
                            {'Value': dns_resource},
                        ],
                        'TTL': 300,
                    }
                },
            ]
        }
    )

    return response
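A usage sketch for the UPSERT helper above, offered as an illustration only: the domain, hosted zone, and load-balancer DNS name are made-up values, and a compatible get_hosted_zone_id helper must be importable (one appears among the negatives below).

import boto3

session = boto3.session.Session(profile_name="default")
response = set_domain_to_dns_name(
    session,
    domain_name="api.example.com",
    dns_resource="my-elb-1234567890.us-east-1.elb.amazonaws.com",
    hosted_zone="example.com",
)
if response is not None:
    # change_resource_record_sets reports PENDING until Route 53 propagates the change
    print(response['ChangeInfo']['Status'])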
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_dns(self):\n if self.ptr:\n which_zone = None\n zones = dns.models.Zone.objects.all()\n for zone in zones:\n if self.ptr.endswith(zone.name) or self.ptr.endswith(zone.name + '.'):\n which_zone = zone\n break\n\n if which_zone:\n zone_name = which_zone.name\n record_name = self.ptr[:-len(zone_name)] if not self.ptr.endswith('.') else self.ptr[:-len(zone_name) - 1]\n if record_name.endswith('.'):\n record_name = record_name[:-1]\n record_type = 'A' if self.family == 4 else 'AAAA'\n\n dns.models.Record.objects.get_or_create(\n name=record_name,\n record_type=record_type,\n zone=which_zone,\n address=self\n )", "def setNodeDNSDomain(self,node,domain):\n post_data = {'search': str(domain)}\n data = self.connect('put',\"nodes/%s/dns\" % (node), post_data)\n return data", "def update_dns(c, stack_name, domain_name, profile, create=False):\n action = 'create' if create else 'update'\n\n with chdir(WORKING_DIR):\n aws('cloudformation', f'{action}-stack',\n '--stack-name', f'{stack_name}-dns',\n '--template-body', f'file://zone.yaml',\n '--parameters',\n f'ParameterKey=DomainName,ParameterValue={domain_name}',\n f'--profile', f'{profile}')\n\n aws('cloudformation', 'wait',\n f'stack-{action}-complete',\n '--stack-name', f'{stack_name}-dns',\n f'--profile', f'{profile}')\n\n with chdir(WORKING_DIR):\n aws('cloudformation', f'{action}-stack',\n '--stack-name', f'{stack_name}-dns-mail',\n '--template-body', f'file://mail.yaml',\n f'--profile', f'{profile}')", "def set_dns_cname ( route53_conn, dns_name, cname_value ) :\n r53 = boto.route53.record.ResourceRecordSets( route53_conn, route_53_hosted_zoneid )\n monitor_dns = r53.add_change( 'UPSERT', dns_name, 'CNAME', ttl=60 )\n monitor_dns.add_value( cname_value )\n r53.commit( )", "def update(domain_id, name, sensitive):\n domain = get(domain_id)\n domain.name = name\n domain.sensitive = sensitive\n database.update(domain)", "def set_keystone_v3_domain(self, **kwargs):\n LOG_OBJ.debug(\"Creating the domain.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains/\" + \\\n str(kwargs['domain_id'])\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _domain_info = {\"domain\": {}}\n for argument in [\"name\", \"description\", \"enabled\", \"disabled\"]:\n try:\n _domain_info['domain'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_domain_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the domain\")\n print (\"No response from Server while set the domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n return True", "def update_domain():\n\n for e in Expr.search() + User.search(): e.set_tld(config.server_name)", "def _ensure_fqdn(self, name):\n if name[-1:] != \".\":\n return \"%s.\" % name\n else:\n return name", "def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'", "def update_domain_name(self, DomainName: str, DomainNameConfigurations: List = None) -> Dict:\n pass", "def replace_domain(address, old_domain, new_domain):\n old_domain_pattern = r'' + old_domain + '$'\n address = re.sub(old_domain_pattern, new_domain, address)\n return 
address", "def test_record_fqdn(self):\n zone = Zone('test.example.com')\n record = Record(zone, 'test-record', {'type': 'A', 'ttl': 300})\n self.assertEqual(record.fqdn, 'test-record.test.example.com')", "def dnsUpdate(portId, ipAddr='', action='create'):\n\tzone = 'osdev.skrill.net.'\n\trevZone = '23.32.10.in-addr.arpa'\n\tcname = portId + '.' + zone\n\tttl = 300\n\tnsServer = '10.32.29.99'\n key = 'yw0ADuZjXAhcGgMOYg/Clx1128iUSfhlOHdsY4CzVNIVVVXismrAe+WKMBxocLhbrIVHGvmR94jDC46K18K6oQ=='\n keyRing = dns.tsigkeyring.from_text({zone : key})\n\thostName = genHostname(ipAddr)\n\tdnsUpdate = dns.update.Update(zone, keyring=keyRing)\n\tipAddr = str(ipAddr)\n\thostName = str(hostName)\n\tif action == 'create':\n\t\tdnsUpdate.replace( hostName.split('.')[0], ttl, 'A', ipAddr )\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS A record updated for: ' + hostName)\n\t\tdnsUpdate.replace(portId, ttl, 'CNAME', hostName)\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS CNAME record updated for: ' + hostName)\n\t\tdnsUpdate = dns.update.Update(revZone, keyring=keyRing)\n\t\tdnsUpdate.replace(ipAddr.split('.')[3], ttl, 'PTR', hostName)\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS PTR record updated for: ' + hostName)\n\tif action == 'delete':\n\t\ttry:\n\t\t\thostName = dns.resolver.query(cname, 'CNAME')[0].to_text()\n\t\t\tipAddr = dns.resolver.query(hostName, 'A')[0].to_text()\n\t\texcept Exception, e:\n\t\t\tlogging.exception('DNS query failed for cname and A records: ' + cname + ' ' + hostName)\n\t\t\thostName = ''\n\t\t\treturn hostName\n\t\tdnsUpdate.delete(cname, 'CNAME')\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS CNAME record deleted for: ' + portId + ' to ' + hostName)\n\t\tdnsUpdate.delete(hostName.split('.')[0])\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS A record deleted for: ' + hostName)\n\t\tdnsUpdate = dns.update.Update(revZone, keyring=keyRing)\n dnsUpdate.delete(ipAddr.split('.')[3])\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS PTR record deleted for: ' + hostName)\n\t\treturn hostName", "def edit_domain(domain_name):\n\n if request.method == \"POST\":\n domain = session.query(Domain).filter(\n Domain.domain_name == domain_name).first()\n\n # Check if domain.provider object exists to make sure\n # duplicate Provider.provider_url is not created\n provider = session.query(Provider).filter(\n Provider.provider_url == domain.provider.provider_url).first()\n if not provider:\n provider = Provider(\n provider_url=request.form[\"provider-url\"].strip())\n\n domain.category.category_name = session.query(CategoryName).filter(\n CategoryName.name == request.form[\"category\"].strip()).first()\n\n domain.domain_name = parse_url(request.form[\"domain-name\"].strip())\n domain.ip = request.form[\"ip-address\"].strip()\n domain.provider.provider_url = parse_url(\n provider.provider_url.strip())\n domain.is_active = request.form.get(\"is-active\", False)\n domain.is_monitored = request.form.get(\"is-monitored\", False)\n\n # Convert date string from form to date object\n exp_date = datetime.strptime(request.form.get(\"exp-date\"),\n \"%Y-%m-%d\").date()\n domain.exp_date = exp_date\n\n session.add(domain)\n\n try:\n session.commit()\n message = \"{}Success!{} Updated {}{}{} successfully.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"success\")\n except:\n 
session.rollback()\n message = \"{}Error!{} Problem with one of the fields.\".format(\n \"<strong>\", \"</strong>\")\n flash(message, \"danger\")\n return redirect(url_for(\"edit_domain\", domain_name=domain_name))\n\n if request.form[\"submit\"] == \"Save\":\n return redirect(url_for(\"view_domain\",\n domain_name=domain.domain_name,\n category_names=category_names))\n else:\n return redirect(url_for(\"edit_domain\",\n domain_name=domain.domain_name,\n category_names=category_names))\n else:\n domain = session.query(Domain).filter(\n Domain.domain_name == domain_name).first()\n\n # Obtain list of domain names without tuple to use\n # for domain_pager()\n domain_names = [d.domain_name for d in session.query(\n Domain.domain_name).order_by(Domain.domain_name).all()]\n next_domain, previous_domain = domain_pager(domain_name, domain_names)\n\n kwargs = {\n \"domain\": domain,\n \"domain_name\": domain_name,\n \"category_names\": category_names,\n \"next_domain\": next_domain,\n \"previous_domain\": previous_domain\n }\n return render_template(\"edit_domain.html\", **kwargs)", "def update_tld_redirect(c, stack_name, fqdn, profile, cert_arn=None, create=False):\n action = 'create' if create else 'update'\n\n with chdir(WORKING_DIR):\n aws('cloudformation', f'{action}-stack',\n '--stack-name', f'{stack_name}-dns-tld',\n '--template-body', f'file://top-level-domain.yaml',\n '--parameters',\n f'ParameterKey=FullyQualifiedDomainName,ParameterValue={fqdn}',\n f'ParameterKey=CertificateArn,ParameterValue={cert_arn if cert_arn else \"\"}',\n f'--profile', f'{profile}')", "def create_or_show_domain(name):\n manager = get_manager()\n domain_id = manager.resolve_domain_id(name)\n if domain_id:\n log(\"Domain '%s' already exists.\" % name, level=DEBUG)\n else:\n manager.create_domain(domain_name=name,\n description='Created by Juju')\n log(\"Created new domain: %s\" % name, level=DEBUG)\n domain_id = manager.resolve_domain_id(name)\n return domain_id", "def create_domain(DomainName=None):\n pass", "def post_domain_update(self, resource_id, resource_dict):\n pass", "def add_domain():\n\n today = date.today()\n\n if request.method == \"POST\":\n # Check to see if domain already exists because\n # duplicate domain names aren't allowed\n domain = session.query(Domain).filter_by(\n domain_name=request.form[\"domain-name\"]).first()\n if domain:\n message = \"{}Error!{} {}{}{} already exists.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"danger\")\n return redirect(url_for(\"add_domain\", today=today,\n category_names=category_names))\n\n # Find existing Provider otherwise create new Provider object\n provider = session.query(Provider).filter(\n Provider.provider_url == request.form[\"provider-url\"]).first()\n if not provider:\n provider = Provider(provider_url=request.form[\"provider-url\"])\n\n # Get existing category name object from CategoryName table\n category_name = session.query(CategoryName).filter(\n CategoryName.name == request.form[\"category\"]).first()\n\n domain = Domain(\n category=Category(),\n domain_name=request.form[\"domain-name\"],\n ip=request.form[\"ip-address\"],\n provider=provider)\n domain.category.category_name = category_name\n domain.status.append(Status(status_type=\"added\"))\n domain.is_active = request.form.get(\"is-active\", False)\n domain.is_monitored = request.form.get(\"is-monitored\", False)\n\n # Convert date string from form to date object\n exp_date = datetime.strptime(request.form.get(\"exp-date\"),\n 
\"%Y-%m-%d\").date()\n domain.exp_date = exp_date\n\n session.add(domain)\n\n try:\n session.commit()\n message = \"{}Success!{} Added {}{}{} successfully.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message , \"success\")\n except:\n session.rollback()\n message = \"{}Error!{} Could not add add {}{}{}.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"danger\")\n\n if request.form[\"submit\"] == \"Submit\":\n return redirect(url_for(\"home\"))\n else:\n return redirect(url_for(\"add_domain\", today=today,\n category_names=category_names))\n else:\n return render_template(\"add_domain.html\", today=today,\n category_names=category_names)", "def post_domain_create(self, resource_dict):\n pass", "def dns_update(self, full_record_name, record_type, value=None, raw=False, **kwargs):\n\n endpoint = '/Domain/DnsRecord/Update'\n\n params = {\n 'FullRecordName' : full_record_name,\n 'Type': record_type,\n }\n\n params.update(kwargs)\n\n\n if record_type not in VALID_DNS_RECORD_TYPES:\n raise ValueError(\"Accepted values for this argument are: A, AAAA, DYNAMIC, CNAME, MX, SRV, TXT and NS\")\n\n if not value and record_type != 'DYNAMIC':\n raise ValueError(\"All records except DYNAMIC must have their value\")\n \n if record_type == 'DYNAMIC':\n if not kwargs.has_key('DynDnsLogin') or not kwargs.has_key('DynDnsPassword'):\n raise ValueError('DynDNS login and password are required when record type is DYNAMIC')\n\n if value:\n params['Value'] = value\n \n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response.get('status') == 'SUCCESS'", "def domain(self, value):\n if hasattr(self, \"_domain\"):\n raise ValueError(\"A ServerName's domain cannot be changed.\")\n if value is None:\n raise ValueError(\"A ServerName must be given a domain.\")\n if not isinstance(value, str):\n raise TypeError(\"The domain must be a string, not %s.\" % (type(value)))\n if value is \"\":\n raise ValueError(\"A empty string is not a valid domain.\")\n self._domain = value", "def create_domain(self, url_data, service_id, service_version):\n request_dict = {k: v[0] for k, v in url_data}\n domain_name = request_dict['name']\n\n create_domain = {\n 'comment': '',\n 'service_id': service_id,\n 'version': service_version,\n 'name': domain_name}\n\n if 'domain_list' not in self.fastly_cache[service_id]:\n self.fastly_cache[service_id]['domain_list'] = []\n\n self.fastly_cache[service_id]['domain_list'].append(\n [create_domain, 'None', 'False'])\n return create_domain", "def add_new_domain(self):\n\n domain = self.dlg.uComboBoxDomain.currentText()\n\n if domain in self.domains:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: Domains must be unique. \" \"Please edit the domain below\"\n )\n return\n\n if len(self.domains) >= 10:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: You can only store up to . 
\" \"10 domain entries\"\n )\n return\n\n if domain == \"OTHER\":\n domain = \"\"\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).setText(\n domain\n )\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(len(self.domains) + 1)).show()\n self.dlg.uWarningSettings.hide()", "def add(self, newaddress):\n list = newaddress.split(\"@\")\n newdomain = list[-1]\n if not newdomain in self.__domainlist:\n self.__domainlist.append(newdomain)\n else:\n print(\"Domain is already in the database\")", "def normalize_fqdn(fqdn):\n if not fqdn:\n return None\n\n if fqdn.endswith('/'):\n fqdn = fqdn.strip('/')\n\n # bare fqdn, fallback to http://\n if not fqdn.startswith('http'):\n fqdn = \"http://%s\" % fqdn\n return fqdn", "def _associate_floating_ip(self, context, domain_id, extra, floating_ip_id, floating_ip, port_id):\n\n addresses = [{\n 'version': 4,\n 'address': floating_ip,\n }]\n try:\n names = self._create(context=context,\n addresses=addresses,\n name_format=cfg.CONF[self.name].format,\n extra=extra,\n domain_id=domain_id,\n managed_extra='portid:%s' % (port_id),\n resource_type='a:floatingip',\n resource_id=floating_ip_id)\n except (designate.exceptions.DuplicateRecord, CirrusRecordExists):\n LOG.warn('Could not create record for %s using default format, '\n 'trying fallback format' % (extra['instance_name']))\n names = self._create(context=context,\n addresses=addresses,\n name_format=cfg.CONF[self.name].format_fallback,\n extra=extra,\n domain_id=domain_id,\n managed_extra='portid:%s' % (port_id),\n resource_type='a:floatingip',\n resource_id=floating_ip_id)\n LOG.info(\"Created %s to point at %s\" % (','.join(names), floating_ip))", "def update(request):\n from pprint import pformat\n if 'ipv4' not in request.GET and 'ipv6' not in request.GET:\n return HttpResponse(\"Must specify one or both of ipv4/ipv6 address\\nParams:%s\" % pformat(request.GET.dict()), status=400)\n if not u'domain' in request.GET:\n return HttpResponse(\"Must specify domain\\nParams:%s\" % pformat(request.GET.dict()), status=400)\n\n for ipvx, record_type in ((u'ipv4', 'A'), (u'ipv6', 'AAAA')):\n if ipvx not in request.GET:\n continue\n record, created = Record.objects.get_or_create(\n name=request.GET['domain'],\n type=record_type,\n )\n record.domain_id = 1\n record.ttl = 1\n record.auth = True\n record.content = request.GET[ipvx]\n record.save()\n\n return HttpResponse(\"Saved record(s)\")", "def set_domain(self) -> None:\n self._fanfic.domain = \"Ficwad.com\"", "def post_virtual_DNS_update(self, resource_id, resource_dict):\n pass" ]
[ "0.61005515", "0.5949715", "0.58637345", "0.58026123", "0.57765675", "0.57385725", "0.56413877", "0.5568498", "0.55607516", "0.5557578", "0.55217475", "0.55150574", "0.55067784", "0.5486445", "0.5471696", "0.5465719", "0.546032", "0.5456307", "0.54177785", "0.5328181", "0.5326221", "0.5314983", "0.53069544", "0.5275867", "0.5269318", "0.5259438", "0.5233059", "0.5227046", "0.5224536", "0.5222745" ]
0.62173575
0
Delete all of the matching CNAME records from a DNS Zone
def route53_delete_records(session, hosted_zone, cname):
    if session is None:
        return None

    client = session.client('route53')

    hosted_zone_id = get_hosted_zone_id(session, hosted_zone)
    if hosted_zone_id is None:
        print("Could not locate Route53 Hosted Zone '{}'".format(hosted_zone))
        return None

    response = client.list_resource_record_sets(
        HostedZoneId=hosted_zone_id,
        StartRecordName=cname,
        StartRecordType='CNAME'
    )

    changes = []
    for record in response['ResourceRecordSets']:
        if not record['Name'].startswith(cname):
            continue
        changes.append({
            'Action': 'DELETE',
            'ResourceRecordSet': record
        })

    if len(changes) == 0:
        print("No {} records to remove".format(cname))
        return None

    response = client.change_resource_record_sets(
        HostedZoneId=hosted_zone_id,
        ChangeBatch={'Changes': changes}
    )

    return response
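An illustrative call to the cleanup function above. Route 53 returns record names fully qualified with a trailing dot, so passing the trailing dot in the cname argument helps the startswith match succeed; the zone and record names below are assumptions.

import boto3

session = boto3.session.Session(profile_name="default")
result = route53_delete_records(session, hosted_zone="example.com",
                                cname="build123.example.com.")
if result is None:
    print("Nothing deleted")
else:
    print("Delete request submitted:", result['ChangeInfo']['Id'])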
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deletednsrecord(kasserver, kasapi):\n kasserver.delete_dns_record(\"test.example.com\", \"CNAME\")\n assert kasapi.requests_contains(\"delete_dns_settings\")", "def delete_container_links(container):\n container_uri = container.cdn_uri.replace(\"http://\", \"\")\n domain = get_domain()\n if domain:\n for record in pyrax.cloud_dns.get_record_iterator(domain):\n if record.type == \"CNAME\" and record.data == container_uri:\n print(\"Unlinking %s\" % record.name)\n record.delete()", "def removeall(subdomain):\n\tTarget.query.filter(Target.subdomain.like(f\"%{subdomain}%\")).delete(synchronize_session='fetch')\n\tdb.session.commit()\n\tprint(\"deleted\",sub)", "def test_deletedbsrecord_notfound(kasserver, kasapi):\n kasserver.delete_dns_record(\"test.example.com\", \"MX\")\n assert not kasapi.requests_contains(\"delete_dns_settings\")", "def destroy_all(self):\n for name in self.app.config['SIMPLE_DOMAINS']:\n self.connection.delete_domain(name)", "def remove_zonerecord(self, record_id=None, remove_all=False):\n\n if record_id:\n r = ZoneRecord(domainname=self.domainname, subdomain=self.subdomain, record_id=record_id)\n r.remove()\n elif remove_all:\n for r in self.get_zonerecords():\n r.remove()", "def delIfMatchedHostName(hostNames_, fHostNames_):\n for i in fHostNames_:\n for j in hostNames_[:]:\n if re.match(i + '$', j):\n hostNames_.remove(j)\n return hostNames_", "def purge_all(zone_id):\n cf = CloudFlare.CloudFlare()\n return cf.zones.purge_cache.delete(zone_id, data={'purge_everything': True})", "def del_txt_record(self, domain_name: str, record_name: str, record_content: str) -> None:\n\n try:\n domain = self._find_domain(domain_name)\n except digitalocean.Error as e:\n logger.debug('Error finding domain using the DigitalOcean API: %s', e)\n return\n\n try:\n domain_records = domain.get_records()\n\n matching_records = [record for record in domain_records\n if record.type == 'TXT'\n and record.name == self._compute_record_name(domain, record_name)\n and record.data == record_content]\n except digitalocean.Error as e:\n logger.debug('Error getting DNS records using the DigitalOcean API: %s', e)\n return\n\n for record in matching_records:\n try:\n logger.debug('Removing TXT record with id: %s', record.id)\n record.destroy()\n except digitalocean.Error as e:\n logger.warning('Error deleting TXT record %s using the DigitalOcean API: %s',\n record.id, e)", "def update_dns(self):\n if self.ptr:\n which_zone = None\n zones = dns.models.Zone.objects.all()\n for zone in zones:\n if self.ptr.endswith(zone.name) or self.ptr.endswith(zone.name + '.'):\n which_zone = zone\n break\n\n if which_zone:\n zone_name = which_zone.name\n record_name = self.ptr[:-len(zone_name)] if not self.ptr.endswith('.') else self.ptr[:-len(zone_name) - 1]\n if record_name.endswith('.'):\n record_name = record_name[:-1]\n record_type = 'A' if self.family == 4 else 'AAAA'\n\n dns.models.Record.objects.get_or_create(\n name=record_name,\n record_type=record_type,\n zone=which_zone,\n address=self\n )", "def purge_files(zone_id, zone_name, files):\n cf = CloudFlare.CloudFlare()\n urls = normalize_urls(zone_name, files)\n click.echo(urls)\n return cf.zones.purge_cache.delete(zone_id, data={'files': urls})", "def test_cname_response(self):\n fqdn = \"cname.github.com\"\n answer = self.resolver.query(fqdn, \"CNAME\")\n for rr in answer:\n if rr.target.to_text() != \"github.map.fastly.net.\":\n raise TestException(\"Unexpected target for {0}: {1}\"\n .format(fqdn, rr.target))\n log.debug(\"[%-15s]: CNAME query 
for %s succeeded\",\n self.resolver.nameservers[0],\n fqdn)", "def cli(ctx, domain, ip_address, hostname):\n zone = getzone(domain)\n #print('.%s:%s:%s' % (domain, ip_address, hostname))\n for r in zone:\n if r['type'] == 'CNAME':\n print('C%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'TXT':\n print('\\'%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'MX':\n pass\n elif r['type'] == 'A':\n print('=%s:%s' %( r['name'], r['content']))\n else:\n exit('unknown DNS record type: %s' % r['type'])", "def updateDnsRecords(self, zone: DNSZone, recordset: DNSRecordSet):\n self._send(self.nc_request(action=\"updateDnsRecords\",\n parameters={\"domainname\": zone.name, \"dnsrecordset\": recordset.json()}))", "def remove(self):\n\n self.call(method='removeZoneRecord', args=[self.domainname, self.subdomain, self.record_id])", "def _cleanup(self, domain, validation_name, validation):\n\n if self.created_record_reference is None:\n raise errors.PluginError(\n \"Cannot clean up DNS because the record hasn't been created yet\"\n )\n\n domain_name = self._metaname_domain_name_for_hostname(validation_name)\n try:\n self._metaname_client().request(\n \"delete_dns_record\", domain_name, self.created_record_reference\n )\n except Exception as e:\n raise errors.PluginError(\n f\"Unable to delete the acme-challenge record in the zone {domain}: {e}\"\n ) from e\n else:\n self.created_record_reference = None", "def del_all_records():\n delete_alles = Customer.delete().where(Customer.name >= '')\n delete_alles.execute()", "def test_download_publicdns():\n dnsfile = './dnslist.test'\n assert howisresolved.download_publicdns(dnsfile) is None\n os.remove(dnsfile)", "def print_all_dns_records():\n for domain in sorted(get_domains()):\n dns_records = get_domain_dns_records(domain)\n print(domain)\n pprint(dns_records)\n print(\"*\" * 50)\n # TODO: poor man's rate limiter. 
improve?\n time.sleep(2)", "def remove_all(self, host_names, raise_on_not_found=True):\n for host_name in host_names:\n self.remove_one(host_name, raise_on_not_found)", "def ensure_dns_records_point_to_load_balancer(load_balancer, environment):\n zone_id = Constants['HostedZoneId']\n dns_names = Constants['DnsNames'][environment]\n\n # sum() -- takes a list of lists and outputs a single list :)\n changes = sum([ [\n {\n 'Action': 'UPSERT',\n 'ResourceRecordSet': {\n 'Name': dns_name,\n 'Type': 'A',\n 'AliasTarget': {\n 'HostedZoneId': load_balancer['CanonicalHostedZoneId'],\n 'DNSName': load_balancer['DNSName'],\n 'EvaluateTargetHealth': False,\n },\n },\n },\n {\n 'Action': 'UPSERT',\n 'ResourceRecordSet': {\n 'Name': dns_name,\n 'Type': 'AAAA',\n 'AliasTarget': {\n 'HostedZoneId': load_balancer['CanonicalHostedZoneId'],\n 'DNSName': load_balancer['DNSName'],\n 'EvaluateTargetHealth': False,\n },\n },\n },\n ] for dns_name in dns_names ], [])\n\n Route53.change_resource_record_sets(\n HostedZoneId=zone_id,\n ChangeBatch={\n 'Comment': 'Auto-updated by launch-conglomerate.py',\n 'Changes': changes,\n }\n )", "def configure_dns(cors_origin, krb_realm):\n\n ctx = CellCtx(cors=cors_origin, krb_realm=krb_realm)\n cellname = context.GLOBAL.cell\n\n ipa_client = awscontext.GLOBAL.ipaclient\n idnsname = 'zk.{}'.format(cellname)\n\n admin_cell = admin.Cell(context.GLOBAL.ldap.conn)\n cell = admin_cell.get(cellname)\n\n masters = ','.join(['{}:{}'.format(m['hostname'], m['zk-client-port'])\n for m in cell['masters']])\n scheme = cell.get('zk-auth-scheme')\n if not scheme:\n scheme = 'zookeeper'\n\n zkurl = '{scheme}://{username}@{hostports}/treadmill/{cell}'.format(\n scheme=scheme,\n username=ctx.proid,\n hostports=masters,\n cell=cellname\n )\n\n found = False\n try:\n current_rec = ipa_client.get_dns_record(idnsname)\n except ipaclient.NotFoundError:\n current_rec = None\n\n if current_rec:\n for record in current_rec['txtrecord']:\n if record != zkurl:\n _LOGGER.info(\n 'Deleting stale TXT record: %s %s', idnsname, record\n )\n ipa_client.delete_txt_record(idnsname, record)\n else:\n found = True\n\n if found:\n _LOGGER.info('Zookeeper TXT records up to date: %s : %s',\n idnsname, zkurl)\n return\n\n ipa_client.add_txt_record(idnsname, zkurl)", "def delete_domain(DomainName=None):\n pass", "def delIfMatchedAddr(ipv4Addresses_, fIpv4Addresses_):\n s1 = netaddr.IPSet(ipv4Addresses_)\n l2 = []\n for i in fIpv4Addresses_[:]:\n m = re.search(r'(.*) \\.\\.\\. 
(.*)', i)\n if not m:\n l2.append(i)\n else:\n l2 += netaddr.IPSet(netaddr.iter_iprange(m.group(1), m.group(2)))\n s2 = netaddr.IPSet(l2)\n return map(str, list(s1 - s2))", "def testDeleteZone(self):\n zmodels.Zone.objects.all().delete()\n self._saveZone()\n response = self._delete('inventory/zones/2/',\n username=\"admin\", password=\"password\")\n self.assertEquals(response.status_code, 204)\n try:\n zmodels.Zone.objects.get(pk=1)\n self.fail(\"Lookup should have failed due to deletion\")\n except zmodels.Zone.DoesNotExist:\n pass # what we expect", "def delete(self, name, *args):\n\n if isinstance(name, string_types):\n name = dns.name.from_text(name, None)\n if len(args) == 0:\n self.find_rrset(self.authority, name, dns.rdataclass.ANY,\n dns.rdatatype.ANY, dns.rdatatype.NONE,\n dns.rdatatype.ANY, True, True)\n elif isinstance(args[0], dns.rdataset.Rdataset):\n for rds in args:\n for rd in rds:\n self._add_rr(name, 0, rd, dns.rdataclass.NONE)\n else:\n args = list(args)\n if isinstance(args[0], dns.rdata.Rdata):\n for rd in args:\n self._add_rr(name, 0, rd, dns.rdataclass.NONE)\n else:\n rdtype = args.pop(0)\n if isinstance(rdtype, string_types):\n rdtype = dns.rdatatype.from_text(rdtype)\n if len(args) == 0:\n self.find_rrset(self.authority, name,\n self.zone_rdclass, rdtype,\n dns.rdatatype.NONE,\n dns.rdataclass.ANY,\n True, True)\n else:\n for s in args:\n rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s,\n self.origin)\n self._add_rr(name, 0, rd, dns.rdataclass.NONE)", "def delete_cors_policy(ContainerName=None):\n pass", "def delete(self,\n dns_forwarder_zone_id,\n ):\n return self._invoke('delete',\n {\n 'dns_forwarder_zone_id': dns_forwarder_zone_id,\n })", "def delete(self, unique_id):\n return request(\n API_LIST.DNS_DELETE.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'id': unique_id\n }\n )", "def test_remove_domains_success(self, acme_id, api_url, req_domains, resp):\n\n # Setup the mocked response\n responses.add(responses.DELETE, api_url, json=resp, status=200)\n\n acme = ACMEAccount(client=self.client)\n response = acme.remove_domains(acme_id, req_domains)\n\n self.assertEqual(response, resp)" ]
[ "0.68798274", "0.6212371", "0.6047711", "0.60346884", "0.5980792", "0.58506405", "0.5818954", "0.57226974", "0.5720566", "0.57001406", "0.56724554", "0.56326467", "0.56292945", "0.56163657", "0.5554157", "0.554218", "0.55259824", "0.5514953", "0.5445766", "0.5426144", "0.5409733", "0.5393368", "0.5374507", "0.5344317", "0.5332127", "0.53155166", "0.53086466", "0.5283521", "0.528034", "0.5271213" ]
0.70036846
0
Unsubscribe all subscriptions for the given SNS topic
def sns_unsubscribe_all(session, topic, region="us-east-1", account=None):
    if session is None:
        return None

    if account is None:
        account = get_account_id_from_session(session)

    topic = "arn:aws:sns:{}:{}:{}".format(region, account, topic.replace(".", "-"))

    client = session.client('sns')
    response = client.list_subscriptions()

    for res in response['Subscriptions']:
        if res['TopicArn'] == topic:
            client.unsubscribe(SubscriptionArn=res['SubscriptionArn'])

    return None
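A sketch of how the unsubscribe helper above might be called; the topic name is an assumed value. The function rewrites dots to dashes, so alerts.example.com maps to the topic name alerts-example-com in the constructed ARN. Note that list_subscriptions returns results in pages, so a single call like the one above only covers the first page of subscriptions.

import boto3

session = boto3.session.Session(profile_name="default")

# Remove every subscription on the topic derived from this domain name.
sns_unsubscribe_all(session, topic="alerts.example.com", region="us-east-1")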
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def unsubscribe(self, topic: str, subscription_id: int = None) -> None:\n ...", "async def unsubscribe_topics(self) -> None:\n self._sub_state = await self._mqtt_client.unsubscribe(self._sub_state)", "def unsubscribe(self, topic):\n request = protos.RequestUnsubscribe(topic=topic)\n return self.stub.unsubscribe(request)", "def unsubscribe(self, user_token, topic):\n response = _request('DELETE',\n url=self.url_v1('/user/subscriptions/' + topic),\n user_agent=self.user_agent,\n user_token=user_token,\n )\n _raise_for_status(response)", "def unsubscribe(endpoint: str, topic: str, timeout: int = 5):\n global logger\n logger.info(f\"Unsubscribing from topic '{topic}' ...\")\n action = {\"action\": \"unsubscribe\", \"topic\": topic}\n reply = send_manage_message(endpoint, action, timeout)\n if not reply_is_success(reply):\n logger.warning(\"Unsubscription failed\")\n return\n logger.info(\"Unsubscription successful\")", "def unsubscribe_all_emails_from_cloudwatch(self):\n for subscription in self._get_cloudwatch_subscriptions():\n subscription_arn = subscription['SubscriptionArn']\n if not subscription_arn == 'PendingConfirmation':\n self.conn.unsubscribe(subscription_arn)", "def _async_unsubscribe(self, topic: str) -> None:\n if self._is_active_subscription(topic):\n if self._max_qos[topic] == 0:\n return\n subs = self._matching_subscriptions(topic)\n self._max_qos[topic] = max(sub.qos for sub in subs)\n # Other subscriptions on topic remaining - don't unsubscribe.\n return\n if topic in self._max_qos:\n del self._max_qos[topic]\n if topic in self._pending_subscriptions:\n # Avoid any pending subscription to be executed\n del self._pending_subscriptions[topic]\n\n self._pending_unsubscribes.add(topic)\n self._unsubscribe_debouncer.async_schedule()", "async def unsubscribe(self):\n LOGGER.info('Subscription removed')\n await self._ros.send(self._unsubscribe_msg)", "def unsubscribeTopic(self, topic:str|MQTTTopic) -> None:\n\t\tif isinstance(topic, MQTTTopic):\n\t\t\tif topic.topic not in self.subscribedTopics:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.WARNING, f'MQTT: unknown topic: {topic.topic}')\n\t\t\t\treturn\n\t\t\tif (r := self.mqttClient.unsubscribe(topic.topic))[0] == 0:\n\t\t\t\ttopic.mid = r[1]\n\t\t\telse:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.ERROR, f'MQTT: cannot unsubscribe: {r[0]}')\n\t\t\t\treturn\n\n\t\telse:\t# if topic is just the name we need to subscribe to\n\t\t\tif topic not in self.subscribedTopics:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.WARNING, f'MQTT: unknown topic: {topic}')\n\t\t\t\treturn\n\t\t\tt = self.subscribedTopics[topic]\n\t\t\tif t.isSubscribed:\n\t\t\t\tif (r := self.mqttClient.unsubscribe(t.topic))[0] == 0:\n\t\t\t\t\tt.mid = r[1]\n\t\t\t\telse:\n\t\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.ERROR, f'MQTT: cannot unsubscribe: {r[0]}')\n\t\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.WARNING, f'MQTT: topic not subscribed: {topic}')\n\n\t\t# topic is removed in _onUnsubscribe() callback", "def unsubscribe_all(self):\n ids = []\n repeat = True\n cursor = None\n # get all ids\n while repeat:\n ret = self.__twitch.get_eventsub_subscriptions(after=cursor)\n for d in ret.get('data', []):\n ids.append(d.get('id'))\n cursor = ret.get('pagination', {}).get('cursor')\n repeat = cursor is not None\n for _id in 
ids:\n succ = self.__twitch.delete_eventsub_subscription(_id)\n if not succ:\n self.__logger.warning(f'failed to unsubscribe from event {_id}')\n self.__callbacks.clear()", "def unsubscribe(self, namespace, unsub_strings=None):\n req = JSONRPCRequest('unsubscribe', [namespace, unsub_strings])\n result = yield self._send(req)\n self._cache_jsonrpc_request(req)\n raise tornado.gen.Return(result)", "def unsubscribe(self, subscription):\n request = Request(\n method='delete',\n endpoint='/streams/subcription/{}'.format(subscription)\n )\n\n def response_handler(resp):\n code = resp.status_code\n if resp.is_success:\n return 'OK'\n elif code == 403:\n raise ex.StreamPermissionError(resp, request)\n raise ex.StreamConnectionError(resp, request)\n\n return self._execute(request, response_handler)", "def unsubscribe_topic(self, topic_id: str) -> bool:\n result = self.__twitch.delete_eventsub_subscription(topic_id)\n if result:\n self.__callbacks.pop(topic_id, None)\n return result", "def get_all_subscriptions_by_topic(self, topic, next_token=None):\r\n params = {'ContentType' : 'JSON',\r\n 'TopicArn' : topic}\r\n if next_token:\r\n params['NextToken'] = next_token\r\n response = self.make_request('ListSubscriptionsByTopic', params,\r\n '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)", "def test_unsubscribe_from_topic_groups(self):\r\n group_id = None # Change me!!\r\n topic_id = None # Change me!!\r\n\r\n r = self.client.unsubscribe_from_topic_groups(group_id, topic_id)", "def unsubscribe(self, subscription):\r\n params = {'ContentType' : 'JSON',\r\n 'SubscriptionArn' : subscription}\r\n response = self.make_request('Unsubscribe', params, '/', 'GET')\r\n body = response.read()\r\n if response.status == 200:\r\n return json.loads(body)\r\n else:\r\n boto.log.error('%s %s' % (response.status, response.reason))\r\n boto.log.error('%s' % body)\r\n raise self.ResponseError(response.status, response.reason, body)", "async def async_unsubscribe_services(self) -> None:\n # Delete list of subscriptions and cancel renewal before unsubcribing\n # to avoid unsub-resub race.\n sids = list(self._subscriptions)\n self._subscriptions.clear()\n await self._update_resubscriber_task()\n\n await asyncio.gather(*(self._async_unsubscribe_service(sid) for sid in sids))", "def unsubscribe(self, sub_id):\n self.send({'msg': 'unsub', 'id': sub_id})", "def unsubscribe(self, sub_id):\n self.send({'msg': 'unsub', 'id': sub_id})", "def unsubscribe(self):\n\n # Unsubscribe\n self.pyrps.redis.srem(self.pyrps._ns_subscriptions(self.queue), self.consumer_id) \n\n # Remove message queue\n self.pyrps.redis.delete(self.pyrps._ns_queue(self.queue, self.consumer_id))", "def _onUnsubscribe(self, client:mqtt.Client, userdata:Any, mid:int) -> None:\n\t\t# TODO doc, error check when not connected, not subscribed\n\t\tfor t in self.subscribedTopics.values():\n\t\t\tif t.mid == mid:\n\t\t\t\tdel self.subscribedTopics[t.topic]\n\t\t\t\tself.messageHandler and self.messageHandler.onUnsubscribed(self, t.topic)\n\t\t\t\tbreak", "def _get_subscriptions(self, topic_arn):\n return self.conn.get_all_subscriptions_by_topic(topic_arn)['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']", "def terminateSubscriptionsOnTopics(self, topics, message=None):\n if not topics:\n return\n topicsCArraySize 
= len(topics)\n topicsCArray = internals.new_topicPtrArray(topicsCArraySize)\n try:\n for i, topic in enumerate(topics):\n internals.topicPtrArray_setitem(topicsCArray,\n i,\n get_handle(topic))\n _ExceptionUtil.raiseOnError(\n internals.blpapi_ProviderSession_terminateSubscriptionsOnTopics(\n self.__handle,\n topicsCArray,\n topicsCArraySize,\n message))\n finally:\n internals.delete_topicPtrArray(topicsCArray)", "def unsubscribe(self, *rss_feeds):\n [self.subscriptions.remove(feed) for feed in rss_feeds if feed in self.subscriptions]\n self.save()", "def unsubscribe_subscription_events(self, uuid: UUID) -> bool:\n return self._generic_unsubscribe('/subscriptions/events', uuid)", "def onUnsubscribed(self, connection:MQTTConnection, topic:str) -> bool:\n\t\tconnection.subscribedCount -= 1\n\t\treturn True", "def unsubscribe(self):\n if self._subscribed and self._connected:\n try:\n msg = self._create_message(strings.UNSUB_MSG)\n self.write(msg)\n except (OSError, KeyError) as ex:\n _LOGGER.error(\n \"PyISY encountered a socket error while writing unsubscribe message to the socket: %s.\",\n ex,\n )\n self._subscribed = False\n self.disconnect()", "def unsubscribe_all_known(self):\n for key, value in self.__callbacks.items():\n self.__logger.debug(f'unsubscribe from event {key}')\n succ = self.__twitch.delete_eventsub_subscription(key)\n if not succ:\n self.__logger.warning(f'failed to unsubscribe from event {key}')\n self.__callbacks.clear()", "def get_subscriptions(self, topic_name):\r\n resp = self._make_request('get',\r\n 'topics/%s/subscriptions' % topic_name)\r\n return resp.json()", "def unsubscribe( self, mess, args):\n user = self.get_sender_username(mess)\n if not user in self.users:\n return 'You are not subscribed!'\n else:\n user = self.users.pop(user)\n self.message_queue.append('_%s has left the channel_' % user)\n self.log.info( '%s unsubscribed from the broadcast.' % user)\n self.save_state()\n return 'You are now unsubscribed.'" ]
[ "0.78564173", "0.7503416", "0.7494931", "0.7264927", "0.7082679", "0.6934921", "0.6786763", "0.6547516", "0.6486138", "0.6444043", "0.64201444", "0.6402963", "0.63842636", "0.63343644", "0.6314208", "0.63087684", "0.62940603", "0.6240863", "0.6240863", "0.62375754", "0.6231161", "0.6154216", "0.61439407", "0.61316127", "0.60458785", "0.59939873", "0.5969882", "0.59490234", "0.59434533", "0.5910122" ]
0.811901
0
Delete all of the IAM policies that start with the given domain name
def policy_delete_all(session, domain, path="/"):
    client = session.client('iam')
    resp = client.list_policies(Scope='Local', PathPrefix=path)

    prefix = domain.replace('.', '-')
    for policy in resp.get('Policies', []):
        if policy['PolicyName'].startswith(prefix):
            ARN = policy['Arn']
            if policy['AttachmentCount'] > 0:
                # cannot delete a policy if it is still in use
                attached = client.list_entities_for_policy(PolicyArn=ARN)
                for group in attached.get('PolicyGroups', []):
                    client.detach_group_policy(GroupName=group['GroupName'], PolicyArn=ARN)
                for user in attached.get('PolicyUsers', []):
                    client.detach_user_policy(UserName=user['UserName'], PolicyArn=ARN)
                for role in attached.get('PolicyRoles', []):
                    client.detach_role_policy(RoleName=role['RoleName'], PolicyArn=ARN)
            client.delete_policy(PolicyArn=ARN)
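A hedged example of invoking the policy cleanup above; the domain and path are placeholder values. Because list_policies is called without pagination, only the first page of customer-managed policies is scanned by the function as written.

import boto3

session = boto3.session.Session(profile_name="default")

# Deletes every customer-managed policy whose name starts with "corp-example-com"
# (dots in the domain become dashes before matching).
policy_delete_all(session, domain="corp.example.com", path="/")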
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroy_all(self):\n for name in self.app.config['SIMPLE_DOMAINS']:\n self.connection.delete_domain(name)", "def delete_policies():\n if PoliciesOutput.POLICIES_EVENT not in ctx.instance.runtime_properties:\n return\n\n service_component_name = ctx.instance.runtime_properties.get(\n PoliciesOutput.SERVICE_COMPONENT_NAME\n )\n if not service_component_name:\n ctx.logger.warn(\"failed to find service_component_name to delete_policies in consul-kv\")\n return\n\n delete_policies = [\n PoliciesOutput._gen_txn_operation(\n PoliciesOutput.OPERATION_DELETE_FOLDER, service_component_name\n )\n ]\n PoliciesOutput._run_transaction(\"delete_policies\", delete_policies)", "def delete_domain(DomainName=None):\n pass", "def test_delete_namespaced_policy(self):\n pass", "def delete_cors_policy(ContainerName=None):\n pass", "def delete_domain_name(self, DomainName: str):\n pass", "def removeall(subdomain):\n\tTarget.query.filter(Target.subdomain.like(f\"%{subdomain}%\")).delete(synchronize_session='fetch')\n\tdb.session.commit()\n\tprint(\"deleted\",sub)", "def remove(self, *domains):\n for domain in domains:\n self._remove_sequence(domain)", "def delete_bucket_policy(Bucket=None):\n pass", "async def setjrremove(self, ctx, domain):\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n \n try:\n allowedDomains.remove(domain)\n except:\n await ctx.send(\"Something went wrong :( Check if you have the name right using `[p]setjsonrequest list`\")\n else:\n await self.config.guild(ctx.guild).allowedDomains.set(allowedDomains)\n await ctx.message.add_reaction(\"✅\")", "def delete(self, policy_name):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n # Actually run vault\n logging.info(\"Deleting the policy: %s\", address)\n self.vault.requests_request(\"DELETE\", address, headers=self.vault.token_header)", "def policy_delete(request, policy_id):\n neutronclient(request).delete_qos_policy(policy_id)", "def sqs_delete_all(session, domain):\n client = session.client('sqs')\n resp = client.list_queues(QueueNamePrefix=domain.replace('.','-'))\n\n for url in resp.get('QueueUrls', []):\n client.delete_queue(QueueUrl=url)", "def delete(self):\r\n return self.connection.delete_domain(self)", "def delete_all_onprogress_domains():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM on_progress_domains\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def delete_metric_policy(ContainerName=None):\n pass", "def clean_up_service_accounts_in_namespaces_with_cleanup_policy(self, namespaces, cleanup_policy):\n return self.delete_resource_with_cleanup_policy(namespaces, cleanup_policy,\n self.core_api.delete_collection_namespaced_service_account,\n \"SA\")", "def test_delete_namespaced_policy_binding(self):\n pass", "def delete_ipsecpolicy(self, ipsecpolicy):\r\n return self.delete(self.ipsecpolicy_path % (ipsecpolicy))", "def delete_container_policy(ContainerName=None):\n pass", "def _remove_domain(req):\n r = {}\n for key in req:\n if \"domain\" not in key:\n r[key] = req[key]\n return r", "def delete_all_domain_pages():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM domain_pages\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make 
sure you are connected\")\r\n cursor.close()\r\n db.close()", "def delete_policy(self, policy_name):\r\n return self.connection.delete_lb_policy(self.name, policy_name)", "def delete_policy(policystore_url, policy_credentials, verbose):\n\n if verbose:\n logging.info('Deleting policy')\n pprint.pprint(policy_credentials)\n\n delete_url = policystore_url + POLICYSTORE_PREFIX + 'DeleteEntitlementPolicy'\n\n r = requests.post(delete_url, headers=headers(), json=policy_credentials)\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n sys.exit('Failed to delete policy')\n\n logging.info('SUCCESS: Deleted policy')", "def delete_tokens_for_domain(self, domain_id):\n if not CONF.token.revoke_by_id:\n return\n projects = self.assignment_api.list_projects()\n for project in projects:\n if project['domain_id'] == domain_id:\n for user_id in self.assignment_api.list_user_ids_for_project(\n project['id']):\n self.delete_tokens_for_user(user_id, project['id'])\n # TODO(morganfainberg): implement deletion of domain_scoped tokens.\n\n users = self.identity_api.list_users(domain_id)\n user_ids = (user['id'] for user in users)\n self.delete_tokens_for_users(user_ids)", "def test_remove_domains_success(self, acme_id, api_url, req_domains, resp):\n\n # Setup the mocked response\n responses.add(responses.DELETE, api_url, json=resp, status=200)\n\n acme = ACMEAccount(client=self.client)\n response = acme.remove_domains(acme_id, req_domains)\n\n self.assertEqual(response, resp)", "def delete_policy(policy_id):\n policy = PolicyService.get_policy_by_id(policy_id)\n if policy is None:\n abort(404)\n\n policy.delete()\n\n return {}", "def rbac_policy_delete(request, policy_id):\n neutronclient(request).delete_rbac_policy(policy_id)", "def clean_up_namespaces_with_cleanup_policy(self, cleanup_policy):\n responses = []\n namespaces = self.core_api.list_namespace(\n label_selector=labels_to_string({CLEANUP_LABEL: cleanup_policy})).items\n namespace_names = [n.metadata.name for n in namespaces]\n self.logger.debug(\"Deleting namespaces %s with cleanup policy %s\", namespace_names, cleanup_policy)\n for namespace in namespaces:\n resp = self.core_api.delete_namespace(namespace.metadata.name, propagation_policy=\"Background\")\n responses.append(resp)\n while self.core_api.list_namespace(label_selector=labels_to_string({CLEANUP_LABEL: cleanup_policy})).items:\n self.logger.debug(\"Waiting for namespaces %s to be deleted.\", namespace_names)\n\n # TODO ugly hack to prevent race conditions when deleting namespaces\n time.sleep(2)\n return responses", "def delete_all(pat: str, resource_registration_endpoint: str, secure: bool = False):\n \n all_resources = list(pat,resource_registration_endpoint,secure)\n\n for resource_id in all_resources:\n delete(pat, resource_registration_endpoint, resource_id, secure)" ]
[ "0.6473392", "0.6243997", "0.5969745", "0.5849608", "0.57430595", "0.57022417", "0.5679662", "0.56782377", "0.5672439", "0.56152827", "0.55565584", "0.54859465", "0.5423485", "0.5404208", "0.5362759", "0.53223443", "0.5299086", "0.5266806", "0.5250096", "0.52498037", "0.5236304", "0.52315396", "0.5193791", "0.5187508", "0.51831037", "0.5156866", "0.5150313", "0.51363075", "0.5109995", "0.5103564" ]
0.71195954
0
Gets the account id from the session using the IAM client. This method will work even if you have assumed a role in another account.
def get_account_id_from_session(session): if session is None: return None return session.client('iam').list_users(MaxItems=1)["Users"][0]["Arn"].split(':')[4]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def account_id():\n return client.get_caller_identity()['Account']", "def id(self) -> str:\n account_id = self.__session.client(\"sts\").get_caller_identity().get(\"Account\")\n if account_id:\n return account_id\n raise ValueError(\"get_caller_identity did not return Account\")", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> str:\n return pulumi.get(self, \"account_id\")", "def accountId():\n # save the lookup if we set the account to the environment\n if \"AWS_ACCOUNT_ID\" in os.environ:\n return os.environ[\"AWS_ACCOUNT_ID\"]\n conn = iamConn()\n funcs = [\n lambda: conn.get_user().get('get_user_response')\\\n .get('get_user_result').get('user').get('arn'),\n lambda: conn.list_roles(max_items=1).get('list_roles_response')\\\n .get('list_roles_result').get('roles')[0].get('arn'),\n ]\n for func in funcs:\n try:\n arn = func()\n break\n except (boto.exception.BotoServerError, IndexError):\n pass\n return arn.split(':')[4]", "def account_id(self):\n return self.get('/accounts')[0]['Id']", "def account_id(self):\n return self._account_id", "def account_id(self):\n return self.config.account_id", "def account_id(self):\n\n return self._account_id", "def account_id(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[str]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[str]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[str]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[str]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[str]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> str:\n return self._account_id", "def get_session(account_id, role_name, session_name):\n\n return get_session_with_arn(\n \"arn:aws:iam::{}:role/{}\".format(account_id, role_name), session_name, None\n )", "def account_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"account_id\")", "def account_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"account_id\")" ]
[ "0.716123", "0.69258696", "0.6868835", "0.6868835", "0.6868835", "0.6868835", "0.6868835", "0.6868835", "0.6868835", "0.6868835", "0.6804414", "0.67929757", "0.67919576", "0.6678467", "0.6673702", "0.661376", "0.6577138", "0.6577138", "0.6577138", "0.6577138", "0.6577138", "0.65430135", "0.6500122", "0.6495493", "0.6480545", "0.6480545", "0.6480545", "0.6480545", "0.6480545", "0.6480545" ]
0.797403
0
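A minimal sketch of the ARN-parsing step the document above relies on; the ARN here is a made-up example, and no AWS call is needed for this part:

# An IAM user ARN is colon-separated, with the account id in field index 4.
arn = "arn:aws:iam::123456789012:user/example"   # made-up example ARN
account_id = arn.split(":")[4]
assert account_id == "123456789012"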
Returns the ARN for a Lambda function given its function name.
def lambda_arn_lookup(session, lambda_name): if session is None: return None client = session.client("lambda") response = client.get_function(FunctionName=lambda_name) if response is None: return None else: return response['Configuration']['FunctionArn']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lambda_arn(region: str, first_part: str, second_part: str=None) -> str:\n client = boto3.client('lambda', region_name=region)\n response = client.list_functions()\n for x in response['Functions']:\n if second_part:\n if first_part in x['FunctionArn'] and second_part in x['FunctionArn']:\n return x['FunctionArn']\n else:\n if first_part in x['FunctionArn']:\n return x['FunctionArn']\n\n if second_part:\n raise ValueError(f\"Cannot find function '{first_part}-*-{second_part}' in region {region}\")\n else:\n raise ValueError(f\"Cannot find function '{first_part}' in region {region}\")", "def notification_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_lambda_arn\")", "def lambda_rad(self):\n InputFile = self('Meta','InputFile').decode(\"utf-8\")\n d_InputFile = dict([item.replace(' ','').split('=') for item in InputFile.splitlines() if '=' in item])\n if 'lambda' in d_InputFile:\n return float(d_InputFile['lambda'])\n else:\n return self.lambdaref", "def idp_lambda_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"idp_lambda_arn\")", "def getLambdaFasta():\n return _getAbsPath('lambdaNEB.fa')", "def _parse_lambda(text):\n text = text.split('lambda')[1]\n param, rest = text.split(':')\n param = param.strip()\n # There are three things that could terminate a lambda: an (unparenthesized)\n # comma, a new line and an (unmatched) close paren.\n term_chars = [',', '\\n', ')']\n func_text = ''\n inside_paren = 0 # an int rather than a bool to keep track of nesting\n for c in rest:\n if c in term_chars and not inside_paren:\n break\n elif c == ')': # must be inside paren\n inside_paren -= 1\n elif c == '(':\n inside_paren += 1\n func_text += c\n\n # Rename the lambda parameter to 'value' so that the resulting\n # \"description\" makes more sense.\n func_text = re.sub(r'\\b{}\\b'.format(param), 'value', func_text)\n\n return func_text", "def islambda(func):\n return getattr(func, 'func_name', False) == '<lambda>'", "def testGetLambda(self):\n self.ports.get_lambda(file_name = 'get_lambda.xml', port_ids = portsDict['port_ids'], lambdas = portsDict['lambda'])", "def get_lambda_value(lambda_node):\n return get_call_value(lambda_node.body)", "def create_lambda_role(self, name):\n role_name = f\"{name}-role\"\n iam = boto3.client(\"iam\")\n try:\n response = iam.create_role(\n RoleName=role_name,\n AssumeRolePolicyDocument=json.dumps(\n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\"Service\": \"lambda.amazonaws.com\"},\n \"Action\": \"sts:AssumeRole\",\n }\n ],\n }\n ),\n Description=\"Role for Lambda to call ECS Fargate task\",\n )\n\n role_arn = response[\"Role\"][\"Arn\"]\n\n response = iam.attach_role_policy(\n RoleName=role_name, PolicyArn=\"arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole\"\n )\n\n response = iam.attach_role_policy(\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonSageMakerFullAccess\", RoleName=role_name\n )\n\n return role_arn\n\n except iam.exceptions.EntityAlreadyExistsException:\n print(f\"Using ARN from existing role: {role_name}\")\n response = iam.get_role(RoleName=role_name)\n return response[\"Role\"][\"Arn\"]", "def lambda_notation(tokens: List[TOKEN], local_dict: DICT, global_dict: DICT):\n result: List[TOKEN] = []\n flag = False\n toknum, tokval = tokens[0]\n tokLen = len(tokens)\n\n if toknum == NAME and tokval == 'lambda':\n if tokLen == 2 or tokLen == 3 and tokens[1][0] == NEWLINE:\n # In Python 3.6.7+, inputs without a newline get 
NEWLINE added to\n # the tokens\n result.extend(tokens)\n elif tokLen > 2:\n result.extend([\n (NAME, 'Lambda'),\n (OP, '('),\n (OP, '('),\n (OP, ')'),\n (OP, ')'),\n ])\n for tokNum, tokVal in tokens[1:]:\n if tokNum == OP and tokVal == ':':\n tokVal = ','\n flag = True\n if not flag and tokNum == OP and tokVal in ('*', '**'):\n raise TokenError(\"Starred arguments in lambda not supported\")\n if flag:\n result.insert(-1, (tokNum, tokVal))\n else:\n result.insert(-2, (tokNum, tokVal))\n else:\n result.extend(tokens)\n\n return result", "def deployLambdaFunctionFromS3(name:str, iamRoleARN:str, handler:str, bucket:str, object:str, version:str=None, runtime='python3.7') -> str:\n client = boto3.client('lambda')\n try:\n code = {\n 'S3Bucket': bucket,\n 'S3Key': object,\n }\n if version:\n code['S3ObjectVersion'] = version\n response = client.create_function(\n Role = iamRoleARN,\n FunctionName = name,\n Runtime = runtime,\n Handler = handler,\n Code = code\n )\n except ClientError as e:\n raise LambdaDeploymentException(e)\n if not 'FunctionArn' in response:\n raise LambdaDeploymentException('\\\"FunctionArn\\\" is not contained in the response. ', repr(response))\n return response['FunctionArn']", "def lambda_handler(event, context):\n name: str = event['name']\n return f'Hi {name}!'", "def get_lambda_apigateway_default_role(self, function_name, lmdo_lambda=False):\n if lmdo_lambda:\n function_name = self.get_lmdo_format_name(function_name)\n\n return self.create_apigateway_lambda_role(self.get_apigateway_lambda_role_name(function_name))", "def invoke_lambda(lambda_name, lambda_payload):\n try:\n LOGGER.debug(f\"Sending request to '{lambda_name}' method: {lambda_payload}\")\n client = boto3.client('lambda')\n invoke_response = client.invoke(FunctionName=lambda_name,\n InvocationType=\"RequestResponse\",\n Payload=json.dumps(lambda_payload))\n response = json.loads(invoke_response['Payload'].read())\n except Exception as ex:\n LOGGER.debug(f\"Error encountered while invoking lambda method '{lambda_name}': {repr(ex)}\")\n\n return response", "def invoke_arn(self) -> str:\n return pulumi.get(self, \"invoke_arn\")", "def test_lambda(n):\n return [lambda v=i: v for i in range(n)]", "def n_lambda(self):\n return self.b()", "def get_lambda(model):\n best_lambdas = [1000.0, 0.001, 100.0, 0.001, 100.0, 100.0, 0.001, 100.0]\n lambda_ = best_lambdas[model]\n return lambda_", "def lambda_method(self,t): \n return 5*math.sin(2*math.pi*1*t) # I don't see the value of 1 here but this is how lamda is defined in the exercise.", "def get_lambdas(self, integrands=\"harm_to_inter\"):\n if integrands == \"harm_to_inter\":\n vertex = self.graph.build_lambdas_harm_to_inter.output\n elif integrands == \"inter_to_vac\":\n vertex = self.graph.build_lambdas_inter_to_vac.output\n else:\n raise KeyError(\n \"The value of `integrands` can only be 'harm_to_inter' or 'inter_to_vac'\"\n )\n return vertex.lambda_pairs[-1][:, 0]", "def get_lambdas(self):\n return self.graph.build_lambdas.output.lambda_pairs[-1][:, 0]", "def GenerateSpecialFunction(n):\n return eval('lambda a: %s' % GenerateSpecialExpression(n))", "def arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"arn\")", "def _generateLambda(self, string):\n derivation = self.fieldNames.sub(r'parent.getSampleValue(stats, \"\\1\")',\n string)\n return lambda stats, parent: eval(derivation)", "def deployLambdaFunctionFromZip(name:str, iamRoleARN:str, handler:str, package:Path, runtime='python3.7') -> str:\n packageContent = package.read_bytes()\n client = 
boto3.client('lambda')\n try:\n response = client.create_function(\n Role = iamRoleARN,\n FunctionName = name,\n Runtime = runtime,\n Handler = handler,\n Code = {\n 'ZipFile': packageContent\n }\n )\n except ClientError as e:\n raise LambdaDeploymentException(e)\n if not 'FunctionArn' in response:\n raise LambdaDeploymentException('\\\"FunctionArn\\\" is not contained in the response. ', repr(response))\n return response['FunctionArn']", "def segment_lambda_from_arc_length(self, arc_length):\n\n remaining_arc_length = arc_length\n for segment_index, segment in enumerate(self.segments):\n\n remaining_arc_length -= segment.length\n if remaining_arc_length <= 0:\n break\n\n segment_arc_length = remaining_arc_length + segment.length\n lambda_parameter = segment.lambda_from_arc_length(segment_arc_length)\n\n return segment, segment_index, lambda_parameter", "def lambda_handler(event):\r\n return 'Hello ' + event['queryParams']['name']", "def lambda_handler(event, context):\n\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)", "def n_ary(n, f):\n if n < 0:\n raise ValueError(\"First argument to n_ary must be a non-negative integer\")\n\n args1, args2 = generate_args(inspect.getfullargspec(f), n)\n\n return eval(\"lambda \" + args1 + \": f(\" + args2 + \")\", {\"f\": f})" ]
[ "0.62491465", "0.6024986", "0.59035015", "0.5837769", "0.5678364", "0.56479216", "0.55890566", "0.54492086", "0.5431791", "0.5426185", "0.53477263", "0.52988434", "0.52703404", "0.5237456", "0.5228843", "0.5223322", "0.52192754", "0.52119076", "0.5209872", "0.52070284", "0.5197417", "0.5191336", "0.51728845", "0.5155436", "0.5140684", "0.5094416", "0.5082463", "0.50748926", "0.5046563", "0.49880672" ]
0.7466593
0
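A small sketch reducing the lookup above to the field access it performs; the response dict is hand-written to mirror the shape the code indexes (Configuration, then FunctionArn), and the ARN value is made up:

# Stand-in for the boto3 lambda get_function response, trimmed to the field that matters.
response = {
    "Configuration": {
        "FunctionArn": "arn:aws:lambda:us-east-1:123456789012:function:my-func"
    }
}
lambda_arn = response["Configuration"]["FunctionArn"]
assert lambda_arn.startswith("arn:aws:lambda:")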
Use SHA1 hash to hash a string, convert it to an integer and shift right (160 - m) places.
def chord_hash(input_string): h = hashlib.sha1() # 160 bit string encoded_data = input_string.encode('utf-8') h.update(encoded_data) hex_string = h.hexdigest() hex_value = int(hex_string, 16) hash_integer_value = hex_value >> (160 - m) return hash_integer_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sha1(self, s):\n\t\tself.sha1_calls += 1\n\t\treturn int(hashlib.sha1(s).hexdigest(), 16)", "def computeHash(string):\n\tif isBytes(string):\n\t\tstring = string.decode(\"latin-1\")\n\thash_ = 63689\n\tfor char in string:\n\t\thash_ = hash_ * 378551 + ord(char)\n\treturn hash_ % 65536", "def strhash(s: str) -> int:\n h = hashlib.md5(s.encode('utf-8'))\n h = int(h.hexdigest(), base=16)\n return h", "def smallHash(number, text):\n m = hashlib.md5()\n m.update(bytes(number))\n m.update(text.encode('utf-8'))\n return int(m.hexdigest(), 16) % 1000000", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s)\n return h.hexdigest()", "def myHash(string, base=91, mod=1000000321):\n value = 0\n for pos, elem in enumerate(string[::-1]): # считаем значение полинома\n value += ord(elem) * base**pos # в последней задаче сделано с помощью массива (динамика)\n return value % mod", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode())\n return h.hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def hex_hash(s):\n if not s:\n return '0'\n s = s.encode('utf-8')\n return '{:x}'.format(adler32(s) & 0xffffffff)", "def hash(password):\n result = hashlib.sha1(password.encode())\n # return a hexadecimal digits\n return result.hexdigest()", "def get_hash_code(s):\n h = 0\n n = len(s)\n for i, c in enumerate(s):\n h = h + ord(c) * 31 ** (n - 1 - i)\n return StrUtil.convert_4_bytes(h)", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def hashing_info(string):#KEY HASHING FUNCTION\n nodeInfo = string.encode('utf-8')\n\n #md5 -> 2^7 = 128 bits\n hash_object = hashlib.md5()\n hash_object.update(nodeInfo)\n\n tmp = hash_object.hexdigest()\n tmp = int(tmp,16)\n\n result = tmp >> (128-16)\n return result", "def hash_string(to_hash):\n\n chars = string.printable\n\n hashed = \"\"\n\n total = 1\n\n counter = 1\n\n for letter in to_hash:\n\n total *= (chars.index(letter) * counter * len(to_hash)*13)\n\n counter += 1\n\n if counter%3 == 0:\n\n total *= total\n\n total = str(total)[:30]\n\n temp_int = \"\"\n\n for i in range(len(total)):\n\n temp_int += total[i]\n\n if i % 2 != 0:\n\n hashed += chars[int(temp_int)]\n\n temp_int = \"\"\n\n return hashed", "def get_hash(hash_function, x: str):\n hash_function.update(x.encode())\n return int.from_bytes(hash_function.digest(), byteorder=\"big\")", "def hash(plainString):\n result = plainString\n for i in range(0,12):\n result = hashHelp(result)\n return result", "def keyhash(string):\n return hashlib.sha1(string.encode('utf-8')).hexdigest()", "def customHashFunc(str):\n return sum(ord(chr) for chr in str)%128", "def hash(self, text):\n hashval = 0\n for i in xrange(0, len(text)):\n hashval += ord(text[i])**i\n return hashval", "def sha1(s: str) -> str:\n return hashlib.sha1(s.encode()).hexdigest()", "def concatHash(h1: int, h2: int, len2: int, mod=10**11 + 7, base=1313131) -> int:\r\n return (h1 * pow(base, len2, mod) + h2) % mod", "def hash64bits(*args):\n # 64 bits hexdigest\n h = hashlib.sha1(bytes(repr(args), \"utf-8\")).hexdigest()[:16]\n # Convert to an integer and return\n return int(h, 16)", "def hash(string):\n hs = 0\n for s in string:\n hs += ord(s)\n return hs", "def hashstring(astring, tablesize):\n \n sum = 0\n for pos in range(len(astring)):\n # to account for anagrams, we give weightage to positions of the letters to give different hash values\n sum = sum + ord(astring[pos]) * (pos + 1)\n \n return sum % 
tablesize", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def str_sha(raw_sha):\n return hexlify(raw_sha)[:12]", "def hash_int(c, hash_length):\n if isinstance(c, float):\n if numpy.isnan(c):\n return c\n else:\n raise ValueError(f\"numpy.nan expected, not {c}\")\n else:\n b = struct.pack(\"i\", c)\n m = hashlib.sha256()\n m.update(b)\n r = m.hexdigest()\n if len(r) >= hash_length:\n r = r[:hash_length]\n return int(r, 16) % (10 ** 8)", "def hash_function(s):\n\n # O(n) over the key length\n # O(1) over the HASH_DATA_SIZE\n\n bytes_list = s.encode()\n\n total = 0\n\n\n for b in bytes_list: # O(n) over the length of the key\n total += b\n\n\n total &= 0xffffffff # 32 bit (8 f's)\n\n return total", "def calculate_hash(stuff):\n\tsha1 = hashlib.sha1()\n\tsha1.update(stuff)\n\treturn sha1.hexdigest()" ]
[ "0.7196707", "0.71366036", "0.69533736", "0.6906234", "0.6869732", "0.6842359", "0.68278766", "0.6794289", "0.6765675", "0.6747291", "0.6716161", "0.65964735", "0.6587518", "0.6550875", "0.65399194", "0.6535304", "0.6471856", "0.6450568", "0.6412695", "0.6410531", "0.6410033", "0.6406109", "0.6392872", "0.63656497", "0.6322104", "0.63178796", "0.6314481", "0.6303835", "0.62797403", "0.6262979" ]
0.7221014
0
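A self-contained sketch of the Chord-style key truncation described above; m = 16 is an illustrative choice, since the record treats m as a constant defined elsewhere:

import hashlib

m = 16  # illustrative ring-size exponent; assumed, not taken from the record

def chord_id(s):
    # SHA-1 yields a 160-bit digest; keeping only the top m bits maps ids onto a 2**m ring.
    digest = int(hashlib.sha1(s.encode("utf-8")).hexdigest(), 16)
    return digest >> (160 - m)

assert 0 <= chord_id("node-1") < 2 ** m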
Checks if a given value lies in the range start to end, with options to include or exclude the start and/or end of the range; the range may wrap around the ring boundary at 2**m - 1.
def is_between(value, start, end, including_start=False, including_end=False): if not including_start and not including_end: # not include both start and end if (start < value < end): return True elif (start > end) and (start < value <= (2**m - 1) or 0 <= value < end): return True elif (start == end) and (value != start): return True return False elif not including_start and including_end: # include end but not the start if value == end: return True elif (start < value <= end): return True elif (start > end) and ((start < value <= (2**m - 1)) or (0 <= value <= end)): return True elif (start == end) and (value != start): return True return False elif including_start and not including_end: # include start but not the end if value == start: return True elif (start <= value < end): return True elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value < end): return True elif (start == end) and (value != end): return False return False else: # include both start and end if (start <= value <= end): return True elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value <= end): return True elif start == end: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_range(start, end, x):\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "def check_ranges(ranges, value):\n for fromto in ranges:\n start, end = fromto.split('-')\n if int(value) in range(int(start), int(end) + 1):\n return True\n # else:\n # print('%s is not between %s and %s' % (value, start, end))\n return False", "def IsInRange(self, id, start, isStartInclusive, end, isEndInclusive):\r\n if isStartInclusive == False:\r\n start = (start + 1) % NODES\r\n if isEndInclusive == True:\r\n end = (end + 1) % NODES\r\n allRanges = []\r\n if(start < end):\r\n allRanges.append(range(start, end))\r\n else:\r\n allRanges.append(range(start, NODES))\r\n allRanges.append(range(0, end))\r\n for r in allRanges:\r\n if id in r:\r\n return True\r\n return False", "def isInRange(self,section,option,testval):\n \"\"\" us to test if 15 is in range defined as e.g. \"1-10,12,16-19\" \"\"\"\n value=ConfigParser.SafeConfigParser.get(self,section,option)\n value=value.strip('\"')\n elems=value.split(\",\")\n inrange=False\n if elems:\n for elem in elems:\n if(elem.find(\"-\")):\n # it's a range\n limits=elem.split(\"-\",2)\n notlower=limits.pop()\n nothigher=limits.pop()\n if(testval >= notlower and testval <= nothigher)\n inrange=True\n else\n # it's just 1 number\n if(elem == testval):\n inrange=True\n return inrange", "def range_function(num, start_range, end_range):\n if num > start_range and num < end_range:\n print(num, \"is in the range.\\n\")\n elif num < start_range or num > end_range:\n print(num, \"is not in the range.\\n\")", "def in_range(value, ranges):\n\n # is there anythin to check?\n if not ranges or not len(ranges):\n return False\n\n if not isinstance(ranges[0], list):\n ranges = [ranges]\n\n for r in ranges:\n if value >= r[0] and value <= r[1]:\n return True\n\n return False", "def _in_range_op(spec):", "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def in_range(val, lst):\n if len(lst) == 2:\n return val>lst[0] and val < lst[1]\n elif len(lst) == 1:\n return True\n return False", "def date_in_range(start, end, x):\n\n if start <= end:\n return start <= x <= end\n else:\n return start <= x or x <= end", "def _range_contains(self, a, b):\n\t\treturn b[0] >= a[0] and b[-1] <= a[-1]", "def _in_interval(value, low, up):\n if low <= value <= up:\n return True\n else:\n return False", "def check_in_range(value, lim_1, lim_2):\n lo_lim = min(lim_1, lim_2)\n hi_lim = max(lim_1, lim_2)\n \n if (abs(value) > abs(lo_lim)) and (abs(value) < abs(hi_lim)):\n return True\n else:\n return False", "def in_range(cls, lhs, rhs):\n return rhs[0] <= lhs <= rhs[1]", "def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)", "def __verify_range(value, minimum, maximum):\n if value in range(minimum, maximum):\n return True\n else:\n return False", "def is_in_interval(self, low, high, value):\n return low <= value and value <= high", "def make_sure_between(val, start=None, end=None):\n if start is not None:\n if val < start:\n return start\n if end is not None:\n if val > end:\n return end\n return val", "def check_interval_bounds(begin, end):\n if begin.get_midpoint() >= end.get_midpoint():\n return False\n\n if begin.get_radius() is not None and end.get_radius() is not None:\n if begin.get_midpoint() - begin.get_radius() > \\\n end.get_midpoint() - end.get_radius():\n return False\n\n return True", "def in_range(x, a, b):\n return (x >= a and x <= b) or (x <= a and x >= b)", "def 
in_interval(value: float, s: float, e: float) -> bool:\n lower = value >= s\n upper = value <= e\n return lower and upper", "def constraint_clause_in_range_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n values = getattr(presentation, field.name)\n if isinstance(values, list):\n # Make sure list has exactly two elements\n if len(values) == 2:\n lower, upper = values\n the_type = presentation._get_type(context)\n\n # Lower bound must be coercible\n lower = coerce_value(context, presentation, the_type, None, None, lower, field.name)\n\n if upper != 'UNBOUNDED':\n # Upper bound be coercible\n upper = coerce_value(context, presentation, the_type, None, None, upper, field.name)\n\n # Second \"in_range\" value must be greater or equal than first\n if (lower is not None) and (upper is not None) and (lower >= upper):\n context.validation.report(\n u'upper bound of \"in_range\" constraint is not greater than the lower bound'\n u' in \"{0}\": {1} <= {2}'\n .format(presentation._container._fullname, safe_repr(lower),\n safe_repr(upper)),\n locator=presentation._locator, level=Issue.FIELD)\n else:\n context.validation.report(\n u'constraint \"{0}\" is not a list of exactly 2 elements in \"{1}\": {2}'\n .format(field.name, presentation._fullname, safe_repr(values)),\n locator=presentation._get_child_locator(field.name), level=Issue.FIELD)", "def isInRange(val, minv, maxv):\n\treturn val >= minv and val <= maxv", "def check_optional_range(specific=None, begin=None, end=None):\n if specific and (begin and end):\n raise ValueError('Cannot pass both a range and specific')\n\n if (begin and not end) or (end and not begin):\n raise ValueError(\"Must pass both begin and end for ranges\")", "def time_in_range(start, end, time):\n if start <= end:\n return start <= time <= end\n else:\n return start <= time or time <= end", "def icd9_in_code_range(val, code_ranges):\n return any(val <= code_range[1] and val >= code_range[0] for code_range in code_ranges)", "def _check_paramrange(value, parameter):\n\n if parameter not in PARAMETER_RANGES.keys():\n raise ValueError('parameter {} not found in dictonary {}'\n .format(parameter, PARAMETER_RANGES))\n ranges = PARAMETER_RANGES[parameter]\n lo = ranges[0]\n hi = ranges[1]\n INRANGE = True\n if not (lo <= value < hi):\n INRANGE = False\n\n return INRANGE, lo, hi", "def date_range(self, start, end, check_date):\n if start <= end:\n return start <= check_date <= end\n else:\n return start <= check_date or check_date <= end", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def overlaps(self, begin, end=None):\n if end is not None:\n # An overlap means that some C exists that is inside both ranges:\n # begin <= C < end\n # and \n # self.begin <= C < self.end\n # See https://stackoverflow.com/questions/3269434/whats-the-most-efficient-way-to-test-two-integer-ranges-for-overlap/3269471#3269471\n return begin < self.end and end > self.begin\n try:\n return self.overlaps(begin.begin, begin.end)\n except:\n return self.contains_point(begin)" ]
[ "0.747743", "0.7280649", "0.70743567", "0.69564146", "0.6774148", "0.6771647", "0.6761343", "0.67048234", "0.66715246", "0.6633946", "0.65986615", "0.6595173", "0.6571696", "0.6535455", "0.6530302", "0.6497032", "0.6490265", "0.64786255", "0.64565444", "0.6445503", "0.6378988", "0.637645", "0.63640904", "0.6359017", "0.6353603", "0.63388073", "0.63282216", "0.6256328", "0.6252755", "0.6248617" ]
0.7582872
0
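A compact way to express the (start, end] case that the document above handles branch by branch; this sketch treats start == end as the whole ring, which matches that record's include-end branch, and it does not try to reproduce the other flag combinations:

def in_half_open(value, start, end, m=16):
    # Clockwise interval (start, end] on a ring of size 2**m (m = 16 is illustrative).
    size = 2 ** m
    if start == end:
        return True  # whole ring
    return value != start and (value - start) % size <= (end - start) % size

assert in_half_open(65000, 60000, 100)       # wraps past 2**16 - 1
assert not in_half_open(200, 60000, 100)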
Remove a connection from a node, given the id of the other node in the connection.
def remove_connection_by_id(self, node_id: int): for conn in self.get_connections(): if conn.other.node_id == node_id: self.remove_connection(conn) break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_connection(self, conn: Connection):\n self.__connections.remove(conn)\n # now from other:\n other = conn.other\n for others_conn in other.get_connections():\n if others_conn.other.node_id == self.node_id:\n other.__connections.remove(others_conn)\n break", "def del_node (self, id):\n raise NotImplementedError", "def remove(self, node):\r\n\r\n # Allow node name, get the real node object\r\n if isinstance(node, basestring):\r\n name = node\r\n node = self.nodes[name]\r\n else:\r\n name = self.node_name(node)\r\n\r\n del self.nodes[name]\r\n\r\n remove = [c for c in self.connections if c[0] == node or c[1] == node]\r\n\r\n for connection in remove:\r\n self.connections.remove(connection)", "def remove_connection(self, source, target):\r\n\r\n connection = (self.coalesce_node(source), self.coalesce_node(target))\r\n self.connections.discard(connection)", "def remove_node(self, id):\r\n\t\tif id in self._nodes:\r\n\t\t\tnode = self._nodes[id]\r\n\t\t\tedges = node.edges()\r\n\t\t\t# ugly can maybe fix it up with sets\r\n\t\t\tfor edge in edges:\r\n\t\t\t\tlabel = edge.label\r\n\t\t\t\tdel edge.start_node._edges[label]\r\n\t\t\t\tdel edge.end_node._edges[label]\r\n\t\t\t\tdel self._edges[edge.id]\r\n\t\t\tdel self._nodes[id]\r\n\t\telse:\r\n\t\t\t# return a real exception someday\r\n\t\t\tprint('Error: Cannot remove node since id does not exist')", "def remove_node(self, node_id):\n try: \n del self._nodes[node_id] \n del self._inc[node_id]\n except KeyError:\n return \n for arcs_list in self._inc.values():\n record = arcs_list.get_first_record()\n while record is not None:\n arc = record.element\n if arc._head is node_id: arcs_list.delete_record(record)\n record = record._next", "def remove_connection(self, var1, var2):\n conn, swap = self._find_connection_element(var1, var2)\n if not conn:\n raise ModelModificationError(\"Cannot remove non-existent connection.\")\n if swap:\n var1, var2 = var2, var1\n # Find the relevant map_variables element\n mapv = conn.xml_xpath(u'cml:map_variables[@variable_1=\"%s\" and @variable_2=\"%s\"]'\n % (var1.name, var2.name))\n if not mapv:\n raise ModelModificationError(\"Cannot remove non-existent connection.\")\n conn.xml_remove_child(mapv[0])\n if not hasattr(conn, u'map_variables'):\n conn.xml_parent.xml_remove_child(conn)", "def removeConnection(tagA, tagB): #@NoSelf", "def remove_node():\n\ttry:\n\t\tnetwork.remove_connection()\n\texcept ValueError as err:\n\t\tfeedback.config(text=err)", "def remove_node(self, node):\n\t\tnode.close()\n\t\taddress = (node.server_ip, node.server_port)\n\t\tself.nodes.pop(address)", "def removeNode(self, node):", "def remove_node(self, node):\n self.nodes.pop(self.nodes.index(node))\n node1 = node.neighbour1\n node2 = node.neighbour2\n node1.neighbour2 = node2\n node2.neighbour1 = node1", "def removeNeighbor(self, neighborID):", "def remove(self, connection):\n\n net_tuple = self.read_nodestate(0)\n\n # Tuples are immutable; convert it to a list.\n network_list = list(net_tuple)\n\n # Identify and remove said connection\n try:\n index = network_list.index(connection)\n network_list.pop(index)\n\n # Connection not in network tuple, or socket is [closed]\n except ValueError:\n log_msg = str(\"Not removing non-existent connection: \"+str(connection))\n Primitives.log(log_msg, in_log_level=\"Warning\")\n\n # Update the network tuple with the new one\n self.write_nodestate(nodeState, 0, tuple(network_list))", "def remove_node(self, node_id):\n try: \n del self._nodes[node_id] \n del self._inc[node_id] \n except 
KeyError:\n return \n for arcs_set in self._inc.values():\n arcs_to_remove = Set()\n for arc in arcs_set:\n if arc._head is node_id: arcs_to_remove.add(arc)\n arcs_set.difference_update(arcs_to_remove)", "def remove(self, node):\r\n\r\n for n, conns in self._graph.items(): # python3: items(); python2: iteritems()\r\n try:\r\n conns.remove(node)\r\n except KeyError:\r\n pass\r\n try:\r\n del self._graph[node]\r\n except KeyError:\r\n pass", "def disconnectFrom( self, node, cls = None ):\n count = 0\n for connection in self.connections(cls):\n if ( connection.inputNode() == node or \\\n connection.outputNode() == node ):\n connection.remove()\n return count", "def detach_node(self, node_id, connection=None):\n\n connection = connection or self.engine.connect()\n\n connection.execute(\n self.paths.delete().where(\n self.paths.c.descendant.in_(\n select([self.paths.c.descendant]).where(\n self.paths.c.ancestor == node_id\n ))\n ).where(\n self.paths.c.ancestor.in_(\n select([self.paths.c.ancestor]).where(\n self.paths.c.descendant == node_id\n ).where(\n self.paths.c.ancestor != self.paths.c.descendant\n ))\n )\n )", "def remove_link(self,link,verbose=False):\n label, child = link\n self.outgoing.remove((label,child))\n child.incoming.remove((label,self))\n if verbose: print('removed', label, self.nodeid, child.nodeid)", "def remove_edges(self, node: NodeKey) -> Edge:", "def remove(self,node,verbose=False):\n for label,parent in node.incoming:\n parent.outgoing.remove((label,node))\n for label,child in node.outgoing:\n child.incoming.remove((label,node))\n self.pop(node.nodeid)\n for x,y in copy(self.undirected):\n if x == node or y == node:\n self.undirected.remove((x,y))\n if self.root == node:\n self.root = None\n if verbose: print('removed',node)", "def remove_node(self, node):\n # if the node is a part of the graph\n if node.get_name() in self.get_node_names():\n for edge in node.get_incident_edges(): # for every edge incident to the input node\n other_node = edge.get_other_node(node.get_name()) # get the other incident node object\n if other_node.get_name() in self.get_node_names(): # if the other node is a part of the graph\n self.remove_edge(tuple((node, other_node))) # remove the edge\n self.set_nodeset(\n set({\n vertex\n for vertex in self.get_nodeset()\n if not vertex.get_name().__eq__(node.get_name())\n })\n ) # remove the node from the graph's nodeset", "def delete_node(self, node_id, connection=None):\n\n connection = connection or self.engine.connect()\n\n # delete the paths associated with this node\n connection.execute(\n self.paths.delete().where(\n self.paths.c.descendant.in_(\n select(\n [self.paths.c.descendant]\n ).where(\n self.paths.c.ancestor == node_id\n ))\n )\n )\n\n # delete the node\n connection.execute(\n self.nodes.delete().where(\n self.nodes.c.id == node_id\n )\n )", "def DisconnectByEdgeInNetwork(self, edge):\n try:\n self.connections.remove((edge.node1, edge.node2))\n edge.node1.removeNeighbour(edge.node2.index)\n except Exception as exc:\n print(\"Exception {} occured when trying to disconnect the edge\".format(exc))", "def delete_node(self, node_id):\n assert node_id < len(self.nodes)\n nodes = self.nodes[:]\n nodes.pop(node_id)\n\n transitions = {}\n for nid, next_ids in self.transitions.iteritems():\n if nid == node_id:\n continue\n elif nid > node_id:\n nid = nid - 1\n next_ids = [x for x in next_ids if x != node_id]\n for i in range(len(next_ids)):\n if next_ids[i] > node_id:\n next_ids[i] = next_ids[i] - 1\n transitions[nid] = next_ids\n return 
Network(nodes, transitions)", "def delete_node(ugraph, node):\r\n neighbors = ugraph[node]\r\n ugraph.pop(node)\r\n for neighbor in neighbors:\r\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)" ]
[ "0.75725925", "0.7457941", "0.7354663", "0.7288606", "0.72552603", "0.70799124", "0.70066655", "0.6946529", "0.69460434", "0.6903269", "0.68844795", "0.68477476", "0.6784337", "0.6707545", "0.668367", "0.66627955", "0.6633565", "0.65117043", "0.649248", "0.64896923", "0.64595276", "0.6428991", "0.6423886", "0.64148027", "0.6397178", "0.6387411", "0.6375083", "0.6375083", "0.6375083", "0.6375083" ]
0.8295772
0
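A stripped-down sketch of the same pattern, dropping the connection whose peer has a given id; the class names are stand-ins, and the sketch only drops the link from this node's own connection list:

class Conn:
    def __init__(self, other):
        self.other = other  # peer node at the far end of this connection

class Node:
    def __init__(self, node_id):
        self.node_id = node_id
        self.connections = []

    def remove_connection_by_id(self, node_id):
        # Keep every connection whose peer is not the node being removed.
        self.connections = [c for c in self.connections if c.other.node_id != node_id]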
Generate a key of the specified size using random bytes.
def generate_key(self, size): key = bytearray() for i in range(0,size): random_byte = ord(os.urandom(1)) key.append(random_byte) return key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_key():\n return get_random_bytes(KEY_SIZE)", "def random_key(size):\n return ''.join(random.choice(string.letters) for _ in range(size))", "def get_random_secret_key(cls, size=None):\n if not size:\n size = cls.default_secret_key_size\n return os.urandom(size)", "def genKey(length=32):\r\n return os.urandom(length)", "def get_random_key(self, size=16):\n key = ''.join([random.choice(Characters.get_characters()) for i in range(size)])\n return self.__strengthen_key(key)", "def Generate(size=keyinfo.HMAC_SHA1.default_size):\n key_bytes = util.RandBytes(size / 8)\n key_string = util.Encode(key_bytes)\n return HmacKey(key_string, size)", "def generate_key(self)->bytes:\n return os.urandom(32)", "def generate_key(self, filename, size):\n if size != 16 and size != 24 and size != 32:\n raise ValueError(\"AES key size not valid.\")\n key = os.urandom(size)\n self.export_key(filename, key)\n return key", "def Generate(size=keyinfo.HMAC_SHA1.default_size):\n key_bytes = util.RandBytes(size // 8)\n key_string = util.Base64WSEncode(key_bytes)\n return HmacKey(key_string, size)", "def Generate(size=keyinfo.AES.default_size):\n key_bytes = util.RandBytes(size / 8)\n key_string = util.Encode(key_bytes)\n hmac_key = HmacKey.Generate() # use default HMAC-SHA1 key size\n return AesKey(key_string, hmac_key, size)", "def Generate(size=keyinfo.AES.default_size):\n key_bytes = util.RandBytes(size // 8)\n key_string = util.Base64WSEncode(key_bytes)\n hmac_key = HmacKey.Generate() # use default HMAC-SHA1 key size\n return AesKey(key_string, hmac_key, size)", "def generate_random_key():\n return '%030x' % (random.randrange(256**15),)", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def generate_key():\n\tkey = [ randint(0,255) for i in range(16) ]\n\treturn bytes( key )", "def unique_key(size):\n # Charset to create keys from\n charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n l = len(charset)-1\n bad_key = 1\n\n # Get a new seed\n ran.seed()\n\n while(bad_key > 0):\n # Create key\n key = list()\n for i in range(size):\n r = ran.randint(0, l)\n key.append(charset[r])\n key = \"\".join(key)\n\n # Check key\n bad_key = check_key(key)\n\n return(key)", "def __generate_key(length):\n if length % 2 != 0:\n raise ValueError(\"'length' must be a multiple of 2\")\n length_bytes = int(length / 2) # length of key in bytes\n key_bytes = os.urandom(length_bytes)\n return binascii.hexlify(key_bytes).decode()", "def generate_key(length):\n return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))", "def generate_preshare_key(size_of_psk=16):\n preshare_key = \"\"\n psk_source = string.ascii_letters + string.digits\n for i in range(size_of_psk):\n preshare_key += secrets.choice(psk_source)\n \n char_list = list(preshare_key)\n secrets.SystemRandom().shuffle(char_list)\n preshare_key = ''.join(char_list)\n\n return preshare_key", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def generate_key(random=random.SystemRandom()):\n poly = 0\n while not is_acceptable_multiplier(poly):\n poly = random.getrandbits(61)\n oh = []\n for _ in range(2 * BLOCK_SIZE + TWISTING_COUNT):\n u64 = None\n while u64 is None or u64 in oh:\n u64 = random.getrandbits(64)\n oh.append(u64)\n return UmashKey(poly, oh)", "def create_state_key(size: int = 15) -> str:\n # https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits\n return 
\"\".join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(size))", "def generate_randomkey(length):\n chars = string.letters + string.digits\n return ''.join([choice(chars) for i in range(length)])", "def generate_random_key(self):\n self.key = ''.join(choice(ascii_letters + digits) for i in range(300))", "def generate_key():\n key = ''.join([chr(random.randint(0, 0x10)) for _ in range(block_size)])\n return AES.new(second_key, AES.MODE_ECB).encrypt(pad((key.encode('ascii')), block_size))", "def _generateSecretKey():\n return ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(20))", "def generate_private_key():\n\treturn binascii.hexlify(os.urandom(32)).decode('utf-8').upper()", "def generate_salt(size):\n return hexlify(urandom(size)).decode()", "def GenerateRandomHexKey(length=_RANDOM_BYTE_LENGTH):\n # After encoded in hex, the length doubles.\n return os.urandom(length).encode('hex')", "def random_bytes(N):\n return Crypto.Random.get_random_bytes(N)", "def generate_key(length, choices=None):\n if choices is None:\n choices = (\n string.ascii_lowercase +\n string.ascii_uppercase +\n string.digits\n )\n\n return ''.join(random.choice(choices) for x in xrange(length))" ]
[ "0.86057556", "0.80857086", "0.8032245", "0.8031311", "0.8002937", "0.79636395", "0.7943351", "0.790454", "0.7867546", "0.7851878", "0.77818877", "0.7591018", "0.7578894", "0.75569624", "0.7556202", "0.74411345", "0.74128795", "0.73925257", "0.7379331", "0.72996837", "0.72394043", "0.7227982", "0.7225126", "0.706671", "0.7058441", "0.7005046", "0.7003181", "0.7002432", "0.6983597", "0.69725055" ]
0.8874437
0
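The byte-by-byte loop above can be collapsed, since os.urandom already returns the requested number of cryptographically random bytes in a single call; a minimal equivalent sketch:

import os

def generate_key(size):
    # os.urandom(size) yields `size` random bytes directly.
    return bytearray(os.urandom(size))

assert len(generate_key(16)) == 16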
'Encrypt' the password with the key. Reverse key bytes and XOR with password bytes. Very low security but a bit obfuscated.
def mix_keys(self, password, key): rev_key = list(reversed(key)) # Reverse bytes result = bytearray() for i in range(0, len(password)): xored = password[i] ^ rev_key[i] # Mix each byte result.append(xored) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xor_encode(data, key):\n if not data:\n return \"\"\n if not key:\n raise exceptions.EncryptError\n return binascii.hexlify(\n ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(data, key)).encode(\"utf-8\")).decode(\"utf-8\")", "def encrypt_password(pass_to_encrypt):\n\n temp_key = get_crypt_key()\n tk = Fernet(temp_key)\n\n pass_to_encrypt = pass_to_encrypt.encode(\"UTF-8\")\n return tk.encrypt(pass_to_encrypt)", "def encrypt(plaintext: str, key: str) -> str:\n return \"\".join(chr(ord(p) ^ ord(k)) for (p, k) in zip(plaintext, key))", "def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data", "def b64_xor_crypt(self, data, key, mode):\n if mode == 'dec':\n data = base64.b64decode(data)\n elif mode == 'enc':\n data = base64.b64encode(data)\n data = data.decode()\n\n return ''.join(chr(ord(str(a)) ^ ord(str(b))) for (a, b) in zip(data, cycle(key)))", "def xor_crypt(data: Union[bytes, bytearray], key: Union[int, bytes, bytearray]) -> bytes:\n\n if not isinstance(data, (bytes, bytearray)):\n raise TypeError(\"'data' must be bytes-like.\")\n\n if isinstance(key, int):\n if not (0 < key < 256): # 0 changes nothing\n raise ValueError(\"A integer key must be in range(1, 256).\")\n return bytes([c^key for c in data])\n elif isinstance(key, (bytes, bytearray)) and key:\n return bytes([c^k for c, k in zip(data, cycle(key))])\n else:\n raise TypeError(\"'key' must be an integer or non-empty bytes-like object.\")", "def passwd_encryption(self):\n key = Fernet.generate_key()\n cipher_suite = Fernet(key)\n bin_passwd = bytes(self.password, 'utf-8')\n ciphered_text = cipher_suite.encrypt(bin_passwd)\n with open(self.pass_path, 'wb') as pass_output:\n pass_output.write(ciphered_text)\n with open(self.key_path, 'wb') as key_output:\n key_output.write(key)", "def xor(plaintext, key):\n # NOTE: this will return a string of length equal to the shorter of the two lengths\n \n # Iterate through the strings, creating a list of bytes\n arr = [chr(a ^ b) for (a,b) in zip(plaintext, key)]\n bstr = b\"\" # Initialize a byte string\n for byte in arr: # For each byte in the list,\n bstr += bytes([ord(byte)]) # Convert the byte in the list to a byte string\n return bstr", "def func(plaintext, key):\n ciphertext = xor(plaintext, key)\n return ciphertext", "def encrypt_password(password: str) -> str:\n return pwd_context.hash(password)", "def recover_encrypt_pass(self):\n with open(self.key_path) as input_file:\n key = input_file.readlines()\n cipher_suite = Fernet(key[0])\n bin_passwd = bytes(self.password, 'utf-8')\n ciphered_text = cipher_suite.encrypt(bin_passwd)\n return ciphered_text", "def encrypt(key, plaintext):\n data = fk(keyGen(key)[0], ip(plaintext))\n return fp(fk(keyGen(key)[1], swapNibbles(data)))", "def crypt(key, data, iv):\n return xtea.crypt(key, data, iv)", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def encrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_encrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def _encrypt(self, b):\n from cryptography.hazmat.primitives.ciphers \\\n import Cipher, algorithms, modes\n from cryptography.hazmat.backends import default_backend\n\n backend = default_backend()\n cypher = Cipher(\n algorithms.AES(self.__key), modes.CBC(self.__iv), backend=backend)\n encryptor = cypher.encryptor()\n pad_length = 16 - (len(b) % 16)\n b += 
bytes([pad_length]) * pad_length\n result = encryptor.update(b) + encryptor.finalize()\n return result", "def repeating_key_xor(plaintext, key):\n ciphertext = ''\n i = 0\n\n for byte in plaintext:\n ciphertext += chr(byte ^ key[i])\n\n i = (i + 1) % len(key)\n return ciphertext", "def xor_encryption(source, destination, key):\n text = inputoutput.read_from_file(source, \"b\")\n # text = read_from_file(source)\n key = bytearray(key, 'utf-8')\n result = bytearray()\n for i in range(len(text)):\n result.append(text[i] ^ key[i % len(key)])\n inputoutput.write_to_file(result, destination, \"b\")", "def xor_single_char(str_bytes, key):\n output = b''\n\n for char in str_bytes:\n output += bytes([char ^ key])\n\n return output", "def xor_decode(data, key):\n if not data:\n return \"\"\n if not key:\n raise exceptions.DecryptError\n data = binascii.a2b_hex(data.encode(\"utf-8\")).decode(\"utf-8\")\n return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(data, key))", "def xor_decrypt(ciphertext, key):\n\n\tdecrypted_char = ''\n\tdecrypted_str = ''\n\n\tfor char in ciphertext:\n\t\tdecrypted_char = chr(char ^ key)\n\t\tdecrypted_str += decrypted_char\n\n\treturn decrypted_str", "def xor(data: bytes, key: bytes) -> bytes:\n key = key[: len(data)]\n int_var = int.from_bytes(data, _sys.byteorder)\n int_key = int.from_bytes(key, _sys.byteorder)\n int_enc = int_var ^ int_key\n return int_enc.to_bytes(len(data), _sys.byteorder)", "def encryptPassword(password):\n context = CryptContext(schemes=[encryption_algorithm])\n # replaced 'encrypt' (deprecated as of 1.7) with 'hash'\n return context.hash(password)", "def single_byte_xor(enc_b, key_i):\n return bytes(key_i ^ c_i for c_i in enc_b)", "def Encrypt(key, value):\n key = key.zfill(32)[:32]\n value = Pad(value, 16)\n aes = AES.new(key, AES.MODE_ECB)\n encrypted = aes.encrypt(value)\n return base64.b64encode(encrypted)", "def password_encryption(self, password):\n return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())", "def encrypt_password(password,salt=None):\n\tif salt is None:\n\t\tsalt = os.urandom(8) #64 bits\n\n\tassert 8 == len(salt)\n\tassert isinstance(salt,str)\n\n\tif isinstance(password,unicode):\n\t\tpassword = password.encode('UTF-8')\n\n\tassert isinstance(password,str)\n\n\tresult = password\n\tfor i in xrange(10):\n\t\tresult = HMAC(result,salt,sha256).digest()\n\treturn salt + result", "def encrypt_password(cls, password):\n return generate_password_hash(password)", "def encrypt(self, key, plaintext):\n output = []\n padded_key = padd_key(key, plaintext)\n for i in range(len(plaintext)):\n enc_ascii = (ord(plaintext[i]) + ord(padded_key[i])) % 256\n output.append(chr(enc_ascii))\n return ''.join(output)", "def wrap(self, key:bytes, credential:PublicKeyCredentialSource)->bytes:\n return keywrap.aes_key_wrap_with_padding(key,credential.get_bytes(True),default_backend())" ]
[ "0.71066976", "0.70337445", "0.70089537", "0.6976745", "0.69720566", "0.68509895", "0.6773852", "0.6713531", "0.669853", "0.6677601", "0.66625684", "0.6630633", "0.66287386", "0.6601027", "0.6546598", "0.65335375", "0.6481441", "0.64804226", "0.646954", "0.6465444", "0.6434244", "0.6420508", "0.6419503", "0.6399521", "0.63453937", "0.63433033", "0.6339565", "0.63305056", "0.6283787", "0.6280496" ]
0.70879525
1
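A worked example of the reverse-and-XOR obfuscation above, reusing the record's names for readability; the sample password and key are made up, and the final assertion just shows that XOR with the same reversed key undoes itself:

def mix_keys(password, key):
    # XOR each password byte with the matching byte of the reversed key.
    rev = bytes(reversed(key))
    return bytearray(p ^ k for p, k in zip(password, rev))

pw = bytearray(b"hunter2")
key = bytearray(b"0123456")   # at least as long as the password, as the code above requires
assert mix_keys(mix_keys(pw, key), key) == pw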
Finds the peak in the data in the specified time range. You must pass it pretrigger and timebase as well as the time and data series. timebase is an integer equal to (1 us)/(delta t of the digitizer). For example, if capturing data at 10 MHz, timebase = (1e-6/1e-7) = 10. For data captured at 2 MHz, timebase = (1e-6/5e-7) = 2. In other words, it is how many data points per us. It's a pretty bad way to code this up, but it lets you specify the time range in microseconds in which to look for the peak. pretrigger is how many microseconds of data before t = 0.
def polyPeak_noPlot(time, data, timerange = [40,80],axis = 'x'): # Find the indices corresponding to the ends of the time range t1 = mj.tindex(time,timerange[0])#+pretrigger) t2 = mj.tindex(time,timerange[1])#+pretrigger) # print 't1=', t1 # print 't2=', t2 # generate an array of indices spanning the range ti = np.arange(t1,t2) # get the time and data points in the range t = time[ti] d = data[ti] # Fit a 2nd degree polynomial and find the min and max. p = np.polyfit(t,d,2) fit = p[0]*t**2 + p[1]*t + p[2] dataMax = fit.max() dataMin = fit.min() if abs(dataMin) > dataMax: dataMax = dataMin return dataMax
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peak(data, fft_data=None):\n return np.max(np.abs(data))", "def find_peak(mhw, mhw_relSeas, ev, tt_start):\n tt_peak = np.argmax(mhw_relSeas)\n mhw[\"time_peak\"].append(mhw[\"time_start\"][ev] + tt_peak)\n mhw[\"date_peak\"].append(date.fromordinal(mhw[\"time_start\"][ev] + tt_peak))\n mhw[\"index_peak\"].append(tt_start + tt_peak)\n\n return mhw, tt_peak", "def pull_peak_times(data):\n bin_centers = np.arange(0.,1.501,0.002)\n data = np.asarray(data)\n maxs = np.argmax(data, axis=1)\n return bin_centers[maxs]", "def detect_peak(data):\n nonlocal last, ascent_dist, ascent_start\n if data > last:\n if ascent_start is None:\n ascent_start = last\n ascent_dist += 1\n else:\n if ascent_dist:\n peak = last\n ascent_dist = 0\n if (peak - ascent_start) > thresh:\n last = data\n ascent_start = None\n return peak\n ascent_start = None\n last = data\n return None", "def peak_finder(thresh=0):\n last = 0 # Track last input value\n ascent_dist = 0 # Distance from last trough.\n ascent_start = None # Last trough height\n\n def detect_peak(data):\n \"\"\" Returns initialized function to detect peaks on live streaming data.\n\n Args:\n data (numeric value): Input data point.\n\n Returns:\n If peak is detected return peak value, else return None\n \"\"\"\n nonlocal last, ascent_dist, ascent_start\n if data > last:\n if ascent_start is None:\n ascent_start = last\n ascent_dist += 1\n else:\n if ascent_dist:\n peak = last\n ascent_dist = 0\n if (peak - ascent_start) > thresh:\n last = data\n ascent_start = None\n return peak\n ascent_start = None\n last = data\n return None\n\n return detect_peak", "def _peaktimes(x, prc=95, t_buffer=.01, fs=1000):\n if np.logical_or(prc < 0, prc >= 100):\n raise ValueError('Percentile threshold must be between 0 and 100.')\n\n samp_buffer = np.int(np.round(t_buffer * fs))\n hi = x > np.percentile(x, prc)\n event_intervals = _chunk_time(hi, samp_buffer=samp_buffer)\n E = np.int(np.size(event_intervals) / 2)\n events = np.zeros(E, dtype=object)\n\n for e in range(E):\n temp = x[np.arange(event_intervals[e][0], event_intervals[e][1] + 1)]\n events[e] = event_intervals[e][0] + np.argmax(temp)\n\n return events", "def get_peak_Vm_time(self, cellname, tstart, tend):\n istart = int(tstart/self.plotdt+0.5)\n iend = int(tend/self.plotdt+0.5)\n try:\n data = self.datafile['/Vm/'+cellname][istart:iend]\n except KeyError:\n print 'get_peak_Vm_time: no Vm entry for', cellname\n return -1\n peakindex = data.argmax()\n peaktime = tstart + peakindex * self.plotdt\n print 'peak time:', cellname, ':', peaktime\n return peaktime", "def peak_time(self):\n return np.array([self.wftime[ch][self.waveform[ch].argmax()] for ch in range(self.nchannels)])", "def find_max_power(data, interval_power, interval_duration, search_range):\n max_power = 0\n index = None\n for i in range(min(len(data), search_range)):\n power = get_row_power(data, i)\n if power > max_power:\n index, max_power = i, power\n logging.debug(\"peak index = %u, max_power = %u\", index, max_power)\n return index", "def find_peaks(self, t_measure):\n self._check_time(t_measure)\n #widths = np.arange(2,7) # range of widths to check by find_peaks_cwt\n #peak_nodes = find_peaks_cwt(self.get_velocities(t_measure), widths, min_snr=2.0,noise_perc=30.0)\n peak_beads = peakutils.peak.indexes(self.get_velocities(t_measure), thres=0.75, min_dist=7)\n return peak_beads", "def test_peak_refinement_speed(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n mx, px = more_itertools.first(self.sm.dft_frames(self.x))\n g 
= {\n \"mx\": mx,\n \"px\": px,\n \"ploc\": sample_dsp.peak_detect(mx, self.sm.t),\n \"sample_dsp\": sample_dsp,\n \"utilFunctions\": utilFunctions,\n }\n\n def get_time(func: str,):\n return timeit.timeit(\n func,\n number=256,\n globals=g,\n )\n\n t = get_time(\"sample_dsp.peak_refine(ploc, mx, px)\")\n t_sms = get_time(\"utilFunctions.peakInterp(mx, px, ploc)\")\n self.assertLessEqual(t, t_sms)\n print(\"\\n\" + f\" {t} <= {t_sms}\")", "def _peakdetect_parabole_fitter(raw_peaks, x_axis, y_axis, points):\n func = lambda x, k, tau, m: k * ((x - tau) ** 2) + m\n fitted_peaks = []\n for peak in raw_peaks:\n index = peak[0]\n x_data = x_axis[index - points // 2: index + points // 2 + 1]\n y_data = y_axis[index - points // 2: index + points // 2 + 1]\n # get a first approximation of tau (peak position in time)\n tau = x_axis[index]\n # get a first approximation of peak amplitude\n m = peak[1]\n \n # build list of approximations\n # k = -m as first approximation?\n p0 = (-m, tau, m)\n popt, pcov = curve_fit(func, x_data, y_data, p0)\n # retrieve tau and m i.e x and y value of peak\n x, y = popt[1:3]\n \n # create a high resolution data set for the fitted waveform\n x2 = np.linspace(x_data[0], x_data[-1], points * 10)\n y2 = func(x2, *popt)\n \n fitted_peaks.append([x, y, [x2, y2]])\n \n return fitted_peaks", "def get_peak_in_aperture(self,data,mask=None, method='exact',subpixels=5,unit=None):\n data_cutouts = self.get_data_cutouts(data,mask=None,method='exact',subpixels=5,unit=None)\n \n aperture_peaks = []\n for data_cutout in data_cutouts:\n aperture_peaks.append(np.max(data_cutout))\n \n aperture_peaks = self._prepare_photometry_output(aperture_peaks,\n unit=unit)\n return aperture_peaks", "def times_and_values_maxima(time, values, start_time=0.0, end_time=10000000):\n end_index = np.where(time <= end_time)[0][-1]\n start_index = np.where(start_time <= time)[0][0]\n time = time[start_index:end_index+1]\n values = values[start_index:end_index+1]\n\n # time, values = np.where(start_time <= time <= end_time, time, values)\n\n # these are the maxima of the graphs, will be used for fitting to look at trend\n max_index = (np.diff(np.sign(np.diff(values))) < 0).nonzero()[0] + 1\n time_max = time[max_index]\n value_max = values[max_index]\n return time_max, value_max", "def peak(self):\n pass", "def testUpperBound(self,time,accel):\n\t\tif (time - self.timestamp) > ParserSettings.TIME_DELTA:#tests lockout threshold of a flick event\n\t\t\tif accel < self.upper:#tests if flick maximum is found, relative to previous magnitude\n\t\t\t\tself.timestamp\t= time#once peak found, set appropriate data and return a flick\n\t\t\t\ttoReturn\t\t= self.upper\n\t\t\t\tself.upper\t\t= 0\n\t\t\t\treturn toReturn\n\t\t\telse:\n\t\t\t\tself.upper = accel#if no flick yet, update most recent flick to test\n\t\t\t\treturn 0\n\t\telse:\n\t\t\treturn 0", "def __get_max_peak(x_value, raw_values):\n\n\n raw_values_index = raw_values[raw_values[\"m/z\"] == x_value].index[0]\n\n value, index = float(raw_values.loc[raw_values_index - 5, \"intensity_normalized\"]), raw_values_index - 5\n\n for z in range(-5, 15):\n if float(raw_values.loc[raw_values_index + z, \"intensity_normalized\"]) > value:\n value, index = float(raw_values.loc[raw_values_index + z, \"intensity_normalized\"]), raw_values_index + z\n return value", "def get_peak(self):\r\n \r\n sensor_1_list = []\r\n\r\n for i in self.return_data:\r\n sensor_1_list.append(i[0])\r\n\r\n sensor_peak = max(sensor_1_list)\r\n \r\n return(sensor_peak)", "def 
SearchPeakMagnitude(body, startTime):\n # s1 and s2 are relative longitudes within which peak magnitude of Venus can occur.\n s1 = 10.0\n s2 = 30.0\n if body != Body.Venus:\n raise InvalidBodyError()\n\n iter = 1\n while iter <= 2:\n # Find current heliocentric relative longitude between the\n # inferior planet and the Earth.\n plon = EclipticLongitude(body, startTime)\n elon = EclipticLongitude(Body.Earth, startTime)\n rlon = _LongitudeOffset(plon - elon)\n # The slope function is not well-behaved when rlon is near 0 degrees or 180 degrees\n # because there is a cusp there that causes a discontinuity in the derivative.\n # So we need to guard against searching near such times.\n if -s1 <= rlon < +s1:\n # Seek to the window [+s1, +s2].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = +s1.\n rlon_lo = +s1\n # Search forward for the time t2 when rel lon = +s2.\n rlon_hi = +s2\n elif rlon >= +s2 or rlon < -s2:\n # Seek to the next search window at [-s2, -s1].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = -s2.\n rlon_lo = -s2\n # Search forward for the time t2 when rel lon = -s1.\n rlon_hi = -s1\n elif rlon >= 0:\n # rlon must be in the middle of the window [+s1, +s2].\n # Search BACKWARD for the time t1 when rel lon = +s1.\n syn = _SynodicPeriod(body)\n adjust_days = -syn / 4\n rlon_lo = +s1\n # Search forward from t1 to find t2 such that rel lon = +s2.\n rlon_hi = +s2\n else:\n # rlon must be in the middle of the window [-s2, -s1].\n # Search BACKWARD for the time t1 when rel lon = -s2.\n syn = _SynodicPeriod(body)\n adjust_days = -syn / 4\n rlon_lo = -s2\n # Search forward from t1 to find t2 such that rel lon = -s1.\n rlon_hi = -s1\n\n t_start = startTime.AddDays(adjust_days)\n t1 = SearchRelativeLongitude(body, rlon_lo, t_start)\n t2 = SearchRelativeLongitude(body, rlon_hi, t1)\n\n # Now we have a time range [t1,t2] that brackets a maximum magnitude event.\n # Confirm the bracketing.\n m1 = _mag_slope(body, t1)\n if m1 >= 0.0:\n raise InternalError()\n\n m2 = _mag_slope(body, t2)\n if m2 <= 0.0:\n raise InternalError()\n\n # Use the generic search algorithm to home in on where the slope crosses from negative to positive.\n tx = Search(_mag_slope, body, t1, t2, 10.0)\n if tx is None:\n # The search should have found the ascending root in the interval [t1, t2].\n raise InternalError()\n\n if tx.tt >= startTime.tt:\n return Illumination(body, tx)\n\n # This event is in the past (earlier than startTime).\n # We need to search forward from t2 to find the next possible window.\n # We never need to search more than twice.\n startTime = t2.AddDays(1.0)\n iter += 1\n\n # We should have found the peak magnitude in at most 2 iterations.\n raise InternalError()", "def findpeakg(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 
1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = GaussianModel(prefix = 'g1_')\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n for j in range(1,fnum[i]):\n mod = mod + GaussianModel(prefix = 'g%i_'%(j + 1))\n pars.update(GaussianModel(prefix = 'g%i_'%(j + 1)).make_params())\n sigma0 = math.sqrt((width*(fm[i][j][0] - fm[i][j][1]))**2/(2*math.log(absdata[fm[i][j][0]]/absdata[fm[i][j][1]])))\n pars['g%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][-1])\n pars['g%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['g%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]/0.3989423*sigma0,min = noise*r/0.3989423*sigma0,max = absdata[fm[i][j][0]]*20/0.3989423*sigma0)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Gaussian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][-1] - bottom)/width)\n for k in range(fnum[i]):\n amplitude = pars['g%i_height'%(k + 1)].value\n sigma = pars['g%i_sigma'%(k + 1)].value\n miu = pars['g%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude*math.exp( - (bottom + width*p - miu)*(bottom + width*p - miu)/(2*sigma*sigma)))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([sigma,miu,amplitude,sum1,tempbo,tempto])\n return peak", "def eeg_peaks(array,tim,onset,plot='false'):\n\tp1_i,n1_i,p2_i = onset+56,onset+104,onset+176\n\twin_p1,win_n1,win_p2 = 15,20,40\n\t# determine P1,N1 and P2 values on the basis of the maximum in GFP in a window around the expected values\n\tidx_p1 = np.logical_and(tim>p1_i-win_p1, tim<p1_i+win_p1)\n\tidx_n1 = np.logical_and(tim>n1_i-win_n1, tim<n1_i+win_n1)\n\tidx_p2 = np.logical_and(tim>p2_i-win_p2, tim<p2_i+win_p2)\n\tp1 = np.max(array[idx_p1])\n\ttp1 = tim[idx_p1][array[idx_p1].argmax()]\n\tn1 = np.min(array[idx_n1])\n\ttn1 = tim[idx_n1][array[idx_n1].argmin()]\n\tp2 = np.max(array[idx_p2])\n\ttp2 = tim[idx_p2][array[idx_p2].argmax()]\n\n\tlineax = dict(linewidth=1, color='black', linestyle='--')\n\tlinep1 = dict(linewidth=1, color='red', linestyle='--')\n\tlinen1 = dict(linewidth=1, color='green', linestyle='--')\n\tlinep2 
= dict(linewidth=1, color='blue', linestyle='--')\n\n\tif plot == 'true':\t\t\n\t\tfig = plt.figure(19,figsize=[7,5])\n\t\tax = fig.add_subplot(111, autoscale_on=False, xlim=[onset-100,tp2+200], ylim=[1.25*np.min([p1,n1,p2]),1.25*np.max([p1,n1,p2])])\n\t\tplt.plot(tim,array,'k-',lw=3)\n\t\tplt.plot(tp1,p1,'ro')\n\t\tplt.plot(tn1,n1,'go')\n\t\tplt.plot(tp2,p2,'bo')\n\t\tax.axvline(p1_i-win_p1,**linep1)\n\t\tax.axvline(p1_i+win_p1,**linep1)\n\t\tax.axvline(n1_i-win_n1,**linen1)\n\t\tax.axvline(n1_i+win_n1,**linen1)\n\t\tax.axvline(p2_i-win_p2,**linep2)\n\t\tax.axvline(p2_i+win_p2,**linep2)\n\t\tax.axhline(**lineax)\n\t\tplt.text(tp1-120,1.25*p1,'P1 = %.2f muV at %.0f ms' %(p1,tp1),fontsize=10)\n\t\tplt.text(tn1-40,1.1*n1,'N1 = %.2f muV at %.0f ms' %(n1,tn1),fontsize=10)\n\t\tplt.text(tn1+40,1.1*p2,'P2 = %.2f muV at %.0f ms' %(p2,tp2),fontsize=10)\n\t\tplt.xlabel('time (ms)',fontsize = 13)\n\t\tplt.ylabel('Amplitude',fontsize = 13)\n\treturn [p1,n1,p2,tp1,tn1,tp2]", "def first_peak_detect(beam, start_point):\n logging.debug('running first_peak_detect function')\n for i in range(start_point, len(beam)):\n logging.debug('current value of i is %d', i)\n if beam[i-1] < beam[i] > beam[i+1]:\n logging.debug('value determined to be the center of the values %d, %d, %d', beam[i-1], beam[i], beam[i+1])\n return i\n\n logging.error(\"no peak was found. will try working with the length of the beam\")\n return len(beam)", "def find_local_peak(flux, x, width, figname=None):\n width = int(round(width))\n if width%2 != 1:\n width += 1\n half = int((width-1)/2)\n\n i = int(round(x))\n\n # find the peak in a narrow range\n\n i1, i2 = max(0, i-half), min(flux.size, i+half+1)\n\n if i2 - i1 <= 4:\n # 4 is the number of free parameters in fitting function\n return None\n\n # find the peak position\n imax = flux[i1:i2].argmax() + i1\n xdata = np.arange(i1,i2)\n ydata = flux[i1:i2]\n # determine the initial parameters for gaussian fitting + background\n p0 = [ydata.max()-ydata.min(), imax, 3., ydata.min()]\n # least square fitting\n #p1,succ = opt.leastsq(errfunc2, p0[:], args=(xdata,ydata))\n p1, cov, info, mesg, ier = opt.leastsq(errfunc2, p0[:],\n args=(xdata, ydata), full_output=True)\n\n res_lst = errfunc2(p1, xdata, ydata)\n\n if res_lst.size-len(p0)-1 == 0:\n return None\n\n std = math.sqrt((res_lst**2).sum()/(res_lst.size-len(p0)-1))\n\n if figname is not None:\n fig = plt.figure()\n ax1 = fig.add_axes([0.1, 0.4, 0.8, 0.5])\n ax2 = fig.add_axes([0.1, 0.1, 0.8, 0.25])\n ax1.plot(xdata, ydata, 'o', ms=4)\n newx = np.arange(xdata[0], xdata[-1], 0.1)\n newy = gaussian_bkg(p1[0], p1[1], p1[2], p1[3], newx)\n ax1.plot(newx, newy, '-', lw=0.6)\n yerr = errfunc2(p1, xdata, ydata)\n ax2.plot(xdata, yerr, 'o', ms=4)\n ax1.set_xlim(xdata[0], xdata[-1])\n ax2.set_xlim(xdata[0], xdata[-1])\n fig.savefig(figname)\n plt.close(fig)\n\n return i1, i2, p1, std", "def peak_to_subpeak_list(chrom,start,end):\n num_subpeaks = int(end) - int(start) // 60\n start_list = list(range(start,end,60))\n end_list = start_list[1:] \n end_list.append(start_list[-1] + 60)\n subpeak_lists = [(chrom,s,e) for s,e in zip(start_list,end_list)]\n return subpeak_lists", "def myfitpeak(v,a):\n x = np.array(v) # voltage\n y = np.array(a) # current\n\n y = smooth(y)\n # limit peak width to 1/50 of the totoal scan length to entire scan.\n # limit minimum peak height to be over 0.2 percentile of all neighbors\n heightlimit = np.quantile(np.absolute(y[0:-1] - y[1:]), 0.8) * 3\n # heightlimit = np.absolute(y[0:-1] - y[1:]).mean() * 3\n # set height limit 
so that props return limits\n peaks, props = signal.find_peaks(\n y, height=heightlimit, prominence=heightlimit, width=len(y) / 30, rel_height=0.5)\n\n # return if no peaks found.\n if len(peaks) == 0:\n return {'fx': [v[0],v[1]], 'fy': [0,0], 'pc': 0, 'pv': 0, 'err': 1}\n\n peak = pickpeaks(peaks, props, len(y))\n\n # find tagent to 3X peak width window\n x1, x2 = find_tangent(y, peak)\n\n y1 = y[x1]\n y2 = y[x2]\n k = (y2-y1)/(x2-x1)\n b = -k*x2 + y2\n\n peakcurrent = y[peak] - (k*peak + b)\n peakvoltage = x[peak]\n\n twopointx = np.array([x[x1], x[x2]]).tolist()\n twopointy = np.array([y[x1], y[x2]]).tolist()\n\n # for compatibility return the same length tuple of results.\n # currently, no error is calculated.\n return {'fx': twopointx, 'fy': twopointy, 'pc': float(peakcurrent), 'pv': float(peakvoltage), 'err': 0}", "def findpeakl(data, zp, noise, bottom, top, r):\n length = len(data)\n width = (top - bottom)/(length - 1)\n absdata = []\n peak = []\n for i in range(length):\n absdata.append(abs(data[i] - zp[i]))\n i = 0\n fsnum = 0#fitting section number\n fsec = []#fitting section\n fdata = []#signal of fitting section\n fnum = []#fitting number\n fm = []#index of max and min points\n while(i<length):\n if absdata[i]>noise*r:\n fsnum = fsnum + 1\n fsec.append([])\n fdata.append([])\n tempmax = absdata[i]\n tempmin = absdata[i]\n inma = i\n inmi = i\n fnum.append(0)\n fm.append([])\n direction = 1#1:rising,0:descending\n while(absdata[i]>noise*r):\n if direction==1:\n if absdata[i]>tempmax:\n tempmax = absdata[i]\n inma = i\n elif absdata[i]<tempmax - noise*r:\n direction = 0\n fm[fsnum - 1].append([inma,inmi])\n tempmin = absdata[i]\n inmi = i\n fnum[fsnum - 1] = fnum[fsnum - 1] + 1\n elif direction==0:\n if absdata[i]<tempmin:\n tempmin = absdata[i]\n inmi = i\n elif absdata[i]>tempmin + noise*r:\n direction = 1\n tempmax = absdata[i]\n inma = i\n fsec[fsnum - 1].append(bottom + width*i)\n fdata[fsnum - 1].append(absdata[i])\n i = i + 1\n if i>=length:\n break\n if fm[fsnum - 1]==[]:\n del fsec[fsnum - 1]\n del fdata[fsnum - 1]\n del fnum[fsnum - 1]\n del fm[fsnum - 1]\n fsnum = fsnum - 1\n i = i + 1\n for i in range(fsnum):\n pars = Parameters()\n j = 0\n mod = LorentzianModel(prefix = 'l1_')\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n for j in range(1,fnum[i]):\n mod = mod + LorentzianModel(prefix = 'l%i_'%(j + 1))\n pars.update(LorentzianModel(prefix = 'l%i_'%(j + 1)).make_params())\n sigma0 = abs(width*(fm[i][j][0] - fm[i][j][1]))/math.sqrt(absdata[fm[i][j][0]]/absdata[fm[i][j][1]] - 1)\n pars['l%i_center'%(j + 1)].set(value = bottom + width*fm[i][j][0],min = fsec[i][0],max = fsec[i][ - 1])\n pars['l%i_sigma'%(j + 1)].set(value = sigma0,min = sigma0/20,max = sigma0*20)\n pars['l%i_amplitude'%(j + 1)].set(value = absdata[fm[i][j][0]]*sigma0/0.3183099,min = noise*r*sigma0/0.3183099,max = absdata[fm[i][j][0]]*20*sigma0/0.3183099)\n# =============================================================================\n# result = mod.fit(fdata[i],pars,x = fsec[i])\n# #print(result.fit_report())\n# 
plt.plot(fsec[i],fdata[i],'bo',label = 'original')\n# plt.plot(fsec[i],result.best_fit,'r-',label = 'fitting')\n# plt.title('Lorentzian fitting')\n# plt.show()\n# =============================================================================\n tempbo = int((fsec[i][0] - bottom)/width)\n tempto = int((fsec[i][ - 1] - bottom)/width)\n for k in range(fnum[i]):\n gama2 = (pars['l%i_sigma'%(k + 1)].value)**2\n amplitude = pars['l%i_height'%(k + 1)].value*gama2\n miu = pars['l%i_center'%(k + 1)].value\n sum1 = 0\n for p in range(tempbo,tempto + 1):\n v = abs(amplitude/((bottom + width*p - miu)*(bottom + width*p - miu) + gama2))\n sum1 = sum1 + (v - absdata[k])*(v - absdata[k])\n sum1 = sum1/(tempto - tempbo + 1)\n peak.append([gama2,miu,amplitude,sum1,tempbo,tempto])\n return peak", "def determine_peaks_and_limits(\n data, smoothed, limits,\n peak_prom, peak_height,\n valley_prom, valley_height,\n debug, smooth_window_size, outfile,\n skip_smooth,\n):\n mm = max(smoothed)\n peaks, props = find_peaks(smoothed, height=peak_height, prominence=peak_prom) # maxima (peaks positions)\n rpeaks, rprops = find_peaks([-i+mm for i in smoothed], height=valley_height, prominence=valley_prom) # minima (peaks limits)\n\n if len(peaks) > 3 :\n print(\"WARNING: More than 3 peaks detected.\\nPossible erroneous detection:\\n\\t-Restart setting the -ll parameter.\\n\\t-check histogram and modify peak height and prominence arguments accordingly.\\n\\t-Contaminant peaks may also break detection, remove them with tools such as blobtools or by hard-filtering low coverage contigs.\")\n print(\"NOTE: Assuming the last 2 peaks are diploid and haploid...\")\n\n if debug :\n debug_plot_peak_errors(data, smoothed, peaks, limits.values(), rpeaks, smooth_window_size, outfile, skip_smooth)\n\n if len(peaks) > 0 :\n print(\"Peaks found: \" + \"x, \".join(str(p) for p in peaks) + \"x\")\n else :\n raise Exception(\"No peaks found! 
Try changing the input parameters or setting thresholds manually!\")\n if len(rpeaks) > 0 :\n print(\"Valleys found: \" + \"x, \".join(str(p) for p in rpeaks) + \"x\")\n else :\n print(\"No valleys found!\")\n\n valleys = [0] + list(rpeaks) + [len(smoothed)]\n thresholds = get_threshold_between_peaks(smoothed, peaks, valleys)\n\n relevant_peaks = peaks[-3:]\n #valleys = rpeaks[-3:]\n print(\"Relevant peaks: \" + \"x, \".join(str(p) for p in relevant_peaks) + \"x\")\n print(\"Thresholds:\\n\\t- \" + \"\\t- \".join(\"{}: {}x\\n\".format(k,p) for k,p in thresholds.items()))\n\n return relevant_peaks, thresholds", "def find_peaks(self, high=None, low=None, beta_std=None, **kwargs):\n peaks, left, right = {}, {}, {}\n for ch in range(self.nch):\n if beta_std is not None:\n self.high_threshold = np.mean(self.data[:,ch]) + beta_std * np.std(self.data[:,ch])\n self.low_threshold = np.mean(self.data[:,ch]) - beta_std * np.std(self.data[:,ch])\n _peaks_idx, _ = signal.find_peaks(self.data[:,ch], height=self.high_threshold, **kwargs)\n _left_idx, _right_idx = self.find_left_right_nearest(\n np.where(self.data[:, ch] < self.low_threshold)[0], _peaks_idx)\n elif high is not None and low is not None:\n _peaks_idx, _ = signal.find_peaks(self.data[:, ch], height=high, **kwargs)\n _left_idx, _right_idx = self.find_left_right_nearest(\n np.where(self.data[:, ch] < low)[0], _peaks_idx)\n peaks[ch] = TimeSeries(self.t[_peaks_idx], self.data[_peaks_idx], self.name+'_peaks_'+str(ch))\n left[ch] = TimeSeries(self.t[_left_idx], self.data[_left_idx], self.name+'_left_'+str(ch))\n right[ch] = TimeSeries(self.t[_right_idx], self.data[_right_idx], self.name+'_right_'+str(ch))\n return peaks, left, right", "def peakdetect_parabole(y_axis, x_axis, points = 9):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n # make the points argument odd\n points += 1 - points % 2\n #points += 1 - int(points) & 1 slower when int conversion needed\n \n # get raw peaks\n max_raw, min_raw = peakdetect_zero_crossing(y_axis)\n \n # define output variable\n max_peaks = []\n min_peaks = []\n \n max_ = _peakdetect_parabole_fitter(max_raw, x_axis, y_axis, points)\n min_ = _peakdetect_parabole_fitter(min_raw, x_axis, y_axis, points)\n \n max_peaks = map(lambda x: [x[0], x[1]], max_)\n max_fitted = map(lambda x: x[-1], max_)\n min_peaks = map(lambda x: [x[0], x[1]], min_)\n min_fitted = map(lambda x: x[-1], min_)\n \n \n #pylab.plot(x_axis, y_axis)\n #pylab.hold(True)\n #for max_p, max_f in zip(max_peaks, max_fitted):\n # pylab.plot(max_p[0], max_p[1], 'x')\n # pylab.plot(max_f[0], max_f[1], 'o', markersize = 2)\n #for min_p, min_f in zip(min_peaks, min_fitted):\n # pylab.plot(min_p[0], min_p[1], 'x')\n # pylab.plot(min_f[0], min_f[1], 'o', markersize = 2)\n #pylab.show()\n \n return [max_peaks, min_peaks]", "def peak(self) -> Tuple[MeasureInput, MeasureResult]:\n assert self._data\n return self._data[0][2]" ]
[ "0.6256507", "0.62215835", "0.61286575", "0.59447867", "0.5906512", "0.5898816", "0.58075976", "0.5780191", "0.57295424", "0.5648836", "0.5639455", "0.5607322", "0.5568726", "0.55157095", "0.551521", "0.5498673", "0.54892355", "0.54749453", "0.5392688", "0.53639615", "0.534727", "0.53234446", "0.5319398", "0.52918553", "0.5289434", "0.5287273", "0.52803546", "0.52655005", "0.52598995", "0.52571535" ]
0.6415059
0
Provides positions in meters along probe stalks for 4x4 array of probes built by M. Kaur.
def get_probeLocs_calib_setup(dir, num_probes = 16): position_vectors = [[0] * 3 for i in range(num_probes)] #every x postion # Convert to meters x_pos = [-4.25*1e-3*25.4, -4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4] y_pos = [-4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4, -4.25*1e-3*25.4] z_pos = [-2.25*1e-3*25.4, -0.75*1e-3*25.4, 0.75*1e-3*25.4, 2.25*1e-3*25.4] x = 0 for i in range(num_probes): if(i%4 ==0 and i>0): x+=1 position_vectors[i][0] =x_pos[x] position_vectors[i][1] = y_pos[x] position_vectors[i][2] =z_pos[i%4] # print(position_vectors[i][0]) """ Now take into account the direction r shots : x,y,z - > r,t,z t shots : x,y,z - > r,t,z z shots : x,y,z - > r,t,z """ if dir ==2 :#r # don't need to switch anything return position_vectors if dir == 0:#t # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0] return position_vectors if dir ==1:#z # also like -90 degree rotation, switch x and z position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0] return position_vectors return position_vectors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_probeLocs_calib_setup_cm(dir, num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*2.54, -4.25*2.54, 4.24*2.54, 4.24*2.54]\n y_pos = [-4.25*2.54, 4.24*2.54, 4.24*2.54, -4.25*2.54]\n z_pos = [-2.25*2.54, -0.75*2.54, 0.75*2.54, 2.25*2.54]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def current_probe_position(self):\n\t\t# Obtain encoder feedback and calculate probe position\n\t\tx_position = self.x_mc.current_position() / self.steps_per_cm\n\t\ty_position = self.y_mc.current_position() / self.steps_per_degree *5 #Seems that 1 encoder unit = 5 motor step unit\n\n\t\treturn x_position, y_position", "def get_probe_location(self):\n\n probe_x, probe_y = self.position\n\n if self.previous_direction == (1, 0):\n probe_x += CAR_LENGTH - 1\n elif self.previous_direction == (0, 1):\n probe_y += CAR_LENGTH - 1\n\n return probe_x, probe_y", "def get_hand_points(index, annotations, offset):\n # Get the index, and entry in array\n this_index = annotations[index]['uv_vis']\n \n\n points = [None] * 21\n\n # Grab all the points\n points[FINGER_MAP[\"Wrist\"]] = this_index[offset + 0]\n\n points[FINGER_MAP[\"Thumb1\"]] = this_index[offset + 1]\n points[FINGER_MAP[\"Thumb2\"]] = this_index[offset + 2]\n points[FINGER_MAP[\"Thumb3\"]] = this_index[offset + 3]\n points[FINGER_MAP[\"Thumb4\"]] = this_index[offset + 4]\n\n points[FINGER_MAP[\"Index1\"]] = this_index[offset + 5]\n points[FINGER_MAP[\"Index2\"]] = this_index[offset + 6]\n points[FINGER_MAP[\"Index3\"]] = this_index[offset + 7]\n points[FINGER_MAP[\"Index4\"]] = this_index[offset + 8]\n\n points[FINGER_MAP[\"Middle1\"]] = this_index[offset + 9]\n points[FINGER_MAP[\"Middle2\"]] = this_index[offset + 10]\n points[FINGER_MAP[\"Middle3\"]] = this_index[offset + 11]\n points[FINGER_MAP[\"Middle4\"]] = this_index[offset + 12]\n\n points[FINGER_MAP[\"Ring1\"]] = this_index[offset + 13]\n points[FINGER_MAP[\"Ring2\"]] = this_index[offset + 14]\n points[FINGER_MAP[\"Ring3\"]] = this_index[offset + 15]\n points[FINGER_MAP[\"Ring4\"]] = this_index[offset + 16]\n\n points[FINGER_MAP[\"Pinky1\"]] = this_index[offset + 17]\n points[FINGER_MAP[\"Pinky2\"]] = this_index[offset + 18]\n points[FINGER_MAP[\"Pinky3\"]] = this_index[offset + 19]\n points[FINGER_MAP[\"Pinky4\"]] = this_index[offset + 20]\n\n return points", "def positions(self, tileID, numSamples):", "def picket_positions(self) -> Sequence[float]:\n picket_pos = []\n for line, sign in zip(self.marker_lines, (-1, 1)):\n if self._orientation == Orientation.UP_DOWN:\n picket = self._fit(line.center.y)\n else:\n picket = self._fit(line.center.x)\n if (\n self._separate_leaves\n ): # offset 
the picket position by the DLG and nominal gap\n mag_factor = self._image.sid / 1000\n picket += (\n sign * self._nominal_gap_mm * mag_factor / 2 * self._image.dpmm\n )\n picket_pos.append(picket / self._image.dpmm)\n return picket_pos", "def get_positions(specs):\r\n xy = []\r\n for i, spec in enumerate(specs):\r\n slit = spec.split(\"n3311\", 1)[1].replace(\".fits\", \"\")\r\n # slit = spec.split(\".\")[0].split(\"_\", 1)[1][5:]\r\n index = canvas.slits.ids.index(slit)\r\n xy.append([canvas.slits.x[index], canvas.slits.y[index]])\r\n return np.array(xy)", "def _getoffsets(isMountoffset):\n mplist = list()\n for i in range(23) :\n a = i+1\n mp = device.Carma(a).getName() + \".AntennaCommon.Drive.Point.\"\n if (isMountoffset): mp += \"mountOffset\"\n else: mp += \"offset\"\n mpaz = mp + \"Az\"\n mpel = mp + \"El\"\n mps = [mpaz, mpel]\n mplist.append(mps)\n r = queryMpValues(mplist, nothrow=True)\n if False:\n for i in range(23):\n if r[i][0] == None: astr = \" None\"\n else : astr = \"%5.2f\" %r[i][0]\n if r[i][1] == None: estr = \" None\"\n else : estr = \"%5.2f\" %r[i][1]\n print \"%2d: %s %s\" %(i+1, astr, estr)\n return r", "def beam_positions(closepack=False):\n \n x_pos, y_pos = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,2):\n y += 0.2\n x_pos.append(x+(0.05 if closepack else 0))\n y_pos.append(y)\n y += 0.2\n x_pos.append(x)\n y_pos.append(y)\n\n return x_pos, y_pos", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def to_oriented_points(self):\n return g.points_from_probe(self)", "def get_pos(self, frame):\n frame = self.perspective_shift(frame)\n \n puck_mask = self.color_mask(frame, self.color_green, thresh=15)\n striker_mask = self.color_mask(frame, self.color_orange, thresh=25, blur=5)\n \n puck_loc, _ = self.find_centroids(puck_mask)\n striker_locs, _ = self.find_centroids(striker_mask, 2)\n \n p_pos = self.abs_to_meter(puck_loc[0])\n # cases: (pos,pos), (pos,None), (None,None)\n if striker_locs[0] is not None:\n pos_1 = self.abs_to_meter(striker_locs[0])\n pos_2 = self.abs_to_meter(striker_locs[1])\n s1_pos = pos_1 if pos_1[1]<0 else pos_2\n s2_pos = pos_2 if pos_1[1]<0 else pos_1\n else:\n s1_pos, s2_pos = None, None \n \n return [p_pos, s1_pos, s2_pos]", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def _calculate_distances(boxes, homography):\n pos_markers = []\n pix_markers = []\n for box in boxes:\n (pt1_w, pt1_h), (pt2_w, pt2_h) = 
box\n\n pix_marker = ((pt1_w + pt2_w) // 2, max(pt1_h, pt2_h))\n pix_markers.append(pix_marker)\n\n pos_marker = np.array(pix_marker).reshape(\n 1, 1, 2).astype(\"float32\")\n pos_marker = cv2.perspectiveTransform(\n pos_marker, homography).squeeze()\n pos_markers.append(pos_marker)\n\n if len(pos_markers) <= 1:\n return np.array([]), np.array([])\n\n distances = pdist(np.array(pos_markers))\n return pix_markers, distances", "def positions(self):\n method = 'get_xdata' if self.direction == 'horizontal' else 'get_ydata'\n return [getattr(line, method)()[0] for line in self.artists]", "def BeamPosition():\n \n XPOS, YPOS = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,1):\n y += 0.2\n XPOS.append(x)\n YPOS.append(y)\n\n return XPOS, YPOS", "def galaxy_positions():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n '''\n hdulist2 = pf.open('../kids_data/KiDS_DR3.1_G12_ugri_shear.fits')\n hdulist3 = pf.open('../kids_data/KiDS_DR3.1_G15_ugri_shear.fits')\n hdulist4 = pf.open('../kids_data/KiDS_DR3.1_G23_ugri_shear.fits')\n hdulist5 = pf.open('../kids_data/KiDS_DR3.1_GS_ugri_shear.fits')\n '''\n ra = hdulist1[1].data['RAJ2000'][:sample]\n dec = hdulist1[1].data['DECJ2000'][:sample]\n global maxra\n maxra = max(ra)\n global minra\n minra = min(ra)\n global maxdec\n maxdec = max(dec)\n global mindec\n mindec = min(dec)\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([ra, dec])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n return ctree", "def get_offsets():\n \n offsets = dict()\n offsets['leiptr'] = [0.0, -0.005, 'left']\n offsets['gjoll'] = [0.15, -0.002, 'left']\n offsets['gd1'] = [0.15, -0.002, 'left']\n offsets['phlegethon'] = [0.0, 0.005, 'center']\n offsets['ylgr'] = [0.15, -0.002, 'left']\n offsets['wambelong'] = [0.0, -0.005, 'left']\n offsets['fimbulthul'] = [0.15, -0.002, 'left']\n offsets['ophiuchus'] = [0.0, -0.005, 'center']\n offsets['elqui'] = [0.15, -0.002, 'left']\n offsets['svol'] = [0.0, -0.004, 'right']\n offsets['ravi'] = [-0.1, 0.002, 'right']\n offsets['sylgr'] = [0.15, -0.002, 'left']\n offsets['jhelum'] = [0.15, -0.002, 'left']\n offsets['indus'] = [0.15, -0.002, 'left']\n offsets['phoenix'] = [0.0, -0.004, 'right']\n offsets['slidr'] = [0.15, 0.002, 'left']\n offsets['atlas'] = [0.1, -0.003, 'left']\n offsets['aliqa_uma'] = [0.15, -0.003, 'left']\n offsets['turbio'] = [-0.15, 0.00, 'right']\n offsets['turranburra'] = [-0.0, -0.003, 'right']\n offsets['fjorm'] = [0.0, -0.004, 'right']\n offsets['triangulum'] = [0.2, -0.005, 'center']\n offsets['willka_yaku'] = [-0.2, 0.005, 'center']\n \n return offsets", "def find_rpt_coords(self) -> (int, int):\n start_size = self.size\n end_size = self.size + len(self.allele)\n coord = self.coord\n fasta_alt = self.fasta_alt\n while self.allele == fasta_alt:\n coord += len(self.allele)\n start_size += len(self.allele)\n end_size += len(self.allele)\n fasta_alt = self.seq[start_size:end_size]\n new_start = coord - len(self.allele)\n new_end = new_start + len(self.allele) - 1\n return new_start, new_end", "def separate(self) -> np.ndarray:\n loc = self.state[:, :, Boids.Attr.LOC]\n mut_influence = self._perceive(self.proxim_bounds[0])\n return loc - np.dot(loc, mut_influence)", "def generateMatrix():\n num_probes = 16\n\n # print(position_vectors)\n # Create the (48x4) calibration matrix:\n calibration_lookup= [[0] * 3 for i in range(num_probes)]\n calibration_matrix = [[0] * 
9 for i in range(num_probes)]\n counter = 0\n\n # first populate with x-direction:\n shot_range = [17, 20] #x-direction\n dir = 0 #the direction of the orentation of the probe array\n position_vectors = get_probeLocs_calib_setup(dir)\n for probe_num in range(num_probes):\n position_vector = position_vectors[probe_num]\n ratio_Bx, ratio_By, ratio_Bz, norm_factor= getRatio(probe_num, position_vector, shot_range, dir)\n calibration_lookup[probe_num][dir] = ratio_Bx\n calibration_matrix[probe_num][(dir*3):(dir*3+1)] = [ratio_Bx, ratio_By, ratio_Bz]\n print(\"Progress: %d / %d\" %(counter+1,num_probes*3 ))\n counter +=1\n\n # Then populate with y-direction:\n shot_range = [21, 25] #y-direction\n dir = 1 #the direction of the orentation of the probe array\n position_vectors = get_probeLocs_calib_setup(dir)\n for probe_num in range(num_probes):\n position_vector = position_vectors[probe_num]\n ratio_Bx, ratio_By, ratio_Bz, norm_factor= getRatio(probe_num, position_vector, shot_range, dir)\n calibration_lookup[probe_num][dir] = ratio_By\n calibration_matrix[probe_num][(dir*3):(dir*3+1)] = [ratio_Bx, ratio_By, ratio_Bz]\n print(\"Progress: %d / %d\" %(counter+1,num_probes*3 ))\n counter +=1\n\n # Then populate with z-direction:\n shot_range = [11, 15] #z-direction\n dir = 2\n position_vectors = get_probeLocs_calib_setup(dir)\n for probe_num in range(num_probes):\n position_vector = position_vectors[probe_num]\n ratio_Bx, ratio_By, ratio_Bz, norm_factor= getRatio(probe_num, position_vector, shot_range, dir)\n calibration_lookup[probe_num][dir] = ratio_Bz\n calibration_matrix[probe_num][(dir*3):(dir*3+1)] = [ratio_Bx, ratio_By, ratio_Bz]\n print(\"Progress: %d / %d\" %(counter+1,num_probes*3 ))\n counter +=1\n\n pth = os.getcwd()\n date = '050119'\n print(\"Finished! File saved as calib-%s-4x4_lookup.txt and _3x3 in current working directory\" %(date))\n savetxt(os.path.join(pth, 'calib-%s-4x4_lookup_no_switch.txt' % (date)) , calibration_lookup)\n savetxt(os.path.join(pth, 'calib-%s-4x4_3x3_no_switch.txt' % (date)) , calibration_matrix)", "def find_center(file):\n\n data = pyfits.getdata(file)\n chipx = data.field('X')\n chipy = data.field('Y')\n#\n#--- because the array is too large to handle in one swipe, divide it into 8x8 segments\n#\n xmin = min(chipx)\n ymin = min(chipy)\n xmax = max(chipx)\n ymax = max(chipy)\n xstep = int((xmax-xmin) / 8 )\n ystep = int((ymax-ymin) / 8 )\n#\n#--- find the interval which contains largest samples \n#\n cposx = 0\n cposy = 0\n cmax = 0\n for i in range (0, 8):\n xstart = xstep * i + xmin\n xstop = xstart + xstep\n for j in range (0, 8):\n ystart = ystep * j + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n if len(chipx_p) > cmax:\n cmax = len(chipx_p)\n cposx = i\n cposy = j\n#\n#--- extract the area of the highest count\n#\n xpos_list = []\n ypos_list = []\n maxv_list = []\n xstart = xstep * cposx + xmin\n xstop = xstart + xstep\n\n ystart = ystep * cposy + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n#\n#--- count up the events. 
bin to 2x2 so that we get enough count in each bin\n#\n xmin = min(chipx_p)\n xmax = max(chipx_p)\n xdim = int(0.5 * (xmax - xmin)) + 1\n ymin = min(chipy_p)\n ymax = max(chipy_p)\n ydim = int(0.5 * (ymax - ymin)) + 1\n\n cbin = [[0 for y in range(0, ydim)] for x in range(0, xdim)]\n for j in range(0, len(chipy_p)):\n xpos = int(0.5 * (chipx_p[j]-xmin))\n ypos = int(0.5 * (chipy_p[j]-ymin))\n cbin[xpos][ypos] += 1\n#\n#--- now find max position\n#\n vmax = 0\n xx = 0\n yy = 0\n for m in range(0, xdim):\n for n in range(0, ydim):\n if cbin[m][n] > vmax:\n vmax = cbin[m][n]\n xx = m\n yy = n\n#\n#--- take the mddle of the bin as the brightest spot\n#\n xv = int(xx * 2.0 + 1.0 + xmin)\n yv = int(yy * 2.0 + 1.0 + ymin)\n\n return [xv, yv]", "def GetPlayerXY(level):\n for row, line in enumerate(level):\n for column, square in enumerate(line):\n if square in \"SQ\":\n return (column, row, square)", "def determine_measure_position(self):\n green_probs = []\n net_size = len(self.net)\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in range(0, net_size):\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.net[i].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.net[i].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.net[i].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.net[i].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.net[i].value * self.ct[4][0]\n green_probs.append(accum)\n #Returns the position in which the probability of\n #obtaining green when measuring is the highest.\n return self.net[np.argmax(green_probs)].id", "def get_indices(waves):\n prob_ = np.abs(waves)**2\n # batch\n prob = [np.sum(prob_[i:i+4,:], axis=0) for i in range(0, len(waves[:,0]), 4)]\n prob = np.asarray(prob)\n prob_tot = np.sum(prob, axis=0)\n \n # cutoff\n length = np.size(prob[:,0])\n len10 = int(length/10)\n flags = np.zeros((prob.shape[1]), dtype=int)\n # hinges\n # 50% within 10% of corners\n\n # surface\n # 50% within 10% of surfaces\n # not already labelled hinges\n prob_left = np.sum(prob[0:len10,:], axis=0)\n frac_left = prob_left/prob_tot\n\n prob_right = np.sum(prob[length-len10:length,:], axis=0)\n frac_right = np.divide(prob_right, prob_tot)\n\n for i in range(len(flags)):\n if frac_left[i]>0.5 or frac_right[i]>0.5:\n flags[i] = 1\n \n indices = [i for i, x in enumerate(flags) if x == 1]\n indices0 = [i for i, x in enumerate(flags) if x == 0]\n \n return indices, indices0", "def distances(self):", "def find_order(identlist, npixel):\n aper_lst, wlc_lst = [], []\n for aperture, list1 in sorted(identlist.items()):\n if list1.size<3:\n continue\n less_half = (list1['pixel'] < npixel/2).sum()>0\n more_half = (list1['pixel'] > npixel/2).sum()>0\n #less_half, more_half = False, False\n #for pix, wav in zip(list1['pixel'], list1['wavelength']):\n # if pix < npixel/2.:\n # less_half = True\n # elif pix >= npixel/2.:\n # more_half = True\n if less_half and more_half:\n if list1['pixel'].size>2:\n deg = 2\n else:\n deg = 1\n c = np.polyfit(list1['pixel'], list1['wavelength'], deg=deg)\n wlc = np.polyval(c, npixel/2.)\n aper_lst.append(aperture)\n wlc_lst.append(wlc)\n 
aper_lst = np.array(aper_lst)\n wlc_lst = np.array(wlc_lst)\n if wlc_lst[0] > wlc_lst[-1]:\n k = 1\n else:\n k = -1\n\n offset_lst = np.arange(-500, 500)\n eva_lst = []\n for offset in offset_lst:\n const = (k*aper_lst + offset)*wlc_lst\n diffconst = np.diff(const)\n eva = (diffconst**2).sum()\n eva_lst.append(eva)\n eva_lst = np.array(eva_lst)\n offset = offset_lst[eva_lst.argmin()]\n\n return k, offset", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def getLEDPos(self, nx, ny, centre, wLen):\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.Params['distance']) # Angle to x axis\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.Params['distance']) # Angle to y axis\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.subParams['subSize'][0] / 2)]\n return pos", "def GetScanPositions(self,d):\n\n # make it so that you have a gap, only use data where the telescope is moving\n\n elcurrent = np.abs(d['level1/hk/antenna0/driveNode/elDacOutput'][:])\n elutc = d['level1/hk/antenna0/driveNode/utc'][:]\n mjd = d['level1/spectrometer/MJD'][:]\n select = np.where((elcurrent > np.max(elcurrent)*0.8))[0] # these are when the telescope is changing position\n #if len(select) == 0:\n\n\n dselect = select[1:]-select[:-1]\n ends = np.concatenate((np.where((dselect > 10))[0], np.array([len(dselect)-1])))\n\n indices = []\n for e in select[ends]:\n end_idx = np.argmin((elutc[e]-mjd)**2)\n indices += [end_idx]\n\n mean_scan_time = np.mean(elutc[ends[1:]] - elutc[ends[:-1]])*24*3600\n step = mean_scan_time*0.1\n mask = np.zeros(len(mjd)).astype(bool)\n samples = np.arange(len(mjd))\n\n buffer_size = 50\n buffer_multiply = 5\n for iend, end in enumerate(indices):\n if (iend ==0):\n mask[samples < (end+step*buffer_size)] = True\n elif (iend == (len(indices)-1)):\n mask[samples > (end-step*buffer_size)] = True\n else:\n mask[(samples > (end - buffer_multiply*buffer_size)) & (samples < (end + buffer_multiply*buffer_size))] = True\n\n\n # Map indices\n oldindex = np.arange(len(mjd))[~mask] # old positions\n newindex = np.arange(len(oldindex)) # new positions\n mapOld2New = {o:n for (o,n) in zip(oldindex,newindex)}\n mapNew2Old = {n:o for (o,n) in zip(oldindex,newindex)}\n\n diff_mask = mask[1:].astype(int) - mask[:-1].astype(int)\n mask_select = np.where((diff_mask > 0))[0] # old positions\n end_indices = np.unique([0] + [mapOld2New[i] for i in mask_select] )\n\n if end_indices[-1] > newindex.size:\n end_indices[-1] = newindex.size-1\n \n\n starts, ends = end_indices[:-1],end_indices[1:] \n\n start_final = []\n end_final = []\n for start,end in zip(starts,ends):\n i0,i1 = mapNew2Old[start], mapNew2Old[end]\n if (end-start) < self.minimum_scanlength:\n mask[i0:i1] = True\n else:\n start_final += [start]\n end_final += [end]\n\n return (mask == False), np.array(start_final).astype(int), np.array(end_final).astype(int)" ]
[ "0.68602735", "0.635877", "0.622993", "0.62070554", "0.57699996", "0.5661007", "0.5541144", "0.54987085", "0.54753906", "0.54418695", "0.5396649", "0.5383856", "0.53815603", "0.5378696", "0.53735656", "0.534419", "0.53433716", "0.5325735", "0.53203666", "0.52891964", "0.5276348", "0.52636147", "0.5254501", "0.5250076", "0.523805", "0.52378315", "0.5211884", "0.52018666", "0.5198308", "0.51968414" ]
0.68148327
1
Provides positions in meters along probe stalks for 4x4 array of probes built by M. Kaur.
def get_probeLocs_calib_setup_cm(dir, num_probes = 16): position_vectors = [[0] * 3 for i in range(num_probes)] #every x postion # Convert to meters x_pos = [-4.25*2.54, -4.25*2.54, 4.24*2.54, 4.24*2.54] y_pos = [-4.25*2.54, 4.24*2.54, 4.24*2.54, -4.25*2.54] z_pos = [-2.25*2.54, -0.75*2.54, 0.75*2.54, 2.25*2.54] x = 0 for i in range(num_probes): if(i%4 ==0 and i>0): x+=1 position_vectors[i][0] =x_pos[x] position_vectors[i][1] = y_pos[x] position_vectors[i][2] =z_pos[i%4] # print(position_vectors[i][0]) """ Now take into account the direction r shots : x,y,z - > r,t,z t shots : x,y,z - > r,t,z z shots : x,y,z - > r,t,z """ if dir ==2 :#r # don't need to switch anything return position_vectors if dir == 0:#t # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0] return position_vectors if dir ==1:#z # also like -90 degree rotation, switch x and z position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0] return position_vectors return position_vectors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_probeLocs_calib_setup(dir, num_probes = 16):\n position_vectors = [[0] * 3 for i in range(num_probes)]\n\n #every x postion\n\n # Convert to meters\n x_pos = [-4.25*1e-3*25.4, -4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4]\n y_pos = [-4.25*1e-3*25.4, 4.24*1e-3*25.4, 4.24*1e-3*25.4, -4.25*1e-3*25.4]\n z_pos = [-2.25*1e-3*25.4, -0.75*1e-3*25.4, 0.75*1e-3*25.4, 2.25*1e-3*25.4]\n x = 0\n for i in range(num_probes):\n if(i%4 ==0 and i>0):\n x+=1\n position_vectors[i][0] =x_pos[x]\n position_vectors[i][1] = y_pos[x]\n position_vectors[i][2] =z_pos[i%4]\n # print(position_vectors[i][0])\n\n \"\"\" Now take into account the direction\n r shots : x,y,z - > r,t,z\n t shots : x,y,z - > r,t,z\n z shots : x,y,z - > r,t,z\n \"\"\"\n if dir ==2 :#r\n # don't need to switch anything\n return position_vectors\n if dir == 0:#t\n # looks like -90 degree rotation about y-axis of probe coil orientation, so switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n if dir ==1:#z\n # also like -90 degree rotation, switch x and z\n position_vectors[:][0], position_vectors[:][2] = position_vectors[:][2], position_vectors[:][0]\n return position_vectors\n\n return position_vectors", "def current_probe_position(self):\n\t\t# Obtain encoder feedback and calculate probe position\n\t\tx_position = self.x_mc.current_position() / self.steps_per_cm\n\t\ty_position = self.y_mc.current_position() / self.steps_per_degree *5 #Seems that 1 encoder unit = 5 motor step unit\n\n\t\treturn x_position, y_position", "def get_probe_location(self):\n\n probe_x, probe_y = self.position\n\n if self.previous_direction == (1, 0):\n probe_x += CAR_LENGTH - 1\n elif self.previous_direction == (0, 1):\n probe_y += CAR_LENGTH - 1\n\n return probe_x, probe_y", "def get_hand_points(index, annotations, offset):\n # Get the index, and entry in array\n this_index = annotations[index]['uv_vis']\n \n\n points = [None] * 21\n\n # Grab all the points\n points[FINGER_MAP[\"Wrist\"]] = this_index[offset + 0]\n\n points[FINGER_MAP[\"Thumb1\"]] = this_index[offset + 1]\n points[FINGER_MAP[\"Thumb2\"]] = this_index[offset + 2]\n points[FINGER_MAP[\"Thumb3\"]] = this_index[offset + 3]\n points[FINGER_MAP[\"Thumb4\"]] = this_index[offset + 4]\n\n points[FINGER_MAP[\"Index1\"]] = this_index[offset + 5]\n points[FINGER_MAP[\"Index2\"]] = this_index[offset + 6]\n points[FINGER_MAP[\"Index3\"]] = this_index[offset + 7]\n points[FINGER_MAP[\"Index4\"]] = this_index[offset + 8]\n\n points[FINGER_MAP[\"Middle1\"]] = this_index[offset + 9]\n points[FINGER_MAP[\"Middle2\"]] = this_index[offset + 10]\n points[FINGER_MAP[\"Middle3\"]] = this_index[offset + 11]\n points[FINGER_MAP[\"Middle4\"]] = this_index[offset + 12]\n\n points[FINGER_MAP[\"Ring1\"]] = this_index[offset + 13]\n points[FINGER_MAP[\"Ring2\"]] = this_index[offset + 14]\n points[FINGER_MAP[\"Ring3\"]] = this_index[offset + 15]\n points[FINGER_MAP[\"Ring4\"]] = this_index[offset + 16]\n\n points[FINGER_MAP[\"Pinky1\"]] = this_index[offset + 17]\n points[FINGER_MAP[\"Pinky2\"]] = this_index[offset + 18]\n points[FINGER_MAP[\"Pinky3\"]] = this_index[offset + 19]\n points[FINGER_MAP[\"Pinky4\"]] = this_index[offset + 20]\n\n return points", "def positions(self, tileID, numSamples):", "def picket_positions(self) -> Sequence[float]:\n picket_pos = []\n for line, sign in zip(self.marker_lines, (-1, 1)):\n if self._orientation == Orientation.UP_DOWN:\n picket = self._fit(line.center.y)\n else:\n picket = 
self._fit(line.center.x)\n if (\n self._separate_leaves\n ): # offset the picket position by the DLG and nominal gap\n mag_factor = self._image.sid / 1000\n picket += (\n sign * self._nominal_gap_mm * mag_factor / 2 * self._image.dpmm\n )\n picket_pos.append(picket / self._image.dpmm)\n return picket_pos", "def get_positions(specs):\r\n xy = []\r\n for i, spec in enumerate(specs):\r\n slit = spec.split(\"n3311\", 1)[1].replace(\".fits\", \"\")\r\n # slit = spec.split(\".\")[0].split(\"_\", 1)[1][5:]\r\n index = canvas.slits.ids.index(slit)\r\n xy.append([canvas.slits.x[index], canvas.slits.y[index]])\r\n return np.array(xy)", "def _getoffsets(isMountoffset):\n mplist = list()\n for i in range(23) :\n a = i+1\n mp = device.Carma(a).getName() + \".AntennaCommon.Drive.Point.\"\n if (isMountoffset): mp += \"mountOffset\"\n else: mp += \"offset\"\n mpaz = mp + \"Az\"\n mpel = mp + \"El\"\n mps = [mpaz, mpel]\n mplist.append(mps)\n r = queryMpValues(mplist, nothrow=True)\n if False:\n for i in range(23):\n if r[i][0] == None: astr = \" None\"\n else : astr = \"%5.2f\" %r[i][0]\n if r[i][1] == None: estr = \" None\"\n else : estr = \"%5.2f\" %r[i][1]\n print \"%2d: %s %s\" %(i+1, astr, estr)\n return r", "def beam_positions(closepack=False):\n \n x_pos, y_pos = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,2):\n y += 0.2\n x_pos.append(x+(0.05 if closepack else 0))\n y_pos.append(y)\n y += 0.2\n x_pos.append(x)\n y_pos.append(y)\n\n return x_pos, y_pos", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def to_oriented_points(self):\n return g.points_from_probe(self)", "def get_pos(self, frame):\n frame = self.perspective_shift(frame)\n \n puck_mask = self.color_mask(frame, self.color_green, thresh=15)\n striker_mask = self.color_mask(frame, self.color_orange, thresh=25, blur=5)\n \n puck_loc, _ = self.find_centroids(puck_mask)\n striker_locs, _ = self.find_centroids(striker_mask, 2)\n \n p_pos = self.abs_to_meter(puck_loc[0])\n # cases: (pos,pos), (pos,None), (None,None)\n if striker_locs[0] is not None:\n pos_1 = self.abs_to_meter(striker_locs[0])\n pos_2 = self.abs_to_meter(striker_locs[1])\n s1_pos = pos_1 if pos_1[1]<0 else pos_2\n s2_pos = pos_2 if pos_1[1]<0 else pos_1\n else:\n s1_pos, s2_pos = None, None \n \n return [p_pos, s1_pos, s2_pos]", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def _calculate_distances(boxes, homography):\n pos_markers = []\n 
pix_markers = []\n for box in boxes:\n (pt1_w, pt1_h), (pt2_w, pt2_h) = box\n\n pix_marker = ((pt1_w + pt2_w) // 2, max(pt1_h, pt2_h))\n pix_markers.append(pix_marker)\n\n pos_marker = np.array(pix_marker).reshape(\n 1, 1, 2).astype(\"float32\")\n pos_marker = cv2.perspectiveTransform(\n pos_marker, homography).squeeze()\n pos_markers.append(pos_marker)\n\n if len(pos_markers) <= 1:\n return np.array([]), np.array([])\n\n distances = pdist(np.array(pos_markers))\n return pix_markers, distances", "def positions(self):\n method = 'get_xdata' if self.direction == 'horizontal' else 'get_ydata'\n return [getattr(line, method)()[0] for line in self.artists]", "def galaxy_positions():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n '''\n hdulist2 = pf.open('../kids_data/KiDS_DR3.1_G12_ugri_shear.fits')\n hdulist3 = pf.open('../kids_data/KiDS_DR3.1_G15_ugri_shear.fits')\n hdulist4 = pf.open('../kids_data/KiDS_DR3.1_G23_ugri_shear.fits')\n hdulist5 = pf.open('../kids_data/KiDS_DR3.1_GS_ugri_shear.fits')\n '''\n ra = hdulist1[1].data['RAJ2000'][:sample]\n dec = hdulist1[1].data['DECJ2000'][:sample]\n global maxra\n maxra = max(ra)\n global minra\n minra = min(ra)\n global maxdec\n maxdec = max(dec)\n global mindec\n mindec = min(dec)\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([ra, dec])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n return ctree", "def BeamPosition():\n \n XPOS, YPOS = [], []\n\n x=0\n for j in range(0,6,1):\n x += 0.1\n y=0\n for k in range(0,6,1):\n y += 0.2\n XPOS.append(x)\n YPOS.append(y)\n\n return XPOS, YPOS", "def get_offsets():\n \n offsets = dict()\n offsets['leiptr'] = [0.0, -0.005, 'left']\n offsets['gjoll'] = [0.15, -0.002, 'left']\n offsets['gd1'] = [0.15, -0.002, 'left']\n offsets['phlegethon'] = [0.0, 0.005, 'center']\n offsets['ylgr'] = [0.15, -0.002, 'left']\n offsets['wambelong'] = [0.0, -0.005, 'left']\n offsets['fimbulthul'] = [0.15, -0.002, 'left']\n offsets['ophiuchus'] = [0.0, -0.005, 'center']\n offsets['elqui'] = [0.15, -0.002, 'left']\n offsets['svol'] = [0.0, -0.004, 'right']\n offsets['ravi'] = [-0.1, 0.002, 'right']\n offsets['sylgr'] = [0.15, -0.002, 'left']\n offsets['jhelum'] = [0.15, -0.002, 'left']\n offsets['indus'] = [0.15, -0.002, 'left']\n offsets['phoenix'] = [0.0, -0.004, 'right']\n offsets['slidr'] = [0.15, 0.002, 'left']\n offsets['atlas'] = [0.1, -0.003, 'left']\n offsets['aliqa_uma'] = [0.15, -0.003, 'left']\n offsets['turbio'] = [-0.15, 0.00, 'right']\n offsets['turranburra'] = [-0.0, -0.003, 'right']\n offsets['fjorm'] = [0.0, -0.004, 'right']\n offsets['triangulum'] = [0.2, -0.005, 'center']\n offsets['willka_yaku'] = [-0.2, 0.005, 'center']\n \n return offsets", "def find_rpt_coords(self) -> (int, int):\n start_size = self.size\n end_size = self.size + len(self.allele)\n coord = self.coord\n fasta_alt = self.fasta_alt\n while self.allele == fasta_alt:\n coord += len(self.allele)\n start_size += len(self.allele)\n end_size += len(self.allele)\n fasta_alt = self.seq[start_size:end_size]\n new_start = coord - len(self.allele)\n new_end = new_start + len(self.allele) - 1\n return new_start, new_end", "def separate(self) -> np.ndarray:\n loc = self.state[:, :, Boids.Attr.LOC]\n mut_influence = self._perceive(self.proxim_bounds[0])\n return loc - np.dot(loc, mut_influence)", "def generateMatrix():\n num_probes = 16\n\n # print(position_vectors)\n # Create the (48x4) calibration matrix:\n 
calibration_lookup= [[0] * 3 for i in range(num_probes)]\n calibration_matrix = [[0] * 9 for i in range(num_probes)]\n counter = 0\n\n # first populate with x-direction:\n shot_range = [17, 20] #x-direction\n dir = 0 #the direction of the orentation of the probe array\n position_vectors = get_probeLocs_calib_setup(dir)\n for probe_num in range(num_probes):\n position_vector = position_vectors[probe_num]\n ratio_Bx, ratio_By, ratio_Bz, norm_factor= getRatio(probe_num, position_vector, shot_range, dir)\n calibration_lookup[probe_num][dir] = ratio_Bx\n calibration_matrix[probe_num][(dir*3):(dir*3+1)] = [ratio_Bx, ratio_By, ratio_Bz]\n print(\"Progress: %d / %d\" %(counter+1,num_probes*3 ))\n counter +=1\n\n # Then populate with y-direction:\n shot_range = [21, 25] #y-direction\n dir = 1 #the direction of the orentation of the probe array\n position_vectors = get_probeLocs_calib_setup(dir)\n for probe_num in range(num_probes):\n position_vector = position_vectors[probe_num]\n ratio_Bx, ratio_By, ratio_Bz, norm_factor= getRatio(probe_num, position_vector, shot_range, dir)\n calibration_lookup[probe_num][dir] = ratio_By\n calibration_matrix[probe_num][(dir*3):(dir*3+1)] = [ratio_Bx, ratio_By, ratio_Bz]\n print(\"Progress: %d / %d\" %(counter+1,num_probes*3 ))\n counter +=1\n\n # Then populate with z-direction:\n shot_range = [11, 15] #z-direction\n dir = 2\n position_vectors = get_probeLocs_calib_setup(dir)\n for probe_num in range(num_probes):\n position_vector = position_vectors[probe_num]\n ratio_Bx, ratio_By, ratio_Bz, norm_factor= getRatio(probe_num, position_vector, shot_range, dir)\n calibration_lookup[probe_num][dir] = ratio_Bz\n calibration_matrix[probe_num][(dir*3):(dir*3+1)] = [ratio_Bx, ratio_By, ratio_Bz]\n print(\"Progress: %d / %d\" %(counter+1,num_probes*3 ))\n counter +=1\n\n pth = os.getcwd()\n date = '050119'\n print(\"Finished! File saved as calib-%s-4x4_lookup.txt and _3x3 in current working directory\" %(date))\n savetxt(os.path.join(pth, 'calib-%s-4x4_lookup_no_switch.txt' % (date)) , calibration_lookup)\n savetxt(os.path.join(pth, 'calib-%s-4x4_3x3_no_switch.txt' % (date)) , calibration_matrix)", "def find_center(file):\n\n data = pyfits.getdata(file)\n chipx = data.field('X')\n chipy = data.field('Y')\n#\n#--- because the array is too large to handle in one swipe, divide it into 8x8 segments\n#\n xmin = min(chipx)\n ymin = min(chipy)\n xmax = max(chipx)\n ymax = max(chipy)\n xstep = int((xmax-xmin) / 8 )\n ystep = int((ymax-ymin) / 8 )\n#\n#--- find the interval which contains largest samples \n#\n cposx = 0\n cposy = 0\n cmax = 0\n for i in range (0, 8):\n xstart = xstep * i + xmin\n xstop = xstart + xstep\n for j in range (0, 8):\n ystart = ystep * j + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n if len(chipx_p) > cmax:\n cmax = len(chipx_p)\n cposx = i\n cposy = j\n#\n#--- extract the area of the highest count\n#\n xpos_list = []\n ypos_list = []\n maxv_list = []\n xstart = xstep * cposx + xmin\n xstop = xstart + xstep\n\n ystart = ystep * cposy + ymin\n ystop = ystart + ystep\n\n mask = (data.field('X') >= xstart) & (data.field('X') < xstop) & (data.field('Y') >= ystart) & (data.field('Y') < ystop)\n temp = data[mask]\n chipx_p = temp.field('X')\n chipy_p = temp.field('Y')\n#\n#--- count up the events. 
bin to 2x2 so that we get enough count in each bin\n#\n xmin = min(chipx_p)\n xmax = max(chipx_p)\n xdim = int(0.5 * (xmax - xmin)) + 1\n ymin = min(chipy_p)\n ymax = max(chipy_p)\n ydim = int(0.5 * (ymax - ymin)) + 1\n\n cbin = [[0 for y in range(0, ydim)] for x in range(0, xdim)]\n for j in range(0, len(chipy_p)):\n xpos = int(0.5 * (chipx_p[j]-xmin))\n ypos = int(0.5 * (chipy_p[j]-ymin))\n cbin[xpos][ypos] += 1\n#\n#--- now find max position\n#\n vmax = 0\n xx = 0\n yy = 0\n for m in range(0, xdim):\n for n in range(0, ydim):\n if cbin[m][n] > vmax:\n vmax = cbin[m][n]\n xx = m\n yy = n\n#\n#--- take the mddle of the bin as the brightest spot\n#\n xv = int(xx * 2.0 + 1.0 + xmin)\n yv = int(yy * 2.0 + 1.0 + ymin)\n\n return [xv, yv]", "def GetPlayerXY(level):\n for row, line in enumerate(level):\n for column, square in enumerate(line):\n if square in \"SQ\":\n return (column, row, square)", "def determine_measure_position(self):\n green_probs = []\n net_size = len(self.net)\n #Belief propagation:\n #Analyzes each position's probability of obtaining\n #green when measuring at a time t+1.\n for i in range(0, net_size):\n accum = 0\n for j in range(0, net_size):\n distance = self.__get_distance(i, j)\n if distance == 0: #Probability of measure green at distance 0 from 'i'.\n accum += self.net[i].value * self.ct[0][0]\n elif distance == 1: #Probability of measure green at distance 1 from 'i'.\n accum += self.net[i].value * self.ct[1][0]\n elif distance == 2: #Probability of measure green at distance 2 from 'i'.\n accum += self.net[i].value * self.ct[2][0]\n elif distance == 3: #Probability of measure green at distance 3 from 'i'.\n accum += self.net[i].value * self.ct[3][0]\n else: #Probability of measure green at a distance >= 4 from 'i'.\n accum += self.net[i].value * self.ct[4][0]\n green_probs.append(accum)\n #Returns the position in which the probability of\n #obtaining green when measuring is the highest.\n return self.net[np.argmax(green_probs)].id", "def distances(self):", "def get_indices(waves):\n prob_ = np.abs(waves)**2\n # batch\n prob = [np.sum(prob_[i:i+4,:], axis=0) for i in range(0, len(waves[:,0]), 4)]\n prob = np.asarray(prob)\n prob_tot = np.sum(prob, axis=0)\n \n # cutoff\n length = np.size(prob[:,0])\n len10 = int(length/10)\n flags = np.zeros((prob.shape[1]), dtype=int)\n # hinges\n # 50% within 10% of corners\n\n # surface\n # 50% within 10% of surfaces\n # not already labelled hinges\n prob_left = np.sum(prob[0:len10,:], axis=0)\n frac_left = prob_left/prob_tot\n\n prob_right = np.sum(prob[length-len10:length,:], axis=0)\n frac_right = np.divide(prob_right, prob_tot)\n\n for i in range(len(flags)):\n if frac_left[i]>0.5 or frac_right[i]>0.5:\n flags[i] = 1\n \n indices = [i for i, x in enumerate(flags) if x == 1]\n indices0 = [i for i, x in enumerate(flags) if x == 0]\n \n return indices, indices0", "def find_order(identlist, npixel):\n aper_lst, wlc_lst = [], []\n for aperture, list1 in sorted(identlist.items()):\n if list1.size<3:\n continue\n less_half = (list1['pixel'] < npixel/2).sum()>0\n more_half = (list1['pixel'] > npixel/2).sum()>0\n #less_half, more_half = False, False\n #for pix, wav in zip(list1['pixel'], list1['wavelength']):\n # if pix < npixel/2.:\n # less_half = True\n # elif pix >= npixel/2.:\n # more_half = True\n if less_half and more_half:\n if list1['pixel'].size>2:\n deg = 2\n else:\n deg = 1\n c = np.polyfit(list1['pixel'], list1['wavelength'], deg=deg)\n wlc = np.polyval(c, npixel/2.)\n aper_lst.append(aperture)\n wlc_lst.append(wlc)\n 
aper_lst = np.array(aper_lst)\n wlc_lst = np.array(wlc_lst)\n if wlc_lst[0] > wlc_lst[-1]:\n k = 1\n else:\n k = -1\n\n offset_lst = np.arange(-500, 500)\n eva_lst = []\n for offset in offset_lst:\n const = (k*aper_lst + offset)*wlc_lst\n diffconst = np.diff(const)\n eva = (diffconst**2).sum()\n eva_lst.append(eva)\n eva_lst = np.array(eva_lst)\n offset = offset_lst[eva_lst.argmin()]\n\n return k, offset", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def getLEDPos(self, nx, ny, centre, wLen):\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.Params['distance']) # Angle to x axis\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.Params['distance']) # Angle to y axis\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.subParams['subSize'][0] / 2)]\n return pos", "def GetScanPositions(self,d):\n\n # make it so that you have a gap, only use data where the telescope is moving\n\n elcurrent = np.abs(d['level1/hk/antenna0/driveNode/elDacOutput'][:])\n elutc = d['level1/hk/antenna0/driveNode/utc'][:]\n mjd = d['level1/spectrometer/MJD'][:]\n select = np.where((elcurrent > np.max(elcurrent)*0.8))[0] # these are when the telescope is changing position\n #if len(select) == 0:\n\n\n dselect = select[1:]-select[:-1]\n ends = np.concatenate((np.where((dselect > 10))[0], np.array([len(dselect)-1])))\n\n indices = []\n for e in select[ends]:\n end_idx = np.argmin((elutc[e]-mjd)**2)\n indices += [end_idx]\n\n mean_scan_time = np.mean(elutc[ends[1:]] - elutc[ends[:-1]])*24*3600\n step = mean_scan_time*0.1\n mask = np.zeros(len(mjd)).astype(bool)\n samples = np.arange(len(mjd))\n\n buffer_size = 50\n buffer_multiply = 5\n for iend, end in enumerate(indices):\n if (iend ==0):\n mask[samples < (end+step*buffer_size)] = True\n elif (iend == (len(indices)-1)):\n mask[samples > (end-step*buffer_size)] = True\n else:\n mask[(samples > (end - buffer_multiply*buffer_size)) & (samples < (end + buffer_multiply*buffer_size))] = True\n\n\n # Map indices\n oldindex = np.arange(len(mjd))[~mask] # old positions\n newindex = np.arange(len(oldindex)) # new positions\n mapOld2New = {o:n for (o,n) in zip(oldindex,newindex)}\n mapNew2Old = {n:o for (o,n) in zip(oldindex,newindex)}\n\n diff_mask = mask[1:].astype(int) - mask[:-1].astype(int)\n mask_select = np.where((diff_mask > 0))[0] # old positions\n end_indices = np.unique([0] + [mapOld2New[i] for i in mask_select] )\n\n if end_indices[-1] > newindex.size:\n end_indices[-1] = newindex.size-1\n \n\n starts, ends = end_indices[:-1],end_indices[1:] \n\n start_final = []\n end_final = []\n for start,end in zip(starts,ends):\n i0,i1 = mapNew2Old[start], mapNew2Old[end]\n if (end-start) < self.minimum_scanlength:\n mask[i0:i1] = True\n else:\n start_final += [start]\n end_final += [end]\n\n return (mask == False), np.array(start_final).astype(int), np.array(end_final).astype(int)" ]
[ "0.6814141", "0.6357938", "0.6228107", "0.62063867", "0.576915", "0.56598634", "0.5540284", "0.5499135", "0.5474505", "0.54411244", "0.5393473", "0.5382529", "0.5381584", "0.53801537", "0.5371877", "0.53437704", "0.53433514", "0.5324205", "0.5318798", "0.5288676", "0.5275662", "0.526339", "0.52537304", "0.5247998", "0.5237342", "0.52360886", "0.5210287", "0.52005047", "0.51978016", "0.5196847" ]
0.68606234
0
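The calibration snippet visible in the negatives above populates a per-probe, per-direction lookup table from direction-wise ratio measurements (x, y, then z shot ranges). A minimal sketch of that loop pattern, assuming a hypothetical measure_ratio() in place of the real getRatio()/Helmholtz comparison used in the dataset code:

import numpy as np

def measure_ratio(probe, direction):
    # Hypothetical stand-in: a real version would integrate the probe signal,
    # find its peak, and divide the ideal coil field by that measured peak.
    return 1.0 + 0.01 * probe + 0.1 * direction

def build_calibration_lookup(num_probes=16, num_dirs=3):
    # lookup[probe, direction] holds the scalar ratio for that axis (0=x, 1=y, 2=z).
    lookup = np.zeros((num_probes, num_dirs))
    for direction in range(num_dirs):
        for probe in range(num_probes):
            lookup[probe, direction] = measure_ratio(probe, direction)
    return lookup

print(build_calibration_lookup().shape)  # (16, 3)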
This finds the ratio between the idealized Helmholtz field and the actual recorded signal. This also corrects for inverted signals; however, due to what I'm assuming is noise, finding the inverted ones is a bit tricky. Feel free to uncomment the plotting lines and see if it needs adjustments, though I did get it working reliably for the 050119 calibration (SI units: meters, Tesla). KG 06/24/19
def ratio_4_doc(shot, dir, num_probes = 16): # data = [[0] *3 for i in range(num_probes)] # magdata = hdr.getMagData(shot) probe_locs = get_probeLocs_calib_setup(shot) data=hdr.getquikData(shot) time,eastcurrent,westcurrent = loadcurrent(shot)#using eastcurrent ratios = [[0]*3 for i in range(num_probes)] for probe in range(num_probes): ratio =1 inverted = False # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True) B=sp.signal.detrend(cumtrapz(data.unCalibData[dir,probe,:], data.time)) plot_time = data.time[:-1] if(np.max(B[2000:6000]) < abs(np.average(B[2000:6000]))): # print("\ninverted!") inverted = True # B = B* -1 # ratio = -1 r = probe_locs[probe] max_current = polyPeak_noPlot(time,eastcurrent) # if(np.max(eastcurrent) < -1*(np.min(eastcurrent))): # max_current = -1*np.min(eastcurrent) helmB = helmholtz2(r,max_current) # THis is intentional! I am only using shots where the cmponent is lined # up with the z-direction of the helmholz field # helmB[2] = helmB[2]*-1 max_theoretical = np.max(helmB[2]) max_measured = polyPeak_noPlot(plot_time, B) ratio = ratio * max_theoretical/max_measured if ratio > 30000 or ratio < -30000: ratio = 0 ratios[probe][dir] = ratio # print("\tRatio is: %f" %(ratio)) # if(inverted and ratio <0): # print("Inverted and ratio reflects that") # elif(not inverted and ratio <0): if probe ==1: print("\n Ratio: %5f \n\t max_measured: %3f, \n\t max_theoretical: %5f"%(ratio,max_measured,max_theoretical ) ) # Compute the median of the non-zero elements # m = np.median(foo[foo > 0]) # Assign the median to the zero elements # foo[foo == 0] = m return ratios
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_measurement_error(self): \n \n # Calculate Hartmann Spot\n # FIXME what are factor_1, factor_2 ???\n factor_1, factor_2 = 206265*5.89e-7, 206265*6.5e-7\n term1, term2 = factor_1/self.actuator_spacing, factor_2/self.r0\n hartmann_spot = np.max([term1, term2])\n \n # Calculate SNR \n n_pix=4 # FIXME spreadsheet says not to change this idk why?\n sample_time = 1/(10*self.controller_frequency)\n brightness = (8.9e5)*10**((0-self.guide_star_mag)/2.5)\n n_photons = brightness*sample_time*((100*self.actuator_spacing)**2)\n snr = n_photons/np.sqrt(n_photons + n_pix*(self.sigma_readnoise)**2)\n\n # Calculate noise propagator \n degrees_of_freedom = np.round((np.pi/4) * (self.telescope_diameter/self.actuator_spacing)**2)\n factor_1, factor_2 = 0.0536, 0.0795 # FIXME WHAT THE HECK IS THIS\n if self.aperture == 'circular':\n factor_1, factor_2 = 0.0068, 0.0796\n noise_propagator = np.sqrt(2*(factor_1 + factor_2*np.log(degrees_of_freedom)))\n\n # Calculate close loop averaging\n controller_over_frame = 1/10\n close_loop_averaging = np.sqrt(2*controller_over_frame)*np.arctan(1/(2*controller_over_frame))\n sigma_measurement = noise_propagator * close_loop_averaging * (self.actuator_spacing*1e9) * (hartmann_spot/snr*4.84814e-6)\n self.sigma_measurement = sigma_measurement # in nm", "def width_h_invis(self):\n if m_higgs > 2.0 * self.mx:\n coupling = self.gsxx * self.stheta / np.sqrt(1 - self.stheta**2)\n\n val = (\n (coupling**2 * (m_higgs**2 - 4 * self.mx**2) ** 1.5)\n / (8.0 * m_higgs**2 * np.pi)\n ).real\n\n assert val >= 0\n\n return val\n else:\n return 0.0", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def MRE(actual, noisy):\n if len(actual) != len(noisy): return -1\n absErr = np.abs(np.array(actual) - np.array(noisy))\n idx_nonzero = np.where(np.array(actual) != 0)\n absErr_nonzero = absErr[idx_nonzero]\n true_nonzero = np.array(actual)[idx_nonzero]\n relErr = absErr_nonzero / true_nonzero\n return relErr.mean()", "def PCO1S12Noise():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/TestOptics_PCO1S12/'\n d1,dx1 = 
met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas3.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f13,pow13 = fourier.meanPSD((d1-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f13],[pow12,pow23,pow13])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f13,pow13/f13[0],label='1-3: %.2f' % midfreq[2])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: PCO1S12')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def sound_horizon_EH(self):\n om_m = self.omega_cb\n om_b = self.omega_b\n om_n = np.sum(self.omega_nu)\n h = self.h \n if self.M_nu_tot == 0.: rs = 44.5*np.log(9.83/om_m)/np.sqrt(1+10*om_b**0.75)*h\n else: rs = 55.154*np.exp(-72.3*(om_n+0.0006)**2.)/(om_m**0.25351*om_b**0.12807)*h\n return rs", "def get_corrected_ratio(data, wsp, model, sym_uncer=False, dbg_file=None):\n logging.info('Getting corrected ratio with {} errors'.\n format('HESSE' if sym_uncer else 'MINOS'))\n corr_ratio = []\n # NOTE: Assuming here that the bins are ordered correctly AND that the\n for label, bounds in model.bins.iteritems():\n selections = []\n for ivar, var in enumerate(model.bin_cut_vars):\n selections.append(select_bin(var, *bounds[ivar]))\n bin_data = apply_selections(data, selections)\n\n chi1_prob, chi2_prob = get_state_fractions(bin_data, wsp, model, label)\n\n print_info('chi1', bin_data, chi1_prob)\n print_info('chi2', bin_data, chi2_prob)\n\n if dbg_file is not None:\n debug_plots('chi1_{}'.format(label), dbg_file, chi1_prob,\n bin_data.corr_chi1, bin_data.chicMass)\n debug_plots('chi2_{}'.format(label), dbg_file, chi2_prob,\n bin_data.corr_chi2, bin_data.chicMass)\n\n chi1_w = bin_data.loc[:, 'corr_chi1'] * chi1_prob\n chi2_w = bin_data.loc[:, 'corr_chi2'] * chi2_prob\n chi1_corr = np.sum(chi1_w)\n chi2_corr = np.sum(chi2_w)\n corr_ratio.append(chi2_corr / chi1_corr)\n\n # Assume that the relative uncertainties are unchanged for the corrected and\n # the uncorrected graph and use them to determine the uncertainties of the\n # corrected graph\n uncorr_graph = get_graph(wsp, model, 'r_chic2_chic1', sym_uncer)\n xlo, xhi, err_lo, err_hi = get_errors(uncorr_graph)\n xvals, yvals = np.array(uncorr_graph.GetX()), np.array(uncorr_graph.GetY())\n corr_ratio = np.array(corr_ratio)\n\n return r.TGraphAsymmErrors(len(corr_ratio), xvals, corr_ratio, xlo, xhi,\n err_lo / yvals * corr_ratio,\n err_hi / yvals * corr_ratio)", "def refCylNoise():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/RefCylinderMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefCylinder_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefCylinder_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161205_RefCylinder_Avg8_Meas3.fits')\n\n p1,px1 = met.read4DFits(wdir+'161205_RefCylinder_'\n 'ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefCylinder_'\n 'ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefCylinder_'\n 
'ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefCylinder_'\n 'ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n\n return f1,pow1", "def flatNoisePellicle():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/SolarBwPellicle/'\n d1,dx1 = met.read4DFits(wdir+'161209_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161209_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161209_Avg8_Meas3.fits')\n d4,dx4 = met.read4DFits(wdir+'161209_Avg8_Meas4.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f34,pow34 = fourier.meanPSD((d3-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f14,pow14 = fourier.meanPSD((d1-d4)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f34,f14],[pow12,pow23,pow34,pow14])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f34,pow34/f34[0],label='3-4: %.2f' % midfreq[2])\n plt.loglog(f14,pow14/f14[0],label='1-4: %.2f' % midfreq[3])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: SolarB Flat+Pellicle')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def calc_escape_peak_ratios(lineE,detectorelement='Si'):\n \n if(detectorelement=='Si'):\n #\n # For Si the K peak is 95% of the transition\n # and the photoionization to total cross section is ~ 95% \n # Si escape peak is typically only 0.2-1% (bigger at lower energies) \n #\n jump = xl.JumpFactor(14,xl.K_SHELL)\n fluy = xl.FluorYield(14,xl.K_SHELL)\n corr = fluy*(jump-1.0)/jump\n corr_photo = xl.CS_Photo(14,lineE)/xl.CS_Total(14,lineE)\n corr_trans = xl.RadRate(14,xl.KA_LINE)+xl.RadRate(14,xl.KB_LINE)\n mu_si= xl.CS_Total(14,lineE)\n mu_internal = xl.CS_Total(14,1.73998)\n r = mu_internal/mu_si\n eta = corr_trans*corr_photo*corr*0.5*(1.0-r*log(1.0+1.0/r))\n ratio = eta/(1.0-eta)\n #\n # escape peak sigma should be narrower than the main peak.\n #\n return ratio\n else:\n # \n # Ge detector...\n # Ge has a large escape peak ratio ~ 5-15% and a Ka and kb component\n #\n if(lineE < 11.5):\n return 0.0,0.0\n jump = xl.JumpFactor(32,xl.K_SHELL)\n fluy = xl.FluorYield(32,xl.K_SHELL)\n corr = fluy*(jump-1.0)/jump\n corr_photo = xl.CS_Photo(32,lineE)/xl.CS_Total(32,lineE)\n corr_trans_ka = xl.RadRate(32,xl.KA_LINE)\n corr_trans_kb =xl.RadRate(32,xl.KB_LINE)\n mu_ge= xl.CS_Total(32,lineE)#\n # one for the Ka and one for the Kb peak...\n mu_internal_ka = 
xl.CS_Total(32,xl.LineEnergy(32,xl.KA_LINE))\n r_ka = mu_internal_ka/mu_ge\n eta_ka = corr_trans_ka*corr_photo*corr*0.5*(1.0-r_ka*log(1.0+1.0/r_ka))\n ratio_ka = eta_ka/(1.0-eta_ka)\n\n mu_internal_kb = xl.CS_Total(32,xl.LineEnergy(32,xl.KB_LINE))\n r_kb = mu_internal_kb/mu_ge\n eta_kb = corr_trans_kb*corr_photo*corr*0.5*(1.0-r_kb*log(1.0+1.0/r_kb))\n ratio_kb = eta_kb/(1.0-eta_kb)\n\n return ratio_ka,ratio_kb", "def electrons_normalize(superdark):\n\n logging.info('\\tConverting {} to electrons.'.format(superdark))\n\n # Open the image and get the data\n hdulist = fits.open(superdark, 'update')\n sci1 = hdulist[1].data\n err2 = hdulist[2].data\n sci4 = hdulist[4].data\n err5 = hdulist[5].data\n\n # Find gains and exposure time\n gain = {}\n gain['A'] = hdulist[0].header['ATODGNA']\n gain['B'] = hdulist[0].header['ATODGNB']\n gain['C'] = hdulist[0].header['ATODGNC']\n gain['D'] = hdulist[0].header['ATODGND']\n exptime = hdulist[0].header['EXPTIME']\n\n # Multiply each \"half\" of the extensions by the appropriate gain.\n logging.info('\\tMultiplying each quadrant by its gain.')\n apply_norm(sci1, '*', gain['C'], 'regionAorC')\n apply_norm(err2, '*', gain['C'], 'regionAorC')\n apply_norm(sci1, '*', gain['D'], 'regionBorD')\n apply_norm(err2, '*', gain['D'], 'regionBorD')\n apply_norm(sci4, '*', gain['A'], 'regionAorC')\n apply_norm(err5, '*', gain['A'], 'regionAorC')\n apply_norm(sci4, '*', gain['B'], 'regionBorD')\n apply_norm(err5, '*', gain['B'], 'regionBorD')\n\n # Normalizing the gain to 1 is not necessary since calwf3\n # doesn't look at this keyword. It already assumes the units\n # of the darks are e-/sec and will use the gains in CCDTAB to\n # reconvert the darks to DNs. But we do it for consistency.\n logging.info('\\tNormalizing the SCI and ERR extensions (1, 2, 4, 5) ' + \\\n 'by the integration time.')\n apply_norm(sci1, '/', exptime, 'None')\n apply_norm(err2, '/', exptime, 'None')\n apply_norm(sci4, '/', exptime, 'None')\n apply_norm(err5, '/', exptime, 'None')\n\n # Update necessary keywords\n for ext in range(7):\n hdulist[ext].header['CCDGAIN'] = 1.0\n hdulist[0].header['EXPTIME'] = 1.0\n hdulist[0].header['TEXPTIME'] = 1.0\n hdulist.close()", "def test_rh_mixing_ratio():\n p = 1013.25 * units.mbar\n temperature = 20. 
* units.degC\n w = 0.012 * units.dimensionless\n rh = relative_humidity_from_mixing_ratio(p, temperature, w)\n assert_almost_equal(rh, 81.72498 * units.percent, 3)", "def getAbsNormalizationFactor(deltaE_wkspace,min,max):\n global reducer\n van_mass=reducer.get_default_parameter('vanadium-mass') \n \n Integration(InputWorkspace=deltaE_wkspace,OutputWorkspace='van_int',RangeLower=min,RangeUpper=max,IncludePartialBins='1')\n input_ws = mtd[deltaE_wkspace]\n ei_monovan = input_ws.getRun().getLogData(\"Ei\").value\n data_ws=mtd['van_int']\n nhist = data_ws.getNumberHistograms()\n #print nhist\n\n signal1_sum = 0.0\n weight1_sum = 0.0 \n signal2_sum = 0.0\n weight2_sum = 0.0 \n signal3_sum = 0.0\n weight3_sum = 0.0 \n signal4_sum = 0.0\n weight4_sum = 0.0 \n\n \n ic=0;\n izerc=0;\n for i in range(nhist):\n try:\n det = data_ws.getDetector(i)\n except Exception:\n continue\n if det.isMasked():\n continue\n\n signal = data_ws.readY(i)[0]\n error = data_ws.readE(i)[0]\n \n if signal != signal: #ignore NaN\n continue\n if ((error<=0) or (signal<=0)): # ignore Inf (0 in error are probably 0 in sign\n izerc+=1\n continue\n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i -- this what Libisis had\n weight = 1.0/error\n signal1_sum += signal * weight\n weight1_sum += weight \n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i^2\n weight2 = 1.0/(error*error)\n signal2_sum += signal * weight2\n weight2_sum += weight2 \n # Guess which assumes puassonian distribution with Err=Sqrt(signal) and calculates \n # the function: N_avrg = 1/(DetEfficiency_avrg^-1)*sum(n_i*DetEfficiency_i^-1)\n # where the DetEfficiency = WB_signal_i/WB_average WB_signal_i is the White Beam Vanadium \n # signal on i-th detector and the WB_average -- average WB vanadium signal. \n # n_i is the modified signal \n err_sq = error*error\n weight = err_sq/signal\n signal3_sum += err_sq\n weight3_sum += weight\n # Guess which estimatnes value sum(n_i^2/Sigma_i^2)/sum(n_i/Sigma_i^2) TGP suggestion from 12-2012\n signal4_sum += signal*signal/err_sq\n weight4_sum += signal/err_sq\n \n ic += 1 \n #print 'signal value =' ,signal\n #print 'error value =' ,error \n #print 'average ',signal_sum \n #---------------- Loop finished\n \n if( weight1_sum==0.0 or weight2_sum == 0.0 or weight3_sum == 0.0 or weight4_sum == 0.0) :\n print \"WB integral has been calculated incorrectrly, look at van_int workspace and input workspace: \",deltaE_wkspace\n raise IOError(\" divided by 0 weight\")\n \n integral_monovanLibISIS=signal1_sum / weight1_sum\n integral_monovanSigSq =signal2_sum / weight2_sum \n integral_monovanPuason =signal3_sum / weight3_sum \n integral_monovanTGP =signal4_sum / weight4_sum\n #integral_monovan=signal_sum /(wbVan_sum)\n van_multiplier = (float(reducer.van_rmm)/float(van_mass))\n absnorm_factorLibISIS = integral_monovanLibISIS * van_multiplier\n absnorm_factorSigSq = integral_monovanSigSq * van_multiplier \n absnorm_factorPuason = integral_monovanPuason * van_multiplier \n absnorm_factorTGP = integral_monovanTGP * van_multiplier \n #print 'Monovan integral :' ,integral_monovan \n \n if ei_monovan >= 210.0: \n xsection = 421 # vanadium cross-section in mBarn/sR (402 mBarn/Sr) (!!!modified to fit high energy limit?!!!)\n else: # old textbook cross-section for vanadium for ei=20mEv\n xsection = 400 + (ei_monovan/10) \n\n absnorm_factorLibISIS /= xsection\n absnorm_factorSigSq /= xsection \n absnorm_factorPuason /= xsection \n absnorm_factorTGP /= xsection \n \n sample_multiplier = 
(float(reducer.sample_mass)/float(reducer.sample_rmm))\n absnorm_factorLibISIS= absnorm_factorLibISIS *sample_multiplier\n absnorm_factorSigSq = absnorm_factorSigSq *sample_multiplier\n absnorm_factorPuason = absnorm_factorPuason *sample_multiplier\n absnorm_factorTGP = absnorm_factorTGP *sample_multiplier\n \n if (absnorm_factorLibISIS !=absnorm_factorLibISIS)|(izerc!=0): # It is an error, print diagnostics:\n if (absnorm_factorLibISIS !=absnorm_factorLibISIS):\n print '--------> Absolute normalization factor is NaN <----------------------------------------------'\n else:\n print '--------> Warning, Monovanadium has zero spectra <--------------------------------------------' \n print '--------> Processing workspace: ',deltaE_wkspace\n print '--------> Monovan Integration range : min=',min,' max=',max\n print '--------> Summarized: ',ic,' spectra with total value: ',signal2_sum, 'and total weight: ',weight2_sum\n print '--------> Dropped: ',izerc,' empty spectra'\n print '--------> Van multiplier: ',van_multiplier,' sample multiplier: ',sample_multiplier, 'and xsection: ',xsection \n print '--------> Abs norm factors: LibISIS: ',absnorm_factorLibISIS,' Sigma^2: ',absnorm_factorSigSq\n print '--------> Abs norm factors: Puasonian: ',absnorm_factorPuason, ' TGP: ',absnorm_factorTGP\n print '----------------------------------------------------------------------------------------------' \n else:\n DeleteWorkspace(Workspace=deltaE_wkspace)\n DeleteWorkspace(Workspace=data_ws)\n return (absnorm_factorLibISIS,absnorm_factorSigSq,absnorm_factorPuason,absnorm_factorTGP)", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))", "def plot_ratio_illustration_for_poster(fs, lfp_pre, lfp_band, zeros, extrema, extrema_kind, steepness_indices,\n steepness_values, save_figure=False):\n \"\"\"\n Plot a figure for illustration of sharpness ratio and steepness ratio calculation. 
use large fonts for poster.\n \"\"\"\n upto = 5\n fontsize = 20\n markersize = 8\n\n filter_offset = 167 # in ms\n samples_per_5ms = int(5 * fs / 1000)\n last_zero_idx = zeros[upto]\n extrema_to_use = extrema[:(upto - 1)]\n steepness_to_use = steepness_indices[:upto - 2]\n steepness_values_to_use = steepness_values[:upto - 2]\n sharpness_idx = np.sort(np.array([extrema_to_use - samples_per_5ms, extrema_to_use + samples_per_5ms]).flatten())\n\n extrema_idx = int((upto - 1) / 2)\n last_idx = np.max([last_zero_idx, extrema_to_use.max()])\n time_array = np.linspace(filter_offset, filter_offset + last_idx / fs * 1000, last_idx)\n plt.close()\n\n plt.figure(figsize=(15, 5))\n\n # plot LFP only up to last but one zero:\n plt.plot(time_array[:zeros[upto - 1]], lfp_pre[:zeros[upto - 1]], label='preprocessed')\n plt.plot(time_array[:zeros[upto - 1]], lfp_band[:zeros[upto - 1]], label='beta filtered', color='C1')\n plt.plot(time_array[zeros[:upto]], lfp_band[zeros[:upto]], 'o', color='C1', markersize=markersize)\n\n # plot zero line\n plt.axhline(0, color='grey')\n\n # plot +-5ms sharpness markers\n for idx, sharpness_sample in enumerate(sharpness_idx):\n # plt.axvline(x=time_array[sharpness_sample], color='k', alpha=.7, linewidth=.5)\n # color = 'red' if ll[idx] > 0 else 'blue'\n color = 'm'\n plt.plot(time_array[sharpness_sample], lfp_pre[sharpness_sample], '*', markersize=markersize, color=color)\n\n # plot maximum slope markers\n for idx, steepness_sample in enumerate(steepness_to_use):\n plt.plot(time_array[steepness_sample], lfp_pre[steepness_sample], 'd', markersize=markersize, color='g')\n # set up a tangent on this point\n m = (steepness_values_to_use[idx]) / 0.4\n y_val = lfp_pre[steepness_sample]\n x_val = time_array[steepness_sample]\n bias = y_val - m * x_val\n tangent = lambda x: m * x + bias\n tangent_time_array = np.linspace(time_array[steepness_sample] - 2,\n time_array[steepness_sample] + 2, 20)\n plt.plot(tangent_time_array, tangent(tangent_time_array), color='g')\n\n for idx, extrema in enumerate(extrema_to_use):\n if extrema_kind[idx] > 0:\n format = '^'\n label = 'peaks'\n else:\n format = 'v'\n label = 'troughs'\n\n color = 'red'\n plt.plot(time_array[extrema], lfp_pre[extrema], format, markersize=markersize, color=color)\n\n # remove labels for poster figure\n plt.xticks([], [])\n plt.yticks([], [])\n plt.xlabel('time', fontsize=fontsize)\n plt.ylabel('LFP', fontsize=fontsize)\n # plt.xlim([time_array[0], time_array[-1]])\n\n plt.tight_layout()\n plt.legend(frameon=False, prop={'size': 20})\n if save_figure:\n plt.savefig(os.path.join(SAVE_PATH_FIGURES, 'pre_sharpness.pdf'))\n plt.show()", "def diff_smf(mstar_arr, volume, cvar_err, h1_bool):\n if not h1_bool:\n # changing from h=0.7 to h=1\n logmstar_arr = np.log10((10**mstar_arr) / 2.041) \n else:\n logmstar_arr = mstar_arr\n if survey == 'eco':\n bin_min = np.round(np.log10((10**8.9) / 2.041), 1)\n bin_max = np.round(np.log10((10**11.8) / 2.041), 1)\n bins = np.linspace(bin_min, bin_max, 7)\n \n elif survey == 'resolvea':\n bin_min = np.round(np.log10((10**8.9) / 2.041), 1)\n bin_max = np.round(np.log10((10**11.5) / 2.041), 1)\n bins = np.linspace(bin_min, bin_max, 7)\n\n elif survey == 'resolveb':\n bin_min = np.round(np.log10((10**8.7) / 2.041), 1)\n bin_max = np.round(np.log10((10**11.8) / 2.041), 1)\n bins = np.linspace(bin_min, bin_max, 7) \n\n\n # Unnormalized histogram and bin edges\n counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins\n dm = edg[1] - edg[0] # Bin width\n maxis = 0.5 * (edg[1:] + 
edg[:-1]) # Mass axis i.e. bin centers\n # Normalized to volume and bin width\n err_poiss = np.sqrt(counts) / (volume * dm)\n phi = counts / (volume * dm) # not a log quantity\n return maxis, phi, err_poiss, bins, counts", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def like_ratio(null_model, alt_model, df=1):\n D = -2 * (null_model.llf - alt_model.llf)\n return {\"D\" : D, \"p_val\" : 1 - sp.stats.chi2.cdf(D, df)}", "def fraction_of_infectious_virus(self) -> _VectorisedFloat:\n return self.virus.viable_to_RNA_ratio * (1 - self.host_immunity)", "def periodicity_metric(light_curve_rms, sm_phase_rms):\n return (sm_phase_rms ** 2) / (light_curve_rms ** 2)", "def diagnosefft(Y, df, charf, X, subband, debug=False, version=2, harmthreshold=3.0, subthreshold=3.0):\n\n #Rescale fft\n df /= X\n nHarm = 1\n score = 0.0\n if debug is True:\n harmonics = []\n subbandsNeg = []\n subbandsPos = []\n noises = []\n scores = []\n while True:\n #if second harmonic, reduce the tolerance for finding peak\n if nHarm == 1:\n per = 0.02\n else:\n per = 0.01\n #Detect the charf harmonic\n j1 = int((nHarm*charf-per*charf)/df)\n j2 = int((nHarm*charf+per*charf)/df)\n jh = np.argmax(Y[j1:j2]) + j1\n harm = Y[jh]\n #Reclaibrate characteristic frequency\n charf = df*jh/nHarm\n #Detect the noise level for the harmonic\n if version == 1:\n j1n = int((nHarm*charf-0.02*charf)/df)\n j2n = int((nHarm*charf+0.02*charf)/df)\n if jh - j1n == 0:\n noise = np.mean(Y[jh+1:j2n])\n elif j2n - (jh + 1) == 0:\n noise = np.mean(Y[j1n:jh])\n else:\n noise = (np.mean(Y[j1n:jh]) + np.mean(Y[jh+1:j2n]))/2.0\n elif version == 2:\n # Find left bottom of harmonic\n for i in range(jh, 0, -1):\n if Y[i-1] > Y[i]:\n jhl = i\n break\n # Find right bottom of harmonic\n for i in range(jh, Y.size, 1):\n if Y[i+1] > Y[i]:\n jhr = i\n break\n # j1n = int((nHarm*charf-charf)/df)\n # j2n = int((nHarm*charf+charf)/df)\n noise = (np.mean(Y[jhl-2:jhl+1]) + np.mean(Y[jhr:jhr+3]))/2.0\n # print('j1=%i, j2=%i, jh=%i, harm=%f, jhl=%i, jhr=%i, noise=%f' % (j1,j2,jh,harm,jhl,jhr,noise))\n #If there should be subbands, detect them aswell\n if subband > 0.01:\n #Check for negative subband first\n j1 = int((nHarm*charf - subband - 0.05)/df)\n j2 = int((nHarm*charf - subband + 0.05)/df)\n jsn = np.argmax(Y[j1:j2]) + j1\n negSubBand = Y[jsn]\n #Check for position subband\n j1 = int((nHarm*charf + subband - 0.05)/df)\n j2 = int((nHarm*charf + subband + 0.05)/df)\n jsp = np.argmax(Y[j1:j2]) + j1\n posSubBand = Y[jsp]\n #Make the final score!\n #If subband should exist:\n if subband > 0.01:\n if harm >= noise*harmthreshold and (negSubBand > noise*subthreshold or posSubBand > noise*subthreshold):\n score += harm/(noise*3.0)*nHarm**2.0\n nHarm += 1\n if debug is True:\n subbandsNeg.append(jsn)\n subbandsPos.append(jsp)\n harmonics.append(jh)\n noises.append(noise)\n scores.append(score)\n else:\n if debug is True:\n return score, subbandsNeg, subbandsPos, harmonics, noises, scores\n else:\n return score\n #if subband should not exist\n else:\n if harm >= noise*harmthreshold:\n score += harm/(noise*harmthreshold)*(nHarm+1.0)**2.0\n nHarm += 1\n if debug is True:\n harmonics.append(jh)\n noises.append(noise)\n scores.append(score)\n else:\n if debug is True:\n return score, subbandsNeg, subbandsPos, negSubBand, posSubBand, harmonics, noises, scores\n else:\n return score\n\n #Check if FFT is too short. 
If so, return what is done!\n test1 = int((nHarm*charf+0.02*charf)/df)\n test2 = int((nHarm*charf + subband + 0.05)/df)\n if test1 > Y.size or test2 > Y.size:\n if debug is True:\n return score, subbandsNeg, subbandsPos, negSubBand, posSubBand, harmonics, noises, scores\n else:\n return score", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))", "def test_mixing_ratio_from_rh_dimensions():\n p = 1000. * units.mbar\n temperature = 0. * units.degC\n rh = 100. * units.percent\n assert (str(mixing_ratio_from_relative_humidity(p, temperature, rh).units)\n == 'dimensionless')", "def line_ratio_per_pixel_AHIMSA(ratio_name='NII',quant='ne',phase='all',res=0.5, plane='xy',col='grey',add=False,**kwargs):\n\n fig = plt.figure(constrained_layout=True,figsize=(15,10))\n gs = fig.add_gridspec(3,2)\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n gal_ob = gal.galaxy(p.gal_index)\n \n location = aux.moment0_map_location(res=res,plane=plane,gal_index=p.gal_index)\n # Load sim and cell data\n try:\n moment_0_map = np.load(location, allow_pickle=True)\n print('Found stored momentmap data')\n except:\n print('Did not find stored momentmap data - creating')\n aux.convert_cell_data_to_regular_grid(gal_index=p.gal_index)\n moment_0_map = np.load(location, allow_pickle=True)\n\n cols = {'HII':'blue','HI':'orange','H2':'red'}\n for i,phase in enumerate(['HII','HI','H2']):\n ax = fig.add_subplot(gs[i,0])\n\n dataset= np.array(moment_0_map[:,3])\n\n # Do all pixels for comparison\n index1 = p.moment0_dict['L_[NII]122']\n index2 = p.moment0_dict['L_[NII]205']\n index3 = p.moment0_dict['m']\n index4 = p.moment0_dict['ne_mw']\n line1 = []\n line2 = []\n ne_mw = []\n m = []\n for row in dataset:\n try:\n line1.append(row[index1])\n line2.append(row[index2])\n m.append(row[index3])\n ne_mw.append(row[index4])\n except:\n print(row)\n m = np.array(m) \n ne_mw = np.array(ne_mw) \n line1 = np.array(line1)\n line2 = np.array(line2)\n # print(line2)\n ne_mw = np.array(ne_mw[line2 > 0])\n ratio = line1[line2 > 0]/line2[line2 > 0]\n ax.scatter(np.log10(ne_mw/m[line2 > 0]),ratio,color=col,label= 'All gas in moment0map pixels',alpha=0.3)\n\n # Now for one ISM phase\n index1 = p.moment0_dict['L_[NII]122_%s' % phase]\n index2 = p.moment0_dict['L_[NII]205_%s' % phase]\n index3 = p.moment0_dict['m_%s' % phase]\n index4 = p.moment0_dict['ne_%s_mw' % phase]\n line1 = []\n line2 = []\n ne_mw = []\n m_phase = []\n for row in dataset:\n try:\n line1.append(row[index1])\n line2.append(row[index2])\n m_phase.append(row[index3])\n ne_mw.append(row[index4])\n except:\n print(row)\n m_phase = np.array(m_phase) \n ne_mw = np.array(ne_mw) \n line1 = np.array(line1)\n line2 = np.array(line2)\n ne_mw = np.array(ne_mw[line2 > 0])\n ratio = line1[line2 > 0]/line2[line2 > 0]\n ax.scatter(np.log10(ne_mw/m[line2 > 0]),ratio,color=cols[phase],label= '%s gas in moment0map pixels' % phase,alpha=0.3)\n\n xs = np.arange(ax.get_xlim()[0],ax.get_xlim()[1],0.1)\n ax.plot(xs,aux.NII_from_logne(xs),'-b')\n if p.xlim: ax.set_xlim(p.xlim)\n if p.ylim: ax.set_ylim(p.ylim)\n ax.set_xlabel('log ' + getlabel('ne'))\n ax.set_ylabel(getlabel('NIIratio'))\n ax.legend()\n\n ax = 
fig.add_subplot(gs[:,-1])\n ax.set_ylabel('y [kpc]')\n ax.set_xlabel('x [kpc]')\n map_sim_property(prop='m',vmin=9,vmax=12.9,add=True,log=True,sim_type='simgas',ax=ax,**kwargs)\n plt.subplots_adjust(hspace = 0, wspace = 0.2)\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'lineratios/NII/'): os.mkdir(p.d_plot + 'lineratios/NII/') \n plt.savefig(p.d_plot+'lineratios/NII/%s%s_G%i_NII_ne' % (p.sim_name,p.sim_run,p.gal_index)+'.png', dpi=200)", "def test_renyi_values():\n d1 = Distribution(['0', '1'], [0, 1])\n d2 = Distribution(['0', '1'], [1 / 2, 1 / 2])\n d3 = Distribution(['0', '1'], [1, 0])\n\n assert renyi_divergence(d1, d2, 1 / 2) == pytest.approx(np.log2(2))\n assert renyi_divergence(d2, d3, 1 / 2) == pytest.approx(np.log2(2))\n assert renyi_divergence(d1, d3, 1 / 2) == pytest.approx(np.inf)", "def get_pH(self):\n rawline = self.f.readline()\n while rawline:\n rematch = self.solvphre.match(rawline)\n if rematch:\n return float(rematch.groups()[0])\n rawline = self.f.readline()", "def qFelder(h):\n\treturn (0.92 + 0.153 * h/1.01) * math.sqrt(9.8 * (2/3.0 * h)**3)", "def information_ratio(returns, factor_returns):\n active_return = returns - factor_returns\n tracking_error = np.std(active_return, ddof=1)\n if np.isnan(tracking_error):\n return 0.0\n return np.mean(active_return) / tracking_error", "def compareRepeatability():\n fc,pc = refCylNoise()\n fp,pp = flatNoisePellicle()\n fcgh,pcgh = flatNoiseCGH()\n fpco,ppco = PCO1S12Noise()\n\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([fp,fc,fcgh,fpco],[pp,pc,pcgh,ppco])]\n plt.clf()\n \n plt.loglog(fp,pp/fp[0],label='Flat+Pellicle: %.2f' % midfreq[0])\n plt.loglog(fc,pc/fc[0],label='Cylinder+CGH: % .2f' % midfreq[1])\n plt.loglog(fcgh,pcgh/fcgh[0],label='Flat+CGH: %.2f' % midfreq[2])\n plt.loglog(fpco,ppco/fpco[0],label='PCO1S12: %.2f' % midfreq[3])\n plt.title('Repeatability Comparison')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')" ]
[ "0.62044317", "0.59503055", "0.5934977", "0.58116406", "0.5744772", "0.5688612", "0.5682206", "0.5657256", "0.5637151", "0.56292534", "0.562283", "0.56201357", "0.5602905", "0.5597058", "0.55753255", "0.55715555", "0.55521727", "0.5543683", "0.55348676", "0.5511092", "0.55000806", "0.5490362", "0.54900813", "0.54887897", "0.5488657", "0.54636854", "0.5455198", "0.5453186", "0.54511994", "0.54284585" ]
0.6009351
1
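The query/document pair above calibrates probe signals against the field of a Helmholtz coil pair. For reference, the standard on-axis field at the midpoint of an ideal pair is B = (4/5)^(3/2) * mu0 * N * I / R; a short sketch of that formula and the resulting calibration ratio (function names and numbers are illustrative assumptions, not the project's code):

import numpy as np

MU_0 = 4e-7 * np.pi  # vacuum permeability, T*m/A

def helmholtz_center_field(current_a, radius_m, turns=1):
    # Ideal on-axis field at the midpoint of a Helmholtz pair, in Tesla.
    return (4.0 / 5.0) ** 1.5 * MU_0 * turns * current_a / radius_m

def calibration_ratio(ideal_peak_t, measured_peak):
    # Scalar that rescales the uncalibrated measured peak to the ideal field.
    return ideal_peak_t / measured_peak

b_ideal = helmholtz_center_field(current_a=100.0, radius_m=0.1, turns=10)
print(calibration_ratio(b_ideal, measured_peak=2.5e-4))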
Retrieve the .mat filenames for the Troika dataset. Review the README in ./datasets/troika/ to understand the organization of the .mat files.
def LoadTroikaDataset(): data_dir = "./datasets/troika/training_data" data_fls = sorted(glob.glob(data_dir + "/DATA_*.mat")) ref_fls = sorted(glob.glob(data_dir + "/REF_*.mat")) return data_fls, ref_fls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrices_names(self, folder=None):\n if folder is None:\n abs_path = os.path.abspath(os.path.dirname(sys.argv[0]))\n folder = os.path.join(abs_path, 'matrix')\n\n matrix_name_x = \"%s_%s_%s_%s_x.txt\" % (\n self.D, self.Rs, self.axe_X, self.FOV_img)\n matrix_file_x = os.path.join(folder, matrix_name_x)\n\n matrix_name_y = \"%s_%s_%s_%s_y.txt\" % (\n self.D, self.Rs, self.axe_X, self.FOV_img)\n matrix_file_y = os.path.join(folder, matrix_name_y)\n\n return matrix_file_x, matrix_file_y", "def _get_subject_files(self):\n from itertools import chain\n\n subjsf = fetch_one_file(self.ica_dir, self._subjects_fname)\n mat_file = sio.loadmat(subjsf)['files']\n return [f.strip() for f in list(chain.from_iterable(chain.from_iterable(chain.from_iterable(mat_file))))]", "def collect_mat_files(dpath,dname='LMD',didx=0):\n #dnames = ['LMD','Ballroom']\n #dname = dnames[didx]\n return glob.glob(os.path.join(dpath, dname,'*.mat'))", "def get_feature_labels_files(dataset):\n features = []\n audio_labels = []\n focal_labels = []\n files = []\n for frame in dataset:\n files.append(frame[0])\n features.append(frame[1][0].T)\n if frame[1][1] is not None:\n audio_labels.append(frame[1][1][0].T)\n focal_labels.append(frame[1][1][1].T)\n else:\n audio_labels.append(None)\n focal_labels.append(None)\n features = np.expand_dims(np.asarray(features), 4)\n audio_labels = np.asarray(audio_labels)\n focal_labels = np.asarray(focal_labels)\n return [features, audio_labels,focal_labels, files]", "def read_matricies():\n\tmatrix_arr = []\n\tcurr_dirpath = os.getcwd()\n\tfor subdir, dirs, files in os.walk(curr_dirpath + '/Example Matrices'):\n\t\tfor curr_file in files:\n\t\t\tcurr_filepath = curr_dirpath + '/Example Matrices/' + curr_file\n\t\t\twith open(curr_filepath, 'r') as open_file:\n\t\t\t\tfor line in open_file:\n\t\t\t\t\tif len(line) > 0:\n\t\t\t\t\t\tcurr_matrix = np.matrix(line)\n\t\t\t\t\t\tmatrix_arr.append(curr_matrix)\n\treturn matrix_arr", "def get_filenames(is_training, data_dir):\n if is_training:\n return [os.path.join(data_dir, 'LIP_train5.record')]\n else:\n return [os.path.join(data_dir, 'LIP_val5.record')]", "def _get_filenames():\n src_dir = os.path.join(FLAGS.dataset_dir, FLAGS.src_dir)\n filenames = [os.path.join(src_dir, f) for f in os.listdir(src_dir) if\n f.endswith(\".tfrecord\") and\n all([blackflag not in f for blackflag in TEMP_BLACK_LIST])]\n shuffle(filenames)\n return filenames", "def get_filenames_reid(is_training, data_dir):\n if is_training:\n return [os.path.join(data_dir, 'train-512-170.tfrecords')]\n else:\n return [os.path.join(data_dir, 'val-512-170.tfrecords')]", "def list_mat_files(glob_pat):\n fnames = glob.glob(glob_pat)\n #print len(fnames)\n return fnames", "def get_atlas_names():\n tar_path = os.path.join(get_data_dir(), 'atlas')\n xml_names = get_file_names(tar_path,'.xml')\n return xml_names", "def LoadTroikaDataFile(data_fl):\n data = sp.io.loadmat(data_fl)['sig']\n return data[2:]", "def get_filenames(is_training,datadir):\n assert os.path.exists(datadir), (\n 'Can not find data at given directory!!')\n if(is_training):\n labels = []\n data_dir = []\n with open('/home/qnie/PycharmProjects/ntumotion/training_protocol/fEDM_R_CS_trainimgs.txt') as f:\n for line in f:\n data_dir.append(datadir+line.strip())\n f.close()\n\n with open('/home/qnie/PycharmProjects/ntumotion/training_protocol/fEDM_R_CS_trainlabels.txt') as f:\n for line in f:\n labels.append(int(line.strip()))\n f.close()\n else:\n labels = []\n data_dir = []\n with 
open('/home/qnie/PycharmProjects/ntumotion/training_protocol/fEDM_R_CS_testimgs.txt') as f:\n for line in f:\n data_dir.append(datadir + line.strip())\n f.close()\n\n with open('/home/qnie/PycharmProjects/ntumotion/training_protocol/fEDM_R_CS_testlabels.txt') as f:\n for line in f:\n labels.append(int(line.strip()))\n f.close()\n\n return data_dir, labels", "def get_filenames(root_dir):\n from pathlib import Path\n\n file_list = Path(root_dir).rglob('*featuresN.hdf5')\n file_list = [str(file) for file in file_list]\n\n filenames = pd.DataFrame(file_list, columns=['file_name'])\n filenames.insert(0, 'file_id', np.arange(len(file_list)))\n\n return filenames", "def get_filenames(is_training, data_dir):\n\n return [ os.path.join(data_dir, 'train_'+str(shard_id)+'.tfrecord') for shard_id in range(_NUM_SHARDS)]", "def get_train_files(self):\n train_dir = os.path.join(self.data_dir, \"train_{}\".format(self.patient_no))\n filenames = os.listdir(train_dir)\n interm = ((os.path.splitext(f)[0].split(\"_\"), os.path.join(train_dir, f)) for f in filenames)\n return [(int(p[0][0]), int(p[0][1]), int(p[0][2]), p[1]) for p in interm]", "def read_dataset_files(datasetid, clean_folder):\n fnotu = datasetid + '.otu_table.clean.feather'\n fnmeta = datasetid + '.metadata.clean.feather'\n\n df = feather.read_dataframe(os.path.join(clean_folder, fnotu))\n # Feather format does not support index names, first column has index\n df.index = df.iloc[:,0]\n df = df.iloc[:, 1:]\n\n meta = feather.read_dataframe(os.path.join(clean_folder, fnmeta))\n meta.index = meta.iloc[:, 0]\n meta = meta.iloc[:, 1:]\n\n ## Make sure sample names are strings\n if df.index.dtype != 'O':\n df.index = pd.read_csv(os.path.join(clean_folder, fnotu), sep='\\t', dtype=str).iloc[:,0]\n\n if meta.index.dtype != 'O':\n meta.index = pd.read_csv(os.path.join(clean_folder, fnmeta), sep='\\t', dtype=str).iloc[:,0]\n\n return df, meta", "def read_dataset_v1():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes.mat', squeeze_me=True)\n return T, E, M, data", "def raw_file_names(self):\n return self.raw_file_names_3d", "def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')", "def get_filenames():\r\n datadir = \"./phase3_data/\"\r\n samples = os.listdir(datadir)\r\n all_files = []\r\n for i in range(len(samples)):\r\n sampfiles = []\r\n datadir = \"./phase3_data/\" + samples[i]\r\n files = os.listdir(datadir)\r\n for file in files:\r\n if file.endswith(\".bin\"):\r\n sampfiles += [file]\r\n all_files += [sampfiles]\r\n return samples, all_files", "def return_file_read(_):\n return [\"scorevideo LOG\", \"File: log.mat\"]", "def get_images_and_labels(tampered_path, authentic_path):\n tampered_dir = tampered_path\n authentic_dir = authentic_path\n images = {}\n for im in glob.glob(authentic_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 0\n for im in glob.glob(tampered_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 1\n return images", "def files():\r\n 
fn=pd.read_csv(request.files.get('fnm'))\r\n scaling = scaler.transform(fn)\r\n prediction = classifier.predict(scaling)\r\n return 'Predictions'+ str(list(prediction))", "def get_filenames(self):\n return [doc['filename'] for doc in self.vocab]", "def img_filenames(self, matricule):\n proj, sid = next((proj, proj.Matricule(matricule).to('AmcStudentId'))\n for proj in self.projects_by_serie.values()\n if proj.Matricule(matricule).exists('AmcStudentId'))\n return [\n (int(num), filename.replace('%PROJET', proj.path))\n for num, filename in proj.dbs['capture'].execute('select page, src from capture_page where student=? order by page', [sid])\n ]", "def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths", "def inputFiles(self):\n return (self.matrixFile,)", "def get_filename_data_readers(image_ids_file, get_labels=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tx_jpg_dir=None, y_png_dir=None):\n\tx_jpg_dir = x_jpg_dir or os.path.join(FLAGS.data_dir, 'images')\n\ty_png_dir = y_png_dir or os.path.join(FLAGS.data_dir, 'tf_segmentation')\n\t# TODO(student): Write code.\n\n\tlines_dataset = tf.data.TextLineDataset(image_ids_file)\n\timages_dataset = lines_dataset.map(lambda x: x_jpg_dir + os.path.sep + x + '.jpg')\n\tif get_labels:\n\t\tlabels_dataset = lines_dataset.map(lambda x: y_png_dir + os.path.sep + x + '.png')\n\t\treturn images_dataset, labels_dataset\n\telse:\n\t\treturn images_dataset", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def get_data_path():\n up_folder = os.path.abspath(os.path.join(ROOT_DIR, '..'))\n img_folder = os.path.join(up_folder, 'data_set', 'XX-ImageLabel', 'train_data_416')\n img_file = os.path.join(DATA_DIR, \"t_img_tags_train.txt\") # 数据类别\n return img_folder, img_file" ]
[ "0.62999743", "0.5992772", "0.5919344", "0.58812904", "0.5764292", "0.57600445", "0.5720086", "0.56433666", "0.5607395", "0.5561246", "0.55274653", "0.55114895", "0.5472151", "0.54595053", "0.54580384", "0.5449727", "0.54414546", "0.54392225", "0.54072315", "0.5384773", "0.53776836", "0.5372219", "0.536323", "0.5352726", "0.5342113", "0.53379977", "0.53359866", "0.5332669", "0.5313846", "0.5292724" ]
0.73199314
0
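The LoadTroikaDataset document above simply globs and sorts the Troika .mat files so that each recording lines up with its reference. A small usage sketch of that pattern (the directory path and filename prefixes come from the function itself; the rest is illustrative):

import glob
import os

def load_troika_dataset(data_dir="./datasets/troika/training_data"):
    # Pair each DATA_*.mat recording with its REF_*.mat reference by sorted order.
    data_fls = sorted(glob.glob(os.path.join(data_dir, "DATA_*.mat")))
    ref_fls = sorted(glob.glob(os.path.join(data_dir, "REF_*.mat")))
    return data_fls, ref_fls

for data_fl, ref_fl in zip(*load_troika_dataset()):
    print(os.path.basename(data_fl), "<->", os.path.basename(ref_fl))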
Top-level evaluation function. Runs the pulse rate algorithm on the Troika dataset and returns an aggregate error metric.
def Evaluate(): global reg reg = ModelRegression() # Retrieve dataset files data_fls, ref_fls = LoadTroikaDataset() errs, confs = [], [] for data_fl, ref_fl in zip(data_fls, ref_fls): # Run the pulse rate algorithm on each trial in the dataset errors, confidence = RunPulseRateAlgorithm(data_fl, ref_fl) errs.append(errors) confs.append(confidence) # Compute aggregate error metric errs = np.hstack(errs) confs = np.hstack(confs) return AggregateErrorMetric(errs, confs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aggregated_error_rate(self):\n estimates = np.mat([0.8, 0.4, 0.8, 0.4])\n m = np.shape(self.data_matrix)[0]\n returned = ada_boost.aggregated_error_rate(estimates, self.labels, m)\n self.assertEqual(returned, 2.0)", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def test(self, test_data_path):\n test_data = read_datafile(test_data_path)\n test_data = self.preprocessor.preprocess(test_data)\n self.run(test_data)\n if check_format(self.get_result_path()):\n thresholds, precisions, avg_precision, reciprocal_rank, num_relevant = evaluate(test_data_path,\n self.get_result_path())\n return avg_precision", "def eval_beat(individual):\n # compile the individual\n routine = gp.compile(individual, pset)\n # generate some test output\n try:\n test_output = gen_beat_output(routine)\n except:\n return 0.0,\n ## do some stats on the beat\n sd = np.std(np.array(test_output))\n bpm, correl = bpm_detector(test_output,24000)\n bpm_score = 1 - abs((bpm/120.0)-1)\n sd_score = sd / 128.0\n del test_output\n # return the score\n return float(bpm_score * sd_score),", "def evaluate(self, threshold=0.5):\n pass", "def do_analyse(args):\n\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n\n score = 'score'\n\n # Read in the results, and add a boolean target column.\n df = pd.read_csv(args.results, index_col=0)\n df['target'] = df['verify_speaker'] == df['enrol_speaker']\n\n # Calculate ideal 0.01% threshold over the multi-session data.\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n nontarget_count = nontarget_df[score].count()\n th_calc = nontarget_df.iloc[int(nontarget_count * (1 / 10000))][score]\n\n # Now filter the data so that we only consider mono-session enrolment and verification.\n df = df.loc[df['verify_room'] == df['enrol_room']]\n target_df = df.loc[df['target'] == True].sort_values(score, ascending=False)\n nontarget_df = df.loc[df['target'] == False].sort_values(score, ascending=False)\n target_count = target_df[score].count()\n nontarget_count = nontarget_df[score].count()\n\n # Calculate FA/FR for the user-defined threshold.\n th_user = args.th_user\n fr_user = target_df.loc[target_df[score] < th_user][score].count()\n fa_user = nontarget_df.loc[nontarget_df[score] > th_user][score].count()\n frr_user = fr_user / target_count\n far_user = fa_user / nontarget_count\n label_user = 'User Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_user, fr_user, frr_user * 100,\n fa_user, far_user * 100)\n\n # Calculate the FA/FR for the ideal threshold calculated from the multi-session data.\n fr_calc = target_df.loc[target_df[score] < th_calc][score].count()\n fa_calc = nontarget_df.loc[nontarget_df[score] > th_calc][score].count()\n frr_calc = fr_calc / target_count\n far_calc = fa_calc / nontarget_count\n label_calc = 'Calc Threshold: th {:.4f}, FR {} ({:.3f}%), FA {} ({:.3f}%)'.format(th_calc, fr_calc, frr_calc * 100,\n fa_calc, far_calc * 100)\n\n # Print the stats.\n print('\\nTarget Stats:')\n print(target_df[score].describe())\n print('\\nNon-Target Stats:')\n print(nontarget_df[score].describe())\n print('\\nThresholds:')\n print(label_user)\n print(label_calc)\n\n # Paint the graphs.\n paint_graph(score, 'verify_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_room', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'verify_speaker', df, th_user, label_user, th_calc, label_calc)\n paint_graph(score, 'enrol_speaker', df, 
th_user, label_user, th_calc, label_calc)", "def main():\n logging.info(\"Executing data quality module\")\n\n calculate_quality()", "def process_evaluation_epoch(aggregates):\n if 'losses' in aggregates:\n eloss = torch.mean(torch.stack(aggregates['losses'])).item()\n else:\n eloss = None\n\n hypotheses = aggregates['preds']\n references = aggregates['txts']\n\n wer, scores, num_words = word_error_rate(hypotheses, references)\n multi_gpu = dist.is_initialized()\n if multi_gpu:\n if eloss is not None:\n eloss /= dist.get_world_size()\n eloss_tensor = torch.tensor(eloss).cuda()\n dist.all_reduce(eloss_tensor)\n eloss = eloss_tensor.item()\n\n scores_tensor = torch.tensor(scores).cuda()\n dist.all_reduce(scores_tensor)\n scores = scores_tensor.item()\n num_words_tensor = torch.tensor(num_words).cuda()\n dist.all_reduce(num_words_tensor)\n num_words = num_words_tensor.item()\n wer = scores * 1.0 / num_words\n return wer, eloss", "def evaluate(self, time) -> float:\n ...", "def _get_fpr(self, arg):", "def evaluate(self, dataset):\n\t\tpass", "def AggregateErrorMetric(pr_errors, confidence_est):\n # Higher confidence means a better estimate. The best 90% of the estimates\n # are above the 10th percentile confidence.\n percentile90_confidence = np.percentile(confidence_est, 10)\n\n # Find the errors of the best pulse rate estimates\n best_estimates = pr_errors[confidence_est >= percentile90_confidence]\n\n # Return the mean absolute error\n return np.mean(np.abs(best_estimates))", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def evaluation(model_path, threshold):\n classifier = joblib.load(model_path)\n\n positive = np.load(\"./processed_data/validation/positive.npy\")\n unlabeled = np.load(\"./processed_data/validation/unlabeled.npy\")\n\n p_result = np.array(classifier.predict_proba(positive[:, :-1])[:, 1])\n plt.hist(p_result, bins=300)\n plt.show()\n\n tp_rate = np.where(p_result >= threshold, 1, 0).sum() / p_result.shape[0]\n print(tp_rate)\n\n u_result = np.array(classifier.predict_proba(unlabeled[:, :-1])[:, 1])\n plt.hist(u_result, bins=300)\n plt.show()\n\n\n # the following steps aim to filter 'possible' negative instances in the evaluation-unlabeled set\n stageone_classifier = joblib.load(\"./solver_result/liblinear/0.01/logistic.pkl\")\n stgone_result = np.array(stageone_classifier.predict_proba(unlabeled[:,:-1])[:, 1])\n possibly_negative = unlabeled[np.where(stgone_result <= _negative_threshold)]\n print(positive.shape)\n print(unlabeled.shape)\n print(possibly_negative.shape)\n possi_ng_result = np.array(classifier.predict_proba(possibly_negative[:, :-1])[:, 1])\n fp_rate = np.where(possi_ng_result >= threshold, 1, 0).sum() / possi_ng_result.shape[0]\n plt.hist(possi_ng_result, bins=300)\n plt.show()\n\n print(fp_rate)\n print(\"TP: \" + str(tp_rate) + \" FP: \" + str(fp_rate) + \" GMean: \" + str(math.sqrt(tp_rate * (1 - fp_rate))))", "def evaluate(pred, ground_truth, target, count_to_level = False, Y_name = None, thresholds = None, print_metrics=True):\n \n if target == 'count':\n \n # fill NaNs with zeroes\n pred = pred.fillna(method = \"ffill\")\n pred = pred.fillna(method = \"bfill\")\n ground_truth = ground_truth.fillna(method = \"ffill\")\n ground_truth = ground_truth.fillna(method = \"bfill\")\n \n # Set negative predictions to zero\n pred[pred < 0] = 0\n ground_truth[ground_truth < 0] = 0\n \n # 
Calculate error metrics\n rmse = mean_squared_error(ground_truth, pred, squared=False)\n mae = mean_absolute_error(ground_truth, pred)\n \n # Calculate error metrics only for crowded moments (p75) \n busy = np.percentile(ground_truth, 75)\n ground_truth_busy = ground_truth[ground_truth > busy].dropna()\n pred_busy = pred[ground_truth > busy].dropna()\n rmse_busy = mean_squared_error(ground_truth_busy, pred_busy, squared=False)\n mae_busy = mean_absolute_error(ground_truth_busy, pred_busy)\n \n # Store error metrics in dict\n error_metrics = dict({'rmse': rmse, 'rmse_busy': rmse_busy, 'mae': mae, 'mae_busy': mae_busy})\n \n if print_metrics:\n print(f\"Root mean squared error: {rmse.round(1)}\")\n print(f\"Root mean squared error (crowded): {rmse_busy.round(1)}\")\n print(f\"Mean absolute error: {mae.round(1)}\")\n print(f\"Mean absolute error (crowded): {mae_busy.round(1)}\")\n \n if count_to_level:\n pred = get_crowd_levels(pred, Y_name, thresholds)\n ground_truth = get_crowd_levels(ground_truth, Y_name, thresholds)\n \n # Confusion matrix\n conf_mat = confusion_matrix(ground_truth, pred)\n \n error_metrics['conf_mat'] = conf_mat\n \n elif target == \"level\":\n \n # Set dtype to category\n pred = pred.astype('category')\n ground_truth = ground_truth.astype('category')\n \n # Forward fill NaNs\n pred = pred.fillna(method = \"ffill\")\n ground_truth = ground_truth.fillna(method = \"ffill\")\n \n # Confusion matrix\n conf_mat = confusion_matrix(ground_truth, pred)\n \n # Classification report (recall, precision, F1)\n class_report = classification_report(ground_truth, pred, output_dict = True)\n class_report = pd.DataFrame(class_report).transpose()\n \n error_metrics = dict({\"conf_mat\": conf_mat, \"class_report\": class_report})\n \n if print_metrics:\n print(f\"Confusion matrix: {conf_mat}\")\n print(f\"Classification report: {class_report}\")\n \n return error_metrics", "def evaluate(self, data, metric, classes=None):\n func_dict = {\n 'mutual_information': sklearn.metrics.mutual_info_score,\n 'normed_mutual_information': sklearn.metrics.normalized_mutual_info_score,\n 'square_error': sklearn.metrics.mean_squared_error,\n 't-test': scipy.stats.ttest_ind,\n 'wilcoxon': scipy.stats.wilcoxon,\n 'correlation': np.corrcoef\n }\n self.make_signature(data, classes)\n try:\n if metric in {'mutual_information', 'normed_mutual_information'}:\n self.score = func_dict[metric](classes, self.digit_signature()) \n elif metric == 'square_error':\n self.score = func_dict[metric](classes, self.signatures)\n elif metric in {'t-test', 'wilcoxon'} :\n self.score = np.abs(func_dict[metric](self.signatures[classes==1], \\\n self.signatures[classes==0])[0])\n \n elif metric == 'correlation':\n self.score = func_dict[metric](classes, self.signatures)[1,0]\n \n except: KeyError(\"no such a function\") \n \n return self.score", "def eval_error_metric(predt, dtrain: xgb.DMatrix):\n label = dtrain.get_label()\n r = np.zeros(predt.shape)\n gt = predt > 0.5\n if predt.size == 0:\n return \"CustomErr\", 0\n r[gt] = 1 - label[gt]\n le = predt <= 0.5\n r[le] = label[le]\n return 'CustomErr', np.sum(r)", "def __call__(self):\n\n accepted = False\n\n while not accepted:\n\n test_log10E = np.random.uniform(1, 7)\n\n test_pdf = np.random.uniform(self._min_pdf, self._max_pdf)\n\n if test_pdf < self._likelihood(10 ** test_log10E, self._index):\n\n accepted = True\n\n return 10 ** test_log10E", "def evaluate(self) -> int:", "def evaluate(self) :\n pass", "def test_load_avg_15():\n result = _run_metric('load_avg_15')\n assert 
result.exit_code == 0", "def compute(self, failures):\n pass", "def calculate_average_run_accuracy(self):\n overall_true_rate, true_positive_rate, true_negative_rate, false_positive_rate, false_negative_rate, true_positive_rate_cutoff, true_negative_rate_cutoff, \\\n false_positive_rate_cutoff, false_negative_rate_cutoff, unclassified_cutoff, matthews_correlation_coefficient, brier_score, auc_score, fit_time, hmeasure = [0] * 15\n balanced_accuracy_arr = []\n auc_arr = []\n hmeasure_arr = []\n brier_score_arr = []\n fit_time_arr = []\n mcc_arr = []\n true_positive_arr = []\n true_negative_arr = []\n false_positive_arr = []\n false_negative_arr = []\n\n count = 0\n for result_dictionary in self.errors:\n for z in range(len(result_dictionary[\"balanced_accuracy_arr\"])):\n overall_true_rate += result_dictionary[\"balanced_accuracy_arr\"][z]\n true_positive_rate += result_dictionary[\"true_positive_rate_arr\"][z]\n true_negative_rate += result_dictionary[\"true_negative_rate_arr\"][z]\n false_positive_rate += result_dictionary[\"false_positive_rate_arr\"][z]\n false_negative_rate += result_dictionary[\"false_negative_rate_arr\"][z]\n matthews_correlation_coefficient += result_dictionary[\"mcc_arr\"][z]\n auc_score += result_dictionary[\"auc_arr\"][z]\n brier_score += result_dictionary[\"brier_score_arr\"][z]\n fit_time += result_dictionary[\"fit_time_arr\"][z]\n hmeasure += result_dictionary[\"hmeasure_arr\"][z]\n count += 1\n\n true_positive_rate_cutoff += result_dictionary[\"avg_true_positive_rate_with_prob_cutoff\"]\n true_negative_rate_cutoff += result_dictionary[\"avg_true_negative_rate_with_prob_cutoff\"]\n false_positive_rate_cutoff += result_dictionary[\"avg_false_positive_rate_with_prob_cutoff\"]\n false_negative_rate_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n unclassified_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n balanced_accuracy_arr += result_dictionary[\"balanced_accuracy_arr\"]\n hmeasure_arr += result_dictionary[\"hmeasure_arr\"]\n auc_arr += result_dictionary[\"auc_arr\"]\n brier_score_arr += result_dictionary[\"brier_score_arr\"]\n fit_time_arr += result_dictionary[\"fit_time_arr\"]\n mcc_arr += result_dictionary[\"mcc_arr\"]\n true_positive_arr += result_dictionary[\"true_positive_rate_arr\"]\n true_negative_arr += result_dictionary[\"true_negative_rate_arr\"]\n false_positive_arr += result_dictionary[\"false_positive_rate_arr\"]\n false_negative_arr += result_dictionary[\"false_negative_rate_arr\"]\n\n avg_run_results = [None] * 31\n avg_run_results[0] = matthews_correlation_coefficient / float(count)\n avg_run_results[1] = brier_score / float(count)\n avg_run_results[2] = overall_true_rate / float(count)\n avg_run_results[3] = true_positive_rate / float(count)\n avg_run_results[4] = true_negative_rate / float(count)\n avg_run_results[5] = false_positive_rate / float(count)\n avg_run_results[6] = false_negative_rate / float(count)\n avg_run_results[7] = true_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[8] = true_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[9] = false_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[10] = false_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[11] = unclassified_cutoff / float(len(self.errors))\n avg_run_results[12] = fit_time / float(count)\n avg_run_results[14] = balanced_accuracy_arr\n avg_run_results[15] = auc_score / float(count)\n avg_run_results[16] = auc_arr\n avg_run_results[17] = 
brier_score_arr\n avg_run_results[18] = fit_time_arr\n avg_run_results[19] = mcc_arr\n avg_run_results[13] = self.calculate_std_deviation(balanced_accuracy_arr)\n avg_run_results[20] = self.calculate_std_deviation(mcc_arr)\n avg_run_results[21] = self.calculate_std_deviation(brier_score_arr)\n avg_run_results[22] = self.calculate_std_deviation(auc_arr)\n avg_run_results[23] = self.calculate_std_deviation(fit_time_arr)\n avg_run_results[24] = self.calculate_std_deviation(true_positive_arr)\n avg_run_results[25] = self.calculate_std_deviation(true_negative_arr)\n avg_run_results[26] = self.calculate_std_deviation(false_positive_arr)\n avg_run_results[27] = self.calculate_std_deviation(false_negative_arr)\n avg_run_results[28] = hmeasure / float(count)\n avg_run_results[29] = self.calculate_std_deviation(hmeasure_arr)\n avg_run_results[30] = hmeasure_arr\n\n return avg_run_results", "def evaluate(x, y, amplitude, x_0, y_0, r_0):\n rr = (x - x_0) ** 2 + (y - y_0) ** 2\n rr_0 = r_0 ** 2\n\n # Because np.select evaluates on the whole rr array\n # we have to catch the invalid value warnings\n with np.errstate(invalid='ignore'):\n values = np.select([rr <= rr_0, rr > rr_0], [2 * np.sqrt(rr_0 - rr), 0])\n return amplitude * values / (4 / 3. * np.pi * rr_0 * r_0)", "def compute_average(self, error=None):\n\n nbjobs = len(self)\n if not nbjobs:\n return\n max_xsec = max(one.xsec for one in self)\n min_xsec = min(one.xsec for one in self)\n self.axsec = sum([one.axsec for one in self]) / nbjobs\n self.xsec = sum([one.xsec for one in self]) /nbjobs\n self.xerrc = sum([one.xerrc for one in self]) /nbjobs\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self])) /nbjobs\n if error:\n self.xerrc = error\n self.xerru = error\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = 0#sum([one.nw for one in self])\n self.maxit = 0#len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = sum([one.luminosity for one in self])\n self.ysec_iter = []\n self.yerr_iter = []\n self.th_maxwgt = 0.0\n self.th_nunwgt = 0 \n for result in self:\n self.ysec_iter+=result.ysec_iter\n self.yerr_iter+=result.yerr_iter\n self.yasec_iter += result.yasec_iter\n self.eff_iter += result.eff_iter\n self.maxwgt_iter += result.maxwgt_iter\n\n #check full consistency\n onefail = False\n for one in list(self):\n if one.xsec < (self.xsec - 25* one.xerru):\n if not onefail:\n logger.debug('multi run are inconsistent: %s < %s - 25* %s: assign error %s', one.xsec, self.xsec, one.xerru, error if error else max_xsec-min_xsec)\n onefail = True\n self.remove(one)\n if onefail:\n if error:\n return self.compute_average(error)\n else:\n return self.compute_average((max_xsec-min_xsec)/2.)", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def test_load_avg_5():\n result = _run_metric('load_avg_5')\n assert result.exit_code == 0", "def analyze(data):\n ## Do welch periodogram here\n pass", "def test_evaluate_error_score(error_score, return_data, strategy, backend):\n # skip test for dask backend if dask is not installed\n if backend == \"dask\" and not _check_soft_dependencies(\"dask\", severity=\"none\"):\n return None\n\n forecaster = ExponentialSmoothing(sp=12)\n y = load_airline()\n # add NaN to make ExponentialSmoothing fail\n y.iloc[1] = np.nan\n fh = [1, 2, 3]\n cv = ExpandingWindowSplitter(step_length=48, initial_window=12, fh=fh)\n if error_score in [np.nan, 1000]:\n with pytest.warns(FitFailedWarning):\n results = evaluate(\n forecaster=forecaster,\n y=y,\n 
cv=cv,\n return_data=return_data,\n error_score=error_score,\n strategy=strategy,\n backend=backend,\n )\n if isinstance(error_score, type(np.nan)):\n assert results[\"test_MeanAbsolutePercentageError\"].isna().sum() > 0\n if error_score == 1000:\n assert results[\"test_MeanAbsolutePercentageError\"].max() == 1000\n if error_score == \"raise\":\n with pytest.raises(Exception): # noqa: B017\n evaluate(\n forecaster=forecaster,\n y=y,\n cv=cv,\n return_data=return_data,\n error_score=error_score,\n strategy=strategy,\n )" ]
[ "0.5552829", "0.54419374", "0.5426438", "0.53882104", "0.53734046", "0.52837074", "0.5248022", "0.5202013", "0.5199766", "0.51779705", "0.51705366", "0.51694053", "0.5148345", "0.514448", "0.51326084", "0.5094116", "0.5040183", "0.50327647", "0.5014488", "0.5009077", "0.5005315", "0.5001878", "0.49987593", "0.4998435", "0.4990035", "0.49862745", "0.49862745", "0.4965138", "0.495036", "0.49448076" ]
0.7705827
0
Given the string representation of a tagged token, return the corresponding tuple representation. The rightmost occurrence of C{sep} in C{s} will be used to divide C{s} into a word string and a tag string. If C{sep} does not occur in C{s}, return C{(s, None)}.
def str2tuple(s, sep='/'): loc = s.rfind(sep) if loc >= 0: return (s[:loc], s[loc+1:].upper()) else: return (s, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tuple2str(tagged_token, sep='/'):\n word, tag = tagged_token\n if tag is None:\n return word\n else:\n assert sep not in tag, 'tag may not contain sep!'\n return '%s%s%s' % (word, sep, tag)", "def split(value: str, sep: str = \":\") -> Tuple:\n left, _, right = value.partition(sep)\n return (left, right) if right else (None, left)", "def split_by_commas(maybe_s: str) -> tuple[str, ...]:\n if not maybe_s:\n return ()\n parts: list[str] = []\n split_by_backslash = maybe_s.split(r'\\,')\n for split_by_backslash_part in split_by_backslash:\n splitby_comma = split_by_backslash_part.split(',')\n if parts:\n parts[-1] += ',' + splitby_comma[0]\n else:\n parts.append(splitby_comma[0])\n parts.extend(splitby_comma[1:])\n return tuple(parts)", "def tsplit(s, sep):\n stack = [s]\n for char in sep:\n pieces = []\n for substr in stack:\n pieces.extend(substr.split(char))\n stack = pieces\n return stack", "def split_tag(elem, tags):\n splited_tag = elem.split(TAG_SEP)\n if len(splited_tag) > 1:\n tag_prefix, tag = splited_tag\n assert tag in tags.tags\n assert tag_prefix in tags.iob\n else:\n tag = elem\n tag_prefix = None\n assert tag == tags.default\n return tag_prefix, tag", "def split_tag(chunk_tag):\n if chunk_tag == 'O':\n return ('O', None)\n return chunk_tag.split('-', maxsplit=1)", "def tokenize(s):\n tokens = []\n i = 0\n fs = [parse_prolog, parse_starttag, parse_selfclosing, parse_endtag, parse_text, parse_fail]\n while i < len(s):\n for f in fs:\n t = f(s[i:])\n if t:\n e, end_i = t\n i += end_i\n tokens.append(e)\n break\n\n return tokens", "def destringify(cls, s: str)->typing.Tuple[str, str, str]:\n language: str = \"\"\n language_suffix: str = \"\"\n if s.startswith(cls.LANGUAGE_QUALIFIED_STRING_SIGIL):\n s, language = s.rsplit(\"@\", 1)\n if \"-\" in language:\n language, language_suffix = language.split(\"-\", 1)\n language_suffix = \"-\" + language_suffix\n s = s.replace('\\\\|', '|')\n return (ast.literal_eval(s), language, language_suffix)", "def parse_log_line(line: str) -> ('value', ):\n return tuple(SEP_RE.split(line))", "def tokenlist(sep, item):\n return item + ZeroOrMore(sep + item) + Optional(sep)", "def tokenize(string, delimiter):\n return (str(string).split(delimiter), 0)", "def _preprocess(self, tagged: List[Tuple]) -> Tuple:\n ori = \" \".join([tag[0] for tag in tagged])\n tags = [tag[1] for tag in tagged]\n # Mapping into general tagset\n tags = [self._map[tag] if tag in self._map else \"X\" for tag in tags]\n return \" \".join(tags), ori", "def partition(self, sep, reverse=False):\n if hasattr(sep, \"_clean_string\"):\n sep = sep.clean()\n if reverse:\n parent_result = self._clean_string.rpartition(sep)\n else:\n parent_result = self._clean_string.partition(sep)\n current_index = 0\n result = tuple()\n for section in parent_result:\n result += (self[current_index : current_index + len(section)],)\n current_index += len(section)\n return result", "def tokenize_key_value_pair(kv_pair):\n key, value = kv_pair.strip().split('\\t')\n key = tuple(key.strip().split())\n value = tuple(value.strip().split())\n return (key, value)", "def parse(arg: Tuple[str, str, str, str, str]) -> Tuple[str, str, str]:\n return (arg[2], arg[3], arg[4])", "def split_tokens(s):\n parts = []\n parens = 0\n for opar, cpar, unquoted, quoted, residue in TOKENS_FINDALL(s):\n if unquoted:\n parts.append(unquoted)\n elif quoted:\n parts.append(quoted[1:-1])\n elif opar:\n parens += 1\n parts.append(opar)\n elif cpar:\n parens -= 1\n parts.append(cpar)\n elif residue == '$':\n if not parens:\n 
raise ValueError(\"'$' outside parenthesis in %r\" % (s))\n else:\n raise ValueError(residue, s)\n if parens:\n raise ValueError(\"Unbalanced parenthesis in %r\" % (s))\n return parts", "def _parseSequence(string, delimiter=','):\n if not isinstance(string, str):\n return string\n string = string.strip()\n if string.startswith('[') and string.endswith(']'):\n sequenceType = 'list'\n elif string.startswith('(') and string.endswith(')'):\n sequenceType = 'tuple'\n else:\n return _parseSingle(string)\n \n string = string[1:-1]\n \n tokens = []\n current = []\n \n plev = 0\n blev = 0\n sqopen = False\n dqopen = False\n \n for char in string:\n if char == '[':\n blev += 1\n current.append(char)\n elif char == ']':\n blev -= 1\n current.append(char)\n elif char == '(':\n plev += 1\n current.append(char)\n elif char == ')':\n plev -= 1\n current.append(char)\n elif char == '\"':\n dqopen = not dqopen\n current.append(char)\n elif char == \"'\":\n sqopen = not sqopen\n current.append(char)\n elif (char == delimiter and plev == 0 and blev == 0 and \n not sqopen and not dqopen):\n tokens.append(_parseSequence(''.join(current).strip()))\n current = []\n else:\n current.append(char)\n \n if len(current) > 0:\n tokens.append(_parseSequence(''.join(current)))\n \n if sequenceType == 'tuple':\n tokens = tuple(tokens) \n return tokens", "def parse_feature_value(s,next_index=0):\n next_index = jump_over_space(s,next_index)\n start_index = next_index\n while True:\n if not s[next_index].isspace():\n next_index += 1\n else:\n break\n feature_value = s[start_index:next_index]\n if feature_value == '':\n feature_value = None\n feature_value = feature_value.split('/')\n return (feature_value,next_index)", "def process_pair(words: tuple) -> Optional[tuple]:\n\n # Replace all reflexive forms\n to_replace = [\"[se]\", \"|se|\", \"[-și]\", \"[o]\", \"|-și|\", \"|și|\", \"[-i]\", \"[i]\", \"[și]\", \"a \"]\n raw_line = \" \".join(words)\n for sub in to_replace:\n raw_line = raw_line.replace(sub, \"\")\n\n # Replace multiple spaces, strip beginning / ending spaces\n processed_line = re.sub('\\s{2,}', ' ', raw_line).strip()\n\n words = processed_line.split(' ')\n\n # Return the new tuple\n # Or the empty string if the words are the same or contain each other, or ar capital nouns\n if len(words) != 2:\n return None\n if words[1] in words[0] or words[0] in words[1]:\n return None\n if words[1][0].isupper() or words[0][0].isupper():\n return None\n return tuple(words)", "def tokenize(s):\n return s.split()", "def get_tuple(self, string):\n a = re.search('\\((\\d+\\.\\d+), (\\d+\\.\\d+)\\)', string)\n if not a:\n return None\n else:\n return (float(a.group(1)), float(a.group(2)))", "def tag(s, tokenize=True, encoding=\"utf-8\"):\n tags = []\n for sentence in parse(s, tokenize, True, False, False, False, encoding).split():\n for token in sentence:\n tags.append((token[0], token[1]))\n return tags", "def parse_tags(s: str) -> List[str]:\n tags = []\n buf = []\n in_quoted = None\n\n for c in s:\n if in_quoted:\n if c == in_quoted:\n in_quoted = None\n else:\n buf.append(c)\n elif c == '\"' or c == '\\'':\n in_quoted = c\n elif c == ',':\n if buf:\n tag = ''.join(buf).strip()\n if tag:\n tags.append(tag)\n buf.clear()\n else:\n buf.append(c)\n\n if buf:\n tag = ''.join(buf).strip()\n if tag:\n tags.append(tag)\n\n return tags", "def splitpop(string, delimiter):\n if delimiter not in string:\n string += delimiter\n fields = string.split(delimiter)\n return delimiter.join(fields[:-1]), fields[-1]", "def split_tuple(value: 
tuple[str, ...] | str | None) -> tuple[str, ...] | None:\n if value is None:\n return None\n if TYPE_CHECKING:\n assert isinstance(value, str)\n return tuple(value.split(\".\"))", "def split_param(text: str, prefixes: Sequence[str], sep: str) -> tuple[str, str, str]:\n stripped = text.strip()\n if not prefixes:\n prefix = ''\n rest = stripped\n else:\n try:\n prefix = next(filter(stripped.startswith, prefixes))\n except StopIteration:\n prefix = ''\n rest = stripped\n else:\n rest = stripped.split(prefix, maxsplit=1)[1].strip()\n assert len(prefix) >= 1\n assert rest\n arg, part_sep, descr = rest.partition(sep.join((' ', ' ')))\n if not part_sep:\n if rest.endswith(sep):\n arg = rest[:-1]\n elif sep + ' ' in rest:\n arg, _, descr = rest.partition(sep + ' ')\n # if we hit neither then there is no '-' in text, possible case of '[prefix] foo'?\n return prefix, arg.strip(), descr.lstrip()", "def parse_grouping(p: str) -> tuple[int, int]:\n width = len(p)\n g1 = p.rfind(',')\n if g1 == -1:\n return 1000, 1000\n g1 = width - g1 - 1\n g2 = p[:-g1 - 1].rfind(',')\n if g2 == -1:\n return g1, g1\n g2 = width - g1 - g2 - 2\n return g1, g2", "def tsplit(string, delimiters):\n delimiters = tuple(delimiters)\n stack = [string,]\n for delimiter in delimiters:\n for i, substring in enumerate(stack):\n substack = substring.split(delimiter)\n stack.pop(i)\n for j, _substring in enumerate(substack):\n stack.insert(i+j, _substring)\n return stack", "def parse_NFelt(K, s):\n return K([QQ(c) for c in s.split(\",\")])", "def line_to_tuple(self, line, sep, line_number):\n s = line.strip().split(sep, 2)\n if len(s) < 2:\n raise ValueError(f'Line {line_number} has too few values.')\n return tuple((s[i] if i < len(s) else \"\" for i in range(3)))" ]
[ "0.6646102", "0.612387", "0.60105777", "0.5899971", "0.56244576", "0.5581877", "0.556164", "0.54761165", "0.5448013", "0.5431246", "0.54079366", "0.5345186", "0.5311481", "0.52914494", "0.5287557", "0.5282446", "0.5266853", "0.52644396", "0.52302814", "0.52152735", "0.52006245", "0.5197828", "0.519519", "0.51872545", "0.51732093", "0.5162204", "0.5135963", "0.51312363", "0.5101573", "0.50916094" ]
0.682012
0
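Illustrative aside (not part of the dataset rows above or below): the str2tuple record just closed documents an NLTK-style tagged-token parser. A minimal runnable sketch of that function with assumed example inputs:

def str2tuple(s, sep='/'):
    # Split on the rightmost separator and uppercase the tag, as in the record's document field.
    loc = s.rfind(sep)
    if loc >= 0:
        return (s[:loc], s[loc + 1:].upper())
    return (s, None)

assert str2tuple('fly/nn') == ('fly', 'NN')    # rightmost '/' divides word and tag
assert str2tuple('1/2/CD') == ('1/2', 'CD')    # earlier separators stay in the word
assert str2tuple('hello') == ('hello', None)   # no separator -> tag is None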
Given the tuple representation of a tagged token, return the corresponding string representation. This representation is formed by concatenating the token's word string, followed by the separator, followed by the token's tag. (If the tag is None, then just return the bare word string.)
def tuple2str(tagged_token, sep='/'): word, tag = tagged_token if tag is None: return word else: assert sep not in tag, 'tag may not contain sep!' return '%s%s%s' % (word, sep, tag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tagify(parsedtag):\n tag = \"\"\n for t in parsedtag:\n if t == '':\n t = '_'\n tag = tag+t\n return tag", "def tuple_to_string(letter_word_pair):\n letter, word = letter_word_pair\n return '{letter}: {word}'.format(letter=letter, word=word)", "def _preprocess(self, tagged: List[Tuple]) -> str:\n ori = \" \".join([tag[0] for tag in tagged if tag[1] != \"SPACE\"])\n sent = \" \".join([tag[1] for tag in tagged if tag[1] != \"SPACE\"])\n sent = sent.replace(\"-LRB-\", \"(\")\n sent = sent.replace(\"-RRB-\", \")\")\n return sent, ori", "def _preprocess(self, tagged: List[Tuple]) -> Tuple:\n ori = \" \".join([tag[0] for tag in tagged])\n tags = [tag[1] for tag in tagged]\n # Mapping into general tagset\n tags = [self._map[tag] if tag in self._map else \"X\" for tag in tags]\n return \" \".join(tags), ori", "def tag_to_wordnet(tag):\n if (tag == 'ADJ'): return('a')\n elif (tag == 'ADV'): return('r')\n elif (tag == 'NOUN'): return('n')\n elif (tag == 'VERB'): return('v')\n else: return None", "def POStag(self, word):\n \t\tif word in (\"'\",\",\",\".\",':',';','.'):\n \t\t\ttag = 'PUNCT'\n \t\telif word == '-':\n \t\t\ttag = 'DASH'\n \t\telse:\n \t\t\ttag = 'NOTAG'\n \t\treturn tag", "def formatted_tag(tag_ids):\n if tag_ids is None:\n return ''\n else:\n ids = []\n for tag_id in tag_ids.split(','):\n ids.append(Tag.id == int(tag_id))\n tags = Tag.query.filter(or_(*ids)).all()\n if tags is None:\n return ''\n else:\n return 'with tags: ' + ', '.join([tag.name.title() for tag in tags])", "def special_tags_to_text(self):\n if (self.windtag is None and self.tornadotag is None and\n self.hailtag is None and self.tornadodamagetag is None and\n self.waterspouttag is None and not self.flood_tags):\n return \"\"\n\n parts = []\n if self.tornadotag is not None:\n parts.append(\"tornado: %s\" % (\n self.tornadotag))\n if self.waterspouttag is not None:\n parts.append(\"waterspout: %s\" % (\n self.waterspouttag))\n if self.tornadodamagetag is not None:\n parts.append(\"tornado damage threat: %s\" % (\n self.tornadodamagetag))\n if self.windtag is not None:\n parts.append(\"wind: %s%s %s\" % (\n self.winddirtag.replace(\">\", \"&gt;\").replace(\"<\", \"&lt;\"),\n self.windtag, self.windtagunits))\n if self.hailtag is not None:\n parts.append(\"hail: %s%s IN\" % (\n self.haildirtag.replace(\">\", \"&gt;\").replace(\"<\", \"&lt;\"),\n self.hailtag))\n for k, v in self.flood_tags.items():\n parts.append(\"%s: %s\" % (k.lower(), v.lower()))\n return \" [\" + \", \".join(parts) + \"] \"", "def format(token, word, start, end, groundtruth, prediction, delimiter='\\t'):\n out = ''\n for t, w, s, e, g, p in zip(token, word, start, end, groundtruth, prediction):\n out += delimiter.join((t, w, s, e, g, p)) + '\\n'\n return out", "def make_tags(tag, word):\n tag1 = \"<{}>\".format(tag)\n tag2 = \"</{}>\".format(tag)\n final = tag1 + word + tag2\n return final", "def token_to_word(self, token):\n\n word = \" \" if token == 0 else self.index_to_word[token]\n return word", "def tag(self, tokens):\n if overridden(self.tag_sents):\n return self.tag_sents([tokens])[0]", "def _tag_of(entry: _LexiconEntry) -> str:\n return entry[\"tag\"].upper()", "def tokens_to_str(cls, tokens):\n\n assert np.iterable(tokens)\n result = []\n for t in tokens:\n # if type(t) in [str, int]:\n if isinstance(t, (int, long, basestring)):\n result.append('/'+str(t))\n elif type(t) == slice:\n start = str(t.start) if t.start is not None else ''\n stop = str(t.stop) if t.stop is not None else ''\n result.append('[%s:%s]' % (start, stop))\n elif 
type(t) in [tuple, list]:\n if not t:\n raise ValueError('invalid token')\n result.append('['+','.join(map(str, t))+']')\n else:\n raise ValueError('invalid token')\n return ''.join(result)", "def _tupstr(tuple_):\n return ', '.join(list(map(str, tuple_)))", "def _get_tag(self, tag):\n return self.prefix + tag", "def word_tag(self, word):\n if word[1] in (\"NN\", \"NNS\", \"NNP\", \"NNPS\"):\n return _wordnet.NOUN\n if word[1] in (\"JJ\", \"JJR\", \"JJS\"):\n return _wordnet.ADJ\n if word[1] in (\"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"):\n return _wordnet.VERB\n if word[1] in (\"RB\", \"RBR\", \"RBS\"):\n return _wordnet.ADV\n\n return None", "def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples", "def tokens_to_string(self, tokens):\n\n # Create a list of the individual words.\n words = [self.index_to_word[token]\n for token in tokens\n if token != 0]\n\n # Concatenate the words to a single string\n # with space between all the words.\n text = \" \".join(words)\n\n return text", "def split_tag(elem, tags):\n splited_tag = elem.split(TAG_SEP)\n if len(splited_tag) > 1:\n tag_prefix, tag = splited_tag\n assert tag in tags.tags\n assert tag_prefix in tags.iob\n else:\n tag = elem\n tag_prefix = None\n assert tag == tags.default\n return tag_prefix, tag", "def format_nested(token, word, start, end, groundtruth, prediction, delimiter='\\t'):\n out = ''\n for st, sw, ss, se, sg, sp in zip(token, word, start, end, groundtruth, prediction):\n out += format(st, sw, ss, se, sg, sp, delimiter) + '\\n'\n return out", "def tag(word: str, tags: list):\n open_tags = ['<' + tag + '>' for tag in tags]\n close_tags = ['</' + tag + '>' for tag in reversed(tags)]\n logger.debug('*************** %s ' %\n\n word)\n return ''.join(open_tags) + word + ''.join(close_tags)", "def _tuple_to_str(self, the_tuple):\r\n ret = \"\"\r\n for item in the_tuple:\r\n ret += (\" \" + str(item))\r\n return ret[1:]", "def _postprocess(\n self,\n tags: List[str],\n words: List[str],\n pos: bool = False,\n ):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def split_tag(chunk_tag):\n if chunk_tag == 'O':\n return ('O', None)\n return chunk_tag.split('-', maxsplit=1)", "def gen_type_tuple_string(self, name, node):\n return \"('{}', {})\".format(name, self.gen_type_string(node))", "def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def to_tag(self) -> str:\n if self._str_tag is not None:\n return self._str_tag\n subtags = ['und']\n if self.language:\n subtags[0] = self.language\n if self.extlangs:\n for extlang in sorted(self.extlangs):\n subtags.append(extlang)\n if self.script:\n subtags.append(self.script)\n if self.territory:\n 
subtags.append(self.territory)\n if self.variants:\n for variant in sorted(self.variants):\n subtags.append(variant)\n if self.extensions:\n for ext in self.extensions:\n subtags.append(ext)\n if self.private:\n subtags.append(self.private)\n self._str_tag = '-'.join(subtags)\n return self._str_tag", "def parse_for_pos_tagging(sentence):\n try:\n return \" \".join([token.form + \"/\" + token.upos for token in sentence])\n except TypeError: # if a POS tag is missing\n return \"\"", "def format_term(term: Union[BNode, Literal, URIRef, Variable]) -> str:\n if isinstance(term, URIRef):\n return str(term)\n elif isinstance(term, BNode):\n return '?v_' + str(term)\n elif isinstance(term, Literal):\n return format_literal(term)\n else:\n return term.n3()" ]
[ "0.65129", "0.6197551", "0.6189595", "0.603213", "0.59723246", "0.5791183", "0.56370145", "0.56172186", "0.5614327", "0.55962265", "0.5587963", "0.55618066", "0.5522836", "0.5503617", "0.5499369", "0.5470717", "0.5397113", "0.5374059", "0.5342552", "0.5339643", "0.53354275", "0.53332937", "0.5329793", "0.5324548", "0.5283398", "0.5268304", "0.5255988", "0.5247178", "0.5246655", "0.52336" ]
0.8627057
0
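Illustrative aside for the tuple2str record above (assumed inputs, not dataset content); paired with the str2tuple sketch earlier, the two functions round-trip a tagged token:

def tuple2str(tagged_token, sep='/'):
    word, tag = tagged_token
    if tag is None:
        return word
    assert sep not in tag, 'tag may not contain sep!'
    return '%s%s%s' % (word, sep, tag)

assert tuple2str(('fly', 'NN')) == 'fly/NN'    # word, separator, tag
assert tuple2str(('hello', None)) == 'hello'   # bare word when the tag is None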
Given a tagged sentence, return an untagged version of that sentence. I.e., return a list containing the first element of each tuple in C{tagged_sentence}. >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')]) ['John', 'saw', 'Mary']
def untag(tagged_sentence): return [w for (w, t) in tagged_sentence]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def untag(tagged_sentence):\n return [w for w, _ in tagged_sentence]", "def untag(self, tagged_sent, strict=True, verbose=False):\n word = \"\"\n for char, tag in tagged_sent:\n if verbose:\n print char, tag\n if tag in self.itags:\n if word:\n yield word\n word = char\n elif tag in self.otags:\n word += char\n else:\n if strict:\n raise TagError()\n if word:\n yield word\n word = \"\"\n yield char\n if word:\n yield word", "def strip_tags(tagged_sentences):\n untagged_sentences = []\n for taggedsent in tagged_sentences:\n untaggedsent = ''\n\tfor taggedword in taggedsent.split():\n\t word = re.split('(?<!\\\\\\)\\/', taggedword)[0]\n untaggedsent += word + ' '\n #print untaggedsent\n untagged_sentences.append(untaggedsent)\n return untagged_sentences", "def untag(self, uuid, tags=None):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.untag(uuid, tags)", "def strip_training_tags(self, sentence=None, sep=None):\n if sentence is None:\n sentence = self.hand_tagged\n if sep is None:\n sep = self.sep\n return [w.split(sep, 1)[0] for w in sentence]", "def tag_unigram(self, untagged_string: str):\n untagged_tokens = wordpunct_tokenize(untagged_string)\n tagger = self._load_model(\"unigram\")\n tagged_text = tagger.tag(untagged_tokens)\n return tagged_text", "def pos_tag(self,sentence):\n tagged = self.brill_tagger.tag(sentence.split())\n tagged_sentence = \" \".join([nltk.tag.tuple2str(tok) for tok in tagged])\n print tagged_sentence\n\n tag_list = [(each.split(\"/\")[0],each.split(\"/\")[1]) for each in tagged_sentence.split()]\n return tag_list", "def untag():\n form = TagSubscriptionForm(hidden_mode=True)\n if not form.validate_on_submit():\n abort(403)\n\n subscription = current_user.subscriptions.filter_by(\n channel_id=form.channel_id.data\n ).first_or_404()\n tag = current_user.tags.filter_by(name=form.tag_name.data).first_or_404()\n\n results = subscription.untag(tag.id)\n response = {\"success\": results}\n return jsonify(response)", "def tag(self, postagged_sentences):\n return [self.tag_sentence(sentence) for sentence in postagged_sentences]", "def getTagList(tags):\n tags = tags[1:len(tags)-1]\n return tags.split('><')", "def tag(s, tokenize=True, encoding=\"utf-8\"):\n tags = []\n for sentence in parse(s, tokenize, True, False, False, False, encoding).split():\n for token in sentence:\n tags.append((token[0], token[1]))\n return tags", "def pos_tag(self, sentence):\n tags = []\n tokens = sentence.split(\" \")\n for i in range(len(tokens)):\n tags.append('')\n for i in range (len(tokens)):\n feat = []\n feat.append(self.features(tokens,tags,i))\n tag_predicted = self.postagger.predict(feat)[0]\n tags[i] = tag_predicted\n return tags", "def tokenize_tag(tag):\n sentences = nltk.sent_tokenize(tag.text)\n sentence_words = []\n for sentence in sentences:\n words = nltk.casual_tokenize(sentence)\n lower_words = [w.lower() for w in words]\n filtered_words = [w for w in lower_words if w not in stop_words and not w.isdigit() and len(w) > 2]\n sentence_words += filtered_words\n return sentence_words", "def detag_string(self, string):\r\n counter = itertools.count(0)\r\n count = lambda m: '<%s>' % counter.next()\r\n tags = self.tag_pattern.findall(string)\r\n tags = [''.join(tag) for tag in tags]\r\n (new, nfound) = self.tag_pattern.subn(count, string)\r\n if len(tags) != nfound:\r\n raise Exception('tags dont match:' + string)\r\n return (new, tags)", "def stopword_removal_from_taggedwords(tagged_words):\n stops = set(stopwords.words('english'))\n tagged_words = [w for 
w in tagged_words if not w[0] in stops]\n return tagged_words", "def strip_tags(text):\n return \"\".join(\n [\n split_chunk(chunk).text\n for chunk in split_tagged_text_into_chunks(text)\n ]\n )", "def tag(self, sent):\n return [self.tag_word(w) for w in sent]", "def tag(self, sent):\n return [self.tag_word(w) for w in sent]", "def untag_event(request, tag_id, event_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n event = Event.objects.get(id=event_id)\n except:\n error += 'Couldn\\'t retrieve event ' + event_id + '.'\n\n if tag not in event.tags.all():\n error += 'This event isn\\'t tagged with this tag.'\n\n if not error:\n try:\n event.tags.remove(tag)\n except:\n error += 'Couldn\\'t remove tag from comment.'\n return render_to_response('feedback/taglist.html', {\n 'object': event, 'type': 'event', 'error': error,\n }, context_instance=RequestContext(request))", "def removeTags(self, words):\n\t\treturn re.sub(r'<.*?>', '', words)", "def true_tags (tagged_sentences):\n tags = []\n for sent in tagged_sentences:\n tags.extend([re.split('(?<!\\\\\\)\\/', word)[1] for word in sent.split()])\n return tags", "def tag_tnt(self, untagged_string: str):\n untagged_tokens = wordpunct_tokenize(untagged_string)\n tagger = self._load_model(\"tnt\")\n tagged_text = tagger.tag(untagged_tokens)\n return tagged_text", "def tags(self):\n return tuple([x.strip() for x in self._dict.get('tags').split(',')])", "def untag(self, tag):\n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n if isinstance(tag, six.string_types):\n try:\n tag = Tag.objects.get(slug=makeslug(tag), owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n self.tags.remove(tag)", "def remove_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.difference_update(tags)\n self.tags.difference_update(tags)", "def generate_tags(mail):\r\n stop_words = set(stopwords.words(\"english\"))\r\n words = word_tokenize(mail)\r\n\r\n punctuation = [',', '.', '!', ';', '-', '(', ')']\r\n filtered_sentence = []\r\n\r\n # Removing stop words and punctuation\r\n for word in words:\r\n if word not in stop_words:\r\n if word not in punctuation:\r\n filtered_sentence.append(word)\r\n\r\n # Keeping only nouns verbs adjectives\r\n tagged = nltk.pos_tag(filtered_sentence) # list of tuples\r\n final_list = []\r\n for tag in tagged:\r\n if tag[1] in parts_of_speech:\r\n final_list.append(tag[0])\r\n return final_list", "def pos_tag_sents(\n sentences: List[List[str]], engine: str = \"perceptron\", corpus: str = \"orchid\"\n) -> List[List[Tuple[str, str]]]:\n if not sentences:\n return []\n\n return [pos_tag(sent, engine=engine, corpus=corpus) for sent in sentences]", "def remove_tag(args):", "def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []", "def filter_tags(tagged_tokens, categories=None, ignore_categories=('O',), replace_by=None):\n if categories:\n categories = set(categories)\n if not replace_by:\n return [(token, tag) for token, tag in tagged_tokens if (tag in categories)]\n else:\n return [(token, tag) if (tag in categories) else (token, replace_by) for token, tag in tagged_tokens]\n else:\n ignore_categories = set(ignore_categories)\n if not replace_by:\n return [(token, tag) for token, tag in tagged_tokens if (tag not in ignore_categories)]\n else:\n return 
[(token, tag) if (tag not in ignore_categories) else (token, replace_by) for token, tag in tagged_tokens]" ]
[ "0.8872581", "0.688079", "0.6484743", "0.5917314", "0.58630955", "0.5849702", "0.5814519", "0.57450205", "0.565951", "0.5595256", "0.55470544", "0.55409217", "0.5509677", "0.54981333", "0.5497102", "0.54217565", "0.5420616", "0.5420616", "0.5396361", "0.5392199", "0.53684276", "0.53093165", "0.5285878", "0.52343655", "0.5228352", "0.52049536", "0.5192313", "0.5188088", "0.5162145", "0.5151719" ]
0.8875718
0
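Illustrative aside for the untag record above: a minimal runnable sketch using the doctest input from the query field; the expected output follows directly from the function's behavior.

def untag(tagged_sentence):
    # Keep only the word from each (word, tag) pair.
    return [w for (w, t) in tagged_sentence]

assert untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')]) == ['John', 'saw', 'Mary']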
Select only rows representing extended sources from a catalog table
def select_extended(cat_table): try: l = [len(row.strip()) > 0 for row in cat_table['Extended_Source_Name'].data] return np.array(l, bool) except KeyError: return cat_table['Extended']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_sources(cat_table, cuts):\n nsrc = len(cat_table)\n full_mask = np.ones((nsrc), bool)\n for cut in cuts:\n if cut == 'mask_extended':\n full_mask *= mask_extended(cat_table)\n elif cut == 'select_extended':\n full_mask *= select_extended(cat_table)\n else:\n full_mask *= make_mask(cat_table, cut)\n\n lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]]\n return lout", "def _query_sequence_sources(self):\n pass", "def fetch_fermi_extended_sources(catalog):\n BASE_URL = 'http://fermi.gsfc.nasa.gov/ssc/data/access/lat/'\n if catalog == '3FGL':\n url = BASE_URL + '4yr_catalog/LAT_extended_sources_v15.tgz'\n elif catalog == '2FGL':\n url = BASE_URL + '2yr_catalog/gll_psc_v07_templates.tgz'\n elif catalog == '1FHL':\n url = BASE_URL + '1FHL/LAT_extended_sources_v12.tar'\n else:\n ss = 'Invalid catalog: {}\\n'.format(catalog)\n raise ValueError(ss)\n\n filename = download_file(url, cache=True)\n tar = tarfile.open(filename, 'r')\n\n hdu_list = []\n for member in tar.getmembers():\n if member.name.endswith(\".fits\"):\n file = tar.extractfile(member)\n hdu = fits.open(file)[0]\n hdu_list.append(hdu)\n hdu_list = fits.HDUList(hdu_list)\n\n return hdu_list", "def getSourceSubset(self, selection=None):\n if not selection or selection.lower() == \"all\":\n return self.sources\n # sort by brightness\n from past.builtins import cmp\n from functools import cmp_to_key\n srclist0 = sorted(self.sources, key=cmp_to_key(lambda a, b: cmp(b.brightness(), a.brightness())))\n all = set([src.name for src in srclist0])\n srcs = set()\n for ispec, spec in enumerate(re.split(\"\\s+|,\", selection)):\n spec = spec.strip()\n if spec:\n # if first spec is a negation, then implictly select all sources first\n if not ispec and spec[0] in \"!-\":\n srcs = all\n if spec.lower() == \"all\":\n srcs = all\n elif self._re_bynumber.match(spec):\n negate, start, end = self._re_bynumber.match(spec).groups()\n sl = slice(int(start) if start else None, int(end) if end else None)\n if negate:\n srcs.difference_update([src.name for src in srclist0[sl]])\n else:\n srcs.update([src.name for src in srclist0[sl]])\n elif spec.startswith(\"-=\") or spec.startswith(\"!=\"):\n srcs.difference_update([src.name for src in srclist0 if getattr(src, spec[2:], None)])\n elif spec.startswith(\"=\"):\n srcs.update([src.name for src in srclist0 if getattr(src, spec[1:], None)])\n elif spec.startswith(\"-\") or spec.startswith(\"!\"):\n srcs.discard(spec[1:])\n else:\n srcs.add(spec)\n # make list\n return [src for src in srclist0 if src.name in srcs]", "def get_new_sourcedatasets(self):\n previous_study_version = self.get_previous_version()\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n if previous_study_version is not None:\n qs = SourceDataset.objects.filter(source_study_version=self)\n # We can probably write this with a join to be more efficient.\n previous_dataset_accessions = SourceDataset.objects.filter(\n source_study_version=previous_study_version\n ).values_list('i_accession', flat=True)\n qs = qs.exclude(i_accession__in=previous_dataset_accessions)\n return qs\n else:\n return SourceDataset.objects.none()", "def get_results_from_aggregation_sources(self, context):\n sources = context.getContentSources()\n results = []\n for source in sources:\n sresults = source.queryCatalog()\n if not sresults:\n continue\n results.append({\n 'id': source.id,\n 'title': source.Title(),\n 'description': source.Description(),\n 'uid': source.UID(),\n 'portal_type': sresults[0].portal_type,\n 
'brains': sresults,\n 'brains_count': len(sresults),\n })\n return results", "def get_source_query(self) -> QuerySet:\n raise NotImplementedError", "def pyrex_sources(self, sources, extension):\n new_sources = []\n ext_name = extension.name.split('.')[-1]\n for source in sources:\n (base, ext) = os.path.splitext(source)\n if ext == '.pyx':\n target_file = self.generate_a_pyrex_source(base, ext_name,\n source,\n extension)\n new_sources.append(target_file)\n else:\n new_sources.append(source)\n return new_sources", "def get_diffuse_sources(self, src_sel):\n extended = self._select_and_freeze(self.extended_sources, src_sel)\n for s in extended: # this seems redundant, but was necessary\n s.model.free[:] = False if src_sel.frozen(s) else s.free[:]\n sources.validate(s,self.nside, None)\n s.smodel = s.model\n \n return self.get_global_sources(src_sel.skydir()), extended", "def get_auxiliary_source_data(source: Union[Source, int]):\n\n from astra.database.catalogdb import (\n Catalog,\n CatalogToTIC_v8,\n TIC_v8 as TIC,\n TwoMassPSC,\n Gaia_DR2 as Gaia\n )\n\n catalogid = get_catalog_identifier(source)\n tic_dr = TIC.__name__.split(\"_\")[-1]\n gaia_dr = Gaia.__name__.split(\"_\")[-1]\n\n ignore = lambda c: c is None or isinstance(c, str)\n\n # Define the columns and associated comments.\n field_descriptors = [\n BLANK_CARD,\n (\" \", \"IDENTIFIERS\", None),\n (\"CAT_ID\", Catalog.catalogid, f\"SDSS-V catalog identifier\"),\n (\"TIC_ID\", TIC.id.alias(\"tic_id\"), f\"TESS Input Catalog ({tic_dr}) identifier\"),\n (\"GAIA_ID\", Gaia.source_id, f\"Gaia {gaia_dr} source identifier\"),\n BLANK_CARD,\n (\" \", \"ASTROMETRY\", None),\n (\"RA\", Catalog.ra, \"SDSS-V catalog right ascension (J2000) [deg]\"),\n (\"DEC\", Catalog.dec, \"SDSS-V catalog declination (J2000) [deg]\"),\n (\"GAIA_RA\", Gaia.ra, f\"Gaia {gaia_dr} right ascension [deg]\"),\n (\"GAIA_DEC\", Gaia.dec, f\"Gaia {gaia_dr} declination [deg]\"),\n (\"PLX\", Gaia.parallax, f\"Gaia {gaia_dr} parallax [mas]\"),\n (\"E_PLX\", Gaia.parallax_error, f\"Gaia {gaia_dr} parallax error [mas]\"),\n (\"PMRA\", Gaia.pmra, f\"Gaia {gaia_dr} proper motion in RA [mas/yr]\"),\n (\n \"E_PMRA\",\n Gaia.pmra_error,\n f\"Gaia {gaia_dr} proper motion in RA error [mas/yr]\",\n ),\n (\"PMDE\", Gaia.pmdec, f\"Gaia {gaia_dr} proper motion in DEC [mas/yr]\"),\n (\n \"E_PMDE\",\n Gaia.pmdec_error,\n f\"Gaia {gaia_dr} proper motion in DEC error [mas/yr]\",\n ),\n (\"V_RAD\", Gaia.radial_velocity, f\"Gaia {gaia_dr} radial velocity [km/s]\"),\n (\n \"E_V_RAD\",\n Gaia.radial_velocity_error,\n f\"Gaia {gaia_dr} radial velocity error [km/s]\",\n ),\n BLANK_CARD,\n (\" \", \"PHOTOMETRY\", None),\n (\n \"G_MAG\",\n Gaia.phot_g_mean_mag,\n f\"Gaia {gaia_dr} mean apparent G magnitude [mag]\",\n ),\n (\n \"BP_MAG\",\n Gaia.phot_bp_mean_mag,\n f\"Gaia {gaia_dr} mean apparent BP magnitude [mag]\",\n ),\n (\n \"RP_MAG\",\n Gaia.phot_rp_mean_mag,\n f\"Gaia {gaia_dr} mean apparent RP magnitude [mag]\",\n ),\n (\"J_MAG\", TwoMassPSC.j_m, f\"2MASS mean apparent J magnitude [mag]\"),\n (\"E_J_MAG\", TwoMassPSC.j_cmsig, f\"2MASS mean apparent J magnitude error [mag]\"),\n (\"H_MAG\", TwoMassPSC.h_m, f\"2MASS mean apparent H magnitude [mag]\"),\n (\"E_H_MAG\", TwoMassPSC.h_cmsig, f\"2MASS mean apparent H magnitude error [mag]\"),\n (\"K_MAG\", TwoMassPSC.k_m, f\"2MASS mean apparent K magnitude [mag]\"),\n (\"E_K_MAG\", TwoMassPSC.k_cmsig, f\"2MASS mean apparent K magnitude error [mag]\"),\n ]\n\n q = (\n Catalog.select(*[c for k, c, comment in field_descriptors if not ignore(c)])\n 
.distinct(Catalog.catalogid)\n .join(CatalogToTIC_v8, JOIN.LEFT_OUTER)\n .join(TIC)\n .join(Gaia, JOIN.LEFT_OUTER)\n .switch(TIC)\n .join(TwoMassPSC, JOIN.LEFT_OUTER)\n .where(Catalog.catalogid == catalogid)\n .dicts()\n )\n row = q.first()\n\n if row is None:\n log.warning(f\"Trouble getting auxillary data for Source {catalogid}. Using separate queries and cone searches.\")\n\n only_fields_of = lambda model: [c for k, c, comment in field_descriptors if not ignore(c) and not isinstance(c, Alias) and c.model == model]\n # Fill it with what we can.\n row = (\n Catalog\n .select(*only_fields_of(Catalog))\n .where(Catalog.catalogid == catalogid)\n .dicts()\n .first()\n )\n row.update(\n CatalogToTIC_v8\n .select(CatalogToTIC_v8.target_id.alias(\"tic_id\"))\n .where(CatalogToTIC_v8.catalogid == catalogid)\n .dicts()\n .first()\n or {\"tic_id\": None}\n )\n # Cone search Gaia and 2MASS\n # TODO: Don't do this!\n row.update(\n Gaia\n .select(*only_fields_of(Gaia))\n .where(Gaia.cone_search(row[\"ra\"], row[\"dec\"], 1.0 / 3600.0))\n .dicts()\n .first() or dict()\n )\n\n # TODO: Don't do this!\n row.update(\n TwoMassPSC\n .select(*only_fields_of(TwoMassPSC))\n .where(TwoMassPSC.cone_search(row[\"ra\"], row[\"dec\"], 1.0 / 3600.0, dec_col=\"decl\"))\n .dicts()\n .first() or dict()\n )\n\n # Damn. Floating point nan values are not allowed in FITS headers.\n default_values = {}\n data = []\n for header_key, field, comment in field_descriptors:\n if ignore(field):\n data.append((header_key, field, comment))\n else:\n field_name = field._alias if isinstance(field, Alias) else field.name\n if field_name in row:\n value = row[field_name]\n else:\n value = default_values.get(header_key, None)\n data.append(\n (\n header_key,\n value,\n comment,\n )\n )\n\n # Add carton and target information\n cartons, programs = get_cartons_and_programs(source) # ordered by priority\n\n data.extend(\n [\n BLANK_CARD,\n (\" \", \"TARGETING\", None),\n (\n \"CARTON_0\",\n cartons[0],\n f\"First carton for source (see documentation)\",\n ),\n (\"CARTONS\", \",\".join(cartons), f\"SDSS-V cartons\"),\n (\"PROGRAMS\", \",\".join(list(set(programs))), f\"SDSS-V programs\"),\n (\n \"MAPPERS\",\n \",\".join(list(set([p.split(\"_\")[0] for p in programs]))),\n f\"SDSS-V mappers\",\n ),\n ]\n )\n return data", "def getArticleRows(baseName, extId):\n pr = pubStore.PubReaderFile(baseName)\n for artRow, fileRows in pr.iterArticlesFileList():\n if artRow.externalId==extId:\n return artRow, fileRows", "def externalSubset(self, name, externalID, systemID):\n pass", "def source_list(self):\n return [\n source.Name for source in self.coordinator.data.sources if not source.Hidden\n ]", "def source_products(self, uuid):\n return self._backend.source_products(uuid)", "def loadSourceCatalog(self, filename):\n sourceCat = afwTable.SourceCatalog.readFits(filename)\n aliasMap = sourceCat.schema.getAliasMap()\n aliasMap.set(\"slot_ApFlux\", \"base_PsfFlux\")\n instFluxKey = sourceCat.schema[\"slot_ApFlux_instFlux\"].asKey()\n instFluxErrKey = sourceCat.schema[\"slot_ApFlux_instFluxErr\"].asKey()\n\n # print(\"schema=\", sourceCat.schema)\n\n # Source x,y positions are ~ (500,1500) x (500,1500)\n centroidKey = sourceCat.table.getCentroidSlot().getMeasKey()\n for src in sourceCat:\n adjCentroid = src.get(centroidKey) - lsst.geom.Extent2D(500, 500)\n src.set(centroidKey, adjCentroid)\n src.set(instFluxKey, 1000)\n src.set(instFluxErrKey, 1)\n\n # Set catalog coord\n for src in sourceCat:\n src.updateCoord(self.wcs)\n return sourceCat", "def 
add_row_filter(source, args, index):\n queries = []\n for subindex in range(1, 6):\n query = args.get('select-query%02d-%02d' % (index, subindex))\n if query:\n queries.append(query)\n reverse = (args.get('select-reverse%02d' % index) == 'on')\n if reverse:\n return source.without_rows(queries)\n else:\n return source.with_rows(queries)", "def by_source(self, source):\n return self.filter(source_object=source)", "def has_sources(self, extension=None):\r\n return (self.has_label('sources') and\r\n (not extension or\r\n (hasattr(self, 'sources') and\r\n any(source.endswith(extension) for source in self.sources))))", "def filter_by_type_and_id(src, object_type, object_id, source_name):\n filters = [\n Filter(\"type\", \"=\", object_type),\n Filter(\"id\", \"=\", object_id),\n Filter(\"external_references.source_name\", \"=\", source_name),\n ]\n results = src.query(filters)\n return remove_deprecated(results)", "def solr_sources(self):\n # conn = pysolr.Solr(settings.SOLR['SERVER'])\n q = {\n \"fq\": ['type:source', f'archive_i:{self.pk}'],\n \"fl\": [\"pk\",\n \"public_images_b\",\n 'display_name_s',\n 'cover_image_i',\n 'source_type_s',\n 'date_statement_s',\n 'surface_type_s'],\n \"rows\": 10000,\n \"sort\": [\"shelfmark_ans asc\"]\n }\n\n res = SolrConnection.search(\"*:*\", **q)\n if res.hits > 0:\n return res.docs\n else:\n return []", "def filter_sources(self, data):\n return tuple([d for d, s in zip(data, self.provides_sources)\n if s in self.sources])", "def generate_a_pyrex_source(self, base, ext_name, source, extension):\n return []", "def _sql_where(self, cursor, table, prefix=None, aggregate=False):\n assert False, \"subclass responsibility\"", "def construct_exptable(self, exp): \n \n # Statement for excluding the rows where fields with the option\n # exclude=True are False.\n exclude_fields = [field for field in exp.measurementmodel._meta.get_fields() \n if getattr(field, 'exclude', False)]\n exclude_columns = [field.column for field in exclude_fields]\n exclude_data = ' '.join(['AND %s IS NOT FALSE' % column for column in exclude_columns])\n \n sql = '''\n SELECT %s, %s \n FROM %s\n WHERE %s = %s %s\n ''' % (self.construct_SELECT_AS(exp), exp.biofield.column,\n exp.measurementmodel._meta.db_table,\n exp.measurementmodel._meta.get_field('experiment').column, exp.id, exclude_data)\n return sql", "def example():\n joined_table = [[1900, 170, 10], [0, 120, 10], [0, 120, 100], [2010, 120, 10], [1650, 200, 10]]\n remove_columns = [2]\n example_table = [[1900, 170], [0, 120]]\n\n annotated_table = query.decorate_table(example_table, remove_columns, joined_table)\n\n joined_schema = [\"I SHOULD NOT BE VISIBLE\", \"birth\", \"height\"] # the decorator column should never be in the output\n tree = decision_tree.make_tree(annotated_table)\n\n print(tree)\n print(query.where_segment(joined_schema, tree))", "def _select_data(\n self, db: str, table: str, column_filters: Dict[str, str]\n ) -> List[List]:\n pass", "def getCatalogs():", "def get_results_from_aggregation_sources(self, context):", "def getOLAPSource():", "def select(self, table, columns=['*'], condition='', orderby='', limit=0, isFetchAll=True):\n return True" ]
[ "0.60211074", "0.5248736", "0.5232373", "0.5228239", "0.5216698", "0.514871", "0.51264745", "0.5091105", "0.50576377", "0.50509965", "0.500945", "0.50049895", "0.49812737", "0.49577492", "0.4953649", "0.49411482", "0.49411097", "0.49344343", "0.4923671", "0.4913756", "0.4913236", "0.48461935", "0.48370817", "0.4827357", "0.48131776", "0.4793487", "0.47622725", "0.4756061", "0.46744353", "0.4667473" ]
0.67723024
0
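Illustrative aside for the select_extended record above: a minimal runnable sketch; the astropy dependency and the two-row example table are assumptions made for illustration, not part of the dataset.

import numpy as np
from astropy.table import Table  # assumed dependency for the catalog table

def select_extended(cat_table):
    # Rows with a non-blank Extended_Source_Name are treated as extended sources.
    try:
        flags = [len(name.strip()) > 0 for name in cat_table['Extended_Source_Name'].data]
        return np.array(flags, bool)
    except KeyError:
        # Fall back to a boolean 'Extended' column when the name column is absent.
        return cat_table['Extended']

cat = Table({'Source_Name': ['SRC1', 'SRC2'], 'Extended_Source_Name': ['', 'W44   ']})
print(select_extended(cat))  # -> [False  True]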
Select only rows passing a set of cuts from a catalog table
def select_sources(cat_table, cuts): nsrc = len(cat_table) full_mask = np.ones((nsrc), bool) for cut in cuts: if cut == 'mask_extended': full_mask *= mask_extended(cat_table) elif cut == 'select_extended': full_mask *= select_extended(cat_table) else: full_mask *= make_mask(cat_table, cut) lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]] return lout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subset(df, constraints):\n for constraint in constraints:\n subset = df.loc[df[constraint[0]].isin(constraint[1])]\n df = subset\n return subset", "def get_table_subset(table, batches):\n idxs = np.array([])\n for batch in batches:\n idxs = np.append(idxs, np.where(table['batch'] == batch)[0])\n\n idxs = idxs.astype(int)\n return table.iloc[idxs]", "def __line_id_subset(self, ids):\n idcs = []\n for idx in range(len(self.table)):\n data = self.table.iloc[idx]\n # extract line_id\n word_id = data['word_id']\n line_id = self.word_id_to_line_id(word_id=word_id)\n # map to indices\n if line_id in ids:\n idcs.append(idx)\n sub_set = self.sub_set(idcs=idcs)\n return sub_set", "def cn_filter(df, binary_cutoff=12):\n del_df = (df.ix['Deletion'].dropna(1) < 0).astype(int)\n del_df = del_df[del_df.sum(1) >= binary_cutoff]\n del_df.index = del_df.index.droplevel(1)\n del_df = del_df.T\n amp_df = (df.ix['Amplification'].dropna(1) > 0).astype(int)\n amp_df = amp_df[amp_df.sum(1) >= binary_cutoff]\n amp_df.index = amp_df.index.droplevel(1)\n amp_df = amp_df.T\n return amp_df, del_df", "def run_get_15k_row_subset():\n get_15k_row_subset('politics_30_months_comments_cleaned_standardized_vader_flair.csv',\n 'politics_30_months_comments_cleaned_standardized_vader_flair_15k.csv')", "def filter_catalogs(conn, catalogs, res): \n # Determine which resolution range the image belongs in\n for config, res_range in res_dict.items():\n if res_range[0] < res <= res_range[1]:\n use_range = res_range\n # Combine highest resolutions to allow for more catalogs\n if config == 'A' or config == 'B':\n use_range = (res_dict['A'][0], res_dict['B'][1])\n\n # Find all catalogs that fall into the adequate resolution range\n cur = conn.cursor()\n filtered_catalogs = []\n for catalog in catalogs:\n try:\n catalog_res = catalogio.catalog_dict[catalog]['resolution']\n except KeyError:\n cur.execute('''SELECT resolution FROM radcat.catalogs\n WHERE name = %s''', (catalog, ))\n catalog_res = cur.fetchone()[0]\n if use_range[0] < catalog_res <= use_range[1]:\n filtered_catalogs.append(catalog)\n\n cur.close()\n\n return filtered_catalogs", "def selectCatalytic(self,mol=None, sele=\"all\"):\n\n\t\tif mol == None:\n\t\t\tmol = self.design\n\n\t\tcat = []\n\t\tfor c in mol.catalytic:\n\t\t\tcat.append(int(c.file_id))\n\n\t\tif len(cat) > 0:\n\t\t\tstrcat = []\n\t\t\tfor i in cat:\n\t\t\t\tstrcat.append(str(i))\n\n\t\t\tmylist = string.join(strcat, \",\")\n\t\t\tcmd.delete(\"catalytic\")\n\t\t\tcmd.select(\"catalytic\", \"(\" + sele + \" & resi \" + mylist + \")\")\n\t\t\tcmd.disable(\"catalytic\")\n\t\telse:\n\t\t\tprint \"no catalytic residues\"", "def index_selecting():\n df = pd.read_csv('data/ad_feature.csv',header=0) #type:pd.DataFrame\n print df[:2] , df[2: ] #前两行\n\n df.iloc[:2 , :]\n df.iloc[:2, [2,3] ] # 第 2 列和 第3列\n\n # df.loc[row_indexer,column_indexer]\n df.loc[3, ['cate_id','price']]\n\n df[['cate_id', 'price']]\n\n #boolean index\n df[ df['price'] > 1000]\n df[ (df['price'] > 1000) & (df['price'] < 2000)]\n\n\n\n df[df['cate_id'].isin([6261])]\n\n #select by callable\n\n\n # .loc, .iloc, and also [] indexing can accept a callable as indexer\n\n\n df.loc[lambda d: d.price > 2000, :]", "def contamination(store, cutoff=50, filter_srrs=None, keep_srrs=None):\n\n df = store['prealn/workflow/fastq_screen'].copy()\n df.reset_index(inplace=True)\n df = df[['srx', 'srr', 'reference', 'one_hit_one_library_percent']].set_index(['srx', 'srr', 'reference']).unstack()\n df.columns = df.columns.droplevel(0)\n 
df.reset_index(inplace=True)\n\n df = remove_rows(df, 'srr', filter_srrs)\n df = keep_rows(df, 'srr', keep_srrs)\n\n return df.loc[df['dm6'] >= cutoff, ['srx', 'srr']]", "def col_subset(self, patt, rmprefix=None):\n cols = self.widedf.columns\n want = [bool(re.search('^sesid$|^age$|'+patt, x)) for x in cols]\n subset = self.widedf.iloc[:, want]\n if rmprefix is None:\n # assume\n # * the best prefix to remove is from the first non-{age,id} column \n # * prefix is any text before the first '_'\n subset_specifc_cols = [x for x in cols[want] if x not in ['sesid','age']]\n rmprefix = subset_specifc_cols[0].split(\"_\")[0]\n if rmprefix:\n subset.columns = [re.sub(f'^{rmprefix}_','',x) for x in subset.columns]\n return subset.dropna()", "def show_diverse_recs(res, threshold):\n rec_ids = [] # result list\n while len(rec_ids) < threshold:\n for clust in res[\"CENTROID\"].unique():\n cluster_rec = res[res[\"CENTROID\"] == clust]\n if len(rec_ids) < threshold:\n for i in cluster_rec.index:\n if i in rec_ids:\n continue\n else:\n rec = i\n if rec not in rec_ids:\n rec_ids.append(rec) # add unique rec\n break\n else:\n continue\n else:\n break\n # return subset of df with re-arranged items\n return res.loc[rec_ids]", "def chose_only_hypothesis_colums(df):\n lst = ['abv', 'ibu', 'gravity', 'abv_min', 'abv_max', 'ibu_min',\n 'ibu_max', 'srm_min', 'srm_max', 'og_min', 'fg_min', 'fg_max']\n return df[lst]", "def subset(self, *args, **kwargs):\n\t\tgs = copy.copy(self)\n\t\tqueryStrings = kwargs['queryStrings'] if 'queryStrings' in kwargs else args[0] if args else []\n\t\tif isinstance(queryStrings, str) or isinstance(queryStrings, bytes):\t# assume queryString was specified as a string\n\t\t\tqueryStrings = [queryStrings]\n\t\n\t\tif len(queryStrings)==0:\n\t\t\treturn gs\n\t\t\t\n\t\tdf = gs._dataframe\n\t\t\n\t\tif 'species' in kwargs and kwargs['species'] in ['MusMusculus','HomoSapiens']:\n\t\t\tdf = df[df['Species']==kwargs['species']]\n\t\t\t\n\t\tcaseSensitive = kwargs.get('caseSensitive', False)\n\t\tif not caseSensitive: queryStrings = [item.lower() for item in queryStrings]\n\t\t\n\t\tmatchSubstring = kwargs.get('matchSubstring', True)\n\t\t\n\t\t# determine which columns to search\n\t\tsearchColumns = kwargs.get('searchColumns')\n\t\tallColumns = ['EnsemblId','EntrezId','GeneSymbol','Synonyms','Description']\n\t\tif searchColumns:\n\t\t\tsearchColumns = ['EnsemblId' if item=='GeneId' or item=='geneId' else item for item in searchColumns]\n\t\tif searchColumns and len(set(allColumns).intersection(set(searchColumns)))>0:\n\t\t\tsearchColumns = list(set(allColumns).intersection(set(searchColumns)))\n\t\telse:\n\t\t\tsearchColumns = allColumns\n\t\t\n\t\trowsToKeep = set()\n\t\tdf = df.reset_index()\n\t\tfor column in searchColumns:\n\t\t\tfor rowIndex,value in df[column].iteritems():\n\t\t\t\tif rowIndex in rowsToKeep or not value: continue\n\t\t\t\tif not caseSensitive: value = value.lower()\n\t\t\t\tfor queryString in queryStrings:\n\t\t\t\t\tif matchSubstring and queryString in value or (not matchSubstring and queryString==value):\n\t\t\t\t\t\trowsToKeep.add(rowIndex)\n\t\t\t\t\t\tbreak\n\n\t\tgs._dataframe = df.loc[list(rowsToKeep),:].set_index('EnsemblId')\n\t\treturn gs", "def filter_by_occurrences(df, column, cut):\n counts = df[column].value_counts(sort=False)\n all_entries = counts[counts >= cut].index.values\n all_entries.sort()\n return all_entries", "def column_selection(type1, cat):\n col_selection = []\n for col in cat.colnames:\n if col == \"_RAJ2000\":\n continue\n if col == 
\"_DEJ2000\":\n continue\n desc = cat[col].info.description\n f = any([(ban in desc) for ban in BANNED_KEYWORDS])\n if f is False:\n col_selection.append(col)\n return col_selection", "def filter_rare_genes(data, *extra_data, cutoff=0, min_cells=5):\n gene_sums = measure.gene_capture_count(data, cutoff=cutoff)\n keep_genes_idx = gene_sums >= min_cells\n data = select.select_cols(data, *extra_data, idx=keep_genes_idx)\n return data", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def feature_subset(self,node,db,labels,ids):\n return None", "def filter_by(df, constraints):\n indexer = [constraints[name] if name in constraints else slice(None)\n for name in df.index.names]\n return df.loc[tuple(indexer)] if len(df.shape) == 1 else df.loc[tuple(indexer),]", "def filter_res(rows, res_class):\n keep = []\n match_logger.info('Limiting to sources in resolution class {} '\n '({}\" < BMIN <= {}\")'.format(res_class,\n res_dict[res_class][0],\n res_dict[res_class][1]))\n for row in rows:\n if row['res_class'] == res_class:\n keep.append(row)\n\n match_logger.info(' -- {} sources remaining'.format(len(keep)))\n\n return keep", "def choose_rows(rows):\n\n # Ensure that the object is not empty.\n assert(len(rows) > 0)\n\n # The following rows preferentially select data where the device_id=64 (i.e the GLONASS over the Trimble).\n # Also select by data quality (1 = good value, 2 = probably good value).\n # If the data quality is not good, then do not select, even if there is no other point for that time.\n # We are only interested in the good data at this point.\n if len(rows) >= 1 and rows[0]['device_id'] == 64 and rows[0]['measureland_qualifier_flag_overall'] == 1:\n return rows[0]\n\n elif len(rows) >=2 and rows[1]['device_id'] == 64 and rows[1]['measureland_qualifier_flag_overall'] == 1:\n return rows[1]\n\n elif len(rows) >= 3 and rows[2]['device_id'] == 64 and rows[2]['measureland_qualifier_flag_overall'] == 1:\n return rows[2]\n\n elif len(rows) >= 1 and rows[0]['device_id'] == 63 and rows[0]['measureland_qualifier_flag_overall'] == 1:\n return rows[0]\n\n elif len(rows) >=2 and rows[1]['device_id'] == 63 and rows[1]['measureland_qualifier_flag_overall'] == 1:\n return rows[1]\n\n elif len(rows) >= 3 and rows[2]['device_id'] == 63 and rows[2]['measureland_qualifier_flag_overall'] == 1:\n return rows[2]\n\n elif len(rows) == 1 and rows[0]['measureland_qualifier_flag_overall'] == 2: # for the first row which has a value\n # of 3, because QC was not able to tell otherwise\n return rows[0]\n\n return None", "def subset_(self):\n return self.X.ix[:, self.important_features_]", "def check_subset(P1,P2,k=-1):", "def select_fits_subset(self, pre_processing=None, binning=None,\n exposure=(None, 'max'), reduction=None,\n observatory=None, jd=(None, 'exact')):\n self.data_subset = self.all_data.copy()\n\n # Select on pre-processing if provided.\n if pre_processing:\n self.data_subset = self.data_subset.loc[\n self.data_subset['PreProcessing'] == pre_processing]\n\n # Select on binning if provided.\n if binning:\n self.data_subset = self.data_subset.loc[\n self.data_subset['Binning'] == binning]\n\n # Select on exposure if provided.\n if exposure[0]:\n if exposure[1] == 'exact':\n self.data_subset = self.data_subset.loc[\n self.data_subset['Exposure'] == exposure[0]]\n elif exposure[1] == 'min':\n self.data_subset = self.data_subset.loc[\n self.data_subset['Exposure'] > exposure[0]]\n elif exposure[1] == 'max':\n self.data_subset = self.data_subset.loc[\n 
self.data_subset['Exposure'] < exposure[0]]\n elif exposure[1] == 'between':\n self.data_subset = self.data_subset.loc[\n (self.data_subset['Exposure'] > exposure[0][0]) &\n (self.data_subset['Exposure'] < exposure[0][1])]\n\n # Select on reduction if provided.\n if reduction:\n self.data_subset = self.data_subset.loc[\n self.data_subset['Reduction'] == reduction]\n\n # Select on observatory if provided.\n if observatory:\n self.data_subset = self.data_subset.loc[\n self.data_subset['Observatory'] == observatory]\n\n # Select on jd if provided.\n if jd[0]:\n if jd[1] == 'exact':\n if isinstance(jd[0], list):\n self.data_subset = self.data_subset.loc[\n self.data_subset['JD'].isin(jd[0])]\n else:\n self.data_subset = self.data_subset.loc[\n self.data_subset['JD'] == jd[0]]\n elif jd[1] == 'except':\n if isinstance(jd[0], list):\n self.data_subset = self.data_subset.drop(\n self.data_subset[self.data_subset[\n 'JD'].isin(jd[0])].index)\n else:\n self.data_subset = self.data_subset.drop(\n self.data_subset[self.data_subset['JD']\n == jd[0]].index)\n elif jd[1] == 'after':\n self.data_subset = self.data_subset.loc[\n self.data_subset['JD'] > jd[0]]\n elif jd[1] == 'before':\n self.data_subset = self.data_subset.loc[\n self.data_subset['JD'] < jd[0]]\n elif jd[1] == 'between':\n self.data_subset = self.data_subset.loc[\n (self.data_subset['JD'] > jd[0][0]) &\n (self.data_subset['JD'] < jd[0][1])]\n\n # Sort by JD and reset index.\n self.data_subset = self.data_subset.sort_values(\n by=['JD'], ascending=True).reset_index(drop=True)\n\n # Check spectra found:\n if len(self.data_subset) == 0:\n raise FileExistsError(\n 'No spectra found for that location or subset.')\n\n print('Selecting {} spectra from fits dataset.'.format(\n len(self.data_subset)))", "def _select_by_range(self, disc_low, disc_high):\n sqlstmt = \"SELECT h FROM %s WHERE d>=? 
and d<=?\" % self.VIEW\n pickup = self.cursor.execute(sqlstmt, (-disc_high, -disc_low))\n return [h[0] for h in pickup]", "def _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).')\n\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception('--origlang and --subset supports only *.sgm files, not %s', rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception('No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub(r'.* origlang=\"([^\"]+)\".*\\n', '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub(r'.* docid=\"([^\"]+)\".*\\n', '\\\\1', line)\n if not re.search(subset, doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence,keep in zip(sys, indices_to_keep) if keep] for sys in systems]", "def select(self, table, columns=['*'], condition='', orderby='', limit=0, isFetchAll=True):\n return True", "def _select_by_range(self, disc_low, disc_high):\n sqlstmt = \"SELECT h FROM %s WHERE d>=? and d<=?\" % self.VIEW\n pickup = self.cursor.execute(sqlstmt, (disc_low, disc_high,))\n return [h[0] for h in pickup]", "def subset_corpus(text_corpus, index_column_name, not_preprocessed_article_list):\n\n text_corpus = text_corpus[text_corpus[\n index_column_name].isin(not_preprocessed_article_list)]\n return text_corpus", "def selectlayers(lyrindex,selection):\n return [ [item[2].replace('D=','') ,item[3],item[4],item[-1].replace('range=','') ] \n for item in lyrindex if [item[3],item[4]] in selection ]" ]
[ "0.5679256", "0.54460526", "0.5423245", "0.54030055", "0.5230312", "0.5216821", "0.5212075", "0.5188647", "0.51706034", "0.5072302", "0.5045124", "0.5038879", "0.5030005", "0.50211084", "0.50056404", "0.49777895", "0.49624884", "0.4951754", "0.4933273", "0.4932415", "0.49091673", "0.49032915", "0.48872775", "0.48838824", "0.4878526", "0.48751694", "0.487363", "0.48691267", "0.48688", "0.4858287" ]
0.6428772
0
Read the yaml file for a particular split key
def read_catalog_info_yaml(self, splitkey): catalog_info_yaml = self._name_factory.catalog_split_yaml(sourcekey=splitkey, fullpath=True) yaml_dict = yaml.safe_load(open(catalog_info_yaml)) # resolve env vars yaml_dict['catalog_file'] = os.path.expandvars(yaml_dict['catalog_file']) yaml_dict['catalog_extdir'] = os.path.expandvars(yaml_dict['catalog_extdir']) return yaml_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, item):\n try:\n if \".\" in item:\n keys = item.split(\".\")\n else:\n return self.data[item]\n element = self.data[keys[0]]\n for key in keys[1:]:\n element = element[key]\n except KeyError:\n raise KeyError(f\"The key '{item}' could not be found in the yaml file '{self.filename}'\")\n except Exception as e:\n print(e)\n raise ValueError(\"unkown error\")\n return element", "def test_load_config_image_from_yaml_nested_keys(self):\n with open(\".gitlab.yml\", \"w\") as f:\n f.write(\"somewhere:\\n\")\n f.write(\" down:\\n\")\n f.write(\" here: dummian:8.2\\n\")\n\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: !from_yaml .gitlab.yml somewhere.down.here\\n\")\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert config.image == \"dummian:8.2\"", "def loadfrom_yaml(key, path):\n\twith open(path, 'r') as f:\n\t\td = yaml.load(f)\n\t\tnew_namespace(key)\n\t\t\n\t\t# ns = get_namespace(key)\n\n\t\t# for key, value in d.items():\n\t\t# \t_recurse(0, key, value, ns)", "def read_config(name):\n import yaml\n fname=get_config_file(name)\n #print(\"reading:\",fname)\n with open(fname) as fobj:\n data=yaml.load(fobj)\n return data", "def load_yaml_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n\n try:\n if sys.version_info[0] > 2:\n f = open(fn, 'r', encoding='utf8')\n else:\n f = open(fn, 'r')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening YAML file='+fn+' ('+format(e)+')'}\n\n try:\n s = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading YAML file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n try:\n d = yaml.load(s, Loader=yaml.FullLoader)\n except Exception as e:\n return {'return': 1, 'error': 'problem parsing YAML from file='+fn+' ('+format(e)+')'}\n\n return {'return': 0, 'dict': d}", "def test_02_ReadFile(self):\n l_node = config_tools.Yaml(self.m_pyhouse_obj).read_yaml(self.m_filename)\n l_config = l_node.Yaml\n # print(PrettyFormatAny.form(l_node, 'C1-02-A'))\n # print(PrettyFormatAny.form(l_config, 'C1-02-B'))\n self.assertEqual(l_config['Location']['Street'], '1600 Pennsylvania Ave NW')\n self.assertEqual(len(l_config['Location']), 10)", "def read_test_file(path):\n #TODO Handle multiple test sets in a given doc\n teststruct = yaml.safe_load(os.path.expandvars(read_file(path)))\n return teststruct", "def read_sparv_config():\n data = {}\n if sparv_config_file.is_file():\n try:\n with open(sparv_config_file, encoding=\"utf-8\") as f:\n data = yaml.load(f, Loader=yaml.FullLoader)\n except:\n data = {}\n return data", "def read_config(self,confile):\n\n\n print(\"reading:\",confile)\n with open(confile) as parf:\n data=yaml.load(parf)\n\n\n return data", "def readModelParams(paramsFile, key=None): \n if paramsFile is None:\n return {}\n try:\n with open(paramsFile) as fp:\n modelParams = yaml.load(fp, Loader=yaml.FullLoader)\n if key is not None:\n if key in modelParams:\n modelParams = modelParams[key]\n # Force to use name of file actually being read\n modelParams['params'] = paramsFile\n except Exception:\n myerror(f'Could not open params file: {paramsFile}')\n return modelParams", "def loader_from_key(key):\n\n if \":\" in key:\n return key.split(\":\")\n return key, None", "def readPipelines(pipelines=default_pipelines):\n with open(pipelines, 'r') as handle:\n return yaml.load(handle, Loader=yaml.FullLoader)", "def _load_data_yaml(self, pathname): \n pathname = self._yaml_extension(pathname)\n\n with open(pathname) as file:\n traj_data = yaml.load(file, 
Loader=yaml.FullLoader)\n \n return traj_data", "def input_data(self):\n return read_yaml(self.file_path)", "def readKey(self, keyPath):\n\t\ttry:", "def read_yaml_file(yaml_file):\n with open(yaml_file, 'r') as yfile:\n loaded_file = yaml.safe_load(yfile)\n return loaded_file", "def _read_config(path):\n with open(path) as f:\n data = f.read()\n data = os.path.expandvars(data)\n data = yaml.safe_load(data)\n return data", "def get(self, key):\n try:\n return yaml.load(self.DB.get(self.salt + str(key)).decode())\n except Exception as e:\n return None", "def read_from_yaml(file_path, Loader=None):\n import yaml\n if Loader is None:\n Loader = yaml.FullLoader\n if os.path.isfile(file_path):\n with open(file_path, 'r') as stream:\n data = yaml.load(stream, Loader=Loader)\n return data\n else:\n raise Exception('File: {} does not exist.'.format(file_path))", "def read_yaml_file(filepath: str) -> Dict:\n return yaml.safe_load(read_file(filepath))", "def read_yaml(yaml_path):\n with open(yaml_path) as f:\n yaml_data = yaml.load(f, Loader=yaml.FullLoader)\n\n return yaml_data", "def _read_input(input_file):\n with open(input_file, 'r') as f:\n input_dict = yaml.load(f, yaml.SafeLoader)\n # dafi inputs\n inputs_dafi = input_dict['dafi']\n inputs_dafi['save_level'] = inputs_dafi.get('save_level', 'time')\n # inverse method inputs\n if 'inverse' not in input_dict or input_dict['inverse'] is None:\n inputs_inverse = dict()\n else:\n inputs_inverse = input_dict['inverse']\n # physics model inputs\n if 'model' not in input_dict or input_dict['model'] is None:\n inputs_model = dict()\n else:\n inputs_model = input_dict['model']\n return inputs_dafi, inputs_inverse, inputs_model", "def load_yaml(fname):\n with open(fname) as f:\n val = yaml.safe_load(os.path.expandvars(f.read()))\n return val", "def read_exercise_yaml(path_yaml):\n exer_dict = {}\n with open(path_yaml, 'r') as stream:\n try:\n exer_dict = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return exer_dict", "def read_config(self, config_filename):", "def load_yaml_file(yaml_file):\n try:\n # Get the configuration parameters which contain the region, vpc name, template filename, VPC CIDR blocks\n s = open(yaml_file).read()\n config = list(yaml.load_all(s))[0]\n\n except Exception as e:\n # We're expecting the user parameters to be encoded as YAML\n # so we can pass multiple values. If the YAML can't be decoded\n # then return failure with a helpful message.\n print(e)\n raise Exception('Input configuration parameters could not be decoded as YAML')\n\n return config", "def get_cfg_from_yaml(self):\n try:\n with open(self.parsed_cfg_path, 'r') as cfg_yaml:\n self.from_yaml_cfg_dict = yaml.load(cfg_yaml)\n except Exception as exc:\n print(exc)\n traceback.print_exc()\n self.from_yaml_cfg_dict = {}", "def _load_datas(self) -> tp.Dict[str, dict]:\n with open(self._file, \"r\") as stream:\n try:\n load: tp.Dict[str, dict] = yaml.safe_load(stream)\n logger.info(\"YAML imported\")\n return load\n except yaml.YAMLError as exc:\n logger.debug(\"YAML import error : %s\", exc)\n raise", "def read_yaml(preset_file: Text) -> Dict:\n with open(preset_file, \"r\") as preset_file:\n return yaml.safe_load(preset_file)", "def read(self, key):\n raise NotImplementedError" ]
[ "0.59335405", "0.5843199", "0.5721483", "0.56454915", "0.5619514", "0.56168664", "0.5573831", "0.55296016", "0.5514136", "0.54911107", "0.54887116", "0.5482527", "0.5466847", "0.54572874", "0.5439963", "0.54187524", "0.5408526", "0.540396", "0.5391459", "0.53888166", "0.5377506", "0.53608143", "0.5321774", "0.5320561", "0.5317025", "0.53053916", "0.52779746", "0.5277917", "0.5277744", "0.5242968" ]
0.6570766
0
Build a CatalogInfo object
def build_catalog_info(self, catalog_info): cat = SourceFactory.build_catalog(**catalog_info) catalog_info['catalog'] = cat # catalog_info['catalog_table'] = # Table.read(catalog_info['catalog_file']) catalog_info['catalog_table'] = cat.table catalog_info['roi_model'] =\ SourceFactory.make_fermipy_roi_model_from_catalogs([cat]) catalog_info['srcmdl_name'] =\ self._name_factory.srcmdl_xml(sourcekey=catalog_info['catalog_name']) return CatalogInfo(**catalog_info)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_catalog_comp_info(self, full_cat_info, split_key, rule_key, rule_val, sources):\n merge = rule_val.get('merge', True)\n sourcekey = \"%s_%s_%s\" % (\n full_cat_info.catalog_name, split_key, rule_key)\n srcmdl_name = self._name_factory.srcmdl_xml(sourcekey=sourcekey)\n srcmdl_name = self._name_factory.fullpath(localpath=srcmdl_name)\n kwargs = dict(source_name=\"%s_%s\" % (full_cat_info.catalog_name, rule_key),\n source_ver=split_key,\n sourcekey=sourcekey,\n srcmdl_name=srcmdl_name,\n source_names=sources,\n catalog_info=full_cat_info,\n roi_model=SourceFactory.copy_selected_sources(full_cat_info.roi_model,\n sources))\n if merge:\n return CompositeSourceInfo(**kwargs)\n return CatalogSourcesInfo(**kwargs)", "def newCatalog():\n catalog = {'videosContext': None,\n 'caraContenido': None,\n 'musicalGenero': None,\n 'fechaMusica': None}\n\n catalog['videosContext'] = lt.newList('ARRAY_LIST')\n catalog['caraContenido'] = mp.newMap(30,\n maptype='PROBING',\n loadfactor=0.4)\n catalog['musicaGenero'] = mp.newMap(30,\n maptype='PROBING',\n loadfactor=0.4)\n catalog['fechaMusica'] = om.newMap('RBT')\n\n return catalog", "def initCatalog():\n catalog = model.newCatalog()\n return catalog", "def initCatalog():\n catalog = model.newCatalog()\n return catalog", "def initCatalog():\n catalog = model.newCatalog()\n return catalog", "def make_catalog_comp_info_dict(self, catalog_sources):\n catalog_ret_dict = {}\n split_ret_dict = {}\n for key, value in catalog_sources.items():\n if value is None:\n continue\n if value['model_type'] != 'catalog':\n continue\n versions = value['versions']\n for version in versions:\n ver_key = \"%s_%s\" % (key, version)\n source_dict = self.read_catalog_info_yaml(ver_key)\n try:\n full_cat_info = catalog_ret_dict[key]\n except KeyError:\n full_cat_info = self.build_catalog_info(source_dict)\n catalog_ret_dict[key] = full_cat_info\n\n try:\n all_sources = [x.strip() for x in full_cat_info.catalog_table[\n 'Source_Name'].astype(str).tolist()]\n except KeyError:\n print(full_cat_info.catalog_table.colnames)\n used_sources = []\n rules_dict = source_dict['rules_dict']\n if rules_dict is None:\n rules_dict = {}\n split_dict = {}\n for rule_key, rule_val in rules_dict.items():\n # full_key =\\\n # self._name_factory.merged_sourcekey(catalog=ver_key,\n # rulekey=rule_key)\n sources = select_sources(\n full_cat_info.catalog_table, rule_val['cuts'])\n used_sources.extend(sources)\n split_dict[rule_key] = self.make_catalog_comp_info(\n full_cat_info, version, rule_key, rule_val, sources)\n\n # Now deal with the remainder\n for source in used_sources:\n try:\n all_sources.remove(source)\n except ValueError:\n continue\n rule_val = dict(cuts=[],\n merge=source_dict['remainder'].get('merge', False))\n split_dict['remain'] = self.make_catalog_comp_info(\n full_cat_info, version, 'remain', rule_val, all_sources)\n\n # Merge in the info for this version of splits\n split_ret_dict[ver_key] = split_dict\n\n self._catalog_comp_info_dicts.update(catalog_ret_dict)\n self._split_comp_info_dicts.update(split_ret_dict)\n return (catalog_ret_dict, split_ret_dict)", "def get_catalog(self) -> Dict[str, str]:\n return self.catalog", "def getCatalog(unique_name):", "def init():\n catalog = model.newCatalog()\n return catalog", "def make_catalog_comp_dict(**kwargs):\n library_yamlfile = kwargs.pop('library', 'models/library.yaml')\n csm = kwargs.pop('CatalogSourceManager', CatalogSourceManager(**kwargs))\n if library_yamlfile is None or library_yamlfile == 'None':\n yamldict = {}\n else:\n 
yamldict = yaml.safe_load(open(library_yamlfile))\n catalog_info_dict, comp_info_dict = csm.make_catalog_comp_info_dict(yamldict)\n return dict(catalog_info_dict=catalog_info_dict,\n comp_info_dict=comp_info_dict,\n CatalogSourceManager=csm)", "def catalog_comp_info_dict(self, catkey):\n return self._catalog_comp_info_dicts[catkey]", "def getCatalog(self, version=None, level=None, cubeInfo=True):\n print('WaPOR API: Loading catalog WaPOR.v{v}_l{lv}...'.format(\n v=version, lv=level))\n self.isAPITokenSet()\n\n isFound = False\n\n # if isinstance(version, int) and isinstance(level, int):\n # print('| int')\n # if 0 < version < 3 and 0 < level < 4:\n # print('| range')\n if version == self.version and level == self.level:\n # print('| equal')\n if self.catalog is not None:\n # print('| not None')\n isFound = True\n\n if isFound:\n df = self.catalog\n\n print('WaPOR API: Loading catalog WaPOR.v{v}_l{lv} found.'.format(\n v=version, lv=level))\n else:\n df = self._query_catalog(version, level)\n\n print('WaPOR API: Loading catalog WaPOR.v{v}_l{lv} loaded.'.format(\n v=version, lv=level))\n\n if cubeInfo:\n cubes_measure = []\n cubes_dimension = []\n for cube_code in df['code'].values:\n cubes_measure.append(self._query_cubeMeasures(cube_code))\n cubes_dimension.append(self._query_cubeDimensions(cube_code))\n df['measure'] = cubes_measure\n df['dimension'] = cubes_dimension\n\n self.catalog = df\n return self.catalog", "def initCatalog():\n t = \"SINGLE_LINKED\"\n catalog = model.newCatalog(t)\n return catalog", "def split_comp_info_dict(self, catalog_name, split_ver):\n return self._split_comp_info_dicts[\"%s_%s\" % (catalog_name, split_ver)]", "def initCatalog(tipolista : str):\n catalog = model.newCatalog(tipolista)\n return catalog", "def initCatalog(tipo):\n catalog = model.newCatalog(tipo)\n \n return catalog", "def catalogConvert():\n ret = libxml2mod.xmlCatalogConvert()\n return ret", "def __init__(self, catalog_path):\n self.catalog_path = catalog_path\n self.load_catalog()\n return", "def catalog(self) -> str:\n return pulumi.get(self, \"catalog\")", "def add_catalogs(self):\n n_exposures = len(self.info['Module'])\n self.info['point_source'] = [None] * n_exposures\n self.info['galaxyListFile'] = [None] * n_exposures\n self.info['extended'] = [None] * n_exposures\n self.info['convolveExtended'] = [False] * n_exposures\n self.info['movingTarg'] = [None] * n_exposures\n self.info['movingTargSersic'] = [None] * n_exposures\n self.info['movingTargExtended'] = [None] * n_exposures\n self.info['movingTargToTrack'] = [None] * n_exposures\n\n for i in range(n_exposures):\n if int(self.info['detector'][i][-1]) < 5:\n filtkey = 'ShortFilter'\n pupilkey = 'ShortPupil'\n else:\n filtkey = 'LongFilter'\n pupilkey = 'LongPupil'\n filt = self.info[filtkey][i]\n pup = self.info[pupilkey][i]\n\n if self.point_source[i] is not None:\n # In here, we assume the user provided a catalog to go with each filter\n # so now we need to find the filter for each entry and generate a list that makes sense\n self.info['point_source'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.point_source, 'point source')))\n else:\n self.info['point_source'][i] = None\n if self.galaxyListFile[i] is not None:\n self.info['galaxyListFile'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.galaxyListFile, 'galaxy')))\n else:\n self.info['galaxyListFile'][i] = None\n if self.extended[i] is not None:\n self.info['extended'][i] = os.path.abspath(os.path.expandvars(\n 
self.catalog_match(filt, pup, self.extended, 'extended')))\n else:\n self.info['extended'][i] = None\n if self.movingTarg[i] is not None:\n self.info['movingTarg'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTarg, 'moving point source target')))\n else:\n self.info['movingTarg'][i] = None\n if self.movingTargSersic[i] is not None:\n self.info['movingTargSersic'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargSersic, 'moving sersic target')))\n else:\n self.info['movingTargSersic'][i] = None\n if self.movingTargExtended[i] is not None:\n self.info['movingTargExtended'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargExtended, 'moving extended target')))\n else:\n self.info['movingTargExtended'][i] = None\n if self.movingTargToTrack[i] is not None:\n self.info['movingTargToTrack'][i] = os.path.abspath(os.path.expandvars(\n self.catalog_match(filt, pup, self.movingTargToTrack, 'non-sidereal moving target')))\n else:\n self.info['movingTargToTrack'][i] = None\n if self.convolveExtended is True:\n self.info['convolveExtended'] = [True] * n_exposures", "def create_catalog(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinAdminSession.create_bin\n return Catalog(\n self._provider_manager,\n self._get_provider_session('catalog_admin_session').create_catalog(*args, **kwargs),\n self._runtime,\n self._proxy)", "def initCatalog(list_type):\n catalog = model.newCatalog(list_type)\n return catalog", "def __init__(self, catalog: cat.Catalog) -> None:\n self._catalog = catalog\n self._control_dict = self._create_control_dict()", "def initCatalog(tad_list_type):\n catalog = model.newCatalog(tad_list_type)\n return catalog", "def getCatalogs():", "def get_infores_catalog(self):\n return self._infores_catalog", "def get_catalog(self) -> Catalog:\n params: Dict[str, Any] = self._status.get_status_info()\n\n response = self._client.open_api_do(\n \"GET\", \"labels/catalogs\", self.dataset_id, params=params\n ).json()\n return Catalog.loads(response[\"catalog\"])", "def info():\n return buildcat.info()", "def __init__(\n self,\n identifier: str,\n catalog: Catalog,\n ) -> None:\n self._identifier = identifier\n self._catalog = catalog", "def from_catalog(cls, catalog):\n objects = [Object.from_object(obj) for obj in catalog.objects]\n return Catalog(objects, catalog._chooser)" ]
[ "0.6588303", "0.61483276", "0.5996167", "0.5996167", "0.5996167", "0.59377766", "0.5853027", "0.58457947", "0.5777897", "0.57669896", "0.5660952", "0.56151086", "0.5586599", "0.5576048", "0.55725324", "0.55642617", "0.5543427", "0.5534594", "0.55312943", "0.5503297", "0.5495615", "0.547778", "0.5441102", "0.54311174", "0.5398637", "0.5390463", "0.5390172", "0.5384715", "0.53672767", "0.53588074" ]
0.8214636
0
Return the list of full catalogs used
def catalogs(self): return sorted(self._catalog_comp_info_dicts.keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCatalogs():", "def get(self):\n return GenericGet().get_catalogs()", "def get_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.get_bins_template\n catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs()\n cat_list = []\n for cat in catalogs:\n cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))\n return CatalogList(cat_list)", "def list_catalogs(self):\n return self._json_object_field_to_list(\n self._get_catalogs_json(), self.__MISSION_STRING)", "def checkCatalogs():\n url = CHECKBASE % 'catalogs'\n catalogs = []\n try:\n fh = getURLHandle(url)\n #fh = urllib2.urlopen(url)\n data = fh.read()\n dom = minidom.parseString(data)\n fh.close()\n catalog_elements = dom.getElementsByTagName('Catalog')\n for catel in catalog_elements:\n if catel.firstChild is None:\n continue\n catalog = catel.firstChild.data.strip()\n if len(catalog):\n catalogs.append(str(catalog))\n except:\n raise Exception,\"Could not open %s to search for list of catalogs\" % url\n return catalogs", "def getcatalogs():\n \n # default path for the gthumb catalogs of the logged in user\n gpath = os.environ['HOME'] + \"/.local/share/gthumb/catalogs\"\n\n cats = [] \n cat_list = [] \n try:\n # dir_list has all files and directories in path\n # directories are WITHOUT ending '/'\n dir_list = os.listdir(gpath)\n except:\n # path may not be a directory or permission error\n print \"ERROR: in getcatalogs, gpath:\", gpath\n return []\n \n # get only the directories \n for line in dir_list:\n file = gpath + \"/\" + line\n #print file \n if os.path.isdir(file):\n cats.append(file)\n else: \n # not a directory; ignore \n #print \"not a directory:\", file \n pass\n\n # now get each catalog file from each directory\n for cat in cats:\n try:\n # dir_list has all files and directories in path\n # any directory is WITHOUT ending '/'\n dir_list = os.listdir(cat)\n except:\n # path may not be a directory or permission error\n print \"ERROR: in getcatalogs, cat:\", cat\n return []\n \n for line in dir_list:\n file = cat + \"/\" + line\n #print os.path.splitext(file)[1][1:]\n # append file only if it has catalog extension\n if os.path.splitext(file)[1][1:] == \"catalog\":\n cat_list.append(file)\n \n cat_list.sort() \n\n if random_mode:\n random.shuffle(cat_list)\n \n return cat_list", "def get_catalog(self):\n\n rep = req.get_json(self.CATALOG)\n repo_list = rep[\"repositories\"]\n\n for repo in repo_list:\n self.list.append(Repository(repo))\n\n return self.list", "def get_catalog(self) -> Dict[str, str]:\n return self.catalog", "def catalogs(env):\n envs = environments()\n check_env(env, envs)\n\n if app.config['ENABLE_CATALOG']:\n nodenames = []\n catalog_list = []\n query = AndOperator()\n\n if env != '*':\n query.add(EqualsOperator(\"catalog_environment\", env))\n\n query.add(NullOperator(\"catalog_timestamp\", False))\n\n order_by_str = '[{\"field\": \"certname\", \"order\": \"asc\"}]'\n nodes = get_or_abort(puppetdb.nodes,\n query=query,\n with_status=False,\n order_by=order_by_str)\n nodes, temp = tee(nodes)\n\n for node in temp:\n nodenames.append(node.name)\n\n for node in nodes:\n table_row = {\n 'name': node.name,\n 'catalog_timestamp': node.catalog_timestamp\n }\n\n if len(nodenames) > 1:\n form = CatalogForm()\n\n form.compare.data = node.name\n form.against.choices = [(x, x) for x in nodenames\n if x != node.name]\n table_row['form'] = form\n else:\n table_row['form'] = None\n\n catalog_list.append(table_row)\n\n return 
render_template(\n 'catalogs.html',\n nodes=catalog_list,\n envs=envs,\n current_env=env)\n else:\n log.warn('Access to catalog interface disabled by administrator')\n abort(403)", "def catalog(self) -> str:\n return pulumi.get(self, \"catalog\")", "def avail_categories(self):\n # retrieve categories\n categories = self.show_all_categories()\n # for each category, retrieve packages\n output = {}\n for category in categories:\n packages = self.show_category(category)\n output[category] = packages\n\n return output", "def test_get_hyperflex_app_catalog_list(self):\n pass", "def getCatalog(unique_name):", "def get_catalog():\n return jsonify(getCatalog())", "def get_root_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinHierarchySession.get_root_bins\n return self._get_provider_session('catalog_hierarchy_session').get_root_catalogs()", "def fetch_account_catalogs(account:str):\n for config in accounts:\n if account in config['streamers']:\n return config['catalogs']\n return", "def get_catalog_options(self):\n catalog_api = CourseCatalogApiClient(self.user)\n catalogs = catalog_api.get_all_catalogs()\n # order catalogs by name.\n catalogs = sorted(catalogs, key=lambda catalog: catalog.get('name', '').lower())\n\n return BLANK_CHOICE_DASH + [\n (catalog['id'], catalog['name'],)\n for catalog in catalogs\n ]", "def catalog(self):\n # This is a nasty hack because our db seems prone to circular links\n nItems = 0\n for item in self._getSubNsList():\n yield item\n nItems += 1\n # NASTY HACK!\n if nItems > 1000:\n self.reindex()\n raise Exception(\"Circular link corrected, try again\")", "def get_catalogs_by_provider(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.get_bins_by_provider\n catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_provider(*args, **kwargs)\n cat_list = []\n for cat in catalogs:\n cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))\n return CatalogList(cat_list)", "def get_used():\r\n sql = text('''\r\n SELECT category.* FROM category, app\r\n WHERE app.category_id=category.id GROUP BY category.id\r\n ''')\r\n results = db.engine.execute(sql)\r\n categories = []\r\n for row in results:\r\n category = dict(id=row.id, name=row.name, short_name=row.short_name,\r\n description=row.description)\r\n categories.append(category)\r\n return categories", "def can_lookup_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.can_lookup_bins_template\n return self._get_provider_session('catalog_lookup_session').can_lookup_catalogs()", "def final_catalogs(self, filename=None, catalog_cols=None):\n\n final_catalog = vstack([cluster_info['catalog'] for cluster_info in self._catalog_dictionary.values()])\n\n # If we request to keep only certain columns in our output\n if catalog_cols is not None:\n final_catalog.keep_columns(catalog_cols)\n\n if filename is None:\n return final_catalog\n else:\n if filename.endswith('.cat'):\n final_catalog.write(filename, format='ascii', overwrite=True)\n else:\n final_catalog.write(filename, overwrite=True)", "def get_catalogs_by_genus_type(self, *args, **kwargs):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.get_bins_by_genus_type\n catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_genus_type(*args, **kwargs)\n cat_list = []\n for cat in catalogs:\n cat_list.append(Catalog(self._provider_manager, cat, 
self._runtime, self._proxy))\n return CatalogList(cat_list)", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def catalog_components(self, catalog_name, split_ver):\n return sorted(self._split_comp_info_dicts[\"%s_%s\" % (catalog_name, split_ver)].keys())", "def getServices(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/catalog'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Service', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result", "def listClusters():\n return [c['name'] for c in pymongo.Connection().clovr.clusters.find()]", "def filter_catalogs(conn, catalogs, res): \n # Determine which resolution range the image belongs in\n for config, res_range in res_dict.items():\n if res_range[0] < res <= res_range[1]:\n use_range = res_range\n # Combine highest resolutions to allow for more catalogs\n if config == 'A' or config == 'B':\n use_range = (res_dict['A'][0], res_dict['B'][1])\n\n # Find all catalogs that fall into the adequate resolution range\n cur = conn.cursor()\n filtered_catalogs = []\n for catalog in catalogs:\n try:\n catalog_res = catalogio.catalog_dict[catalog]['resolution']\n except KeyError:\n cur.execute('''SELECT resolution FROM radcat.catalogs\n WHERE name = %s''', (catalog, ))\n catalog_res = cur.fetchone()[0]\n if use_range[0] < catalog_res <= use_range[1]:\n filtered_catalogs.append(catalog)\n\n cur.close()\n\n return filtered_catalogs", "def test_api_ucs_get_catalog(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n total_elements = 0\n for elementTypes in api_data[\"json\"]:\n for element in api_data[\"json\"][str(elementTypes)]:\n api_data_c = request(\"get\", \"/catalog\",\n query={\"identifier\": element[\"relative_path\"].strip(\"/\")})\n self.assertEqual(api_data_c['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' +\n str(api_data_c['status']))\n total_elements += 1\n self.assertGreater(total_elements, 0, \"Zero catalog elements found\")\n # TO DO: deeper check on the catalog data", "def categories(self):\n return self.env.categories" ]
[ "0.8308617", "0.7364706", "0.7295699", "0.7183018", "0.7094484", "0.7065019", "0.68383414", "0.66891736", "0.65169674", "0.6516233", "0.6503805", "0.64863735", "0.64859456", "0.6473658", "0.6427194", "0.64163595", "0.6385455", "0.63072556", "0.6240876", "0.623814", "0.6236926", "0.62129235", "0.6182712", "0.61599797", "0.61433715", "0.60266954", "0.59904575", "0.5967449", "0.5963826", "0.5950389" ]
0.79021573
1
Return the list of catalog split keys used
def splitkeys(self): return sorted(self._split_comp_info_dicts.keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keys(self) -> List[str]:\n raise NotImplementedError", "def catalog_components(self, catalog_name, split_ver):\n return sorted(self._split_comp_info_dicts[\"%s_%s\" % (catalog_name, split_ver)].keys())", "def keys(self) -> List:\n pass", "def get_keys(self):\r\n\r\n #using database\r\n\r\n if self.using_database:\r\n aprint('GET KEYS')\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {key[0] for key in fetched}\r\n\r\n return set()\r\n\r\n #using shelf\r\n\r\n return self.key_dict.keys()", "def keys(self):\n return self.get_list(self.cloudman.list_keypairs(),\n kind=\"key\")", "def get_key_list(self) -> list:\n return self.key_functs.keys()", "def keys():", "def keys(self):\n return", "def get_keys(self):\n bucket = self.resource.Bucket(self.bucketname)\n return [key.key for key in bucket.objects.all()]", "def keys(self):\n return list(self.token2id.values())", "def catalogs(self):\n return sorted(self._catalog_comp_info_dicts.keys())", "def getkeys(self):\n return list(self.keys)", "def get_library_keys(self):\n return list({\n self._create_library_locator(library_index, branch=None)\n for library_index\n in self.find_matching_course_indexes(branch=\"library\")\n })", "def list_configuration_keys() -> Tuple[List[str], Dict[int, str]]:\n lookup = {i: v for (i, v) in enumerate(PipetteConfigurations.__fields__)}\n return [\n f\"{i}: {v}\" for (i, v) in enumerate(PipetteConfigurations.__fields__)\n ], lookup", "def keys(self):\n keys = set()\n with pd.HDFStore(self.rootpath, mode=\"r\") as hdf:\n hdf5_keys = hdf.keys()\n\n for key in hdf5_keys:\n kp = key.split(\"/\")\n if len(kp) == 5:\n print(kp, len(kp))\n keys.add(kp[4])\n return list(keys)", "def get_keys(self):\r\n return self._keys", "def keys(self) -> t.List[str]: # type: ignore[override]\n return list(self.__keys)", "def return_keys(self):\r\n\r\n keys = list(self.piDD.keys())\r\n return keys", "def keys(self):\r\n return [k for k in self]", "def get_keys(self):\r\n\t\tlogger.debug(\"Getting the keys\")\r\n\t\t\r\n\t\treturn db.get_items('keys')", "def keys(self) -> Sequence[str]:\n raise NotImplementedError", "def _keys(self):\n for name in listdir(abspath(self._path)):\n key, ext = splitext(name)\n if ext == \".pkl\":\n yield key", "async def keys(self) -> Iterable[str]:", "def keys(self):\n # Collect all keys in each bucket\n all_keys = []\n for bucket in self.buckets:\n for key, value in bucket.items():\n all_keys.append(key)\n return all_keys", "def keys(self):\n return [ x for x in self ]", "def keys(self):\n return self.keys", "def _getbundlelistkeysparts(\n bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs\n):\n listkeys = kwargs.get('listkeys', ())\n for namespace in listkeys:\n part = bundler.newpart(b'listkeys')\n part.addparam(b'namespace', namespace)\n keys = repo.listkeys(namespace).items()\n part.data = pushkey.encodekeys(keys)", "def keys(self):\n return _keys(self)", "def keys(self):\n return _keys(self)", "def list_all_keys(self):\n \n return self.keys" ]
[ "0.6911", "0.69100714", "0.6819517", "0.68116844", "0.67278045", "0.6701149", "0.6674959", "0.6631823", "0.6594297", "0.65199286", "0.651894", "0.65150744", "0.65001196", "0.65000886", "0.6440549", "0.64391464", "0.6426035", "0.6371353", "0.6365058", "0.6354946", "0.6335533", "0.63344884", "0.6333683", "0.6329103", "0.6291239", "0.62909704", "0.6285717", "0.6281128", "0.6281128", "0.6262817" ]
0.7640336
0
Return the info for a particular split key
def split_comp_info(self, catalog_name, split_ver, split_key): return self._split_comp_info_dicts["%s_%s" % (catalog_name, split_ver)][split_key]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getsplitinfo():\n \n splitvarlist = spss.GetSplitVariableNames()\n if len(splitvarlist) == 0:\n return [], None\n else:\n splittype = spssaux.getShow(\"split\", olang=\"english\")\n if splittype.lower().startswith(\"layer\"):\n splittype=\"layered\"\n else:\n splittype=\"separate\"\n return splitvarlist, splittype", "def _GetInfo(self, key):\r\n for line in RunShell([\"svn\", \"info\"]).splitlines():\r\n if line.startswith(key + \": \"):\r\n return line.split(\":\", 1)[1].strip()", "def split(value, key):\n return str(value).split(key)", "def split_comp_info_dict(self, catalog_name, split_ver):\n return self._split_comp_info_dicts[\"%s_%s\" % (catalog_name, split_ver)]", "def info(self, key):\n return self.execute_command(self.INFO_CMD, key)", "def getHeader(key):", "def get_info(self, key: str) -> TaskInfo:\n raise NotImplementedError", "def loader_from_key(key):\n\n if \":\" in key:\n return key.split(\":\")\n return key, None", "def get_opt_split(self, command):\n if \"split\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"split\"]\n else:\n return CommandDict.DEFAULT_OPT_SPLIT", "def bfInfo(self, key):\n\n return self.execute_command(self.BF_INFO, key)", "def _split_key(cls, logical_key):\n if isinstance(logical_key, str):\n path = logical_key.split('/')\n elif isinstance(logical_key, (tuple, list)):\n path = logical_key\n else:\n raise TypeError('Invalid logical_key: %r' % logical_key)\n return path", "def split(self):\n return self._clip_metadata.get(\"split\")", "def fillNodeId(model, key=None, name=None, split=None):\n # if I know the name but not the split version\n if name and not split:\n # set the split\n split = tuple(model.split(name))\n # otherwise, if I know the split but not the name\n elif split and not name:\n # get the name\n name = model.join(*split)\n\n # if I don't know the key but I know the split\n if split and not key:\n # look up the key\n key = model._hash.hash(items=split)\n\n # if the split is known\n if split is not None:\n # normalize it\n split = tuple(split)\n\n # done my best: if i know the key\n if key:\n # return the info\n return name, split, key\n\n # otherwise, get the journal\n import journal\n # and complain; this is a bug\n raise journal.firewall('pyre.calc').log('insufficient nodal metadata')", "def _extract_by_key(self, line, key):\n search = r'{0}=.+?,'.format(key) # lazy match to first ,\n attr_match = re.search(search, line)\n if attr_match:\n # grab just the value of the attribute from attr_key=value,\n value = attr_match.group()[len(key) + 1 : len(attr_match.group()) - 1]\n return value\n else:\n return \"notfound\"", "def extract_info(\n self,\n main_key:str,\n sub_key:str,\n data_key:str,\n ):\n\n extracted_info = {}\n for i in range(len(self.data)):\n try:\n gene_key = self.data[i]['gene'][0]['name']['value']\n if self.data[i][main_key][0][\"type\"] == sub_key:\n extracted_info[gene_key] = [self.data[i][main_key][0][data_key]]\n print(\"success\")\n except KeyError as e:\n print(f\"Could not find <{main_key}> and <{sub_key}>\\n{e}\")\n \n return extracted_info", "def extract(data, key):\n for d in data:\n if d.startswith(key):\n return d.replace(key+':','').strip() #remove the parser tag then remove the spaces", "def _get_split_key(keys, num_splits):\n\n # If the number of keys is less than the number of splits, we are limited\n # in the number of splits we can make.\n if not keys or (len(keys) < (num_splits - 1)):\n return keys\n\n # Calculate the number of keys per split. 
This should be KEYS_PER_SPLIT,\n # but may be less if there are not KEYS_PER_SPLIT * (numSplits - 1) scatter\n # entities.\n #\n # Consider the following dataset, where - represents an entity and\n # * represents an entity that is returned as a scatter entity:\n # ||---*-----*----*-----*-----*------*----*----||\n # If we want 4 splits in this data, the optimal split would look like:\n # ||---*-----*----*-----*-----*------*----*----||\n # | | |\n # The scatter keys in the last region are not useful to us, so we never\n # request them:\n # ||---*-----*----*-----*-----*------*---------||\n # | | |\n # With 6 scatter keys we want to set scatter points at indexes: 1, 3, 5.\n #\n # We keep this as a float so that any \"fractional\" keys per split get\n # distributed throughout the splits and don't make the last split\n # significantly larger than the rest.\n\n num_keys_per_split = max(1.0, float(len(keys)) / (num_splits - 1))\n\n split_keys = []\n\n # Grab the last sample for each split, otherwise the first split will be too\n # small.\n for i in range(1, num_splits):\n split_index = int(round(i * num_keys_per_split) - 1)\n split_keys.append(keys[split_index])\n\n return split_keys", "def get_key(command):\n return command.split(\" \")[1]", "def _split_by_keypair(self, osw_dict={}): \n lst = osw_dict\n keypair_dict = []\n for d in lst:\n if d['key'] == 'raw_line':\n keypair_lst = re.split(r',',d['value'])\n \n for k,v in keypair_lst:\n _d = [{'timestamp':d['timestamp'] , \n 'category': d['category'], \n 'sub_category': d['sub_category'], \n 'key': k, \n 'value': v}]\n keypair_dict.extend(_d)\n \n return keypair_dict", "def parse_dot_key(data, key):\n for key_part in key.split('.'):\n data = data.get(key_part)\n if data is None:\n break\n return data", "def _get(self, key):\n current_storage_dict = self._storage\n sub_keys = key.split('.')\n i = 1\n sub_keys_count = len(sub_keys)\n for sub_key in sub_keys:\n if i < sub_keys_count:\n if sub_key in current_storage_dict:\n current_storage_dict = current_storage_dict[sub_key]\n else:\n return\n\n else:\n if sub_key in current_storage_dict:\n return current_storage_dict[sub_key]\n else:\n return\n\n i += 1", "def get_mouse_info(key):\n\n info = key.split('_')\n mouse_number = info[0]\n date = info[1]\n exp_type = info[2]\n if exp_type == 'homecagesocial':\n exp_type = 'social'\n elif exp_type == 'homecagenovel':\n exp_type = 'novel'\n\n return mouse_number, date, exp_type", "def split(self, line):\n parts = line.split()\n return {\n 'size': 0 if parts[9] == '-' else int(parts[9]), \n 'file_requested': parts[6]\n }", "def _split_path(self, path):\n if path.strip() in (None, \"\", \"/\"):\n return (None, None)\n tableName, primKey = util.save_split(path.strip(\"/\"), \"/\", 1)\n # _logger.debug(\"'%s' -> ('%s', '%s')\" % (path, tableName, primKey))\n return (tableName, primKey)", "def cfInfo(self, key):\n\n return self.execute_command(self.CF_INFO, key)", "def get_info(self, key: str) -> TaskInfo:\n return self.task_graph.nodes[key][\"info\"]", "def getn(self, key, sep=\"/\"):\n if len(sep) != 1:\n raise ValueError(\"Invalid parameter 'sep': %s\" % sep)\n if isinstance(key, str):\n key = key.split(sep)\n return reduce(dict.get, key, self._config)", "def get_field_info_key(self, key):\n try:\n return self.field_info[key]\n except KeyError:\n raise KeyError(\n gettext('Missing \"%(key)s\" key in %(field_info)r')\n % {\n 'key': key,\n 'field_info': self.field_info,\n })", "def Extract_MetaData(data, key):\n return data.get(key)", "def get_name(self):\n 
return self.key().name().split(':', 1)[1]" ]
[ "0.6543608", "0.6113423", "0.5998565", "0.5991682", "0.5982638", "0.59728223", "0.59677356", "0.58936083", "0.5857385", "0.576732", "0.57553214", "0.5689077", "0.56885535", "0.5674944", "0.5661469", "0.56329924", "0.5598245", "0.55969393", "0.5588592", "0.5572412", "0.55453086", "0.5534389", "0.55152214", "0.5509254", "0.5492579", "0.54919124", "0.54577196", "0.5456537", "0.5431006", "0.5417694" ]
0.73933583
0
Stores the given model.
def store_model(self, user_name, model_name, model): print(f"sto {user_name}\n{model_name}\n{serialize(model)}") return self.dao.store_serialized_model(user_name, model_name, serialize(model))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(self):\n pass", "def save_model(self, *args, **kwargs):\n raise NotImplementedError", "def save_model(self):\n if self.model:\n self.model.save(self.config[\"model_path\"])", "def _save_model(self):\n save_generic(self.model, self.model_pkl_fname)", "def persist_model(self, model, trade_window):\n object_path = 'model_objects/'\n file_name = f'market_maker_model_{self.target_coin}_{trade_window}.pkl'\n self.s3_client.put_object(Bucket=self.s3_bucket,\n Key=object_path + file_name,\n Body=pickle.dumps(model, pickle.HIGHEST_PROTOCOL)\n )\n return", "def save(path_to_model):\n pass", "def save(self, datastore, model):\n datastore.add(model)\n datastore.flush()\n return model", "def save_model(self, model_path: str):", "def save_model(self, path):\n self._model.save(path)", "def save_model(self, path):\n pass", "def save_model( self, request, obj, form, change ):\n obj.save()", "def save(\n self,\n modelSavePath\n ):\n pass", "def save_model(model, model_filepath):", "def save_model(self, request, instance, form, change):\n pass", "def saveModel(self):\n log.info(\"Saving model to %s...\" % self.savedModelsPath)\n self.model.save(self.savedModelsPath)", "def save(self):\n print(\"==> Saving model to\", self.model_dir)\n self.model.save(self.model_dir)", "def register_model(self, model):\n\n self._model = model", "def save_model(self, model, model_filepath):\n joblib.dump(model, model_filepath)", "def save(self, model_path):\n self.encoder.save(model_path)", "def _save(self, step, model):\n\n raise NotImplementedError()", "def save(self, model_file):\n pickle.dump(self, open(model_file, 'wb'))", "def save_model(self, model):\n # get model file name\n root_dir = os.path.split(os.path.realpath(__file__))[0]\n model_path = os.path.join(root_dir, '..', 'common', 'model', self._this_party, self._task_chain_id)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n model_file_name = os.path.join(model_path, self._task_id + '.model')\n\n # save model to disk\n dump(model, model_file_name)", "def store_serialized_model(self, user_name, model_name, serialized_model):\n try:\n self.rconn.set(redis_keys.for_model(user_name, model_name), json.dumps(serialized_model))\n return True\n except redis.RedisError as e:\n return False", "def save_model(model, model_filepath):\n dump(model, model_filepath)", "def save(self):\n self.presavemodel()\n self.dbm().model_save(self)\n self.set_isdirty(False)\n # we might be smart about flushing when there is no id, so that saving a new model gets it's unique id\n if (self.id == None):\n self.flush_toupdate()", "def saveModel(self):\n with open(self.modelSaveFile, 'wb') as f:\n pickle.dump(self.values, f, pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.policy, f, pickle.HIGHEST_PROTOCOL)", "def save_model(self, filename):\n self.model.save('models/' + str(filename))", "def save_model(self, model):\n # serialize model to JSON\n model_json = model.to_json()\n os.makedirs(os.path.dirname(self.model_json_path), exist_ok=True)\n with open(self.model_json_path, \"w\") as json_file:\n json_file.write(model_json)\n\n # serialize weights to HDF5\n model.save_weights(self.model_weights_path)\n print(\"Saved model to disk\")", "def model(self, model):\n \n self._model = model", "def model(self, model):\n\n self._model = model" ]
[ "0.77885836", "0.7711191", "0.755239", "0.75421274", "0.7444351", "0.7415339", "0.7415298", "0.7396812", "0.73743224", "0.7329616", "0.72817063", "0.71172756", "0.7011479", "0.6994045", "0.6933486", "0.68591076", "0.6839708", "0.68030614", "0.6796083", "0.67817974", "0.676047", "0.67580837", "0.6698856", "0.6652693", "0.6651351", "0.6603686", "0.65879387", "0.65869236", "0.65755093", "0.6570391" ]
0.84057695
0
Stores the given model in redis.
def store_serialized_model(self, user_name, model_name, serialized_model): try: self.rconn.set(redis_keys.for_model(user_name, model_name), json.dumps(serialized_model)) return True except redis.RedisError as e: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_model(self, user_name, model_name, model):\n print(f\"sto {user_name}\\n{model_name}\\n{serialize(model)}\")\n return self.dao.store_serialized_model(user_name, model_name, serialize(model))", "def redis_save(key: object, value: object) -> object:\n if key is not None and value is not None:\n red.redis.set(json.dumps(key), json.dumps(value))", "def save(self, datastore, model):\n datastore.add(model)\n datastore.flush()\n return model", "def save(self):\n\n\t\t# Use internal time if we have one, else use the global\n\t\tiExpire = '__expire' in self.__dStore and self.__dStore['__expire'] or _muiExpire\n\n\t\t# If we have no expire time, set forever\n\t\tif iExpire == 0:\n\t\t\t_moRedis.set(self.__id, JSON.encode(self.__dStore))\n\n\t\t# Else, set to expire\n\t\telse:\n\t\t\t_moRedis.setex(self.__id, _muiExpire, JSON.encode(self.__dStore))", "def set(id, model):\n key = build_key(type(model), id) # Get model class from model object\n logger.info(\" CACHE INVALIDATE key=%s\", key)\n cache.delete(key) # Invalidate from cache\n model.id = id\n model.save()", "def register(self, model, values=None, instance_values=None):\n\n if model in self._models:\n raise Exception(\"%s is already registered\" % model)\n\n self._models[model] = CacheConfig(values, instance_values)", "def store(self, data: Union[str, bytes, int, float]) -> str:\n key = str(uuid.uuid4())\n self._redis.set(key, data)\n return key", "def set(cls, obj: Model, data):\n cache.set(cls._construct_key(obj), data)", "def register(cls, model):\n cls.models[model] = True", "def _store(self):\n database.mongo_store_object_by_label(self, self.label)", "def store(self, data: Union[str, bytes, int, float]) -> str:\n k = str(uuid.uuid4())\n self._redis[k] = data\n return k", "def persist_model(self, model, trade_window):\n object_path = 'model_objects/'\n file_name = f'market_maker_model_{self.target_coin}_{trade_window}.pkl'\n self.s3_client.put_object(Bucket=self.s3_bucket,\n Key=object_path + file_name,\n Body=pickle.dumps(model, pickle.HIGHEST_PROTOCOL)\n )\n return", "def store_job(self, job_id: Hashable, key: Hashable, value: Any) -> None:\n self._redis.json().set(f\"job:{job_id}\", f\".{key}\", value)", "def model_with_redis_dict_field(r):\n class Model(object):\n redis_field = RedisTestDictField(r)\n\n return Model", "def register_model(self, model):\n\n self._model = model", "def store_search_value(\n self, search_id: Hashable, key: Hashable, value: Any\n ) -> None:\n key = f\"{search_id}.{key}\"\n value = pickle.dumps(value)\n self._redis.set(key, value)", "def train_model():\n print('Loading the dataset...')\n dataset = pd.read_csv('app/Sentiment_Reviews.csv',index_col=0)\n X = dataset[['Reviews']]\n y = dataset[['Sentiment']]\n le = preprocessing.LabelEncoder()\n le.fit(y)\n y = (le.transform(y))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=38)\n\n print('Training the model...')\n text_clf_svm = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer(use_idf=False)), ('clf-svm', SGDClassifier(loss='modified_huber', penalty='l2', alpha=0.001, random_state=42, max_iter=20))])\n text_clf_svm = text_clf_svm.fit(X_train['Reviews'], y_train)\n\n print('Storing model to redis...')\n pickled_model = pickle.dumps(text_clf_svm)\n try:\n redis_client.set('ml_model', pickled_model)\n except RedisError as e:\n print('Storing the model was not successful and threw an error.')\n print(e)", "def save_model(self, *args, **kwargs):\n raise NotImplementedError", "def to_redis(self, 
value, **kwargs):\n return self.to_python(value)", "def save(self, pipeline: Optional['Pipeline'] = None, include_meta: bool = True, include_result: bool = True):\n key = self.key\n connection = pipeline if pipeline is not None else self.connection\n\n mapping = self.to_dict(include_meta=include_meta, include_result=include_result)\n\n if self.get_redis_server_version() >= (4, 0, 0):\n connection.hset(key, mapping=mapping)\n else:\n connection.hmset(key, mapping)", "def set_to_db(key, str_value):\n redis_db.set(KEY_PREFIX + key, str_value)", "def __store(self):\n # connection strings are accessed directly by dbo\n dbo = dbo.connect()\n dbo.save(self.__to_dict())\n # not supre important to call but a nice idea\n dbo.destroy()", "def _save_model(self):\n groups = {cluster: self.model.cluster_metadata.group(cluster)\n for cluster in self.cluster_ids}\n self.model.save(self.model.spike_clusters,\n groups,\n clustering_metadata=self.model.clustering_metadata,\n )\n info(\"Saved {0:s}.\".format(self.model.kwik_path))", "def save_model(self):\n pass", "def store(self, key, obj):\n attrs = self.load_attrs()\n attrs[key] = obj\n self.store_attrs(attrs)", "def dal_set(key, obj):\n global store\n return store.set(urllib.quote(key), obj)", "def store_model(name: str, model: Pipeline) -> int:\n model_dir = '~/.cloudmesh/boltzmann'\n Shell.mkdir(path_expand(model_dir))\n\n dump(model, path_expand(f'{model_dir}/{name}_model.joblib'))\n return 0", "def save(self, key, sort_key, _object):\n return self.storage.set(key, sort_key, _object.to_json())", "def save(self, obj):\n self.uow.save(obj)\n self.imap.save(obj)\n state(obj).session = self", "def cache(self,redis_wrapper,key='default'):\n \n \n if key == 'default':\n key = self.showId()\n \n logger.info('Serializing GriddedTaxonomy. \\n Depending on the amount of data it can take some time')\n \n #Cleaning GeoQueryValuesSets fields\n map(lambda grid : grid.removeQuerySets(),self)\n \n import pickle\n logger.info('Serializing with pickle') \n self_pickle = pickle.dumps(self)\n logger.info(\"Storing in Cache\")\n try:\n \n redis_wrapper.set(key,self_pickle)\n return True\n except:\n logger.error(\"Problem in serializing. The intented caching object could be very big!\")\n return self_pickle" ]
[ "0.6808482", "0.6612063", "0.6367931", "0.60557693", "0.5940292", "0.5915331", "0.58338416", "0.58175427", "0.580114", "0.5768876", "0.57467467", "0.57321674", "0.5718583", "0.5714915", "0.5696914", "0.5685338", "0.56384814", "0.5590227", "0.5558572", "0.5553382", "0.55394566", "0.5515126", "0.55009645", "0.54906315", "0.54706216", "0.5453611", "0.5451722", "0.5449979", "0.5427399", "0.5427022" ]
0.75235426
0
Read the timestamps and sort them to permit simple concurrency tests.
def read_timestamps(self, tasks): from reframe.core.deferrable import evaluate self.begin_stamps = [] self.end_stamps = [] for t in tasks: with open(evaluate(t.check.stdout), 'r') as f: self.begin_stamps.append(float(f.readline().strip())) self.end_stamps.append(float(f.readline().strip())) self.begin_stamps.sort() self.end_stamps.sort()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sort_data_by_time():\n data = race.read_file_to_list()\n sorted_data = race.sort_data_by_time(data)\n assert data != sorted_data\n assert len(data) == len(sorted_data)\n assert type(sorted_data) == list\n for lines in sorted_data:\n assert type(lines) == dict", "def batch_uses_proper_timestamp_test(self):\n session = self.prepare()\n session.execute(\"\"\"\n BEGIN BATCH USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")\n rows = session.execute(\"SELECT id, writetime(firstname), writetime(lastname) FROM users\")\n res = sorted(rows)\n assert [list(res[0]), list(res[1])] == [[0, 1111111111111111, 1111111111111111], [1, 1111111111111111, 1111111111111111]], res", "def batch_uses_proper_timestamp_test(self):\n cursor = self.prepare()\n cursor.execute(\"\"\"\n BEGIN BATCH USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow')\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\")\n cursor.execute(\"SELECT id, writetime(firstname), writetime(lastname) FROM users\")\n res = sorted(cursor.fetchall())\n assert res == [[0, 1111111111111111, 1111111111111111], [1, 1111111111111111, 1111111111111111]], res", "def sort_time(self):\n self.entries.sort(key=lambda x: x.date_stamp_utc)", "def each_statement_in_batch_uses_proper_timestamp_test(self):\n session = self.prepare()\n session.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow') USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner') USING TIMESTAMP 1111111111111112\n APPLY BATCH\n \"\"\")\n rows = session.execute(\"SELECT id, writetime(firstname), writetime(lastname) FROM users\")\n res = sorted(rows)\n assert [list(res[0]), list(res[1])] == [[0, 1111111111111111, 1111111111111111], [1, 1111111111111112, 1111111111111112]], res", "def load_timestamps(self):\n print('Loading timestamps for sequence ' + self.sequence + '...')\n\n timestamp_file = os.path.join(self.sequence_path, 'times.txt')\n\n # Read and parse the timestamps\n self.timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n t = dt.timedelta(seconds=float(line))\n self.timestamps.append(t)\n\n # Subselect the chosen range of frames, if any\n if self.frame_range:\n self.timestamps = [self.timestamps[i] for i in self.frame_range]\n\n print('Found ' + str(len(self.timestamps)) + ' timestamps...')\n\n print('done.')", "def timestamps_sorted_list(self) -> List[int]:\n if len(self._timestamps_sorted_list) == 0:\n # Need to sort\n self._timestamps_sorted_list = sorted(list(self.keys()))\n if len(self._timestamps_sorted_list) > 0:\n self._first_timestamp = self._timestamps_sorted_list[0]\n if len(self._timestamps_sorted_list) > 1:\n self._last_timestamp = self._timestamps_sorted_list[-1]\n return self._timestamps_sorted_list", "def _sort_records(self):\n self.records.sort(reverse=True, key=lambda record: record.timestamp)", "def each_statement_in_batch_uses_proper_timestamp_test(self):\n cursor = self.prepare()\n cursor.execute(\"\"\"\n BEGIN BATCH\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow') USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner') USING TIMESTAMP 1111111111111112\n APPLY BATCH\n \"\"\")\n 
cursor.execute(\"SELECT id, writetime(firstname), writetime(lastname) FROM users\")\n res = sorted(cursor.fetchall())\n assert res == [[0, 1111111111111111, 1111111111111111], [1, 1111111111111112, 1111111111111112]], res", "def load_timestamps(ts_file):\n ts = []\n with open(ts_file, 'r') as f:\n for line in f.readlines():\n line = line.split()\n if line[0] != \"#\":\n ts.append(line)\n\n return ts", "def recorded_timestamps(self):\n return sorted(self.reception_records.keys())", "def load_timestamps(data_path):\n timestamp_file = os.path.join(data_path, 'oxts', 'timestamps.txt')\n\n # Read and parse the timestamps\n timestamps = []\n with open(timestamp_file, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = datetime.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n timestamps.append(t)\n return timestamps", "def sort_func(structure):\n return structure.timestamp", "def sortByTimeGenerated(self, records, verbose=False, in_place=False):\n tmp = records\n if (verbose):\n print \"[EVT] Sorting by time generated\"\n\n swapped = True\n while swapped:\n swapped = False\n for i in xrange(len(tmp)-1):\n ni = tmp[i].getField(\"timeGenerated\")\n nj = tmp[i+1].getField(\"timeGenerated\")\n if ni > nj:\n t = tmp[i+1]\n tmp[i+1] = tmp[i]\n tmp[i] = t\n swapped = True\n return tmp", "def order_by_ftime(tasks_lst):\n return sorted(tasks_lst, key=lambda task: task[1])", "def sort_records(records):\n return sorted(records, key=lambda r: r.value.event.datetime, reverse=True)", "def sort_by_time(pairs):\n pairs = sorted(pairs, key=lambda line: line[2], reverse=False)\n order = 0\n out = []\n for i in range(len(pairs)):\n if i != 0 and pairs[i][2] == pairs[i - 1][2]:\n out += [(pairs[i][0], pairs[i][1], order)]\n else:\n order += 1\n out += [(pairs[i][0], pairs[i][1], order)]\n return out", "def task4(self) ->list:\n user_readTimes = {}\n for entry in self.records:\n if(entry['event_type'] == 'pagereadtime'):\n if (entry['visitor_uuid'] in user_readTimes):\n user_readTimes[entry['visitor_uuid']] += entry['event_readtime']\n else:\n user_readTimes[entry['visitor_uuid']] = entry['event_readtime']\n readTimes = list(sorted(user_readTimes.items(), key=operator.itemgetter(1), reverse = True))[0:10]\n for times in readTimes:\n print(times)\n return readTimes", "def _topological_sort_timestamp_index(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def sort_time(cls):\n CloudCtx.objCloudCtx.sort(key=lambda x: datetime.strptime(x.modTs, \"%d-%m-%Y %I:%M:%S %p\"), reverse=True)\n for elem in CloudCtx.objCloudCtx:\n print(elem.display_cloud_ctx())", "def get_timestamps(filename, dictionary):\n \n with open(filename, 'r') as f_obj:\n text = f_obj.readlines()\n inferred_name = re.sub(r'[0-9_\\-]+', ' ', filename).split('/')[-1].split('.lab')[0].split('CD')[-1].strip().lower()\n end_stamp = float(text[-1].split()[1]) # relic of an old idea.\n for line in text:\n line = line.split() \n start = float(line[0])\n stop = float(line[1])\n musical_key = line[2]\n new_key = (inferred_name, start, stop)\n dictionary[new_key] = musical_key", "def todo(self):\n # sort events with eventid using datetime string\n pass", "def timeStamps(dataset):\n \n timestamps = []\n \n for index, row in enumerate(dataset):\n try:\n timeObj = datetime.datetime.strptime(timeStampFix(row), '%y:%j:%H:%M:%S')\n except ValueError:\n print('Failed to create 
datetime object for ' + timeStampFix(row))\n timestamps.append(timeObj)\n \n return timestamps", "def _timestamp_init(self):\n\n # retrieve absolute timestamp from an external source\n #\n # We first try to contact a network time service for a timestamp, if that\n # fails we use the current system time.\n try:\n ntphost = os.environ.get('RADICAL_PILOT_NTPHOST', '').strip()\n\n if ntphost:\n import ntplib\n response = ntplib.NTPClient().request(ntphost, timeout=1)\n timestamp_sys = response.orig_time\n timestamp_abs = response.tx_time\n return [timestamp_sys, timestamp_abs, 'ntp']\n except:\n pass\n\n t = time.time()\n return [t,t, 'sys']", "async def _timestamps(cls, responses: SourceResponses) -> set[int]:\n timestamps = set()\n async for samples in cls._samples(responses):\n timestamps |= {int(sample[\"timeStamp\"]) for sample in samples}\n return timestamps", "def loadts(args):\n # writing and reading pickle are both about 10x faster than reading csv\n # hence, simplify repeated execs by providing pickle file\n time_before = time.time()\n # timestart = time.time()\n d = dict() # dictionary of numpy arrays that hold timestamps per IP\n p = dict() # dictionary of IPs, holding # timestamps per IP\n offset = dict() # dict to hold tsval offset per IP\n\n try:\n pklfile = open(args.tsfile + \".pickle\", 'rb')\n d, p, offset = pickle.load(pklfile)\n pklfile.close()\n except:\n print(\"TS pickle loading failed, loading from csv\")\n logging.debug(\"TS pickle loading failed, loading from csv\")\n with open(args.tsfile, \"r\") as csvfile:\n datareader = csv.reader(csvfile)\n count = 0\n for row in datareader:\n count += 1\n try:\n ip = row[0]\n tcpt = row[1]\n recvt = row[2]\n except:\n print(\"Error in line \" + str(count) + \"of \" + str(args.tsfile) + \", skipping.\")\n logging.error(\"Error in line \" + str(count) + \"of \" + str(args.tsfile) + \", skipping.\")\n continue\n if ip in d:\n if p[ip] == 9999:\n d[ip].resize(100 * 1000, 2)\n if p[ip] > (100 * 1000) - 1: # more than 100k measurements can not be a target host\n continue\n if ip in offset:\n # recv_t is zero-based and scaled to be in seconds precision\n d[ip][p[ip], :] = \\\n [np.float64(tcpt),\n np.float64(np.uint64(recvt) - np.uint64(offset[ip])) / np.float64(1000.0 * 1000.0)]\n p[ip] = p[ip] + 1\n else:\n print(\"ip not in offset dict (should never happen, exiting): \" + str(ip))\n sys.exit(1)\n else: # ip is not in d, i.e. 
has not been seen before\n d[ip] = np.zeros((10000, 2), dtype=np.float64)\n p[ip] = 0\n d[ip][p[ip], :] = [np.float64(tcpt), np.float64(0.0)]\n p[ip] += 1\n offset[ip] = recvt\n logging.debug(\"timestamp np structure built after: {}, count: {} {} {}\".format(time.time() - time_before, count, len(d), len(p)))\n # resize all to correct length (removes trailing zeroes)\n for ip, value in p.items():\n d[ip].resize((p[ip], 2))\n\n pklfile = open(args.tsfile + \".pickle\", 'wb')\n pickle.dump([d, p, offset], pklfile)\n pklfile.close()\n print(\"ts data loaded in {} seconds, # IP addresses: {} \".format(round(time.time() - time_before, 2), len(d)))\n logging.debug(\"ts data loaded in {} seconds, # IP addresses: {} \".format(round(time.time() - time_before, 2), len(d)))\n return d, p, offset", "def get_sorted_sequences(filename):\n f = open(filename, \"r\", encoding = \"utf-8\")\n messages = []\n index = 0\n for line in f:\n # if line starts with a date, append it to list of messages\n if re.match(r\"\\d+/\\d+/\\d+, \\d+:\\d+\", line):\n messages.append(line)\n index += 1\n # otherwise, the line is a result of typing a new line and \n # it is therefore appended to the last message\n else:\n messages[index - 1] += line\n # sort messages by time of receival\n messages.sort(key = get_date_from_msg)\n\n return messages", "def _consolidate_events(self):\n for event_file in self._most_recent_event_files():\n with open(event_file, \"r\") as f:\n for line in f.readlines():\n record = json.loads(line)\n event = deserialize_event(record)\n self._events[event.name].append(event)\n for name in self._events.keys():\n self._events[name].sort(key=lambda x: x.timestamp)", "def load_velo_timestamps(velo_path):\n ts_start_file = os.path.join(velo_path, 'timestamps_start.txt')\n ts_end_file = os.path.join(velo_path, 'timestamps_end.txt')\n\n ts_start = load_timestamps(ts_start_file)\n ts_end = load_timestamps(ts_end_file)\n\n return ts_start, ts_end", "def _sort_by_pod_creation_timestamp(pod: V1Pod) -> datetime.datetime:\n return pod.metadata.creation_timestamp" ]
[ "0.70775634", "0.7044557", "0.6865213", "0.6789583", "0.6551319", "0.65016", "0.64157754", "0.63982505", "0.6367806", "0.6276606", "0.6226092", "0.619473", "0.6178502", "0.60988915", "0.60325676", "0.6003747", "0.59616065", "0.5910013", "0.5909187", "0.581583", "0.58126765", "0.58086145", "0.5788114", "0.57417065", "0.5691855", "0.5661201", "0.5653538", "0.5641342", "0.5623968", "0.5612479" ]
0.73157066
0
creates a vao if the instance has a shape where we did not create an vao yet
def update_shape_vaos(self, instance, show):
    shape = self._shape(instance)
    shape_object_id = id(shape)
    if not shape_object_id in self._shape_vaos:
        self._shape_vaos[shape_object_id] = VertexArray({
            'vertex_position': VertexBuffer.from_numpy(shape.verticies),
            'texture_coords': VertexBuffer.from_numpy(shape.texture_coords),
        }, self.program.attributes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agregar_arista(self, v, w, peso=1):\n if not v in self.vertices or not w in self.vertices:\n return False\n self.vertices[v][w]= peso\n if not self.dirigido: self.vertices[w][v] = peso", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def test_creation(self):\n\n assert self.test_shape.solid is not None\n assert self.test_shape.volume() > 1000", "def setupVAO(self, gpuShape):\n\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_ARRAY_BUFFER, gpuShape.vbo)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gpuShape.ebo)\n\n # 3d vertices + rgb color specification => 3*4 + 3*4 = 24 bytes\n position = glGetAttribLocation(self.shaderProgram, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n color = glGetAttribLocation(self.shaderProgram, \"color\")\n glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n\n # Unbinding current vao\n glBindVertexArray(0)", "def setupVAO(self, gpuShape):\n\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_ARRAY_BUFFER, gpuShape.vbo)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gpuShape.ebo)\n\n # 3d vertices + rgb color specification => 3*4 + 3*4 = 24 bytes\n position = glGetAttribLocation(self.shaderProgram, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n color = glGetAttribLocation(self.shaderProgram, \"color\")\n glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n\n # Unbinding current vao\n glBindVertexArray(0)", "def setupVAO(self, gpuShape):\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_ARRAY_BUFFER, gpuShape.vbo)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gpuShape.ebo)\n\n # 3d vertices + rgb color specification => 3*4 + 3*4 = 24 bytes\n position = glGetAttribLocation(self.shaderProgram, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n color = glGetAttribLocation(self.shaderProgram, \"color\")\n glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n\n # Unbinding current vao\n glBindVertexArray(0)", "def test_pointnum1():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(0, 0), radius=300, thickness=10)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def is_vertex(self): \n return False", "def agregar_vertice(self, v):\n if not v in self.vertices.keys():\n self.vertices[v] = {}", "def _createVetor(cls, elem):\n return cls(elem)", "def is_V(self):\n return True", "def is_V(self):\n return True", "def has_vp(self):\n raise NotImplementedError", "def _create(self, creation_type: str = \"Uniform\"):\n if creation_type == \"Uniform\":\n number_of_vectors = comb(\n self.lattice_resolution + self.number_of_objectives - 1,\n self.number_of_objectives - 1,\n exact=True,\n )\n self.number_of_vectors = number_of_vectors\n temp1 = range(1, self.number_of_objectives + self.lattice_resolution)\n temp1 = np.array(list(combinations(temp1, self.number_of_objectives - 1)))\n temp2 = np.array(\n [range(self.number_of_objectives - 1)] * self.number_of_vectors\n )\n temp = temp1 - temp2 - 1\n weight = np.zeros(\n (self.number_of_vectors, self.number_of_objectives), dtype=int\n )\n weight[:, 0] = temp[:, 0]\n for i in range(1, 
self.number_of_objectives - 1):\n weight[:, i] = temp[:, i] - temp[:, i - 1]\n weight[:, -1] = self.lattice_resolution - temp[:, -1]\n self.values = weight / self.lattice_resolution\n self.values_planar = np.copy(self.values)\n self.normalize()\n return\n elif creation_type == \"Focused\":\n point_set = [[0, 1, -1]] * (self.number_of_objectives - 1)\n # The cartesian product of point_set.\n initial = np.array(list(product(*point_set)))[1:]\n # First element was removed because of the error during normalization.\n initial = normalize(initial)\n initial = np.hstack((initial, np.zeros((initial.shape[0], 1))))\n final = shear(initial, degrees=5)\n # Adding the first element back\n final = np.vstack(([0] * (self.number_of_objectives - 1) + [1], final))\n self.number_of_vectors = final.shape[0]\n self.values = rotate(final[0], self.ref_point, final)\n self.values_planar = np.copy(self.values)\n self.normalize()\n self.add_edge_vectors()\n elif creation_type == \"Sparse_Focused\":\n initial = np.eye(self.number_of_objectives - 1)\n initial = np.vstack((initial, -initial))\n initial = normalize(initial)\n initial = np.hstack((initial, np.zeros((initial.shape[0], 1))))\n final = shear(initial, degrees=5)\n # Adding the first element back\n final = np.vstack(([0] * (self.number_of_objectives - 1) + [1], final))\n self.number_of_vectors = final.shape[0]\n self.values = rotate(final[0], self.ref_point, final)\n self.values_planar = np.copy(self.values)\n self.normalize()\n self.add_edge_vectors()", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(ibeo_objeto, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.id is None:\n self.id = 0\n if self.age is None:\n self.age = 0.\n if self.velocidad_relativa_x is None:\n self.velocidad_relativa_x = 0.\n if self.velocidad_relativa_y is None:\n self.velocidad_relativa_y = 0.\n if self.velocidad_absoluta_x is None:\n self.velocidad_absoluta_x = 0.\n if self.velocidad_absoluta_y is None:\n self.velocidad_absoluta_y = 0.\n if self.velocidad_absoluta_sigma_x is None:\n self.velocidad_absoluta_sigma_x = 0.\n if self.velocidad_absoluta_sigma_y is None:\n self.velocidad_absoluta_sigma_y = 0.\n if self.bounding_box_centro_x is None:\n self.bounding_box_centro_x = 0.\n if self.bounding_box_centro_y is None:\n self.bounding_box_centro_y = 0.\n if self.bounding_box_largo is None:\n self.bounding_box_largo = 0.\n if self.bounding_box_ancho is None:\n self.bounding_box_ancho = 0.\n if self.object_box_centro_x is None:\n self.object_box_centro_x = 0.\n if self.object_box_centro_y is None:\n self.object_box_centro_y = 0.\n if self.object_box_orientacion is None:\n self.object_box_orientacion = 0.\n if self.object_box_size_x is None:\n self.object_box_size_x = 0.\n if self.object_box_size_y is None:\n self.object_box_size_y = 0.\n if self.clasificacion is None:\n self.clasificacion = 0\n if self.clasificacion_age is None:\n self.clasificacion_age = 0.\n if self.clasificacion_certeza is None:\n self.clasificacion_certeza = 0.\n if self.punto_cercano_x is None:\n self.punto_cercano_x = 0.\n if self.punto_cercano_y is None:\n self.punto_cercano_y = 0.\n if self.punto_referencia_x is None:\n self.punto_referencia_x = 0.\n if self.punto_referencia_y is None:\n self.punto_referencia_y = 0.\n if self.punto_referencia_sigma_x is None:\n self.punto_referencia_sigma_x = 0.\n if self.punto_referencia_sigma_y is None:\n self.punto_referencia_sigma_y = 0.\n else:\n self.id = 0\n self.age = 0.\n 
self.velocidad_relativa_x = 0.\n self.velocidad_relativa_y = 0.\n self.velocidad_absoluta_x = 0.\n self.velocidad_absoluta_y = 0.\n self.velocidad_absoluta_sigma_x = 0.\n self.velocidad_absoluta_sigma_y = 0.\n self.bounding_box_centro_x = 0.\n self.bounding_box_centro_y = 0.\n self.bounding_box_largo = 0.\n self.bounding_box_ancho = 0.\n self.object_box_centro_x = 0.\n self.object_box_centro_y = 0.\n self.object_box_orientacion = 0.\n self.object_box_size_x = 0.\n self.object_box_size_y = 0.\n self.clasificacion = 0\n self.clasificacion_age = 0.\n self.clasificacion_certeza = 0.\n self.punto_cercano_x = 0.\n self.punto_cercano_y = 0.\n self.punto_referencia_x = 0.\n self.punto_referencia_y = 0.\n self.punto_referencia_sigma_x = 0.\n self.punto_referencia_sigma_y = 0.", "def CreateVector(self) -> BaseVector:", "def _ensure_exists(self, name, shape):\n ident = name.lower()\n internal = self._internals.get(ident, None)\n if internal is None:\n internal = Internal(name, shape)\n self._internals[ident] = internal\n return internal", "def _auto_create(self):\n status = [\n os.path.exists(self.vertices_path),\n os.path.exists(self.edges_path),\n ]\n\n if not all(status):\n self._create_vertex_skel(self.path)\n self._create_edge_skel(self.path)", "def test_pointnum3():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(1000, -500), radius=5000, thickness=50)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def __init__(self,fovraws,ralohi=(),declohi=()\n ,obs_pos=None,obs_vel=None,obs_year=None\n ):\n ### Get count of items in FOV sequence; ensure it is 2 or more\n ### and ralohi and declohi are empty, or that fovraws is empty\n ### and ralohi and declohi have 2 values each\n (self.fovraws\n ,self.ralohi\n ,self.declohi\n ,self.obs_pos\n ,self.obs_vel\n ,self.obs_year\n ,)= fovraws,list(ralohi),list(declohi),obs_pos,obs_vel,obs_year\n self.L = len(fovraws)\n assert (1<self.L and not (self.ralohi+self.declohi)\n ) or (0==self.L and 2==len(self.ralohi) and 2==len(self.declohi)\n ), 'Invalid vertices in FOV'\n\n ################################\n ### Initialize: FOV RA,Dec pairs; FOV type (assume polygon); FOV\n ### vector triples; list of RA,Dec boxes\n self.radecdegs = list()\n self.fovtype = 1<self.L and FOV.POLYGONTYPE or FOV.RADECBOXTYPE\n self.uvfovxyzs,fovsum = list(),sp.vpack(0.,0.,0.)\n self.radec_boxes = list()\n rdba = self.radec_boxes.append ### Shorthand to append box to list\n\n ################################\n ### Parse list of vertices:\n ### - [list,float] => Circle (cone)\n ### - [list,list] => RA,Dec box\n ### - [list,list,list,...] 
=> Polygon\n for vertex in fovraws:\n\n ### For second of two vertices ...\n if 1==len(self.radecdegs) and 2==self.L:\n ### Two-vertex items are either a conic FOV, or an [RA,Dec] box\n try:\n ### If second item in list is a float, then it's a half-angle\n ### of the cone\n self.hangdeg = float(vertex)\n assert self.hangdeg < 90.0,'Cone half-angle is not less than 90degrees'\n assert self.hangdeg > 0.0,'Cone half-angle is not greater than 0degrees'\n self.hangrad = self.hangdeg * rpd\n self.min_cosine = math.cos(self.hangrad)\n self.uv_cone_axis = self.uvfovxyzs[0]\n self.fovtype = FOV.CIRCLETYPE\n break\n except AssertionError as e:\n raise\n except:\n ### If the above fails, then it's the second corner of the box\n self.fovtype = FOV.RADECBOXTYPE\n\n ### Parse one vertex\n ra,dec,uvxyz = parse_inertial(vertex)\n\n ### Append RA,Dec and unit vector XYZ onto their resepective lists\n self.radecdegs.append((ra,dec,))\n self.uvfovxyzs.append(uvxyz)\n fovsum = sp.vadd(fovsum,uvxyz)\n\n ################################\n ### Calculate RA,DEC limits as list of [ralo,rahi,declo,dechi] boxes\n ### - .radec_boxes is a list; rdba is .radec_boxes.append\n ### - List will have multiple RA,Dec boxes if FOV crosses the Prime\n ### Meridian (PM) an even number of times.\n\n if self.fovtype == FOV.RADECBOXTYPE:\n ### RA,DEC box FOV: calculate limits; handle PM crossing\n if 2==self.L:\n ras,decs = zip(*self.radecdegs)\n ralo,rahi = sorted(ras)\n declo,dechi = sorted(decs)\n if 180 > (rahi-ralo):\n rdba([ralo,rahi,declo,dechi])\n else:\n rdba([0.0,ralo,declo,dechi])\n rdba([rahi,360.0,declo,dechi])\n else:\n if self.ralohi[1] > self.ralohi[0]:\n rdba(self.ralohi+self.declohi)\n else:\n rdba([self.ralohi[0],360.0]+self.declohi)\n rdba([0.0,self.ralohi[1]]+self.declohi)\n\n elif self.fovtype == FOV.CIRCLETYPE:\n ### Circular FOV: DEC limits determine RA limits; handle PM Xing\n ra,dec = self.radecdegs[0]\n fovdeclo = dec - self.hangdeg\n fovdechi = dec + self.hangdeg\n\n if fovdeclo < -90.0 or fovdechi > 90.0:\n ### A pole is in the FOV; use full RA range\n fovralo,fovrahi = 0.0,360.0\n fovdeclo,fovdechi = max([fovdeclo,-90.0]),min([fovdechi,+90.0])\n\n elif fovdeclo == -90.0 or fovdechi == 90.0:\n ### A pole is on the FOV circumference; RA range is 180 degrees\n fovralo,fovrahi = ra-90.0,ra+90.0\n\n else:\n ### The FOV excludes the poles; calculate the RA range, using\n ### the formula validated in script validate_delta_ra_formula.py\n tanhang,tandec = math.tan(self.hangrad),math.tan(dec*rpd)\n sinhang,cosdec = math.sin(self.hangrad),math.cos(dec*rpd)\n coshang = math.cos(self.hangrad)\n T = sinhang / math.sqrt(1.0 - ((tanhang*tandec)**2))\n deltara = dpr * math.atan(T / (cosdec * coshang))\n fovralo,fovrahi = ra-deltara,ra+deltara\n\n ### Ensure RA limits are within range [0:360] (N.B. 
inclusive)\n if fovralo < 0.0: fovralo += 360.0\n if fovrahi > 360.0: fovrahi -= 360.0\n\n if fovralo <= fovrahi:\n ### RA lo <= RA hi: no PM crosssing\n rdba([fovralo,fovrahi,fovdeclo,fovdechi])\n else:\n ### RA hi < RA hi: there is a PM crosssing\n rdba([0.0,fovrahi,fovdeclo,fovdechi])\n rdba([fovralo,360.,fovdeclo,fovdechi])\n\n else:\n assert self.fovtype == FOV.POLYGONTYPE\n ### Polygonal FOV: build frame where all vertices will be\n ### projected onto the plane Z=1\n\n ### .uvavg: unit vector = mean of all vertices, will be +Z\n self.uvavg = sp.vhat(fovsum)\n\n ### Create rotation matrix to FOV frame: +Z is mean of vertices'\n ### directions (.uvavg); +X will be a direction that is not\n ### parallel to any side of the polygon\n ### - Start with temporary matrix with +Z as defined above; +X\n ### toward vertex at largest angle from .uvavg\n vother = min([(sp.vdot(self.uvavg,v),list(v),) for v in self.uvfovxyzs])[1]\n tmpmtx = sp.twovec(self.uvavg,3,vother,1)\n ### - Rotate all vectors to that frame; scale Z components to 1.0\n vtmps = list()\n for v in self.uvfovxyzs:\n ### - Ensure all vertices are in the same hemisphere\n assert 0.0 < sp.vdot(self.uvavg,v),'All vertices are not in the same hemisphere'\n vtmp = sp.mxv(tmpmtx,v)\n vtmps.append(sp.vscl(1.0/vtmp[2],vtmp))\n\n ### Find largest azimuth gap between any two sides: that azimuth\n ### will be direction of +X in the final rotation matrix\n ### - Get azimuths of all sides of polygon, in range [-PI:PI]\n azimuths,vlast = list(),vtmps[-1]\n for v in self.uvfovxyzs:\n azimuths.append(numpy.arctan((v[1]-vlast[1])/(v[0]-vlast[0])))\n vlast = v\n ### - Sort angles and add [least angle plus PI] to end of list\n azimuths.sort()\n azimuths.append(azimuths[0]+sp.pi())\n ### - Find largest delta-azimuth and its index\n dazimuths = [hi-lo for hi,lo in zip(azimuths[1:],azimuths[:-1])]\n maxdaz = max(dazimuths)\n imaxdaz = dazimuths.index(maxdaz)\n ### - Calculate azimuth from to mean of that delta-azimuth,\n meanaz = azimuths[imaxdaz] + (maxdaz / 2.0)\n\n ### Final matrix: add rotation of tmpmtx around +Z by that angle\n self.mtxtofov = sp.mxm(sp.rotate(meanaz,3),tmpmtx)\n\n ### Apply final rotation matrix, store results in .uvlclxyzs\n tmpmtx = sp.twovec(self.uvavg,3,vother,1)\n self.uvlclxyzs = [self.rotate_to_local(v) for v in self.uvfovxyzs]\n\n ### Calculate upper and lower RA and Dec limits, with PM crossings\n los,his = list(),list()\n ### - Create [[RA,Dec],[X,Y,Z]] pairs list; ensure last is off PM\n pairs = list(zip(self.radecdegs,self.uvfovxyzs))\n pop_count = 0\n while pairs[-1][0][0] == 0.0:\n pop_count += 1\n assert pop_count < self.L,'All vertices are on the Prime Meridian'\n pairs.append(pairs.pop(0))\n\n ### Count PM crossings\n self.crossing_count = 0\n lastra = pairs[-1][0][0]\n zero_count = 0\n for (ra,dec,),xyz in pairs:\n if ra == 0.0:\n zero_count += 1\n if lastra > 180.0: ra = 360.0\n if 180 < abs(ra-lastra): self.crossing_count += 1\n lastra = ra\n\n if 0==self.crossing_count or 1==(1&self.crossing_count):\n ### If there are either no, or an odd number, of PM crossings,\n ### then use the pairs as-is for a single FOV\n subfovs = [pairs]\n if self.crossing_count:\n ### - For odd crossing count, one pole or the other must be\n ### in the FOV; init full RA range, that pole for Dec ranges\n ralo,rahi = 0.0,360.0\n if sp.vdot(self.uvavg,[0,0,1]) > 0.0: declo = dechi = +90.0\n else : declo = dechi = -90.0\n else:\n ### - For zero crossing count, initialize inverted ranges\n ralo,rahi = 360.0,0.0\n declo,dechi = 
+90.0,-90.0\n subranges = [[ralo,rahi,declo,dechi]]\n\n else:\n ### If there are an even, non-zero number of PM crossings, break\n ### them into two sub-FOVs, one on either side of the PM\n\n eastfov,westfov = list(),list()\n\n if zero_count:\n ### If there are any zero RA values, rotate the pairs to\n ### ensure a zero-RA pair is the first, so it and the non-zero\n ### last pair will be assigned to the correct side of the PM\n while pairs[0][0][0]!=0.0: pairs.append(pairs.pop(0))\n else:\n ### If there are no zero RA values, rotate the pairs to ensure\n ### a crossing occurs between the last and first pair, so the\n ### corresponding zero crossing will be assigned to the\n ### correct side of the PM\n while abs(pairs[0][0][0]-pairs[-1][0][0])<180:\n pairs.append(pairs.pop(0))\n\n ### Write vertices into the two sub-FOVs\n\n ### - Set last-vertex values for first item in pairs\n (lastra,lastdec,),lastxyz = pairs[-1]\n\n for pair in pairs:\n ### - Loop over vertex pairs ((RA,DEC,),Cartesian_Vector)\n (ra,dec,),xyz = pair\n\n if ra == 0.0:\n\n ### - When RA=0, the previous RA determines if it's 0 ar 360\n if lastra >= 180.0:\n ra = 360.0\n westfov.append([(ra,dec,),xyz])\n iswest = True\n else:\n eastfov.append(pair)\n iswest = False\n\n elif abs(lastra-ra) >= 180.0:\n\n ### - When the change in RA>=180, the PM is being crossed\n\n ### - Find the mid-vector where the PM is crossed\n k1 = -xyz[1] / (lastxyz[1]-xyz[1])\n midxyz = sp.vhat(sp.vlcom(1.0-k1,xyz,k1,lastxyz))\n middec = dpr * sp.recrad(midxyz)[2]\n\n ### - Add that mid-vector, with RA=360, to the west FOV\n westfov.append([(360.0,middec,),midxyz])\n\n ### - Determine if vector is west\n iswest = ra >= 180.0\n\n ### - Add that mid-vector, with RA=0, to the east FOV ...\n if (ra > 0.0) and (not iswest):\n ### - ... 
only if the ra is not already 0, as it will be\n ### added in the next step\n eastfov.append([(0.0,middec,),midxyz])\n\n ### Add the vector to either east or west FOV\n if iswest: westfov.append(pair)\n else : eastfov.append(pair)\n\n else:\n\n ### PM was not crossed, add vector to same FOV, as last time\n if iswest: westfov.append(pair)\n else : eastfov.append(pair)\n\n ### - Set last-vertex values for next item in pairs\n (lastra,lastdec,),lastxyz = (ra,dec,),xyz\n\n ### - Create subfovs list of east and west FOVs; set subranges\n subfovs = [eastfov,westfov]\n subranges = [[360.0,0.0,90.0,-90.0],[360.0,0.0,90.0,-90.0]]\n\n ### To here, we have list of FOV(s) and list of range(s); use them\n ### to determine RA,DEC box(es) to use for database query\n\n while subfovs:\n\n ### Get sub-FOV, sub-range; set last vertex's XYZ\n subfov,(ralo,rahi,declo,dechi,) = subfovs.pop(),subranges.pop()\n lastxyz = subfov[-1][-1]\n\n for pair in subfov:\n ### Each element of subfov comprises (RA,Dec) and vertex XYZ\n ### - xyz is a unit vector\n (ra,dec,),xyz = pair\n\n ### - Adjust RA limits as needed from RA of vertex\n if ra > rahi: rahi = ra\n elif ra < ralo: ralo = ra\n\n ### - Set Dec extrema from DEC of vertex\n maxdec = mindec = dec\n\n ### - Calculate Dec extrema from lastxyz to xyz\n ### -- Normal to plane of lastxyz and syz\n sidenormal = sp.vcrss(lastxyz,xyz)\n ### -- Z-rates along great circle at lastxyz and at xyz\n lastdz = sp.vcrss(sidenormal,lastxyz)[2]\n dz = sp.vcrss(sidenormal,xyz)[2]\n if 0.0 > (lastdz*dz):\n ### -- If sign of Z-rates differs, there should be an\n ### extreme value between lastxyz and xyz\n ### --- Get vector perpendicular to side normal on equator\n ### --- Use that to calculate the unit vector at Dec extreme\n equinox = sp.vcrss([0,0,1],sidenormal)\n vtoextremez = sp.ucrss(sidenormal,equinox)\n ### --- Cosine of angle between lastxyz and xyz\n mindot = sp.vdot(lastxyz,xyz)\n for none in [None,None]:\n ### --- Two cases: vtoextremez and -vtoextremez\n ### - Angles from vtoextremez to lastxyz and to xyz\n ### must be less than angle between lastxyz and xyz\n ### so cosines of those angles must be greater\n lastxyzdot = sp.vdot(lastxyz,vtoextremez)\n xyzdot = sp.vdot(xyz,vtoextremez)\n if lastxyzdot>mindot and xyzdot>mindot:\n ### --- Adjust maxdec and mindec as needed\n try : extremedec = dpr * math.asin(vtoextremez[2])\n except: extremedec = dpr * sp.recrad(vtoextremez)[2]\n if extremedec > maxdec: maxdec = extremedec\n elif extremedec < mindec: mindec = extremedec\n break\n ### --- Invert vtoextremez for next pass\n vtoextremez = sp.vminus(vtoextremez)\n\n ### - Adjust Dec limits as needed from Dec extrema of side\n if maxdec > dechi: dechi = maxdec\n if mindec < declo: declo = mindec\n lastxyz = xyz\n\n ### Append calculated RA,Dec box(es)\n rdba((ralo,rahi,declo,dechi,))\n\n ### Put None in .localxyzs, in .v_for_stellar_aberr, and in\n ### .v_for_parallax; if no stellar aberration or parallax is\n ### explicitly applied to define it later, then .localxyzs will be\n ### calculated on the fly\n self.localxyzs = None\n self.v_for_stellar_aberr = None\n self.v_for_parallax = None", "def is_vertex(self):\n return False", "def __init__(self, shape):\n\n self.shape = shape", "def __array_finalize__(self, obj):\n if obj is None or obj.__class__ is Vector3:\n return\n if self.shape != (3,):\n raise ValueError(\n 'Invalid array to view as Vector3 - must be length-3 array.'\n )", "def test_pointnum2():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(100, -100), 
radius=400, thickness=25)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def is_vertex(self):\n return True", "def is_vertex(self):\n return True", "def shape(self) -> Shape:", "def create(self):\n raise NotImplementedError(\"\"\"The create method must be overridden to\n build desired vtk objects.\"\"\")", "def atualizaVertice(self, v = []):\r\n\r\n #reseta as arestas para d0\r\n #for a in self.arestas:\r\n # a.peso = a.d0\r\n\r\n for vertice in v:\r\n for a in self.arestas:\r\n if (vertice.id == a.v1.id):\r\n #print (\"atualiza aresta\", a.id)\r\n if (a.v2.atualizado):\r\n a.peso = a.d2\r\n else:\r\n a.peso = a.d1\r\n\r\n\r\n if (vertice.id == a.v2.id):\r\n #print (\"atualiza aresta\", a.id)\r\n if (a.v1.atualizado):\r\n a.peso = a.d2\r\n else:\r\n a.peso = a.d1\r\n \r\n vertice.atualizado = True\r\n \r\n for vertice in v:\r\n vertice.atualizado = False", "def test_autocreate(self):\n a = Vector(1, 2)\n b = Vector(a)\n assert b == a" ]
[ "0.5730283", "0.5638069", "0.5638069", "0.56318754", "0.56318754", "0.5603204", "0.5560593", "0.54646444", "0.5322073", "0.5228889", "0.5199293", "0.5199293", "0.5173173", "0.516719", "0.51552814", "0.5147659", "0.51269144", "0.5120981", "0.5112034", "0.5101421", "0.51001644", "0.5085793", "0.5077105", "0.5058219", "0.5017964", "0.5017964", "0.5010424", "0.5009102", "0.5000139", "0.4981105" ]
0.6779693
0
renders a texture containing the borders of all shapes.
def _render_borders(self):
    # XXX
    # - read the old glBlendFunc value and restore it if neccessary.
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    self.border_program.use()
    for shape_object_id, instances in self._instances.items():
        self._shape_vaos[shape_object_id].bind()
        for instance in instances:
            border_size = instance.border['size']
            if len(instance.border) > 0:
                glEnable(GL_BLEND)
                # XXX
                # - cache the modelview matrix
                modelview = ModelView()
                modelview.set_scaling(instance.size[0]+2*border_size, instance.size[1]+2*border_size)
                modelview.set_position(instance.position[0]-border_size, instance.position[1]-border_size)
                self.border_program.uniform('mat_modelview', modelview.mat4)
                self.border_program.uniform('color', instance.border['color'])
                glDrawArrays(GL_TRIANGLES, 0, 6)
                glDisable(GL_BLEND)
                # XXX
                # - cache the modelview matrix
                modelview = ModelView()
                modelview.set_scaling(*instance.size)
                modelview.set_position(*instance.position)
                self.border_program.uniform('color', [0,0,0,0])
                self.border_program.uniform('mat_modelview', modelview.mat4)
                glDrawArrays(GL_TRIANGLES, 0, 6)
        self._shape_vaos[shape_object_id].unbind()
    self.border_program.unuse()
    glEnable(GL_BLEND)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tile_border(draw, r_s, r_e, c_s, c_e, color, border_size=TILE_BORDER_SIZE):\n for x in range(0, border_size):\n draw.rectangle([(c_s + x, r_s + x), (c_e - 1 - x, r_e - 1 - x)], outline=color)", "def borders(self):\n border_left = pm.Segment(self.space.static_body, (-5, 0), (-5, self.screen_height), 10)\n border_right = pm.Segment(self.space.static_body, (self.screen_width + 5, 0),\n (self.screen_width + 5, self.screen_height), 10)\n border_top = pm.Segment(self.space.static_body, (0, self.screen_height + 5),\n (self.screen_width, self.screen_height + 5), 10)\n border_bottom = pm.Segment(self.space.static_body, (0, 0), (self.screen_width, 0),\n self.screen_height * 0.1)\n border_bottom.friction = TERRAIN_FRICTION # Set the bottom border friction\n border_bottom.color = DARK_GREY # Set the bottom border color\n\n # Set the collision types so that the collision handlers check for them\n border_top.collision_type = 4\n border_left.collision_type = 4\n border_right.collision_type = 4\n border_bottom.collision_type = 4\n self.space.add(border_left, border_right, border_top, border_bottom) # Add the borders to the Pymunk space", "def render(self, ctx):\n image = np.zeros((self.height, self.width, 3), np.uint8)\n for shape in ctx.shapes:\n if isinstance(shape, context.Rectangle):\n x = int(shape.width / 2)\n y = int(shape.height / 2)\n rad = np.radians(shape.rotation)\n rotation = np.array([[np.cos(rad), -np.sin(rad)],\n [np.sin(rad), np.cos(rad)]])\n translation = np.array([[shape.center.x], [shape.center.y]])\n corners = np.array([[-x, x, x, -x], [y, y, -y, -y]])\n transformed_corners = rotation.dot(corners) + translation\n transformed_corners = transformed_corners.T.astype(int)\n cv2.fillPoly(image, pts=[transformed_corners],\n color=shape.color)\n elif isinstance(shape, context.Circle):\n center = (int(shape.center.x), int(shape.center.y))\n image = cv2.circle(image, center, int(shape.radius),\n color=shape.color, thickness=shape.thickness)\n elif isinstance(shape, context.Text):\n center = (int(shape.center.x), int(shape.center.y))\n image = cv2.putText(image, shape.content, center,\n cv2.FONT_HERSHEY_SIMPLEX, shape.size,\n shape.color, 3, cv2.LINE_AA)\n elif isinstance(shape, context.Image):\n file_image = cv2.imread(shape.filepath, cv2.IMREAD_UNCHANGED)\n file_image = cv2.resize(file_image, (shape.width, shape.height))\n\n y1 = int(shape.center.y - shape.height / 2)\n y2 = int(y1 + file_image.shape[0])\n x1 = int(shape.center.x - shape.width / 2)\n x2 = int(x1 + file_image.shape[1])\n\n rgba = cv2.cvtColor(file_image, cv2.COLOR_RGB2RGBA)\n alpha_s = rgba[:, :, 3] / 255.0\n alpha_l = 1.0 - alpha_s\n\n image_save = image.copy()\n for c in range(0, 3):\n try:\n image[y1:y2, x1:x2, c] = (\n alpha_s * file_image[:, :, c] +\n alpha_l * image[y1:y2, x1:x2, c])\n except ValueError:\n image = image_save\n\n self._display_frame(image)", "def drawCube( self ):\n glBegin(GL_QUADS);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 
1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def draw_border():\n \n length = len(BORDER_COORDS)\n \n # Constants for sine wave\n b = 2 * math.pi / length\n speed = 2\n \n # Draw sinusoid red/green design\n for i in range(length):\n # Sine function\n t = perf_counter()\n sine = math.sin(b*i + speed*t) # Wave with period 28\n \n # Map sine value from [-1, 1] to [0, 4)\n red = min(math.floor(2 * sine + 2), 3)\n \n # Fade red and green colors\n lp.led_ctrl_xy(*BORDER_COORDS[i], red, 3 - red)", "def draw_on(self, surface):\n for x, y in self.alive_cells():\n #size = (self.box_size, self.box_size)\n #position = (x * self.box_size, y * self.box_size)\n #thickness = 1\n pygame.draw.rect(surface, DARK_RED, (x * self.box_size, y * self.box_size,self.box_size, self.box_size ))", "def render(pictures):\n # Display the background\n display.fill(BLACK)\n\n # Draw each picture\n i = 0 # Number of pictures drawn\n for picture in pictures:\n offsets = get_offsets(i)\n\n for polygon in picture.polygons:\n # Adjust vertices so pictures aren't overlapping\n adjusted_vertices = []\n\n for v in polygon.vertices:\n adjusted_vertices.append((v[0] + offsets[0], v[1] + offsets[1]))\n pygame.draw.polygon(display, polygon.color, adjusted_vertices)\n\n i += 1\n\n pygame.display.update()", "def drawCube(self):\r\n glBegin(GL_QUADS);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glEnd()", "def draw_foreground(self):\n index = 0\n for tile in self.foreground_data:\n if tile != 
self.empty_tile:\n x_pos = (index * self.tile_size) % self.w\n y_pos = math.floor((index * self.tile_size) / self.w) * self.tile_size\n b = Block(tile, x_pos, y_pos)\n self.screen.entity_layer_1.add(b)\n index += 1", "def render(self, context):\n pygame.draw.rect(context, (255, 0, 0), self.box)", "def render(self,surf,box,r,size=None,offset=None):\n\n if box == 0: return\n\n if is_color(box):\n surf.fill(box,r)\n return\n \n x,y,w,h=r.x,r.y,r.w,r.h\n\n if (size and offset):\n pass\n# destx = x\n# desty = y\n\n # Calculate the size of each tile\n tilew, tileh = int(box.get_width()/3), int(box.get_height()/3)\n xx, yy = x+w, y+h\n src = pygame.rect.Rect(0, 0, tilew, tileh)\n dest = pygame.rect.Rect(0, 0, tilew, tileh)\n\n # Render the interior of the box\n surf.set_clip(pygame.Rect(x+tilew, y+tileh, w-tilew*2, h-tileh*2))\n src.x,src.y = tilew,tileh\n for dest.y in range(y+tileh,yy-tileh,tileh):\n for dest.x in range(x+tilew,xx-tilew,tilew): \n surf.blit(box,dest,src)\n\n # Render the top side of the box\n surf.set_clip(pygame.Rect(x+tilew,y,w-tilew*2,tileh))\n src.x,src.y,dest.y = tilew,0,y\n for dest.x in range(x+tilew, xx-tilew*2+tilew, tilew): \n surf.blit(box,dest,src)\n \n # Render the bottom side\n surf.set_clip(pygame.Rect(x+tilew,yy-tileh,w-tilew*2,tileh))\n src.x,src.y,dest.y = tilew,tileh*2,yy-tileh\n for dest.x in range(x+tilew,xx-tilew*2+tilew,tilew): \n surf.blit(box,dest,src)\n\n # Render the left side\n surf.set_clip(pygame.Rect(x,y+tileh,xx,h-tileh*2))\n src.y,src.x,dest.x = tileh,0,x\n for dest.y in range(y+tileh,yy-tileh*2+tileh,tileh): \n surf.blit(box,dest,src)\n\n # Render the right side\n surf.set_clip(pygame.Rect(xx-tilew,y+tileh,xx,h-tileh*2))\n src.y,src.x,dest.x=tileh,tilew*2,xx-tilew\n for dest.y in range(y+tileh,yy-tileh*2+tileh,tileh): \n surf.blit(box,dest,src)\n\n # Render the upper-left corner\n surf.set_clip()\n src.x,src.y,dest.x,dest.y = 0,0,x,y\n surf.blit(box,dest,src)\n \n # Render the upper-right corner\n src.x,src.y,dest.x,dest.y = tilew*2,0,xx-tilew,y\n surf.blit(box,dest,src)\n \n # Render the lower-left corner\n src.x,src.y,dest.x,dest.y = 0,tileh*2,x,yy-tileh\n surf.blit(box,dest,src)\n \n # Render the lower-right corner\n src.x,src.y,dest.x,dest.y = tilew*2,tileh*2,xx-tilew,yy-tileh\n surf.blit(box,dest,src)", "def drawCube( self ):\n glBegin(GL_QUADS);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, 
-1.0);\n glEnd()", "def draw_boxes(image, bounds):\n draw = ImageDraw.Draw(image)\n if bounds[0].normalized_vertices:\n width = image.width\n height = image.height\n for i in range(len(bounds)):\n draw.polygon([\n bounds[i].normalized_vertices[0].x * width, bounds[i].normalized_vertices[0].y * height,\n bounds[i].normalized_vertices[1].x * width, bounds[i].normalized_vertices[1].y * height,\n bounds[i].normalized_vertices[2].x * width, bounds[i].normalized_vertices[2].y * height,\n bounds[i].normalized_vertices[3].x * width, bounds[i].normalized_vertices[3].y * height],\n None, colors[i % len(colors)])\n return image\n else:\n for i in range(len(bounds)):\n draw.polygon([\n bounds[i].vertices[0].x, bounds[i].vertices[0].y,\n bounds[i].vertices[1].x, bounds[i].vertices[1].y,\n bounds[i].vertices[2].x, bounds[i].vertices[2].y,\n bounds[i].vertices[3].x, bounds[i].vertices[3].y],\n None, colors[i % len(colors)])\n return image", "def draw_boxes(image, bounds, color):\n draw = ImageDraw.Draw(image)\n\n for bound in bounds:\n draw.polygon([\n bound.vertices[0].x, bound.vertices[0].y,\n bound.vertices[1].x, bound.vertices[1].y,\n bound.vertices[2].x, bound.vertices[2].y,\n bound.vertices[3].x, bound.vertices[3].y], None, color)\n # font = ImageFont.truetype(\"sans-serif.ttf\", 10)\n draw.text((bound.vertices[0].x, bound.vertices[0].y,),bound,(255,255,255),font=font)\n return image", "def draw(self, screen):\n for branch_points in self.branches:\n pygame.draw.polygon(screen, self.branch_color, branch_points)\n for bottom_points in self.bottom:\n pygame.draw.polygon(screen, self.bottom_color, bottom_points)", "def __draw_board_texture(self, texture):\n\n textureWidth, textureHeight = texture.size\n\n for x in range(0, self.width, textureWidth):\n for y in range(0, self.height, textureHeight):\n self.baseImage.paste(texture, (x, y))", "def draw(self,screen):\n for tile in self.tile_list:\n screen.blit(tile[0],tile[1])\n # pygame.draw.rect(screen,(255,255,255),tile[1],2)\n\n for tile in self.objList:\n screen.blit(tile[0],tile[1])\n # pygame.draw.rect(screen,(255,255,255),tile[1],2)\n # rectangle print for tiles", "def draw_pyramid(self):\n for item in self.subdivision_list:\n glBegin(GL_POLYGON)\n glColor3f(0.5, 0.5, 0.5)\n glVertex3f(item[0].x, item[0].y, item[0].z)\n glVertex3f(item[1].x, item[1].y, item[1].z)\n glVertex3f(item[2].x, item[2].y, item[2].z)\n glEnd()", "def generate_outlines(self):\n morphed_atlas = bio.load_nii(self.registered_atlas_img_path, as_array=False)\n atlas_scale = morphed_atlas.header.get_zooms()\n morphed_atlas = morphed_atlas.get_data()\n boundaries_mask = sk_segmentation.find_boundaries(morphed_atlas, mode='inner')\n boundaries = morphed_atlas * boundaries_mask\n bio.to_nii(boundaries, self.outlines_file_path, scale=atlas_scale)", "def _draw_border(self, grid):\n # Left and Right border\n for i, x in enumerate(grid):\n x[0] = x[len(grid) - 1] = self._wall_color\n grid[i] = x\n\n # Top and Bottom border\n grid[0] = grid[len(grid) - 1] = [self._wall_color for _ in range(len(grid))]\n return grid", "def test_drawSolid(self):\n\n image_name = filename(sys._getframe().f_code.co_name)\n result_file, reference_file = get_path(image_name)\n\n ''' This function is to create an empty image with a specific dimension\n with white background, and black/white colored '''\n\n image, canvas = get_image('RGB', (640, 480), 'white')\n\n '''\n for different representations of colors see\n \"https://pillow.readthedocs.io/en/3.0.x/reference/ImageColor.html#color-names\"\n '''\n 
drawSolid(canvas, regularPolygon(3, np.array([160, 120]), 50), 'red')\n drawSolid(canvas, regularPolygon(4, np.array([480, 120]), 90), 'blue')\n drawSolid(canvas, regularPolygon(5, np.array([420, 360]), 60), 'green')\n drawSolid(canvas, regularPolygon(6, np.array([160, 360]), 80), 'black')\n drawSolid(canvas, regularPolygon(7, np.array([320, 160]), 70), 'brown')\n\n \"\"\" saving the file and closing it \"\"\"\n image.save(result_file)\n image.close()\n\n \"\"\" validate the resultant file against the reference images\"\"\"\n\n validate(reference_file, result_file)", "def squarePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n for edges in SQUARE[\"EDGES\"]:\n for edge in edges:\n\n point = OpenMaya.MVector(edge[0], edge[1], edge[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n \n \n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n for polygons in SQUARE[\"POLYGONS\"]:\n for polygon in polygons:\n\n point = OpenMaya.MVector(polygon[0], polygon[1], polygon[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def generatePolygons():", "def borders(w, h):\r\n pygame.draw.line(window, WHITE, [25, 0], [25, h - 50], 6)\r\n pygame.draw.line(window, WHITE, [w - 25, 0], [w - 25, h - 50], 6)\r\n pygame.draw.line(window, WHITE, [25, h - 50], [w - 25, h - 50], 6)\r\n pygame.draw.line(window, WHITE, [25, 25], [w - 25, 25], 6)", "def render_tiles(self, tiles):\n for row in tiles:\n for tile in row:\n if tile is not None:\n if tile.height < 0:\n color = (0, 100, 0)\n else:\n z = max(0, tile.height)\n color = tuple([z * 255] * 3)\n self.surface.set_at((tile.x, tile.y), color)", "def draw_shape(self, shape):\n for row in range(len(shape.squares)):\n for col in range(len(shape.squares[0])):\n if shape.squares[row][col]:\n self.draw_square(shape.x + col, shape.y + row, shape.color)", "def basic_render(self, surface) -> None:\n if not self.visible:\n return\n l, t = self.pos\n r, b = self.get_anchor_pos(Anchor.bottom_right)\n tpos = self.get_anchor_pos(Anchor.middle)\n backcolor = (128, 128, 128)\n forecolor = {False: (255, 255, 192), True: (255, 0, 0)}\n pts = ((l, t), (r, t), (r, b), (l, b))\n pygame.draw.polygon(surface, backcolor, pts, 0)\n pygame.draw.polygon(surface, forecolor[self.hover], pts, 1)\n BitmapFont.set_colors(BitmapFont.medium, backcolor, forecolor[self.hover])\n BitmapFont.render(surface, str(self.label), BitmapFont.medium, tpos, Anchor.middle)", "def draw_main_surface(win, color, dimensions):\n width, height = dimensions\n\n pygame.draw.rect(win, color, (BORDER, BORDER,\n width*CELL_SIZE - BORDER*2,\n height*CELL_SIZE - BORDER*2))", "def render(self):\n GL.glColor(*self._color)\n\n GL.glLoadIdentity()\n GL.glTranslate(self._x, self._y, 0)\n\n GL.glBegin(GL.GL_QUADS)\n 
GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(self._width, 0, 0)\n GL.glVertex3f(self._width, self._height, 0)\n GL.glVertex3f(0, self._height, 0)\n GL.glEnd()", "def pygame_render(objects_to_draw: DrawingObjects, surface):\n\n for arc in objects_to_draw.arcs:\n pygame.draw.arc(surface, arc.color, arc.enclosing_rect, arc.angle_begin, arc.angle_end, arc.lines_thickness)\n for rect in objects_to_draw.rects:\n pygame.draw.rect(surface, rect.color, rect.shape, rect.lines_thickness)\n for circle in objects_to_draw.circles:\n pygame.draw.circle(surface, circle.color, circle.center, circle.radius, circle.line_thickness)\n for a_line in objects_to_draw.lines:\n pygame.draw.line(surface, a_line.color, a_line.begin, a_line.end, a_line.line_thickness)" ]
[ "0.61772597", "0.604624", "0.5954987", "0.59350353", "0.58965117", "0.58747965", "0.5859048", "0.58586574", "0.5853385", "0.5800375", "0.5792888", "0.5761924", "0.5725997", "0.5725489", "0.5707735", "0.56973356", "0.56697094", "0.56612206", "0.5657879", "0.56512725", "0.5595216", "0.55950797", "0.5586739", "0.5586686", "0.5583009", "0.55643094", "0.555917", "0.55315655", "0.55153537", "0.55125475" ]
0.6801152
0
Copy the score to the NN after each tick.
def update(self, game):
    super().update(game)
    self.nn_def.set_score(self.score)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateScore(self, score):\n self.__score += score", "def update_score():\n pass", "def update_score(self):\n self.score = TurboMQ.calculate_fitness(self.result, self.graph)", "def update_score(self, board):\n self._score += 1", "def update_score(self):\n td = self.created - datetime.datetime(1970, 1, 1)\n epoch_seconds = td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)\n order = math.log(max(abs(self.points), 1), 10)\n sign = 1 if self.points > 0 else -1 if self.points < 0 else 0\n seconds = epoch_seconds - 1134028003\n self.score = round(order + sign * seconds / 45000, 7)", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def set_score(self, points):\n self.score += points", "def forward_train(self, cls_score, gt_label):\n if self._do_squeeze:\n cls_score = cls_score.unsqueeze(0).squeeze()\n return super().forward_train(cls_score, gt_label)", "def reset_score(self):\n self.x_score = 0\n self.o_score = 0", "def reset_score(self):\n self._score = p.params['initial_score']", "def update_scores(self):\n self.score[0] = (-1)*sum(self.board[self.board == -1])\n self.score[1] = sum(self.board[self.board == 1])\n #self.score[i] = sum(1 for j in range(len(stones_on_board)) if stones_on_board[j] == i)", "def adjust_score(self):\n self.score += game.temporary_score", "def _tally(self, score):\n self._score[self._turn] += score", "def update_score(self, score: int) -> int:\n self.score += score\n return self.score", "def l_point(self):\n self.l_score += 1\n self.update_scoreboard()", "def set_score(self,score):\n self._score = score", "def r_point(self):\n self.r_score += 1\n self.update_scoreboard()", "def update_turn_score(self, score):\n\n # Increment the attribute by the passed value\n self._current_score += score", "def __call__(self, score, model):\n if self.best_score is None:\n # assign the best score and save the model at the end of the first epoch\n self.best_score = score\n self.save_checkpoint(model)\n elif score < self.best_score + self.delta:\n # if the score not increase of at least delta, increment the counter and if it reach the patience early stops\n self.counter += 1\n if self.counter >= self.patience:\n self.early_stop = True\n else:\n # otherwise the score is better that the saved one, so replace the best score and save the model\n self.best_score = score\n self.save_checkpoint(model)\n self.counter = 0", "def l_point(self):\n self.l_score += 1\n self.update()", "def set_token_score(self, _score: Address) -> None:\n if self.msg.sender == self.owner:\n self._token_score.set(_score)", "def set_score(self, score):\n self._score = score", "def add_score(self, score):\n self._score += score", "def increase_score(self):\n self.score += 1", "def set_score(self, change):\n self._score = self._score + change", "def new_epoch(self):\n self._curr_batch = 0\n if self.shuffle_order:\n self.shuffle()", "def update_score(self, current_connection, number, thread_lock):\n thread_lock.acquire()\n current_connection['score'] += number\n thread_lock.release()", "def r_point(self):\n self.r_score += 1\n self.update()" ]
[ "0.64578915", "0.6423904", "0.6408726", "0.62740093", "0.61973757", "0.6169047", "0.6169047", "0.6169047", "0.60679024", "0.60264367", "0.6015221", "0.59940284", "0.5993987", "0.5973198", "0.59721255", "0.5949299", "0.5942236", "0.5902771", "0.58866864", "0.5841765", "0.5836997", "0.5812782", "0.58078575", "0.5805118", "0.5788406", "0.57860166", "0.5779127", "0.57698095", "0.57654506", "0.57604194" ]
0.7277563
0
Set suffixes if they begin with a +
def set_suffixes(args):
    return [arg[1:] for arg in args if arg[0] == '+']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removesuffix(self, x) -> String:\n pass", "def suffix_replace(original, old, new):\n ...", "def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest", "def setSuffixes(self, s):\n return self._set(suffixes=s)", "def prefix_suffix_modify():\n prefixes = \"JKLMNOPQ\"\n suffix = \"ack\"\n for letter in prefixes:\n if letter == \"O\" or letter == \"Q\":\n print(letter + \"u\" + suffix)\n else:\n print(letter + suffix)", "def add_suffix(name: str, suffix: str):\n return f'{name}_{suffix}'", "def add_suffix(in_image,\n suffix_str):\n bandnames = in_image.bandNames().map(lambda elem: ee.String(elem).toLowerCase().cat('_').cat(suffix_str))\n nb = bandnames.length()\n return in_image.select(ee.List.sequence(0, ee.Number(nb).subtract(1)), bandnames)", "def apply_prefix(string):\n for short, long in PREFIXES.items():\n if string.startswith(long):\n return short + ':' + string[len(long):]\n return string", "def strip_suffix(s, suffixes):\n for suffix in suffixes:\n if s.endswith(suffix):\n return s.rstrip(suffix)\n return s", "def suffix(string, suffix, sep = '_'):\n if suffix == 'production':\n suffixed = string\n else:\n suffixed = string + sep + suffix\n return suffixed", "def _replace_suffix(self, word, suffix, replacement):\n assert word.endswith(suffix), \"Given word doesn't end with given suffix\"\n if suffix == \"\":\n return word + replacement\n else:\n return word[: -len(suffix)] + replacement", "def strip_any_ends(s: str, prefixes: Union[str, Sequence[str]], suffixes: Union[str, Sequence[str]]) -> str:\n\t\tprefixes = [str(z) for z in prefixes] if StringTools.is_true_iterable(prefixes) else [str(prefixes)]\n\t\tsuffixes = [str(z) for z in suffixes] if StringTools.is_true_iterable(suffixes) else [str(suffixes)]\n\t\ts = str(s)\n\t\tfor pre in prefixes:\n\t\t\tif s.startswith(pre):\n\t\t\t\ts = s[len(pre):]\n\t\tfor suf in suffixes:\n\t\t\tif s.endswith(suf):\n\t\t\t\ts = s[:-len(suf)]\n\t\treturn s", "def addSuffixes(self, alist):\n for i, (word, filename) in enumerate(alist):\n withsuffix = self._findVideoFile(filename)\n alist[i] = (word, withsuffix)\n return alist", "def replace_suffix (name, new_suffix):\n assert isinstance(name, basestring)\n assert isinstance(new_suffix, basestring)\n split = os.path.splitext (name)\n return split [0] + new_suffix", "def setSuffix(self, value):\n return self._set(suffix=value)", "def prefix_replace(original, old, new):\n ...", "def test_add_filename_suffix(self):\r\n self.assertEqual(add_filename_suffix('/foo/bar/baz.txt', 'z'),\r\n 'bazz.txt')\r\n self.assertEqual(add_filename_suffix('baz.txt', 'z'),\r\n 'bazz.txt')\r\n self.assertEqual(add_filename_suffix('/foo/bar/baz', 'z'),\r\n 'bazz')\r\n self.assertEqual(add_filename_suffix('baz', 'z'),\r\n 'bazz')\r\n self.assertEqual(add_filename_suffix('/baz.fasta.txt', 'z'),\r\n 'baz.fastaz.txt')\r\n self.assertEqual(add_filename_suffix('baz.fasta.txt', 'z'),\r\n 'baz.fastaz.txt')\r\n self.assertEqual(add_filename_suffix('/foo/', 'z'), 'z')", "def strip_suffix(value:str, suffixes:Iterable[str]) -> Tuple[str, bool]:\n for suffix in suffixes:\n if value.endswith(suffix):\n return value[:len(value) - len(suffix)], True\n return value, False", "def _extend_with_prefix(base, extensions, prefix):\n for key, value in extensions.items():\n base[prefix + key] = value", "def add_suffix_to_filename(filename, suffix):\n name, ext = os.path.splitext(filename)\n return ''.join([name, suffix, ext])", "def replsuffix(files, 
suffix):\n\toutfiles = []\n\tif suffix is None: return\n\tif type(files) is type(\"\"):\n\t\tfiles = [files]\n\tfor f in files:\n\t\tfname, ext = os.path.splitext(f)\n\t\tnewfname = fname + suffix\n\t\toutfiles.append(newfname)\n\treturn outfiles", "def normalize_suffix_1(string, logger_=_LOGGER):\n numbers_end_string_regex = r\"(\\d+$)\"\n count_regex = r\"(_\\d+_\\D+)\"\n match = re.search(numbers_end_string_regex, string)\n # If we find a number in the suffix of the string we delete it. And\n # generate the correct count and put in the correct place in the string.\n if match:\n logger.log(\n level=\"warning\",\n message='Suffix of string \"'\n + string\n + '\" should not have a number. Numbers removed from the suffix',\n logger=logger_,\n )\n instance = match.groups()[0]\n string = re.sub(numbers_end_string_regex, \"\", string)\n count_match = re.search(count_regex, string)\n instance_ = count_match.groups()[0]\n count_list = [str_ for str_ in instance_.split('_') if str_]\n new_count = int(count_list[0]) + int(instance)\n new_count = '_{}_{}'.format(new_count, count_list[1])\n string = string.replace(instance_, new_count)\n return string", "def strip_optional_suffix(string, suffix):\n if string.endswith(suffix):\n string = string[:-len(suffix)]\n return string", "def __extend_uri(prefixes, short):\n for prefix in prefixes:\n if short.startswith(prefix):\n return short.replace(prefix + ':', prefixes[prefix])\n return short", "def add_suffix(filenames, suffix):\n\n new_filenames = set([])\n # loop over the list of files and add the suffix\n for name in filenames:\n new_filenames.add(name + \".\" + suffix)\n #print \"filenames = \"\n #print name + \".\" + suffix\n \n return new_filenames", "def AddStringPrefixOrSuffixToSelected(rig, insert_text, is_suffix):\n\n hierarchy_mod = rig.get_hierarchy_modifier()\n selection = hierarchy_mod.get_selection()\n\n if not selection:\n\n return\n\n for item in selection:\n\n src_name = str(item.get_editor_property(\"name\"))\n\n new_name = \"{0}_{1}\".format(insert_text, src_name)\n\n if is_suffix:\n \n new_name = \"{0}_{1}\".format(src_name, insert_text)\n\n hierarchy_mod.rename_element(item, new_name)", "def add_suffix(self, suffix):\n # Append the suffix vowel to this WordForm.\n self.segments.append(Segment.new_segment(suffix))", "def suffix(rem):\n if rem == 0:\n suf = ''\n else:\n if rem <= 600: #Class A suffix -- only letters.\n rem = rem - 1\n suf = base34[rem // 25]\n if rem % 25 > 0:\n suf = suf + base34[rem % 25 - 1]# second class A letter, if present.\n else: #rems > 600 : First digit of suffix is a number. Second digit may be blank, letter, or number.\n rem = rem - 601\n suf = base10[rem // 35]\n if rem % 35 > 0:\n suf = suf + base34[rem % 35 - 1]\n return suf", "def check_suffix(custom_str: str) -> bool:\r\n\r\n if custom_str.startswith(\"-\"):\r\n return True\r\n if len(custom_str) < 4:\r\n custom_str = custom_str.lower()\r\n for c in ASCII_LOWER:\r\n if c in custom_str:\r\n return True\r\n return False", "def is_suffix(suffix: str, word: str):\n return word.endswith(suffix)" ]
[ "0.6363864", "0.6346127", "0.6332669", "0.61678916", "0.6081603", "0.59654105", "0.59258044", "0.5911821", "0.586002", "0.585793", "0.58109426", "0.57019335", "0.56996137", "0.5689837", "0.5645532", "0.5638323", "0.56038296", "0.5587256", "0.5583112", "0.5580947", "0.5578123", "0.55541974", "0.5540938", "0.5531577", "0.54994184", "0.5473115", "0.5464155", "0.54583603", "0.5427856", "0.54273105" ]
0.75712377
0
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is the aspect ratio
def convert_bbox_to_z(bbox): w = bbox[2] - bbox[0] h = bbox[3] - bbox[1] x = bbox[0] + w / 2. y = bbox[1] + h / 2. s = w * h # scale is just area r = w / float(h) return np.array([x, y, s, r]).reshape((4, 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_bbox_to_z(bbox):\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w/2.\n y = bbox[1] + h/2.\n s = w * h #scale is just area\n r = w / float(h)\n return np.array([x, y, s, r]).reshape((4, 1))", "def convert_bbox_to_z(bbox):\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w / 2.\n y = bbox[1] + h / 2.\n s = w * h # scale is just area\n r = w / float(h)\n return np.array([x, y, s, r], dtype=np.float32).reshape((4, 1))", "def convert_bbox_to_z(bbox):\n # Asignamos cada variable del vector a una variable descriptiva\n xmin = bbox[0]\n ymin = bbox[1]\n xmax = bbox[2]\n ymax = bbox[3]\n\n # Obtenemos los valores para el vector de estado\n width = xmax-xmin\n height = ymax-ymin\n centroid_x_location = xmin+width/2.\n centroid_y_location = ymin+height/2.\n scale = width*height #scale is just area\n aspect_ratio = width/float(height)\n\n return np.array([centroid_x_location,centroid_y_location,scale,aspect_ratio]).reshape((4,1)) # Convert to column vector", "def square_boundaries(px , py, pz, incx, incy, incz, min_x, min_y, min_z, max_x, max_y, max_z):\n\n if px < min_x or px > max_x: \n pcx = px - incx \n\n if py < min_y or py > max_y:\n pcy = py - incy \n\n if pz < min_z or pz > max_z:\n pcz = pz - incz \n\n return pcx, pcy, pcz", "def bounding_box(points):\n x, y, z = zip(*points)\n min_x = min(x)\n max_x = max(x)\n min_y = min(y)\n max_y = max(y)\n min_z = min(z)\n max_z = max(z)\n return [\n [min_x, min_y, min_z],\n [max_x, min_y, min_z],\n [max_x, max_y, min_z],\n [min_x, max_y, min_z],\n [min_x, min_y, max_z],\n [max_x, min_y, max_z],\n [max_x, max_y, max_z],\n [min_x, max_y, max_z],\n ]", "def bounding_box(points):\n x, y, z = zip(*points)\n min_x = min(x)\n max_x = max(x)\n min_y = min(y)\n max_y = max(y)\n min_z = min(z)\n max_z = max(z)\n return [(min_x, min_y, min_z),\n (max_x, min_y, min_z),\n (max_x, max_y, min_z),\n (min_x, max_y, min_z),\n (min_x, min_y, max_z),\n (max_x, min_y, max_z),\n (max_x, max_y, max_z),\n (min_x, max_y, max_z)]", "def create_box(self, a, b, c):\n proj_to_xy = lambda x: x[:2]\n get_angle = lambda x,y: (x @ y) / (np.linalg.norm(x) * np.linalg.norm(y))\n\n ab = proj_to_xy(b) - proj_to_xy(a)\n ac = proj_to_xy(c) - proj_to_xy(a)\n bc = proj_to_xy(c) - proj_to_xy(b)\n\n ab_ac = np.abs(get_angle(ab, ac))\n ab_bc = np.abs(get_angle(ab, bc))\n\n x1, y1, z1 = a\n x2, y2, z2 = b\n x3, y3, z3 = c\n\n z = (z1 + z2)/2\n\n down = np.array([0., 0., z - z3])\n\n if ab_ac < ab_bc: # 3. point is bottom-left\n back = np.array([ac[0], ac[1], 0])\n else: # 3. 
point is bottom-right\n back = np.array([bc[0], bc[1], 0])\n\n tfl = np.array([x1, y1, z])\n tfr = np.array([x2, y2, z])\n\n tbl = tfl + back\n tbr = tfr + back\n\n bfl = tfl - down\n bfr = tfr - down\n\n bbl = bfl + back\n bbr = bfr + back\n\n return np.array([\n tfl, tfr,\n tbl, tbr,\n bfl, bfr,\n bbl, bbr\n ])", "def calculate_bounding_box(coordinates, backbone_trace = []):\n coords = numpy.array(coordinates)\n if(len(backbone_trace)>0):\n [max_x,max_y,max_z] = numpy.max([numpy.max(numpy.max(coords,1),0).tolist()]+[numpy.max(backbone_trace,0).tolist()],0)\n [min_x,min_y,min_z] = numpy.min([numpy.min(numpy.min(coords,1),0).tolist()]+[numpy.min(backbone_trace,0).tolist()],0)\n else:\n [max_x,max_y,max_z] = numpy.max(numpy.max(coords,1),0)\n [min_x,min_y,min_z] = numpy.min(numpy.min(coords,1),0)\n\n center = numpy.array([min_x,min_y,min_z]) + ((numpy.array([max_x,max_y,max_z])-numpy.array([min_x,min_y,min_z])) /2.)\n return ([[max_x, max_y, max_z],\n [max_x, max_y, min_z],\n [max_x, min_y, max_z],\n [max_x, min_y, min_z],\n [min_x, max_y, max_z],\n [min_x, max_y, min_z],\n [min_x, min_y, max_z],\n [min_x, min_y, min_z]], center.tolist(), [max_x,max_y,max_z])", "def box2cs(box):\r\n input_size = (256, 256)\r\n\r\n x, y, w, h = box[:4]\r\n aspect_ratio = input_size[0] / input_size[1]\r\n center = np.array([x + w * 0.5, y + h * 0.5], dtype=np.float32)\r\n\r\n if w > aspect_ratio * h:\r\n h = w * 1.0 / aspect_ratio\r\n elif w < aspect_ratio * h:\r\n w = h * aspect_ratio\r\n\r\n # pixel std is 200.0\r\n scale = np.array([w / 200.0, h / 200.0], dtype=np.float32)\r\n\r\n scale = scale * 1.25\r\n\r\n return center, scale", "def find_square_box(box):\n width = box['bottom_right_x'] - box['top_left_x']\n height = box['bottom_right_y'] - box['top_left_y']\n if width <= height:\n offset = int((width - height) / 2)\n box['top_left_x'] = box['top_left_x'] - offset\n box['bottom_right_x'] = box['bottom_right_x'] + offset\n else:\n offset = int((height - width) / 2)\n box['top_left_y'] = box['top_left_y'] - offset\n box['bottom_right_y'] = box['bottom_right_y'] + offset\n return box", "def getBoundingBoxCenter(self, shell=False, *args, **kwargs):\n if shell:\n self.grabShell()\n uvBB = pm.polyEvaluate(boundingBoxComponent2d=True)\n uvCenter = [((uvBB[0][1] + uvBB[0][0]) / 2), ((uvBB[1][1] + uvBB[1][0]) / 2)]\n return uvCenter", "def calculate_box(vertices: [[float]]) -> [float]:\n x_coords = [x[0] for x in vertices]\n y_coords = [x[1] for x in vertices]\n z_coords = [x[2] for x in vertices]\n\n return [min(x_coords), min(y_coords), min(z_coords), max(x_coords), max(y_coords), max(z_coords)]", "def boundingBoxArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)", "def get_boundingbox(face, width, height, scale=1.3, minsize=None):\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n size_bb = int(max(x2 - x1, y2 - y1) * scale)\n if minsize:\n if size_bb < minsize:\n size_bb = minsize\n center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\n\n # Check for out of bounds, x-y top left corner\n x1 = max(int(center_x - size_bb // 2), 0)\n y1 = max(int(center_y - size_bb // 2), 0)\n # Check for too big bb size for given x, y\n size_bb = min(width - x1, size_bb)\n size_bb = min(height - y1, size_bb)\n\n return x1, y1, size_bb", "def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, 
x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox", "def _get_center_coordinates_and_sizes_vector(box_data):\n ymin, xmin, ymax, xmax = [np.squeeze(i) for i in np.split(box_data, 4, 0)]\n width = np.subtract(xmax, xmin)\n height = np.subtract(ymax, ymin)\n ycenter = np.add(ymin, np.multiply(height, 0.5))\n xcenter = np.add(xmin, np.multiply(width, 0.5))\n return ycenter, xcenter, height, width", "def center(self):\n bounds = self.bounds\n x = (bounds[1] + bounds[0]) / 2\n y = (bounds[3] + bounds[2]) / 2\n z = (bounds[5] + bounds[4]) / 2\n return [x, y, z]", "def _compute_bounding_box(self, points_2d):\n max_x = max(map(lambda point: int(point[0]), points_2d))\n min_x = min(map(lambda point: int(point[0]), points_2d))\n max_y = max(map(lambda point: int(point[1]), points_2d))\n min_y = min(map(lambda point: int(point[1]), points_2d))\n\n width = max_x - min_x + 1\n height = max_y - min_y + 1\n\n return [min_x, min_y, width, height]", "def extract_bounding_boxes(self, scene):\n objs = scene[\"objects\"]\n rotation = scene[\"directions\"][\"right\"]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n\n for i, obj in enumerate(objs):\n [x, y, z] = obj[\"pixel_coords\"]\n\n [x1, y1, z1] = obj[\"3d_coords\"]\n\n cos_theta, sin_theta, _ = rotation\n\n x1 = x1 * cos_theta + y1 * sin_theta\n y1 = x1 * -sin_theta + y1 * cos_theta\n\n height_d = 6.9 * z1 * (15 - y1) / 2.0\n height_u = height_d\n width_l = height_d\n width_r = height_d\n\n if obj[\"shape\"] == \"cylinder\":\n d = 9.4 + y1\n h = 6.4\n s = z1\n\n height_u *= (s * (h / d + 1)) \\\n / ((s * (h / d + 1)) - (s * (h - s) / d))\n height_d = height_u * (h - s + d) / (h + s + d)\n\n width_l *= 11 / (10 + y1)\n width_r = width_l\n\n if obj[\"shape\"] == \"cube\":\n height_u *= 1.3 * 10 / (10 + y1)\n height_d = height_u\n width_l = height_u\n width_r = height_u\n\n ymin.append((y - height_d) / 320.0)\n ymax.append((y + height_u) / 320.0)\n xmin.append((x - width_l) / 480.0)\n xmax.append((x + width_r) / 480.0)\n\n return xmin, ymin, xmax, ymax", "def box(self):\n r2 = self.radius\n res = [self.x - r2, self.y - r2, self.x + r2, self.y + r2]\n return res", "def bounding_box(self):\n latlon00 = self.ij_to_latlon(-1,-1)\n latlon01 = self.ij_to_latlon(-1,self.domain_size[1]+1)\n latlon11 = self.ij_to_latlon(self.domain_size[0]+1,self.domain_size[1]+1)\n latlon10 = self.ij_to_latlon(self.domain_size[0]+1,-1)\n return (latlon00,latlon01,latlon11,latlon10)", "def bbox_3D(img):\n\tr = np.any(img, axis=(1, 2))\n\tc = np.any(img, axis=(0, 2))\n\tz = np.any(img, axis=(0, 1))\n\n\trmin, rmax = np.where(r)[0][[0, -1]]\n\tcmin, cmax = np.where(c)[0][[0, -1]]\n\tzmin, zmax = np.where(z)[0][[0, -1]]\n\n\treturn rmin, rmax, cmin, cmax, zmin, zmax", "def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)", "def zone_from_center_size(x, y, size):\n half_size = size // 2\n size = half_size * 2\n x1 = x - half_size\n x2 = x + half_size -1\n if x1 < 0:\n x1 = 0\n x2 = size - 1\n if x2 >= w:\n x2 = w - 1\n x1 = w - size\n y1 = y - half_size\n y2 = y + half_size\n if y1 < 0:\n y1 = 0\n y2 = size - 1\n if y2 >= h:\n y2 = h - 1\n y1 = h - size\n return [x1, y1, x2, y2]", "def get_bounds(shape, affine):\n adim, bdim, cdim = shape\n adim -= 1\n bdim -= 1\n cdim -= 1\n # form a collection of vectors for each 8 corners of the box\n box = 
np.array([[0., 0, 0, 1],\n [adim, 0, 0, 1],\n [0, bdim, 0, 1],\n [0, 0, cdim, 1],\n [adim, bdim, 0, 1],\n [adim, 0, cdim, 1],\n [0, bdim, cdim, 1],\n [adim, bdim, cdim, 1]]).T\n box = np.dot(affine, box)[:3]\n return zip(box.min(axis=-1), box.max(axis=-1))", "def bounding_box(points):\n x, y, w, h = cv2.boundingRect(np.array([p for p in points]))\n bounding = Box(x, y, w, h)\n return bounding", "def scale_box(box, img_size):\n xscale = img_size[0] / FLAGS.size\n yscale = img_size[1] / FLAGS.size\n x0, y0, x1, y1 = box\n return [\n float(x0) * xscale,\n float(y0) * yscale,\n float(x1) * xscale,\n float(y1) * yscale,\n ]", "def boundingBox(self):\n y_max = np.max(self.points[:,0])\n x_max = np.max(self.points[:,1])\n y_min = np.min(self.points[:,0])\n x_min = np.min(self.points[:,1])\n \n return ((x_max, y_max), (x_min, y_min))", "def rectCenter(rect):\n return np.array([rect[:2].mean(), rect[2:].mean()])", "def computeBox(a):\n xmin , ymin = a[:,0].min(), a[:,1].min()\n xmax , ymax = a[:,0].max(), a[:,1].max()\n\n return xmin, ymin, xmax-xmin, ymax-ymin" ]
[ "0.70684916", "0.6832517", "0.67437243", "0.67029804", "0.667289", "0.65944505", "0.6579306", "0.63725936", "0.63596195", "0.63322043", "0.6323071", "0.62650836", "0.6200155", "0.6196621", "0.6168873", "0.6150329", "0.6144695", "0.6138646", "0.61194324", "0.61101246", "0.6088846", "0.6066564", "0.60621035", "0.60596895", "0.60347575", "0.6001923", "0.59799397", "0.59779906", "0.5976275", "0.5968434" ]
0.6905524
1
Decorator for exposing a method as an RPC call with the given signature.
def expose_rpc(permission, return_type, *arg_types): def decorator(func): if not hasattr(func, '_xmlrpc_signatures'): func._xmlrpc_signatures = [] func._xml_rpc_permission = permission func._xmlrpc_signatures.append((return_type,) + tuple(arg_types)) return func return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rpc_call(func):\n @wraps(func)\n def decorator(*args, **kwargs):\n return func(*args, **kwargs)\n decorator.rpc_call = True\n return decorator", "def rpc_method(func):\n func.rpc_callable = True\n return func", "def rpcmethod(func):\n func.rpcmethod = True\n return func", "def xmlrpc_method(returns='string', args=None, name=None):\r\n # Args should be a list\r\n if args is None:\r\n args = []\r\n\r\n def _xmlrpc_func(func):\r\n \"\"\"Inner function for XML-RPC method decoration. Adds a signature to\r\n the method passed to it.\r\n\r\n func\r\n The function to add the signature to\r\n \"\"\"\r\n # Add a signature to the function\r\n func._xmlrpc_signature = {\r\n 'returns': returns,\r\n 'args': args\r\n }\r\n return func\r\n\r\n return _xmlrpc_func", "def _call(self, rpc_method_name, *args, **kwargs):\n method = getattr(self, rpc_method_name)\n return method(*args, **kwargs)", "def ProtoRPCServiceMethod(method):\n\n def wrapper(self, request):\n assert isinstance(request, wrapper.rpc_method_spec.request_type)\n logging.info(\"Request:\\n%s\", request)\n response = method(self, request)\n assert isinstance(response, wrapper.rpc_method_spec.response_type)\n logging.info(\"Response:\\n%s\", response)\n return response\n\n # Since the service's descriptor will be parsed when the class is created,\n # which is later than the invocation time of this decorator, here it just\n # place the placeholder with dummy contents.\n wrapper.rpc_method_spec = _ProtoRPCServiceMethodSpec(None, None)\n return wrapper", "def __getattr__(self, name):\n name = name.replace('_', '-')\n\n def wrapper(*args, **kwargs):\n if len(args) != 0 and len(kwargs) != 0:\n raise RpcError(name, {}, \"Cannot mix positional and non-positional arguments\")\n elif len(args) != 0:\n return self.call(name, payload=args)\n else:\n return self.call(name, payload=list(kwargs.values()))\n return wrapper", "def client_member_function(self, method: ProtoServiceMethod) -> None:", "def proxy_method(self, rest_path, sign, kwargs):", "def remoteboundmethod(func):\n def new_func(self, *args, **kwargs):\n try:\n rpcclt = self.editwin.flist.pyshell.interp.rpcclt\n except AttributeError:\n rpcclt = None\n\n if rpcclt:\n return rpcclt.run_extension_function(self.__class__.__name__, func.__name__, args, kwargs)\n else:\n return func(self, *args, **kwargs)\n new_func.orig_func = func\n return new_func", "def rpc_call(self, request, method=None, params=None, **kwargs):\r\n args = []\r\n kwargs = dict()\r\n if isinstance(params, dict):\r\n kwargs.update(params)\r\n else:\r\n args = list(as_tuple(params))\r\n\r\n method_key = \"{0}.{1}\".format(self.scheme_name, method)\r\n if method_key not in self.methods:\r\n raise AssertionError(\"Unknown method: {0}\".format(method))\r\n method = self.methods[method_key]\r\n\r\n if hasattr(method, 'request'):\r\n args.insert(0, request)\r\n\r\n return method(*args, **kwargs)", "def rpc_immediate(func):\n decorator = rpc_call(func)\n decorator.rpc_immediate = True\n return decorator", "def _xmlrpc_func(func):\r\n # Add a signature to the function\r\n func._xmlrpc_signature = {\r\n 'returns': returns,\r\n 'args': args\r\n }\r\n return func", "def api_method(func):\n @wraps(func)\n def decorator(self, return_request_args=False, *args, **kwargs):\n request_args = func(self, *args, **kwargs)\n request_args.update({\n 'method': '{module}.{method}'.format(\n module=self.__class__.__name__,\n method=func.__name__)})\n request_args = self._preprocess(request_args)\n if return_request_args:\n return request_args\n 
else:\n return self.pa.request(**request_args)\n return decorator", "def decorate_HTTP_verb_method(method):\n @functools.wraps(method)\n def wrapper(self, RIC_base_uri, **kwargs):\n partition = kwargs.pop('partition', '')\n name = kwargs.pop('name', '')\n sub_path = kwargs.pop('subPath', '')\n suffix = kwargs.pop('suffix', '')\n uri_as_parts = kwargs.pop('uri_as_parts', False)\n if uri_as_parts:\n REST_uri = generate_bigip_uri(RIC_base_uri, partition, name,\n sub_path, suffix, **kwargs)\n else:\n REST_uri = RIC_base_uri\n pre_message = \"%s WITH uri: %s AND suffix: %s AND kwargs: %s\" %\\\n (method.__name__, REST_uri, suffix, kwargs)\n logging.debug(pre_message)\n response = method(self, REST_uri, **kwargs)\n post_message =\\\n \"RESPONSE::STATUS: %s Content-Type: %s Content-Encoding:\"\\\n \" %s\\nText: %r\" % (response.status_code,\n response.headers.get('Content-Type', None),\n response.headers.get('Content-Encoding', None),\n response.text)\n logging.debug(post_message)\n if response.status_code not in range(200, 207):\n error_message = '%s Unexpected Error: %s for uri: %s\\nText: %r' %\\\n (response.status_code,\n response.reason,\n response.url,\n response.text)\n raise iControlUnexpectedHTTPError(error_message, response=response)\n return response\n return wrapper", "def ipcmethod(fn):\n def proxy_fn(inst, *args, **kwargs):\n # pylint: disable=protected-access\n return inst._ipc_call(fn.__name__, *args, **kwargs)\n proxy_fn.orig_fn = fn\n return proxy_fn", "def call(self, method, name, params=None, payload=None, **kwds):", "def make_xml_rpc_api_call(uri, method, args=None, headers=None,\r\n http_headers=None, timeout=None, proxy=None):\r\n if args is None:\r\n args = tuple()\r\n try:\r\n largs = list(args)\r\n largs.insert(0, {'headers': headers})\r\n\r\n payload = xmlrpc_client.dumps(tuple(largs),\r\n methodname=method,\r\n allow_none=True)\r\n session = requests.Session()\r\n req = requests.Request('POST', uri, data=payload,\r\n headers=http_headers).prepare()\r\n LOGGER.debug(\"=== REQUEST ===\")\r\n LOGGER.info('POST %s', uri)\r\n LOGGER.debug(req.headers)\r\n LOGGER.debug(payload)\r\n\r\n response = session.send(req,\r\n timeout=timeout,\r\n proxies=_proxies_dict(proxy))\r\n LOGGER.debug(\"=== RESPONSE ===\")\r\n LOGGER.debug(response.headers)\r\n LOGGER.debug(response.content)\r\n response.raise_for_status()\r\n result = xmlrpc_client.loads(response.content,)[0][0]\r\n return result\r\n except xmlrpc_client.Fault as ex:\r\n # These exceptions are formed from the XML-RPC spec\r\n # http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php\r\n error_mapping = {\r\n '-32700': NotWellFormed,\r\n '-32701': UnsupportedEncoding,\r\n '-32702': InvalidCharacter,\r\n '-32600': SpecViolation,\r\n '-32601': MethodNotFound,\r\n '-32602': InvalidMethodParameters,\r\n '-32603': InternalError,\r\n '-32500': ApplicationError,\r\n '-32400': RemoteSystemError,\r\n '-32300': TransportError,\r\n }\r\n raise error_mapping.get(ex.faultCode, SoftLayerAPIError)(\r\n ex.faultCode, ex.faultString)\r\n except requests.HTTPError as ex:\r\n raise TransportError(ex.response.status_code, str(ex))\r\n except requests.RequestException as ex:\r\n raise TransportError(0, str(ex))", "def callRemote(self, methname, *args, **kwargs):\n schema = self._referenceable.getInterface()[methname]\n if self.check_args:\n schema.checkAllArgs(args, kwargs, inbound=False)\n # TODO: Figure out how to call checkResults on the result.\n return execute(\n self._referenceable.doRemoteCall,\n methname,\n args,\n kwargs,\n )", "def 
xen_rpc_call(ip, method, *args):\n try:\n if not ip:\n return xen_api_error(\"Invalid ip for rpc call\")\n # create\n proxy = ServerProxy(\"http://\" + ip + \":9363/\")\n \n # login \n response = proxy.session.login('root')\n if cmp(response['Status'], 'Failure') == 0:\n log.exception(response['ErrorDescription'])\n return xen_api_error(response['ErrorDescription']) \n session_ref = response['Value']\n \n # excute\n method_parts = method.split('_')\n method_class = method_parts[0]\n method_name = '_'.join(method_parts[1:])\n \n if method.find(\"host_metrics\") == 0:\n method_class = \"host_metrics\"\n method_name = '_'.join(method_parts[2:])\n #log.debug(method_class)\n #log.debug(method_name)\n if method_class.find(\"Async\") == 0:\n method_class = method_class.split(\".\")[1]\n response = proxy.__getattr__(\"Async\").__getattr__(method_class).__getattr__(method_name)(session_ref, *args)\n else:\n response = proxy.__getattr__(method_class).__getattr__(method_name)(session_ref, *args)\n if cmp(response['Status'], 'Failure') == 0:\n log.exception(response['ErrorDescription'])\n return xen_api_error(response['ErrorDescription'])\n # result\n return response\n except socket.error:\n return xen_api_error('socket error')", "def remote_call(func):\n @func_utils.getargsfrom(func)\n def rem_func(self, *args, **kwargs):\n return self.call_in_thread_sync(func,args=(self,)+args,kwargs=kwargs,sync=True,same_thread_shortcut=True)\n return rem_func", "def wrapper_fun(*args):\n print(\"Hello Decorator\")\n return fun(*args)", "def xmlrpc_view(wrapped):\n \n def _curried(context, request):\n params, method = parse_xmlrpc_request(request)\n value = wrapped(context, *params)\n return xmlrpc_response(value)\n _curried.__name__ = wrapped.__name__\n _curried.__grok_module__ = wrapped.__module__ \n\n return _curried", "def servicemethod(*args, **kwargs):\n # Default options\n options = {'name': None, 'store': None, 'request_arg': True, 'store_arg': True}\n\n # Figure out if we were called with arguments\n # If we were called with args, ie:\n # @servicemethod(name='Foo')\n # Then the only argument here will be the pre-decorated function/method object.\n method = ( (len(args) == 1) and callable(args[0]) ) and args[0] or None\n\n if method is None:\n # We were called with args, (or @servicemethod() )\n # so figure out what they were ...\n\n # The method name should be either the first non-kwarg\n # or the kwarg 'name'\n # Example: @servicemethod('my_method', ...) or @servicemethod(name='my_method')\n options.update({\n 'name': bool(args) and args[0] or kwargs.pop('name', None),\n 'store': (len(args) >= 2) and args[1] or kwargs.pop('store', None),\n 'request_arg': kwargs.pop('request_arg', True),\n 'store_arg': kwargs.pop('store_arg', True),\n })\n else:\n options['name'] = method.__name__\n method.__servicemethod__ = options\n\n def method_with_args_wrapper(method):\n \"\"\" Wrapper for a method decorated with decorator arguments\n \"\"\"\n if options['name'] is None:\n options['name'] = method.__name__\n method.__servicemethod__ = options\n\n if options['store'] is not None:\n options['store'].service.add_method(method)\n\n return method\n\n return method or method_with_args_wrapper", "def __getattr__(self, method):\n def run_callback(func, plus, result):\n \"\"\"Execute the given callback safely.\n Get data and/or error from result and call func passing it\n data, plus (if needed) and error. 
Catch, log and suppress\n all exceptions.\n func (function): the callback to invoke.\n plus (object): optional additional data.\n result (AsyncResult): the result of a (finished) RPC call.\n \"\"\"\n data = result.value\n error = None if result.successful() else \"%s\" % result.exception\n try:\n if plus is None:\n func(data, error=error)\n else:\n func(data, plus, error=error)\n except Exception as error:\n logger.error(\"RPC callback for %s.%s raised exception.\",\n self.remote_service_coord.name, method,\n exc_info=True)\n\n def remote_method(**data):\n \"\"\"Forward arguments to execute_rpc.\n \"\"\"\n callback = data.pop(\"callback\", None)\n plus = data.pop(\"plus\", None)\n result = self.execute_rpc(method=method, data=data)\n if callback is not None:\n callback = functools.partial(run_callback, callback, plus)\n result.rawlink(functools.partial(gevent.spawn, callback))\n return result\n\n return remote_method", "def present_rpc_method(method, send_probe):\n svc_path_bk = method[\"rmtSvcIntName\"].split(\".\")[-1]\n if method[\"service\"] is None:\n svc_path_bk = svc_path_bk[0].lower() + svc_path_bk[1:]\n writer(\n f\"Warning: Unable to correlate method to a service path. Guessed /{svc_path_bk}\\n\"\n + \" - Strong name unknown - Use --svc to see options\",\n FORMAT['WARNING']\n )\n if RPC_VERSION != \"7\":\n writer(\n \"Warning: RPC body generation may be invalid - version 7 expected\"\n + f\", version {RPC_VERSION} found\",\n FORMAT['WARNING']\n )\n if len(method[\"complexTypes\"]) != 0:\n writer(\n \"Warning: Unhandled complex type found - RPC likely invalid:\\n - {}\"\n .format('\\n - '.join(method['complexTypes'])),\n FORMAT['WARNING']\n )\n\n service_path = (\n method[\"service\"][\"servicePath\"]\n if method[\"service\"] is not None\n else svc_path_bk\n )\n rpc_call = '|'.join(method[\"methodRpcCall\"]) + \"|\"\n\n writer(\n \"POST /{}{} HTTP/1.1\\r\".format(\n '/'.join(BASE_URL.split(\"/\")[3:]), service_path\n ).replace(\"//\", \"/\")\n )\n writer(f\"Host: {BASE_URL.split('/')[2]}\\r\")\n writer(f\"Content-Type: {CONTENT_TYPE}\\r\")\n writer(f\"X-GWT-Permutation: {GWT_PERMUTATION}\\r\")\n writer(f\"X-GWT-Module-Base: {BASE_URL}\\r\")\n writer(f\"Content-Length: {len(rpc_call.encode('utf-8'))}\\r\\n\\r\")\n writer(f\"{rpc_call}\\n\")\n\n if send_probe:\n url = (BASE_URL + service_path)\n send_rpc_probe(url, rpc_call)", "def callRemote(self, methname, *args, **kwargs):\n try:\n schema = self._referenceable.getInterface()[methname]\n if self.check_args:\n schema.checkAllArgs(args, kwargs, inbound=True)\n _check_copyables(list(args) + kwargs.values())\n result = self._referenceable.doRemoteCall(\n methname,\n args,\n kwargs,\n )\n schema.checkResults(result, inbound=False)\n _check_copyables([result])\n return succeed(result)\n except:\n return fail()", "def method(rtype):\n\n def decorator(func):\n argcount = func.__code__.co_argcount\n argnames = func.__code__.co_varnames[:argcount]\n ndefaults = 0\n if func.__defaults__:\n ndefaults = len(func.__defaults__)\n\n argNames = func.__code__.co_varnames[(argcount - ndefaults):]\n\n if ndefaults < (argcount - 1):\n raise cSyntaxError(\n 'Type declarations missing from arguments %(args)r in the BLM '\n 'method %(func)s().' 
% {\n 'args': list(reversed(argnames))[ndefaults:],\n 'func': func.__name__,})\n params = []\n if func.__defaults__:\n params = [ arg._instantiate(name) for arg, name in\n zip(func.__defaults__, argNames)]\n\n func.__defaults__ = None\n m = ExternalMethod(func.__name__, func)\n if rtype:\n m.rtype = rtype._instantiate('result')\n m.params = params\n\n return m\n\n return decorator", "def _remote_call(self,\n method_name,\n target='Widget',\n args=None,\n kwargs=None):\n args = [] if args is None else args\n kwargs = {} if kwargs is None else kwargs\n\n msg = {}\n\n if 'component_index' in kwargs:\n msg['component_index'] = kwargs.pop('component_index')\n if 'repr_index' in kwargs:\n msg['repr_index'] = kwargs.pop('repr_index')\n\n msg['target'] = target\n msg['type'] = 'call_method'\n msg['methodName'] = method_name\n msg['args'] = args\n msg['kwargs'] = kwargs\n\n def callback(widget, msg=msg):\n widget.send(msg)\n\n callback._method_name = method_name\n callback._ngl_msg = msg\n\n if self.loaded:\n self._remote_call_thread.q.append(callback)\n else:\n # send later\n # all callbacks will be called right after widget is loaded\n self._ngl_displayed_callbacks_before_loaded.append(callback)\n\n if callback._method_name not in _EXCLUDED_CALLBACK_AFTER_FIRING:\n self._ngl_displayed_callbacks_after_loaded.append(callback)", "def __getattr__(self, method: str):\n @exception_handler\n def func(*args, **kwargs):\n return self._client.PyCall(method, list(args), kwargs,\n self._wait_for_ready, self._call_timeout,\n self._compress)\n\n setattr(self, method, func)\n return func" ]
[ "0.72693485", "0.7231531", "0.6921579", "0.6753611", "0.6449031", "0.6424492", "0.6306513", "0.614029", "0.6127533", "0.598983", "0.5951367", "0.5933623", "0.58960724", "0.58644986", "0.581652", "0.5811746", "0.58007073", "0.5770051", "0.57460624", "0.57426834", "0.5732241", "0.5712989", "0.57001656", "0.56991273", "0.56891495", "0.56706035", "0.56485623", "0.55744207", "0.55626774", "0.5559681" ]
0.7436393
0
Serialize the result of the RPC call and send it back to the client.
def send_rpc_result(req, result):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def send_result(\n self,\n rpc_message: RpcMessage,\n result_message: ResultMessage,\n return_path: str,\n bus_client: \"BusClient\",\n ):\n raise NotImplementedError()", "def _success(self, result_ser, request):\n result = json.dumps(result_ser)\n request.write(result)\n request.finish()", "def execute_rpc(self, method, data):\n # Determine the ID.\n id_ = uuid.uuid4().hex\n\n # Build the request.\n request = {\"__id\": id_,\n \"__method\": method,\n \"__data\": data}\n\n result = gevent.event.AsyncResult()\n\n # Encode it.\n try:\n data = json.dumps(request).encode('utf-8')\n except (TypeError, ValueError):\n logger.error(\"JSON encoding failed.\", exc_info=True)\n result.set_exception(RPCError(\"JSON encoding failed.\"))\n return result\n\n # Send it.\n try:\n self._write(data)\n except IOError:\n result.set_exception(RPCError(\"Write failed.\"))\n return result\n\n # Store it.\n self.pending_outgoing_requests[id_] = request\n self.pending_outgoing_requests_results[id_] = result\n\n return result", "def RemoteCall(self, address, argDict):\r\n print(f\"Making RPC for {argDict[RPC_ARG_REQUEST_TYPE]} to {address}\")\r\n requestSocket = self.CreateAClientSocket(address)\r\n requestByteArray = pickle.dumps(argDict)\r\n requestSocket.sendall(requestByteArray)\r\n requestSocket.shutdown(socket.SHUT_WR)\r\n value = pickle.loads(requestSocket.recv(BUFFER_SIZE))\r\n requestSocket.shutdown(socket.SHUT_RD)\r\n requestSocket.close()\r\n return value", "def _r_send_result(self, response, protocol):\n #print(\"Send result: %s\" % result)\n protocol.send_message(response)", "def get_rpc_resp(self,rpc, ignore_warning, format):\n # data comes in JSON format, needs to be converted \n rpc_val = xmltodict.unparse(rpc) \n rpc_val = rpc_val.encode('utf-8')\n parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8')\n rpc_etree = etree.fromstring(rpc_val, parser=parser)\n resp = self.dev.rpc(rpc_etree, normalize=bool(format == 'xml'), ignore_warning=ignore_warning)\n if(format == 'json'):\n return resp\n return etree.tostring(resp)", "def rpc(self) -> global___Rpc:", "def _build_rpc_result(self, id, result):\n if id is None:\n return None\n\n return {\n 'jsonrpc': '2.0',\n 'id': id,\n 'result': result\n }", "def _send_response(self, result, peer):\n try:\n response = json.dumps(result).encode()\n self._socket.sendto(response, peer)\n except (ConnectionRefusedError, FileNotFoundError, PermissionError,\n TypeError):\n pass", "def serialize(self, buff):\n try:\n buff.write(_struct_B.pack(self.result))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def encode_result(value: object) -> bytes:\n raise NotImplementedError()", "def rpc_call(self, method: str, params: Optional[list] = None) -> Any:\r\n if params is None:\r\n params = []\r\n data = json.dumps({ # json string used in HTTP requests\r\n 'jsonrpc': '2.0',\r\n 'method': method,\r\n 'params': params,\r\n 'id': self.id\r\n })\r\n url = \"http://{}:{}\".format(self.ip.address, self.rpc_port)\r\n with SEMAPHORE:\r\n with requests.Session() as r:\r\n # sleep(0.01) ###\r\n response = r.post(url=url, data=data, headers=self._headers)\r\n while response.headers['Content-Type'] != 'application/json':\r\n print(self.ip.address, self.rpc_port)\r\n print(response.status_code, response.headers)\r\n print(response.content)\r\n sleep(0.05)\r\n response = r.post(url=url, data=data, headers=self._headers)\r\n content = response.json()\r\n # sleep(0.02)\r\n print(content)\r\n result = 
content.get('result')\r\n err = content.get('error')\r\n if err:\r\n raise RuntimeError(self.ip.address, self.rpc_port, err.get('message'))\r\n\r\n print('%s @%s : %s %s' % (method, self.ip.address, self.rpc_port, result))\r\n return result", "def send_output(self, result, output):\n data = pickle.dumps((result, output))\n self.wfile.write('%d\\n' % len(data))\n self.wfile.write(data)\n self.wfile.flush()", "def roundtrip(data):\r\n body = xmlrpclib.dumps(data)\r\n result = xmlrpclib.loads(body)[0]\r\n if result != data:\r\n print result", "def to_response(self):\n self.ctx[\"graph\"] = self.execute_op()\n return result_response(GraphExportCtrl.RESPONSE_SERIALIZER, self.ctx)", "def __repr__(self):\n result = json.dumps({'processed': self._processed,\n 'failed': self._failed,\n 'total': self._total,\n 'time': str(self._time),\n 'chunk': self._chunk})\n return result", "def do_rpc(self, method, **params):\n data = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'authorization': self._auth_token,\n 'id': '1'\n }\n\n r = requests.post(self._url, json=data)\n validate_response(r)\n\n return r.json()['result']", "def xmlrpc_marshal(data):\n if isinstance(data, xmlrpclib.Fault):\n return xmlrpclib.dumps(data)\n else:\n return xmlrpclib.dumps((data,), methodresponse=True)", "def __call__(self):\n params, method = parse_xmlrpc_request(self.request)\n return xmlrpc_response(getattr(self,method)(*params))", "def respond(self, result: Any) -> Optional['JSONRPCSuccessResponse']:\n if self.one_way or self.unique_id is None:\n return None\n\n response = JSONRPCSuccessResponse()\n\n response.result = result\n response.unique_id = self.unique_id\n\n return response", "async def receive_result(\n self, rpc_message: RpcMessage, return_path: str, options: dict, bus_client: \"BusClient\"\n ) -> ResultMessage:\n raise NotImplementedError()", "def P_SendResult(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def serialize(self):\n pass", "def serialize_response(self, response):\n raise NotImplementedError()", "def propagateResult(self):\n\n call = self.persisted_data\n self.result = json.dumps(CallSerializer(call, context={'request': None}).data)\n\n if call.start_timestamp and call.stop_timestamp:\n super().propagateResult()\n return True\n\n return False", "def to_response(self):\n op_result, remote_branch = self.execute_and_sync()\n if isinstance(op_result, Job):\n return result_response(MigrateProjectCtrl.JOB_RESPONSE_SERIALIZER, op_result)\n\n was_migrated, template_migrated, docker_migrated, messages, warnings, errors = op_result\n\n response = {\n \"messages\": messages,\n \"warnings\": warnings,\n \"errors\": errors,\n \"was_migrated\": was_migrated,\n \"template_migrated\": template_migrated,\n \"docker_migrated\": docker_migrated,\n \"remote_branch\": remote_branch,\n }\n\n return result_response(self.RESPONSE_SERIALIZER, response)", "def serialize(self) -> bytes:\n pass", "def serialize(self) -> bytes:\n pass", "def output(self):\r\n return self.result", "def on_success(self, res):\n if self.state == CANCELED:\n self._reject()\n return\n\n self.state = SUCCESS\n self.retries = 0\n\n # result save\n if not self.graph_uuid:\n if not self.ignore_result:\n self.app.backend_adapter.result_set(self.uuid, res)\n self._ack()\n return\n\n if not self.ignore_result:\n self.app.backend_adapter.result_set(self.uuid, res, graph_uuid=self.graph_uuid)\n\n graph_dict = 
self.app.backend_adapter.graph_get(self.graph_uuid)\n graph = Graph.from_dict(graph_dict)\n current = graph.vertex_by_uuid(self.uuid)\n current.state = self.state\n self.update_graph_states(graph, current, result=res)\n for v in graph.next_vertex(from_uuid=current.uuid):\n if v.args_from:\n if v.args_from == current.fr:\n args_from = res\n elif graph.vertex_by_uuid(v.args_from).operation == GROUP:\n args_from = self.app.backend_adapter.group_get_result(graph_uuid=graph.uuid, group_uuid=v.args_from)\n else:\n args_from_parent_vertex = graph.vertex_by_uuid(v.args_from)\n args_from_vertex = graph.vertex_by_uuid(args_from_parent_vertex.children[1]).uuid\n args_from = self.app.backend_adapter.result_get(graph_uuid=graph.uuid,\n task_uuid=args_from_vertex)\n if v.bind:\n v.args[0:0] = [args_from] if v.bind else [args_from] + v.args\n else:\n v.args = [args_from] + v.args\n v.apply(graph.uuid)\n\n # acknowledge broker message\n self._ack()" ]
[ "0.66543037", "0.6296766", "0.6142886", "0.6120978", "0.61099845", "0.60895675", "0.5985333", "0.59626645", "0.59577644", "0.59334546", "0.5809743", "0.5802393", "0.5798993", "0.5763221", "0.57578456", "0.57577705", "0.57374424", "0.5700236", "0.56850314", "0.56526905", "0.5616064", "0.5590852", "0.55696434", "0.55514044", "0.5548172", "0.552775", "0.545868", "0.545868", "0.5458492", "0.5457306" ]
0.7619752
0
Provide the namespace in which a set of methods lives. This can be overridden if the 'name' element is provided by xmlrpc_methods().
def xmlrpc_namespace():
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xmlrpc_methods():", "def namespace(self):\n raise exceptions.NotImplementedError()", "def namespaces(self):\n return ()", "def _ns(self, *args):\n return \"%s.%s\" % (self.namespace, \".\".join([str(arg) for arg in args]))", "def createNamespace(self):\r\n raise NotImplementedError('Endpoint can not be used directly.')", "def namespace(self):\n assert self._namespace\n return self._namespace", "def XmlNamespace(self) -> str:", "def gen_namespace(self, node):\n node.functions = self.define_function_suffix(node.functions)\n for ns in node.namespaces:\n self.gen_namespace(ns)", "def getNamespace(self):\n pass;", "def test_list_net_namespace(self):\n pass", "def setNamespaces(self, *args):\n return _libsbml.SBase_setNamespaces(self, *args)", "def namespaces(self):\n return [self._namespace_prefix]", "def _getnamespaces(cls):\n return \" \".join(Kmlable._namespaces)", "def ResponseXmlNamespace(self) -> str:", "def XmlTypeNamespace(self) -> str:", "def namespace (self) :\n\n return self.__namespace__", "def test_get_namespace(self):\n pass", "def xmlrpc_method(returns='string', args=None, name=None):\r\n # Args should be a list\r\n if args is None:\r\n args = []\r\n\r\n def _xmlrpc_func(func):\r\n \"\"\"Inner function for XML-RPC method decoration. Adds a signature to\r\n the method passed to it.\r\n\r\n func\r\n The function to add the signature to\r\n \"\"\"\r\n # Add a signature to the function\r\n func._xmlrpc_signature = {\r\n 'returns': returns,\r\n 'args': args\r\n }\r\n return func\r\n\r\n return _xmlrpc_func", "def test_patch_net_namespace(self):\n pass", "def getNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_getNamespaces(self, *args)", "def test_replace_net_namespace(self):\n pass", "def namespace(self) -> _iomanagers.Namespace:\n # It cannot set self.__namespace,\n # but it can function as a setter to the namespace variables.\n return self.__namespace", "def namespace(self) -> str:\n return pulumi.get(self, \"namespace\")", "def namespace(self):\n return Namespace(self)", "def namespace(self) -> str:\n return self._namespace", "def namespace(self, namespace):\n return self.client.call('GET',\n self.name, params={'namespace': namespace})", "def prefixes(self):\n # a new OntCuries-like object that wraps NamespaceManager\n # and can leverage its trie\n self.namespace_manager\n raise NotImplementedError('yet')", "def get_namespace(self) -> str:\n return self._namespace", "def getNamespaces(self):\n return _libsbml.SBase_getNamespaces(self)", "def get_rpc_method_names(self):\n return self._get_rpc_method_names()" ]
[ "0.67840374", "0.6330559", "0.60799056", "0.6017906", "0.58106625", "0.57945573", "0.5761432", "0.57473826", "0.5687743", "0.56653756", "0.5655525", "0.560176", "0.55967504", "0.54732215", "0.546771", "0.54126364", "0.5406012", "0.54021627", "0.53649825", "0.5364481", "0.5357154", "0.5349935", "0.5324016", "0.53062797", "0.5296351", "0.5293359", "0.5291297", "0.527945", "0.5234269", "0.52210444" ]
0.7973318
0
This method takes one parameter, the name of a method implemented by the RPC server. It returns a documentation string describing the use of that method. If no such string is available, an empty string is returned. The documentation string may contain HTML markup.
def methodHelp(self, req, method): p = self.get_method(method) return '\n'.join((p.signature, '', p.description))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def methodHelp(self, name):\r\n methods = self._listMethods()\r\n for methodname in methods.keys():\r\n if methodname == name:\r\n return methods[methodname]\r\n raise RPCError(Faults.SIGNATURE_UNSUPPORTED)", "def method(name, doc):\n import html\n\n params = method_params(doc)\n doc = html.escape(doc)\n return string.Template(METHOD_TEMPLATE).substitute(\n name=name, params=params, doc=doc\n )", "def brief_documentation(method: object) -> str:\n doc = method.__doc__\n if doc is not None:\n lines = doc.splitlines()\n if len(lines) > 0:\n return lines[0]\n return ''", "def Help():\n names=api_method_dict.keys()\n names.sort()\n return ''.join(['**** ' + api_method_dict[name].__name__ + '\\n' + api_method_dict[name].__doc__ + '\\n'\n for name in names])", "def method_description(self):\n pass", "def DocString():\n return", "def method_decl(self):\r\n return '\\t{\"%s\", %s, %s, \"%s\"}' % (\r\n self.name, self.name, self.method, self.doc)", "def rpc_help(self, cmd: str = None) -> str:\n if cmd:\n return self._call_command([\"help\", cmd])\n return self._call_command([\"help\"])", "def _get_doc(self, name):\r\n doc = \"No documentation for %s\" % name\r\n\r\n engine = self._engine\r\n if not engine:\r\n msg = \"Session is not open\"\r\n raise Oct2PyError(msg)\r\n doc = engine.eval('help(\"%s\")' % name, silent=True)\r\n\r\n if \"syntax error:\" in doc.lower():\r\n raise Oct2PyError(doc)\r\n\r\n if \"error:\" in doc.lower():\r\n doc = engine.eval('type(\"%s\")' % name, silent=True)\r\n doc = \"\\n\".join(doc.splitlines()[:3])\r\n\r\n default = self.feval.__doc__\r\n default = \" \" + default[default.find(\"func_args:\") :] # type:ignore\r\n default = \"\\n\".join([line[8:] for line in default.splitlines()])\r\n\r\n doc = \"\\n\".join(doc.splitlines())\r\n doc = \"\\n\" + doc + \"\\n\\nParameters\\n----------\\n\" + default\r\n doc += \"\\n**kwargs - Deprecated keyword arguments\\n\\n\"\r\n doc += \"Notes\\n-----\\n\"\r\n doc += \"Keyword arguments to dynamic functions are deprecated.\\n\"\r\n doc += \"The `plot_*` kwargs will be ignored, but the rest will\\n\"\r\n doc += \"used as key - value pairs as in version 3.x.\\n\"\r\n doc += \"Use `set_plot_settings()` for plot settings, and use\\n\"\r\n doc += \"`func_args` directly for key - value pairs.\"\r\n return doc", "def xmlrpc_method(returns='string', args=None, name=None):\r\n # Args should be a list\r\n if args is None:\r\n args = []\r\n\r\n def _xmlrpc_func(func):\r\n \"\"\"Inner function for XML-RPC method decoration. 
Adds a signature to\r\n the method passed to it.\r\n\r\n func\r\n The function to add the signature to\r\n \"\"\"\r\n # Add a signature to the function\r\n func._xmlrpc_signature = {\r\n 'returns': returns,\r\n 'args': args\r\n }\r\n return func\r\n\r\n return _xmlrpc_func", "def xmlrpc_methods():", "def get_documentation(self, *args, **dargs):\n pass", "def docstrings(param1, param2):\n return \"example string\"", "def __doc__(self, ???):", "def describe(self, *args, **kwargs):\n def _autodoc(func, *_args, **_kwargs):\n if len(_args) > 0:\n #: Instance or class method.\n response = func(_args[0])\n else:\n #: Function.\n if len(_kwargs) > 0:\n response = func(**_kwargs)\n else:\n response = func()\n\n self.parse(args[0], response)\n\n return func\n\n return decorator(_autodoc)", "def retrieve_docstring(self):\n method = str(self.method).lower()\n if not hasattr(self.callback, method):\n return None\n\n return get_view_description(getattr(self.callback, method))", "def function(self, name):\n return function_documentor(name)", "def __repr__(self):\n\t\treturn self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def __repr__(self):\n return self.func.__doc__", "def getHelp(self,func = None):\n if func == None:\n print(self.__doc__)\n pass\n else:\n print(func.__doc__)\n pass", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def __repr__(self):\r\n return self.func.__doc__", "def func_doc():", "def __repr__(self):\n return self.func.__doc__" ]
[ "0.7297669", "0.694731", "0.68896276", "0.6688466", "0.6388553", "0.6385201", "0.63499844", "0.6292997", "0.6292498", "0.62701833", "0.6107226", "0.60769576", "0.60552335", "0.60015124", "0.59173465", "0.58777815", "0.58560765", "0.58364666", "0.5824243", "0.5823201", "0.5823201", "0.5823201", "0.5819293", "0.58159286", "0.58159286", "0.58159286", "0.58159286", "0.58159286", "0.5780362", "0.57670283" ]
0.71314836
1
Extract all phrases from word alignment.
def extract_phrase(self, src_text, tgt_text, alignment, max_phrase_len=0): def extract_from_range(tgt_start, tgt_end, src_start, src_end, max_phrase_len): """Extract a set of possible phrase given the source, language ranges. """ # print("rages", tgt_start, tgt_end, src_start, src_end) if tgt_end < 0: return # If `src_align_idx` out of the `src_start` and `src_target`. for src_align_idx, tgt_align_idx in alignment: # target align point # sorce align point out of range if ((tgt_start <= tgt_align_idx <= tgt_end) and (src_align_idx < src_start or src_align_idx > src_end)): return phrase_set = set() ts = tgt_start # For increment while True: te = min(tgt_end, ts+max_phrase_len-1) # For decrement # te = tgt_end while True: # Add phrase pair (src_start, src_end, tgt_start, tgt_end) src_phrase = " ".join(src_sent[i] for i in range(src_start,src_end+1)) tgt_phrase = " ".join(tgt_sent[i] for i in range(ts,te+1)) phrase_set.add(((src_start, src_end+1), src_phrase, tgt_phrase)) te+= 1 # Add phrase until `te` aligned or out of range if te in tgt_aligned or te == tgt_len: break ts-=1 # Add phrase until `te` aligned or out of range if ts in tgt_aligned or ts < 0: break return phrase_set # List of words src_sent = src_text.split() tgt_sent = tgt_text.split() # Set ot collect hrases phrase_set = set() # Length of sentences src_len = len(src_sent) tgt_len = len(tgt_sent) # Target language's align points tgt_aligned = [tgt_idx for _,tgt_idx in alignment ] max_phrase_len = max_phrase_len or max(src_len, tgt_len) ### Extraction ##### # Two steps: # (1) Loop all possible soruce language phrases matching minimal target language phrases # (2) By finding shortest target language phrases that includes # all the foreign counterparts for the source words. # ### Extraction ##### # Go over each source substring starting from begin for src_start in range(src_len): # Set maximal length for phrase length max_idx = min(src_len, src_start+max_phrase_len) for src_end in range(src_start, max_idx): # print('src_start, end', src_start, src_end) # Find the minimal matching of foreign phrase tgt_start, tgt_end = tgt_len-1, -1 for src_align_idx, tgt_align_idx in alignment: # print('alignment', src_align_idx, tgt_align_idx) # Length of phrase is greater or equal to one if src_start <= src_align_idx <= src_end: # print(tgt_align_idx, tgt_start, tgt_end) # Longest substring in target langage phrase tgt_start = min(tgt_align_idx, tgt_start) tgt_end = max(tgt_align_idx, tgt_end) # print(tgt_start, tgt_end, end='\n\n') # print(src_start, src_end) # print(tgt_start, tgt_end, end='\n\n') # Extract a set of phrases phrase = extract_from_range(tgt_start, tgt_end, src_start, src_end,max_phrase_len) if phrase: phrase_set.update(phrase) return phrase_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_phrases(data,model):\n phrases = []\n alignment = model.alignment_idx\n for i in range(len(data)):\n sent_phrases = phrase_extraction(data[i][\"fr\"],data[i][\"en\"],alignment[i])\n phrases.append(sent_phrases)\n return phrases", "def lemmatized_phrases(self):\n phrases = [set(lower_words(TextBlob(p).words.lemmatize()))\n for p in self.blob.noun_phrases]\n return [' '.join(p) for p in phrases if not STOPWORDS.intersection(p)]", "def process_text(self, text, lemma=False):\n processed_text = TextGraph.nlp(text.lower())\n words = [t.text.strip() if not lemma else t.lemma_ for t in processed_text if not t.is_punct]\n return words", "def extract_phrase_from_parallel_sentences(self, src_lst, \n tgt_lst,\n word_alignment,\n max_phrase_len=0,\n save_phrase_table=None):\n # asser equal length\n assert len(src_lst) == len(tgt_lst) == len(word_alignment)\n \n # Collect all phrase \n # Convert to list of source and target phrase pair \n # [(\"Wiederaufnahme der Sitzungsperiode\", \"Resumption of the session\"),\n # (\"Ich bitte\", \"Please rise then\"), \n # (\"Das Parlament\", \"The House rose and\")] \n flatten_phrase_lst = list()\n\n # Extract all phrases for word alignment by `extract_phrase()`\n phrase_collector = list()\n total_num = 0\n\n start = time.time() \n for idx, triplet in enumerate(zip(src_lst, tgt_lst, word_alignment)):\n if (idx+1) % 5000 == 0:\n sys.stdout.write(f\"Extracting phrases for {idx+1} sentences...\\n\")\n\n src_text, tgt_text, alignment = triplet \n phrase_set = self.extract_phrase(src_text, tgt_text, alignment, max_phrase_len)\n #print(\"extracted phrase\", phrase_set)\n phrase_collector.append(phrase_set)\n\n #print(\"number of phrase\", len(phrase_set), end=\"\\n\")\n total_num+= len(phrase_set)\n\n # Get source phrase and target phrase into a list\n flatten_phrase_lst.extend([ (tup[1], tup[2]) for tup in phrase_set])\n end=time.time()\n runtime = end-start\n avg_per_sentence = runtime / (idx+1)\n sys.stdout.write(f\"Running {runtime:.4f} seconds for {idx+1} sentences.\\n\")\n sys.stdout.write(f\"{avg_per_sentence:.5f} seconds per sentence.\\n\")\n \n # # Sort according the source phrase's length \n for idx, phrase_set in enumerate(phrase_collector):\n # Collect all English phrases for the corresponding phrase in German language.\n # {\"English phrase\": [(English alignment), [(German phrase),(German phrase)]], ..., ...}\n # {\"assumes\": [(0,1), [\"geht davon aus\", \"geht davon aus, \"]]}\n dlist = {}\n for alignment, src_phrase, tgt_phrase in phrase_set:\n \n if src_phrase in dlist:\n dlist[src_phrase][1].append(tgt_phrase)\n else:\n dlist[src_phrase] = [alignment, [tgt_phrase]]\n\n # Sort the list of translations based on their length. 
Shorter phrases first.\n for v in dlist.values():\n v[1].sort(key=lambda x: len(x))\n\n # List of phrase contans a tuple of `source phrase`, [(source alignment), [phrase,phrase]]\n sorted_phrase_list = sorted(dlist.items(), key=lambda x:x[1])\n # update it\n phrase_collector[idx] = sorted_phrase_list\n\n\n # Save phrase file\n if save_phrase_table is not None:\n with Path(save_phrase_table).open(\"w\") as wf:\n for phrase_lst in phrase_collector:\n for i, p in enumerate(phrase_lst):\n k, v = p\n joint_phrase = \"; \".join(v[1])\n wf.write(f\"{i} | {v[0]} | {k} | {joint_phrase}\\n\")\n #print(\"({0:2}) {1} {2} — {3}\".format( i, v[0], k, \" ; \".join(v[1])))\n sys.stdout.write(f\"Saving phrase file to path: {save_phrase_table}.\\n\")\n \n return phrase_collector, flatten_phrase_lst,total_num", "def lookup_pronunciations_for_phrase(words: Sequence[Text]) -> Sequence[Phrase]:\n return EnglishUtils.all_possible_phrases_for(words)", "def get_sample_text_passages(self, expression, no_passages):\n count = 0\n output = []\n phrase = nltk_tokenize.word_tokenize(expression)\n random.seed(expression)\n random_documents = self.documents.copy()\n random.shuffle(random_documents)\n\n for document in random_documents:\n if count >= no_passages:\n break\n current_document = document.get_tokenized_text()\n for index in range(len(current_document)):\n if current_document[index] == phrase[0]:\n if current_document[index:index+len(phrase)] == phrase:\n passage = \" \".join(current_document[index-20:index+len(phrase)+20])\n output.append((document.filename, passage))\n count += 1\n\n if len(output) <= no_passages:\n return output\n return output[:no_passages]", "def process_words(texts, bigram_mod,trigram_mod,stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\r\n texts = [bigram_mod[doc] for doc in texts]\r\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\r\n texts_out = []\r\n nlp = spacy.load('en_core_web_sm')\r\n for sent in texts:\r\n doc = nlp(\" \".join(sent)) \r\n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\r\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \r\n return texts_out", "def process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n texts = [bigram_mod[doc] for doc in texts]\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\n texts_out = []\n nlp = spacy.load('en', disable=['parser', 'ner'])\n for sent in texts:\n doc = nlp(\" \".join(sent)) \n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n # remove stopwords once more after lemmatization\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \n return texts_out", "def _remove_phrase_words(self, phrase: Phrase) -> None:\n for wi, word in enumerate(re.finditer(r\"\\w+\", phrase.phrase_string)):\n if wi == 0:\n del self.first_word_in_phrase[word.group(0)][phrase.phrase_string]\n if len(self.first_word_in_phrase[word.group(0)].keys()) == 0:\n del self.first_word_in_phrase[word.group(0)]\n self.word_in_phrase[word.group(0)].remove(phrase.phrase_string)\n if len(self.word_in_phrase[word.group(0)]) == 0:\n del self.word_in_phrase[word.group(0)]", "def remove_phrases(self, phrases: 
List[Union[str, Dict[str, Union[str, List[str]]], Phrase]]):\n print('REMOVING PHRASES')\n for phrase in phrases:\n print('\\tphrase:', phrase)\n phrase = as_phrase_object(phrase, ngram_size=self.ngram_size, skip_size=self.skip_size)\n print('\\tas phrase:', phrase)\n if phrase.phrase_string not in self.phrase_index:\n raise KeyError(f\"Unknown phrase: {phrase.phrase_string}\")\n self.remove_phrase(phrase)", "def phraseMaker(self):\n phrase_lst = []\n phrase = str(self.phrase_ent.get())\n keyword = str(self.keyword_ent.get())\n for i in range(self.city_lbx.size()):\n city = str(self.city_lbx.get(i))\n new_phrase = re.sub(keyword, city, phrase)\n phrase_lst.append(new_phrase)\n return phrase_lst", "def process_words(texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\r\n texts = [bigram_mod[doc] for doc in texts]\r\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\r\n texts_out = []\r\n nlp = spacy.load('en', disable=['parser', 'ner'])\r\n for sent in texts:\r\n doc = nlp(\" \".join(sent))\r\n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\r\n # remove stopwords once more after lemmatization\r\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out]\r\n return texts_out", "def get_phrases(self) -> List[Phrase]:\n return list(self.phrase_index.values())", "def raw_text_to_phrases(raw_phrases, language='english'):\n tokenized = nltk.sent_tokenize(raw_phrases, language)\n return [Phrase(phrase) for phrase in tokenized]", "def processwords(list_of_matches, lemmatag = False):\n list_of_matches = [w.lower() for w in list_of_matches]\n # remove nonwords, strip . to normalise \"dr.\"\n if translated_option != 'o' and translated_option != 'u':\n list_of_matches = [w.lstrip('.').rstrip('.') for w in list_of_matches if re.search(regex_nonword_filter, w)]\n \n list_of_matches.sort()\n \n # tokenise if multiword:\n if phrases and not n_gramming:\n from nltk import word_tokenize as word_tokenize\n list_of_matches = [word_tokenize(i) for i in list_of_matches]\n\n # this is just for plaintext ... 
should convert to unicode on file open\n if datatype == 'plaintext':\n try:\n list_of_matches = [unicode(w, errors = 'ignore') for w in list_of_matches]\n except TypeError:\n pass\n\n if not dependency and exclude and 'w' in exclude.keys():\n list_of_matches = [w for w in list_of_matches if not re.match(exclude['w'], w)]\n\n if lemmatise or 'l' in show:\n if not dependency:\n tag = gettag(query, lemmatag = lemmatag)\n lemmata = lemmatiser(list_of_matches, tag)\n tups = zip(list_of_matches, lemmata)\n res = []\n for w, l in tups:\n single_result = []\n if exclude and 'l' in exclude.keys():\n if re.match(exclude['l'], l):\n continue\n if 'w' in show:\n single_result.append(w)\n if 'l' in show:\n single_result.append(l)\n # bad fix:\n # this currently says, if pos in show, there must only be pos ...\n if 'p' in show:\n if lemmatise:\n single_result.append(l)\n else:\n single_result.append(w)\n\n single_result = '/'.join(single_result)\n res.append(single_result)\n list_of_matches = res\n\n if titlefilter and not dependency:\n list_of_matches = titlefilterer(list_of_matches)\n if spelling:\n list_of_matches = convert_spelling(list_of_matches, spelling = spelling)\n\n # use blacklist option in gui\n if 'blacklist' in kwargs.keys():\n stopwords = False\n if kwargs['blacklist'] is not False:\n if kwargs['blacklist'] is True:\n from dictionaries.stopwords import stopwords as my_stopwords\n stopwords = [i.lower() for i in my_stopwords]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n if type(kwargs['blacklist']) == list:\n stopwords = [i.lower() for i in kwargs['blacklist']]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n regexblacklist = re.compile(kwargs['blacklist'])\n list_of_matches = [w for w in list_of_matches if not re.search(regexblacklist, w)]\n\n #if not split_con:\n # list_of_matches = unsplitter(list_of_matches)\n \n # turn every result into a single string again if need be:\n if phrases:\n output = []\n for res in list_of_matches:\n joined = ' '.join(res)\n output.append(joined)\n return output\n else:\n return list_of_matches", "def extract_phrases(tdocs, docs, idf):\n # Gather existing keyphrases\n keyphrases = set()\n for doc in tdocs:\n for t in doc:\n if len(t.split(' ')) > 1:\n keyphrases.add(t)\n\n # Count document co-occurrences\n t_counts = defaultdict(int)\n pair_docs = defaultdict(list)\n for i, terms in enumerate(tdocs):\n # We dont convert the doc to a set b/c we want to preserve order\n # Iterate over terms as pairs\n for pair in zip(terms, terms[1:]):\n t_counts[pair] += 1\n pair_docs[pair].append(i)\n\n # There are a lot of co-occurrences, filter down to those which could\n # potentially be phrases.\n t_counts = {kw: count for kw, count in t_counts.items() if count >= 2}\n\n # Identify novel phrases by looking at\n # keywords which co-occur some percentage of the time.\n # This could probably be more efficient/cleaned up\n for (kw, kw_), count in t_counts.items():\n # Only consider terms above a certain avg global IDF (to reduce noise)\n if (idf[kw]+idf[kw_])/2 <= 0.4:\n continue\n\n # Look for phrases that are space-delimited or joined by 'and' or '-'\n ph_reg = re.compile('({0}|{1})( |-)(and )?({0}|{1})'.format(kw, kw_))\n\n # Extract candidate phrases and keep track of their counts\n phrases = defaultdict(int)\n phrase_docs = defaultdict(set)\n for i in pair_docs[(kw, kw_)]:\n for m in ph_reg.findall(docs[i].lower()):\n phrases[''.join(m)] += 1\n phrase_docs[''.join(m)].add(i)\n\n if not phrases:\n 
continue\n\n # Get the phrase encountered the most\n top_phrase = max(phrases.keys(), key=lambda k: phrases[k])\n top_count = phrases[top_phrase]\n\n # Only count phrases that appear in _every_ document\n if top_count/count == 1:\n # Check if this new phrase is contained by an existing keyphrase.\n if any(top_phrase in ph for ph in keyphrases):\n continue\n keyphrases.add(top_phrase)\n\n # Add the new phrase to each doc it's found in\n for i in phrase_docs[top_phrase]:\n tdocs[i].append(top_phrase)\n\n return tdocs, keyphrases", "def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._remove_punct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keep_only_alpha_num and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopwords or tok2 in self._stopwords:\n continue\n\n if self._lower_case:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)", "def match_all_phrases(self, inphrases):\n# temporary - attempted matches\n attempted_matches = []\n phrase_attempts = {}\n phrase = \"\"\n step = \"A\"\n # ALL full phrases \n for phrase in inphrases:\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n #return match_choices, attempted_matches, phrase\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # Normalised version of ALL all full phrases \n phrases = [self.get_normalised_phrase(p) for p in inphrases]\n\n # 3 all prefix trigrams \n step = \"3\"\n for ngram in [p.split()[0:3] for p in phrases if len(p.split()) > 2]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # 2 all prefix bigrams \n step = \"2\"\n for ngram in [p.split()[0:2] for p in phrases if len(p.split()) > 1]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # 1 all valid words \n step = \"1\"\n for phr_elem in phrases:\n #print phr_elem.split()\n for phrase in [w.strip() for w in phr_elem.split() \n if self.isExcluded(w.strip()) == False and w.strip() not in phrase_attempts]:\n #print \"***\", phrase\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n return [], attempted_matches, phrase, None", "def process_text(text):\n doc = spacy_model(text.lower())\n result = []\n for token in doc:\n if token.text in spacy_model.Defaults.stop_words:\n continue\n if token.is_punct:\n continue\n if token.lemma_ == '-PRON-':\n continue\n result.append(token.lemma_)\n return \" \".join(result)", "def getPhrases(self, word, limit=None, wlmi=None, useCanonical=None, ):\n\n # Parse inputs\n resourcePath = '/word.{format}/{word}/phrases'\n resourcePath = 
resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n\n queryParams['limit'] = self.apiClient.toPathValue(limit)\n queryParams['wlmi'] = self.apiClient.toPathValue(wlmi)\n queryParams['useCanonical'] = self.apiClient.toPathValue(useCanonical)\n\n\n if word != None:\n resourcePath = resourcePath.replace('{word}', word)\n\n\n # Make the API Call\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n None, headerParams)\n if not response:\n return None\n\n\n responseObjects = []\n for responseObject in response:\n responseObjects.append(self.apiClient.deserialize(responseObject,\n model.Bigram.Bigram))\n return responseObjects", "def post_process(keyphrases):\n processed_keyphrases = []\n\n # Remove duplicates from the single phrases which are occurring in multi-keyphrases\n multi_phrases = [phrases for phrases in keyphrases if len(phrases[0].split()) > 1]\n single_phrase = [phrases for phrases in keyphrases if len(phrases[0].split()) == 1]\n for tup in single_phrase:\n kw = tup[0]\n for tup_m in multi_phrases:\n kw_m = tup_m[0]\n r = kw_m.find(kw)\n if r > -1:\n try:\n single_phrase.remove(tup)\n except:\n continue\n\n # Remove same word occurrences in a multi-keyphrase\n for multi_key, multi_score in multi_phrases:\n kw_m = multi_key.split()\n unique_kp_list = list(dict.fromkeys(kw_m))\n multi_keyphrase = ' '.join(unique_kp_list)\n processed_keyphrases.append((multi_keyphrase, multi_score))\n\n processed_keyphrases.extend(single_phrase)\n\n return processed_keyphrases", "def GetWords(phrase):\n # Remove special characters regex\n # It works faster than the standard \\w+ pattern\n regex = re.compile(r'([^\\d\\`\\~\\!\\@\\#\\$\\%\\^\\&\\*\\(\\)\\+\\=\\[\\{\\]\\}\\|\\\\\\'\\<\\,\\.\\>\\?\\/\\\"\"\\;\\:\\s]+)+',\n re.UNICODE)\n return re.findall(regex,phrase.lower())", "def get_alignments(self) -> list:", "def extract_passages(s, docs):\n if os.name == 'windows':\n docs = [doc.replace('/', '\\\\') for doc in docs]\n \n query_terms = set(tokenize(s))\n passages = []\n for doc in docs:\n with io.open(doc, encoding='utf-8', errors='ignore') as f:\n for para in f:\n for sent in sent_tokenize(para):\n if len(query_terms.intersection(set(tokenize(sent)))) == 0:\n continue\n passages.append(sent)\n return passages", "def extract_phrases_with_keywords(text, keyword):\n sentences = split_text(text)\n phrases = []\n keyword = word_process(keyword)\n for sentence in sentences:\n words = re.findall(r'\\w+', sentence)\n for i, word in enumerate(words):\n if word_process(word) == word_process(keyword): # Both word and keyword have been processed, so we can compare them directly\n start = sentence.index(words[max(0,i-2)])\n end = sentence.index(word) + len(word)\n phrases.append(sentence[start:end])\n return phrases", "def convert_to_valid_phrases(x):\n updated_x = []\n \n for doc in x:\n tokens = doc.split()\n phrase_tokens = []\n \n # Maintain the start of the phrase we are processing\n index = 0\n \n # Initially consider a phrase of word length 3\n phrase_length = 3\n \n while index < len(tokens):\n \n # Check the phrase length doesn't exceed the end of the string\n while index + phrase_length > len(tokens):\n phrase_length -= 1\n \n # Try and find a match for 3 word phrase, then 2 then 1\n while phrase_length > 0:\n # Check to see if we have a valid phrase match\n updated_tokens = []\n for token in tokens[index:index+phrase_length]:\n updated_tokens.extend(token.split('_'))\n \n phrase = '_'.join(updated_tokens)\n \n try:\n 
embedding_model.get_vector(phrase)\n phrase_tokens.append(phrase)\n break\n except:\n phrase_length -= 1\n \n # Cover the case where we couldn't find a valid phrase\n if phrase_length == 0:\n phrase_length = 1\n \n # We have a match, consume the phrase\n index += phrase_length\n \n # Reset phrase length for processing next index\n phrase_length = 3\n \n updated_x.append(' '.join(phrase_tokens))\n \n return updated_x", "def all_phrases(grammar, root):\n #\n # if root not in grammar:\n # return [[root]]\n #\n # phrases = []\n # for structure in grammar[root]:\n # for fragment in structure:\n # phrases = phrases + all_phrases(grammar,fragment)\n # print(phrases)\n # return phrases\n\n if root not in grammar:\n return [[root]]\n phrases = []\n for structure in grammar[root]:\n phrase_template = []\n for speech_part in structure:\n if speech_part not in grammar:\n if len(phrase_template)>0:\n new_phrase_template = []\n for phrase in phrase_template:\n if type(phrase)==str:\n phrase = [phrase]\n new_phrase_template.append(phrase+[speech_part])\n phrase_template = new_phrase_template\n else:\n phrase_template.append([speech_part])\n else:\n if len(phrase_template)>0:\n new_phrase_template = []\n for phrase in phrase_template:\n if type(phrase)==str:\n phrase = [phrase]\n for fragment in grammar[speech_part]:\n fragmented_bool = False\n for fragmented in fragment:\n if fragmented in grammar:\n fragmented_bool = True\n for subfragment in grammar[fragmented]:\n new_phrase_template.append(phrase+subfragment)\n if not fragmented_bool:\n new_phrase_template.append(phrase+fragment)\n phrase_template = new_phrase_template\n else:\n for fragment in grammar[speech_part]:\n if fragment[0] in grammar:\n for subfragment in grammar[fragment[0]]:\n phrase_template.append(subfragment)\n else:\n phrase_template.append(fragment)\n phrases = phrases + phrase_template\n return phrases", "def extractWords(self, inputDataset):\n reviewFile = open(inputDataset, \"r\", encoding=\"utf-8-sig\")\n for record in reviewFile:\n record = record.strip().split(\"\\t\") # tab-delimited .txt file\n self.addUnigrams(int(record[0]), record[1])\n reviewFile.close()", "def process_raw_phrases(file_path):", "def get_corpus_alignments(self,bitext,f_vocab,e_vocab):\n alignments = [] # all alignments in the corpus\n sentence_alignments = {} ## associated alignments for each snetence pair\n sent_count = 0\n for pair in bitext:\n sentence_alignments[sent_count] = []\n f_sent = pair[\"fr\"]\n e_sent = pair[\"en\"]\n e_count = len(e_sent) # number of wrods in each sentence\n f_count = len(f_sent)\n ## generate all combinations of alignments\n tuple_sets = []\n # all possible e->f mappings for each english word in separate list\n for i in range(e_count): # getting english words count of sets of ali tuples\n list = []\n iv_idx = e_vocab.index(e_sent[i]) ## getting corresponding index of word in the the vocabulary list\n for j in range(f_count):\n jv_idx = f_vocab.index(f_sent[j])\n list.append((iv_idx,jv_idx)) #of form (e,f)\n tuple_sets.append(list)\n for combination in product(*tuple_sets): ## change thos for more than 3 words\n alignments.append(combination)\n sentence_alignments[sent_count].append(len(alignments)-1)\n sent_count += 1\n #print(alignments)\n return alignments,sentence_alignments" ]
[ "0.7157194", "0.6438471", "0.61811405", "0.6119933", "0.6069589", "0.60057", "0.59385145", "0.58645695", "0.5848218", "0.58418983", "0.58405614", "0.58251745", "0.57417756", "0.57353705", "0.57185733", "0.57103014", "0.5676527", "0.56694734", "0.56662035", "0.5655986", "0.5638014", "0.5628557", "0.56256944", "0.56215894", "0.55727065", "0.5567203", "0.5562535", "0.5542538", "0.5530337", "0.55200297" ]
0.6709278
1
Extract a set of possible phrases given the source and target language ranges.
def extract_from_range(tgt_start, tgt_end, src_start, src_end, max_phrase_len): # print("rages", tgt_start, tgt_end, src_start, src_end) if tgt_end < 0: return # If `src_align_idx` out of the `src_start` and `src_target`. for src_align_idx, tgt_align_idx in alignment: # target align point # sorce align point out of range if ((tgt_start <= tgt_align_idx <= tgt_end) and (src_align_idx < src_start or src_align_idx > src_end)): return phrase_set = set() ts = tgt_start # For increment while True: te = min(tgt_end, ts+max_phrase_len-1) # For decrement # te = tgt_end while True: # Add phrase pair (src_start, src_end, tgt_start, tgt_end) src_phrase = " ".join(src_sent[i] for i in range(src_start,src_end+1)) tgt_phrase = " ".join(tgt_sent[i] for i in range(ts,te+1)) phrase_set.add(((src_start, src_end+1), src_phrase, tgt_phrase)) te+= 1 # Add phrase until `te` aligned or out of range if te in tgt_aligned or te == tgt_len: break ts-=1 # Add phrase until `te` aligned or out of range if ts in tgt_aligned or ts < 0: break return phrase_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_phrase(self, src_text, tgt_text, alignment, max_phrase_len=0):\n def extract_from_range(tgt_start, tgt_end, src_start, src_end, max_phrase_len):\n \"\"\"Extract a set of possible phrase given the source, language ranges.\n\n \"\"\"\n # print(\"rages\", tgt_start, tgt_end, src_start, src_end)\n if tgt_end < 0:\n return \n # If `src_align_idx` out of the `src_start` and `src_target`.\n for src_align_idx, tgt_align_idx in alignment:\n # target align point\n # sorce align point out of range\n if ((tgt_start <= tgt_align_idx <= tgt_end) and \n (src_align_idx < src_start or src_align_idx > src_end)): \n return\n phrase_set = set()\n ts = tgt_start # For increment\n while True:\n te = min(tgt_end, ts+max_phrase_len-1) # For decrement\n # te = tgt_end \n while True:\n # Add phrase pair (src_start, src_end, tgt_start, tgt_end)\n src_phrase = \" \".join(src_sent[i] for i in range(src_start,src_end+1))\n tgt_phrase = \" \".join(tgt_sent[i] for i in range(ts,te+1))\n phrase_set.add(((src_start, src_end+1), src_phrase, tgt_phrase))\n te+= 1\n # Add phrase until `te` aligned or out of range\n if te in tgt_aligned or te == tgt_len:\n break\n ts-=1\n # Add phrase until `te` aligned or out of range\n if ts in tgt_aligned or ts < 0:\n break\n \n return phrase_set\n\n # List of words\n src_sent = src_text.split()\n tgt_sent = tgt_text.split()\n \n # Set ot collect hrases\n phrase_set = set()\n \n # Length of sentences \n src_len = len(src_sent)\n tgt_len = len(tgt_sent)\n\n # Target language's align points\n tgt_aligned = [tgt_idx for _,tgt_idx in alignment ]\n max_phrase_len = max_phrase_len or max(src_len, tgt_len)\n\n\n ### Extraction ##### \n # Two steps:\n # (1) Loop all possible soruce language phrases matching minimal target language phrases\n # (2) By finding shortest target language phrases that includes \n # all the foreign counterparts for the source words.\n #\n ### Extraction #####\n # Go over each source substring starting from begin \n for src_start in range(src_len):\n # Set maximal length for phrase length \n max_idx = min(src_len, src_start+max_phrase_len)\n for src_end in range(src_start, max_idx):\n # print('src_start, end', src_start, src_end)\n # Find the minimal matching of foreign phrase\n tgt_start, tgt_end = tgt_len-1, -1\n for src_align_idx, tgt_align_idx in alignment:\n # print('alignment', src_align_idx, tgt_align_idx)\n # Length of phrase is greater or equal to one\n if src_start <= src_align_idx <= src_end:\n # print(tgt_align_idx, tgt_start, tgt_end)\n # Longest substring in target langage phrase\n tgt_start = min(tgt_align_idx, tgt_start)\n tgt_end = max(tgt_align_idx, tgt_end)\n # print(tgt_start, tgt_end, end='\\n\\n')\n # print(src_start, src_end)\n # print(tgt_start, tgt_end, end='\\n\\n')\n # Extract a set of phrases \n phrase = extract_from_range(tgt_start, tgt_end, src_start, src_end,max_phrase_len)\n if phrase:\n phrase_set.update(phrase)\n\n\n return phrase_set", "def _get_source_chunks(self, input_text, language=None):\n chunks = ChunkList()\n seek = 0\n result = self._get_annotations(input_text, language=language)\n tokens = result['tokens']\n language = result['language']\n for i, token in enumerate(tokens):\n word = token['text']['content']\n begin_offset = token['text']['beginOffset']\n label = token['dependencyEdge']['label']\n pos = token['partOfSpeech']['tag']\n if begin_offset > seek:\n chunks.append(Chunk.space())\n seek = begin_offset\n chunk = Chunk(word, pos, label)\n if chunk.label in _DEPENDENT_LABEL:\n # Determining concatenating 
direction based on syntax dependency.\n chunk.dependency = i < token['dependencyEdge']['headTokenIndex']\n if chunk.is_punct():\n chunk.dependency = chunk.is_open_punct()\n chunks.append(chunk)\n seek += len(word)\n return chunks, language", "def read_corpus(file_path, source):\n data = []\n for line in open(file_path):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>'] #TODO: Change\n data.append(sent)\n return data", "def _get_source_chunks(self, input_text, language=None):\n chunks = ChunkList()\n sentence_length = 0\n tokens = api.get_annotations(self.service, input_text, language)\n for i, token in enumerate(tokens):\n word = token['text']['content']\n begin_offset = token['text']['beginOffset']\n label = token['dependencyEdge']['label']\n pos = token['partOfSpeech']['tag']\n if begin_offset > sentence_length:\n chunks.append(Chunk.space())\n sentence_length = begin_offset\n chunk = Chunk(word, pos, label)\n # Determining default concatenating direction based on syntax dependency.\n chunk.maybe_add_dependency(\n i < token['dependencyEdge']['headTokenIndex'])\n chunks.append(chunk)\n sentence_length += len(word)\n return chunks", "def read_corpus(file_path, source):\n data = []\n for line in open(file_path, encoding='utf-8'):\n sent = line.strip().split(' ')\n # only append <s> and </s> to the target sentence\n if source == 'tgt':\n sent = ['<s>'] + sent + ['</s>']\n data.append(sent)\n\n return data", "def _generate_base_candidates(self, target_text):\n\n result_list = []\n tagged_text = tag(target_text)\n\n for i in range(1, 5):\n temp = []\n grams = find_ngrams(tagged_text, i)\n\n for gram in grams:\n phrase = \" \".join(list(map(lambda x: x[0], gram)))\n pos = \" \".join(list(map(lambda x: x[1], gram)))\n\n if pos in self.candidate_pattern:\n temp.append(phrase)\n\n result_list.append(temp)\n\n return result_list", "def complete_set(self, text, line, begidx, endidx):\n tokens = split(line[:begidx])\n if len(tokens) == 1:\n return [i for i in ('filter ', 'default ', 'time-format ') if i.startswith(text)]\n if len(tokens) == 2 and tokens[1] == 'time-format':\n return [i for i in ('long', 'short') if i.startswith(text)]\n return []", "def find_all( source, substring, start=None, end=None, overlap=False ):\n return [x for x in find_all_iter( source, substring, start, end, overlap )]", "def recover_para_segmentations(aligned_data, gt_paras_sents, human_translator_data):\n sents_covered = 0\n outputs = \"\"\n for p1, s1, src in zip(aligned_data[\"gt_paras\"], gt_paras_sents, aligned_data[\"source_paras\"]):\n num_sents = len(s1)\n outputs += f\"<b>Source</> = {src}\\n<b>Google Translate</> = {p1}\\n\"\n\n for translator, htd in human_translator_data.items():\n s2_sent_idx = []\n sent_aligns = extract_match(htd[\"match_matrix\"], sents_covered, sents_covered + num_sents, readable=True)\n for salign in sent_aligns:\n s2_sent_idx.extend(salign['trans_idx'])\n s2_sent_idx = list(set(s2_sent_idx))\n s2_sent_idx.sort()\n\n p2_alignment = \" \".join([htd['all_sents'][x] for x in s2_sent_idx])\n outputs += f\"<b>{translator}</> = {p2_alignment}\\n\"\n\n aligned_data[\"translator_data\"][translator][\"translator_paras\"].append(p2_alignment)\n aligned_data[\"translator_data\"][translator][\"sent_alignments\"].append(sent_aligns)\n sents_covered += num_sents\n outputs += \"\\n\\n\"\n return aligned_data, outputs", "def extract_passages(s, docs):\n if os.name == 'windows':\n docs = [doc.replace('/', '\\\\') 
for doc in docs]\n \n query_terms = set(tokenize(s))\n passages = []\n for doc in docs:\n with io.open(doc, encoding='utf-8', errors='ignore') as f:\n for para in f:\n for sent in sent_tokenize(para):\n if len(query_terms.intersection(set(tokenize(sent)))) == 0:\n continue\n passages.append(sent)\n return passages", "def generatorToList(generator):\n # segs, postags, nertags\n\n\n '''\n words = []\n i = 0\n while i < len(segs):\n\n seg, postag, nertag = segs[i], postags[i], nertags[i]\n if postag == 'ws':\n currWord = seg\n while (i+1) < len(segs) and postags[i+1] == 'ws':\n currWord += segs[i+1]\n i += 1\n words.append((currWord, 'eng'))\n\n elif nertag == 'O':\n words.append((seg, postag))\n i += 1\n else:\n words.append((seg, nertag))\n i += 1\n return words\n '''\n words = []\n for word, flag in generator:\n words.append([word, flag])\n return words", "def _expand_ranges(ranges_text, delimiter=',', indicator='-'):\n\n results = set()\n for group in filter(None, ranges_text.split(delimiter)):\n group = group.strip()\n if indicator not in group:\n if not group.isdigit():\n raise ValueError((\n \"group '{group}' could not be interpreted as a valid digit\"\n ).format(**locals()))\n results.add(int(group))\n else:\n (start, finish,) = list(filter(None, group.split(indicator)))\n if not start.isdigit() or not finish.isdigit():\n raise ValueError((\n \"group '{group}' could not be interpreted as a valid range\"\n ).format(**locals()))\n for entry in range(int(start), (int(finish) + 1), 1):\n results.add(entry)\n return sorted(list(results))", "def tag_range(str_text, i_from, i_to, lst_tag_types):\n rgx_tag = re.compile(target_pattern(lst_tag_types))\n return [o_match.span() for o_match in rgx_tag.finditer(str_text) if\n overlap((i_from, i_to), o_match.span())]", "def getSubstitutions(self):\n\n\t\tnative_sequence = self.native.sequence()\n\t\tdesign_sequence = self.design.protein.sequence()\n\n\t\tslist = getSubstitutionPositions(native_sequence, design_sequence)\n\t\twordlist = []\n\t\tfor i in slist:\n\t\t\twordlist.append(str(i))\n\t\t\n\t\tdiff_list = string.join(wordlist, \",\")\n\t\tprint diff_list\n\t\tcmd.select(\"desres\", \"(resi \" + diff_list + \")\")\n\t\tcmd.disable(\"desres\")", "def generate_solutions(possible_words, labels):\r\n return []", "def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n if source in FULL_TEXT_SOURCES\n ]", "def get_para_asqp_targets(sents, labels):\n targets = []\n for label in labels:\n all_quad_sentences = []\n for quad in label:\n at, ac, sp, ot = quad\n\n man_ot = sentword2opinion[sp] # 'POS' -> 'good' \n\n if at == 'NULL': # for implicit aspect term\n at = 'it'\n\n one_quad_sentence = f\"{ac} is {man_ot} because {at} is {ot}\"\n all_quad_sentences.append(one_quad_sentence)\n\n target = ' [SSEP] '.join(all_quad_sentences)\n targets.append(target)\n return targets", "def get_texts(detected_licenses):\n # FIXME: the current license data structure is contrived and will soon be\n # streamlined. 
See https://github.com/nexB/scancode-toolkit/issues/2416\n\n # set of (start line, end line, matched_rule identifier)\n seen = set()\n for lic in detected_licenses:\n key = lic['start_line'], lic['end_line'], lic['matched_rule']['identifier']\n if key not in seen:\n yield lic['matched_text']\n seen.add(key)", "def get_targets(self):\n\t\n\t\tself.target = []\n\t\ttarget_ins = self.settings['target']\n\t\tfor key in target_ins.keys():\n\t\t\tif key == 'raw':\n\t\t\t\tself.target.append(target_ins[key])\n\t\t\telif key == 'textfile':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,_].+\\s*:\\s*[A-Z].+$',t):\n\t\t\t\t\t\tself.target.append(tuple([i.strip() for i in t.split(':')]))\n\t\t\telif key == 'textfile_rna':\n\t\t\t\twith open(target_ins[key],'r') as fp: targs = fp.readlines()\n\t\t\t\tfor t in targs:\n\t\t\t\t\tif re.match('^[a-z,A-Z,0-9,_].+\\s*:\\s*[A-Z,a-z].+$',t):\n\t\t\t\t\t\tself.target.append(list([i.strip() for i in t.split(':')]))\n\t\t\t\t\t\trnaseq = self.target[-1][1]\n\t\t\t\t\t\t#---extra substitutions for later\n\t\t\t\t\t\tif 'regex_subs' in self.settings.keys():\n\t\t\t\t\t\t\tfor regex in self.settings['regex_subs']:\n\t\t\t\t\t\t\t\trnaseq = re.sub(regex[0],regex[1],rnaseq)\n\t\t\t\t\t\trnaseq = rnaseq.upper()\n\t\t\t\t\t\trnaseq = re.sub('T','U',rnaseq)\n\t\t\t\t\t\taminoseq = ''.join([dna_mapping[i] for i in [rnaseq[i:i+3] \n\t\t\t\t\t\t\tfor i in range(0,len(rnaseq),3)]])\n\t\t\t\t\t\tself.target[-1][1] = re.sub('T','U',aminoseq)\n\t\t\t\t\t\tself.target[-1] = tuple(self.target[-1])\n\t\t\telse: raise Exception('except: unclear target type')", "def lookup_pronunciations_for_phrase(words: Sequence[Text]) -> Sequence[Phrase]:\n return EnglishUtils.all_possible_phrases_for(words)", "def parse_words(source_dict):\n name, syn = None, []\n if 'synonym' in source_dict:\n syn = source_dict['synonym']\n if 'prefLabel' in source_dict:\n name = source_dict['prefLabel']\n return name, syn", "def parse_range(seq: str) -> list[int]:\n seq = seq.split(\",\")\n acc = []\n for i in seq:\n m = re.match(r\" *(?P<start>\\d+) *(- *(?P<end>\\d+))? 
*\", i)\n\n if not m:\n continue\n\n a = [m.group(\"start\"), m.group(\"end\")]\n a = [int(x) for x in a if x]\n\n if len(a) > 1:\n a = range(int(a[0]), int(a[1] + 1))\n\n acc.append(a)\n\n return list(\n set([x for x in list(itertools.chain.from_iterable(acc)) if x])\n )", "def get_alert_sources_as_text_list():\n\n text_list = \"\"\n for alert_source in ALERT_SOURCES[0:-1]:\n text_list += alert_source + \", \"\n if text_list:\n text_list += \" and \"\n text_list += ALERT_SOURCES[-1]\n\n return text_list", "def phraseMaker(self):\n phrase_lst = []\n phrase = str(self.phrase_ent.get())\n keyword = str(self.keyword_ent.get())\n for i in range(self.city_lbx.size()):\n city = str(self.city_lbx.get(i))\n new_phrase = re.sub(keyword, city, phrase)\n phrase_lst.append(new_phrase)\n return phrase_lst", "def tr(text, sourcelang, targetlang):\n request = urllib2.Request(url.format(text, sourcelang, targetlang),\n headers={ 'User-Agent': 'Mozilla/5.0', 'Accept-Charset': 'utf-8' })\n response = urllib2.urlopen(request).read()\n fixedJSON = re.sub(r',{2,}', ',', response).replace(',]', ']')\n data = json.loads(fixedJSON)\n result = {}\n result[\"definition\"] = data[0][0]\n for row in data[1]:\n try:\n result[row[0]] = row[1]\n except:\n pass\n return result", "def rangestr(\n src: str,\n lower: Optional[int] = None,\n upper: Optional[int] = None,\n delimiter: str = parsers.DEFAULT_DELIMITER,\n implicit_inclusion: bool = False,\n) -> Iterator[int]:\n ranges = parsers.parse_ranges(src, lower, upper, delimiter, implicit_inclusion)\n return _chain.from_iterable(map(lambda r: range(*r), ranges))", "def complete_set(self, text, line, begidx, endidx):\n # text = line[begidx:endidx] is the word we want to complete\n # split the completed words, should either be ['set'], or ['set', <option_key>]\n split_line = line[:begidx].split()\n if len(split_line) == 1:\n return [option for option in self.get_option_names() if option.startswith(text) or '.' + text in option]\n\n if len(split_line) == 2:\n key = split_line[1]\n options = self.get_options(key)\n if options is not None:\n scoped_key = key.split('.')[1] if '.' 
in key else key\n values = options.get_acceptable_values(scoped_key)\n if values is not None:\n return [value for value in values if value.startswith(text)]\n\n return []", "def solve(target, strings):\n result = list()\n word = set([s for s in target])\n for s in strings:\n if len(set([c for c in s]) - word) == 1:\n result.append(s)\n print(\",\".join(result))", "def getExcerpts(self, text, DICECodeResults):\n\t\t\"\"\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \"\"\"\n\t\t\n\t\tdef getKernels(indices):\n\t\t\t\"\"\"\n\t\t\t\tgetKernels() is a sub-method that extracts strings from a doc-\n\t\t\t\tument using indices provided by the DICECodeResults data struc-\n\t\t\t\tture passed into this sub-method's parent method, getExcerpts().\n\t\t\t\tThis sub-method returns three strings.\n\n\t\t\t\tindices --> tuple containing indices in the document with text to extract.\n\t\t\t\"\"\"\n\n\t\t\ti = indices[0]\n\t\t\tj = indices[1]\n\n\t\t\th = i - self.scope\n\t\t\tk = j + self.scope\n\n\t\t\tif h < 0: h = 0\n\t\t\tif k > len(text): k = len(text)-1\n\n\t\t\treturn text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")\n\t\t\t#return \"|\"+text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\", text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), \"|\"+text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\"\n\n\t\tdef getComboTerms(tuples):\n\t\t\t\"\"\"\n\t\t\t\tgetComboTerms() is a sub-method that combines search terms and \n\t\t\t\ttheir indices provided in the tuple parameter into a string with\n\t\t\t\tthe following structure: [(variant, index)]. This sub-method re-\n\t\t\t\tturns a string of that structure.\n\n\t\t\t\ttuples --> data structure containing the search term and index of the search term in the form of: (term, index)\n\t\t\t\"\"\"\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))\n\n\t\tdef getProximity(tuples):\n\t\t\t\"\"\"\n\t\t\t\tgetProximity() is a sub-method that calculates the distance of the search terms provided in the tuple parameter. 
\n\t\t\t\tThis sub-method returns an absolute value integer.\n\n\t\t\t\ttuples:\tdata structure containing the search term and index of the search term in the form of: (term, index)\n\t\t\t\"\"\"\n\t\t\tsortedIndices = [indices for indices in tuples]\n\t\t\t#return abs(sortedIndices[0][1] - sortedIndices[-1][0])\n\t\t\treturn sortedIndices[-1][0] - sortedIndices[0][1] \n\n\t\t\"\"\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \"\"\"\n\n\t\texcerptsResults = list()\t\t# NEW list to contain the expanded data structure provided by the DICECodeResults parameter\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tappend = excerptsResults.append\n\t\tformat = str.format\n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\tfor row in DICECodeResults:\n\n\t\t\tDICECode \t\t= row[0]\t# (1) DICE code as specified in C:\\Users\\a5rjqzz\\Desktop\\Python\\files\\Types.gd\n\t\t\tTYPECode \t\t= row[1]\t# (2) Type code as specified in C:\\Users\\a5rjqzz\\Desktop\\Python\\files\\Types.gd\n\t\t\tCombo \t\t= False\t\t# (3) Boolean status of the presence of a combo term\n\t\t\tdocumentIndex \t= 0\t\t\t# (4) Index of this search term in the document\n\t\t\tindices \t\t= row[2]\t# (5) Indices of the search term and combo term if present\n\t\t\tproximity\t\t= 0\t\t\t# (6) Distance between search term and combo terms\n\n\t\t\tif type(row[2][0]) == type(tuple()):\n\t\t\t\tCombo = True\t# If the type of search term is a combo, this is true\n\n\t\t\t\tfor tuples in row[2]:\n\t\t\t\t\tindices \t\t\t\t\t\t= tuples[0]\t\t\t\t# (1) Location(s) of the search term in the tuple\n\t\t\t\t\tdocumentIndex \t\t\t\t\t= indices[0]\t\t\t# (2) Location of the search term in the document\n\t\t\t\t\tcomboTerms \t\t\t\t\t\t= getComboTerms(tuples)\t# (3) Multiple terms assigned to variable comboTerms\n\t\t\t\t\tproximity \t\t\t\t\t\t= getProximity(tuples)\t# (4) Proximity of combo terms if present\n\t\t\t\t\tkernelLeft, kernel, kernelRight = getKernels(indices)\t# (5) Left, center, and right kernels or excerpts\n\n\t\t\t\t\tappend([DICECode, TYPECode, Combo, documentIndex, kernelLeft, kernel, kernelRight, comboTerms, proximity])\n\n\t\t\telse:\n\t\t\t\tdocumentIndex \t\t\t\t\t= indices[0]\t\t\t\t\t\t\t\t\t# (1) Location of the search term in the document\n\t\t\t\tcomboTerms \t\t\t\t\t\t= format(\"[{0}]\", text[indices[0]:indices[1]])\t# (2) Single term assigned to variable comboTerms\n\t\t\t\tkernelLeft, kernel, kernelRight = getKernels(indices)\t\t\t\t\t\t\t# (3) Left, center, and right kernels or excerpts\n\n\t\t\t\tappend([DICECode, TYPECode, Combo, documentIndex, kernelLeft, kernel, kernelRight, comboTerms, proximity])\n\n\t\treturn excerptsResults", "def expand_range(txt, range_operator='~'):\n if range_operator not in txt:\n return txt\n\n result = []\n index_pattern = r'(\\D*)(\\d+)'\n pair = txt.split(range_operator)\n result.append(pair[0])\n\n # Find start/end points\n match1 = re.search(index_pattern, pair[0])\n match2 = re.search(index_pattern, pair[1])\n start = int(match1.group(2))\n end = int(match2.group(2))\n label = match1.group(1) if match1.group(1) != match1.group(2) else ''\n result.extend([str(label) + str(i) for i in range(start + 1, end + 1)])\n return result" ]
[ "0.6873448", "0.5602197", "0.5513425", "0.55112517", "0.5479709", "0.54267764", "0.5333438", "0.52627516", "0.5251202", "0.5215412", "0.52010435", "0.5196199", "0.5195391", "0.51823145", "0.5178213", "0.51594794", "0.51498526", "0.5126929", "0.50857365", "0.50619686", "0.50372", "0.5018379", "0.50029826", "0.4964108", "0.49485084", "0.49356213", "0.49339318", "0.4930135", "0.49289882", "0.4924845" ]
0.70351136
0
Extract phrases from a list of parallel sentences.
def extract_phrase_from_parallel_sentences(self, src_lst, tgt_lst, word_alignment, max_phrase_len=0, save_phrase_table=None): # asser equal length assert len(src_lst) == len(tgt_lst) == len(word_alignment) # Collect all phrase # Convert to list of source and target phrase pair # [("Wiederaufnahme der Sitzungsperiode", "Resumption of the session"), # ("Ich bitte", "Please rise then"), # ("Das Parlament", "The House rose and")] flatten_phrase_lst = list() # Extract all phrases for word alignment by `extract_phrase()` phrase_collector = list() total_num = 0 start = time.time() for idx, triplet in enumerate(zip(src_lst, tgt_lst, word_alignment)): if (idx+1) % 5000 == 0: sys.stdout.write(f"Extracting phrases for {idx+1} sentences...\n") src_text, tgt_text, alignment = triplet phrase_set = self.extract_phrase(src_text, tgt_text, alignment, max_phrase_len) #print("extracted phrase", phrase_set) phrase_collector.append(phrase_set) #print("number of phrase", len(phrase_set), end="\n") total_num+= len(phrase_set) # Get source phrase and target phrase into a list flatten_phrase_lst.extend([ (tup[1], tup[2]) for tup in phrase_set]) end=time.time() runtime = end-start avg_per_sentence = runtime / (idx+1) sys.stdout.write(f"Running {runtime:.4f} seconds for {idx+1} sentences.\n") sys.stdout.write(f"{avg_per_sentence:.5f} seconds per sentence.\n") # # Sort according the source phrase's length for idx, phrase_set in enumerate(phrase_collector): # Collect all English phrases for the corresponding phrase in German language. # {"English phrase": [(English alignment), [(German phrase),(German phrase)]], ..., ...} # {"assumes": [(0,1), ["geht davon aus", "geht davon aus, "]]} dlist = {} for alignment, src_phrase, tgt_phrase in phrase_set: if src_phrase in dlist: dlist[src_phrase][1].append(tgt_phrase) else: dlist[src_phrase] = [alignment, [tgt_phrase]] # Sort the list of translations based on their length. Shorter phrases first. for v in dlist.values(): v[1].sort(key=lambda x: len(x)) # List of phrase contans a tuple of `source phrase`, [(source alignment), [phrase,phrase]] sorted_phrase_list = sorted(dlist.items(), key=lambda x:x[1]) # update it phrase_collector[idx] = sorted_phrase_list # Save phrase file if save_phrase_table is not None: with Path(save_phrase_table).open("w") as wf: for phrase_lst in phrase_collector: for i, p in enumerate(phrase_lst): k, v = p joint_phrase = "; ".join(v[1]) wf.write(f"{i} | {v[0]} | {k} | {joint_phrase}\n") #print("({0:2}) {1} {2} — {3}".format( i, v[0], k, " ; ".join(v[1]))) sys.stdout.write(f"Saving phrase file to path: {save_phrase_table}.\n") return phrase_collector, flatten_phrase_lst,total_num
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_phrases(data,model):\n phrases = []\n alignment = model.alignment_idx\n for i in range(len(data)):\n sent_phrases = phrase_extraction(data[i][\"fr\"],data[i][\"en\"],alignment[i])\n phrases.append(sent_phrases)\n return phrases", "def sentence_parse(list_of_posts): \n for parsedPosts in nlp.pipe(line_review(list_of_posts)):\n for sent in parsedPosts.sents:\n yield str(sent)", "def parser(sent_list): #input: list of sentences", "def get_paraphrase_text(words, par_indices):\n paraphrase_words = [words[i] for i in par_indices]\n paraphrase = ' '.join(paraphrase_words)\n yield paraphrase", "def sentences(a, b):\n\n # TODO\n return []", "def phraseMaker(self):\n phrase_lst = []\n phrase = str(self.phrase_ent.get())\n keyword = str(self.keyword_ent.get())\n for i in range(self.city_lbx.size()):\n city = str(self.city_lbx.get(i))\n new_phrase = re.sub(keyword, city, phrase)\n phrase_lst.append(new_phrase)\n return phrase_lst", "def get_sample_text_passages(self, expression, no_passages):\n count = 0\n output = []\n phrase = nltk_tokenize.word_tokenize(expression)\n random.seed(expression)\n random_documents = self.documents.copy()\n random.shuffle(random_documents)\n\n for document in random_documents:\n if count >= no_passages:\n break\n current_document = document.get_tokenized_text()\n for index in range(len(current_document)):\n if current_document[index] == phrase[0]:\n if current_document[index:index+len(phrase)] == phrase:\n passage = \" \".join(current_document[index-20:index+len(phrase)+20])\n output.append((document.filename, passage))\n count += 1\n\n if len(output) <= no_passages:\n return output\n return output[:no_passages]", "def process_text(text):\n doc = spacy_model(text.lower())\n result = []\n for token in doc:\n if token.text in spacy_model.Defaults.stop_words:\n continue\n if token.is_punct:\n continue\n if token.lemma_ == '-PRON-':\n continue\n result.append(token.lemma_)\n return \" \".join(result)", "def test_get_passage_with_list(self):\n simple = self.TEI.getPassage([\"1\", \"pr\", \"2\"])\n self.assertEqual(\n simple.text().strip(),\n \"tum, ut de illis queri non possit quisquis de se bene\",\n \"Ensure passage finding with context is fully TEI / Capitains compliant (Different level range Passage)\"\n )", "def sentencing(any_text, nlp):\n nlp.add_pipe(nlp.create_pipe('sentencizer'))\n doc = nlp(any_text)\n sentences = [sent.string.strip() for sent in doc.sents]\n return sentences", "def parse_subject(self, word_list):\n\n self.skip(word_list, 'stop')\n\n next_word = self.peek(word_list)\n\n if next_word == 'noun':\n return self.match(word_list, 'noun')\n\n elif next_word == 'verb':\n return ('noun', 'player')\n\n else:\n raise ParserError('Expected a verb. Got a %s.' 
% next_word)", "def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > self.split_threshold_min:\n text_sentences = nlp(text)\n sentences = []\n for sentence in text_sentences.sents:\n current = sentence.text\n sentences.append(current.strip())\n texts.extend(sentences)\n else:\n texts.append(text)\n self.texts = list(set(texts))", "def extract_passages(s, docs):\n if os.name == 'windows':\n docs = [doc.replace('/', '\\\\') for doc in docs]\n \n query_terms = set(tokenize(s))\n passages = []\n for doc in docs:\n with io.open(doc, encoding='utf-8', errors='ignore') as f:\n for para in f:\n for sent in sent_tokenize(para):\n if len(query_terms.intersection(set(tokenize(sent)))) == 0:\n continue\n passages.append(sent)\n return passages", "def process_text(text):\n return [token.text for token in nlp(text) if not token.is_stop]", "def extract(intent, SF_list, sentence):\n\n sen = \"\".join(sentence.split())\n item = \"\"\n money = \"\"\n\n # extract money amount\n money_start_idx = 0\n money_end_idx = 0\n\n for i, s in enumerate(SF_list):\n if (s == 1) or (s == 2):\n item += sen[i]\n\n if (s == 3):\n money_start_idx = i\n if (s == 4 and sen[i].isdigit()):\n money_end_idx = i\n\n if money_start_idx and money_end_idx:\n money = intent + sen[money_start_idx:money_end_idx+1]\n elif money_start_idx:\n money = intent + sen[money_start_idx]\n\n # item: str, money: str\n return item, money", "def get_sentences(text):\n \n return text.split('.')", "def extract_sentences(paper_path, para_yes):\n\n f = open(paper_path, 'rb')\n doc = Document.from_file(f, readers=[HtmlReader()])\n\n sen_yes_arr = list()\n sen_no_arr = list()\n\n elem_all = np.arange(0,len(doc))\n para_no = np.delete(elem_all, para_yes)\n\n for i in para_no:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_no_arr.append(sentence)\n\n for i in para_yes:\n if type(doc.elements[i]) == chemdataextractor.doc.text.Paragraph:\n for sentence in doc.elements[i]:\n sen_yes_arr.append(sentence)\n\n\n return sen_yes_arr, sen_no_arr", "def get_tweet_texts(tweet_list):\n relevant_tweet_text = ''\n for text in tweet_list:\n relevant_tweet_text += ' ' + text['text']\n\n return relevant_tweet_text", "def extract(self, text: str) -> list:\n nes={}\n if self.ner_model == 'spacy':\n nes=self.extract_spacy(text)\n return nes", "def get_sentences(self, text_to_parse):\r\n simple_sentence, *rest = text_to_parse.split('(', 1)\r\n kana_sentence, *rest = rest[0].split(')', 1)\r\n return simple_sentence.strip(), kana_sentence.strip().replace(' ', '')", "def get_valid_phrases():\n return [x[0] for x in all_topics if x[1] == \"1\"]", "def get_sentences(paragraph):\n punctuation = re.compile(r'[\\.!?]')\n sentences = [sentence.strip() for sentence in punctuation.split(paragraph)]\n sentences = filter(lambda x: x, sentences)\n return sentences", "def extract_sentences_from_text(self, text_data):\n pass", "def statement(analysis):\n\n #Recovering the subject\n phrase = element_rebuilding.nom_struc_rebuilding(analysis.sn)\n\n if not phrase:\n return []\n\n if analysis.sv:\n #Recovering the end of the sentence\n phrase = element_rebuilding.end_statement_rebuilding(phrase, analysis.sv, analysis.sn, analysis.data_type,\n analysis.aim)\n\n #Recovering subsentences\n for s in analysis.sv[0].vrb_sub_sentence:\n phrase = phrase + 
sub_process(s)\n\n #Eliminate redundancies if there are\n phrase = other_functions.eliminate_redundancy(phrase)\n\n #If it is a relative form\n if analysis.data_type == RELATIVE or analysis.data_type.startswith(SUBSENTENCE):\n if phrase[len(phrase) - 1][len(phrase[len(phrase) - 1]) - 1] != ',':\n phrase[len(phrase) - 1] += ','\n return phrase\n if analysis.data_type == W_QUESTION:\n return phrase + ['?']\n\n #To take of all not useless comma\n while phrase[len(phrase) - 1][len(phrase[len(phrase) - 1]) - 1] == ',':\n phrase[len(phrase) - 1] = phrase[len(phrase) - 1][:len(phrase[len(phrase) - 1]) - 1]\n return phrase + ['.']", "def filter(results, keyword, max = 100):\n global nlp\n if not nlp:\n nlp = spacy.load('en')\n keyLemmas = set([w.lemma_ for w in nlp(keyword)])\n\n ret = []\n for res in results:\n r = res[1]\n doc = nlp(r)\n words = []\n sub_verbs = []\n\n for word in doc:\n if word.dep_ == \"nsubj\":\n if word.lemma_ in keyLemmas:\n if word.head.pos_ == \"VERB\":\n v = word.head\n if list(v.rights) or not v.lemma_ == \"be\":\n sub_verbs.append( (word, v) )\n\n if sub_verbs: \n beg = sub_verbs[0][1].left_edge.idx \n end = sub_verbs[0][1].right_edge.idx + len(sub_verbs[0][1].right_edge.text)\n phrase = res[1][beg: end]\n \n ret.append((res[0], phrase))\n if len(ret) >= max:\n return ret\n\n return ret", "def lemmatized_phrases(self):\n phrases = [set(lower_words(TextBlob(p).words.lemmatize()))\n for p in self.blob.noun_phrases]\n return [' '.join(p) for p in phrases if not STOPWORDS.intersection(p)]", "def _sentence_segmenter(paragr):\n # this is relatively high because we are only looking for sentences that\n # will have subject and object\n MIN_SENTLENGTH = 100\n MAX_SENTLENGTH = 512\n\n # sentence termination pattern used in sentence_segmenter(paragr)\n terpat = re.compile('[\\.\\?!]\\s+[A-Z\\\"]')\n\n # source: LbjNerTagger1.11.release/Data/KnownLists/known_title.lst from\n # University of Illinois with editing\n ABBREV_LIST = ['mrs.', 'ms.', 'mr.', 'dr.', 'gov.', 'sr.', 'rev.', 'r.n.',\n 'pres.', 'treas.', 'sect.', 'maj.', 'ph.d.', 'ed. 
psy.',\n 'proc.', 'fr.', 'asst.', 'p.f.c.', 'prof.', 'admr.',\n 'engr.', 'mgr.', 'supt.', 'admin.', 'assoc.', 'voc.',\n 'hon.', 'm.d.', 'dpty.', 'sec.', 'capt.', 'c.e.o.',\n 'c.f.o.', 'c.i.o.', 'c.o.o.', 'c.p.a.', 'c.n.a.', 'acct.',\n 'llc.', 'inc.', 'dir.', 'esq.', 'lt.', 'd.d.', 'ed.',\n 'revd.', 'psy.d.', 'v.p.', 'senr.', 'gen.', 'prov.',\n 'cmdr.', 'sgt.', 'sen.', 'col.', 'lieut.', 'cpl.', 'pfc.',\n 'k.p.h.', 'cent.', 'deg.', 'doz.', 'Fahr.', 'Cel.', 'F.',\n 'C.', 'K.', 'ft.', 'fur.', 'gal.', 'gr.', 'in.', 'kg.',\n 'km.', 'kw.', 'l.', 'lat.', 'lb.', 'lb per sq in.', 'long.',\n 'mg.', 'mm.,, m.p.g.', 'm.p.h.', 'cc.', 'qr.', 'qt.', 'sq.',\n 't.', 'vol.', 'w.', 'wt.']\n\n sentlist = []\n # controls skipping over non-terminal conditions\n searchstart = 0\n terloc = terpat.search(paragr)\n while terloc:\n isok = True\n if paragr[terloc.start()] == '.':\n if (paragr[terloc.start() - 1].isupper() and\n paragr[terloc.start() - 2] == ' '):\n isok = False # single initials\n else:\n # check abbreviations\n loc = paragr.rfind(' ', 0, terloc.start() - 1)\n if loc > 0:\n if paragr[loc + 1:terloc.start() + 1].lower() in ABBREV_LIST:\n isok = False\n if paragr[:terloc.start()].count('(') != paragr[:terloc.start()].count(')'):\n isok = False\n if paragr[:terloc.start()].count('\"') % 2 != 0:\n isok = False\n if isok:\n if (len(paragr[:terloc.start()]) > MIN_SENTLENGTH and\n len(paragr[:terloc.start()]) < MAX_SENTLENGTH):\n sentlist.append(paragr[:terloc.start() + 2])\n paragr = paragr[terloc.end() - 1:]\n searchstart = 0\n else:\n searchstart = terloc.start() + 2\n\n terloc = terpat.search(paragr, searchstart)\n\n # add final sentence\n if (len(paragr) > MIN_SENTLENGTH and len(paragr) < MAX_SENTLENGTH):\n sentlist.append(paragr)\n\n return sentlist", "def parallel_word_dict(w_list, st, end):\n import spacy\n w_list = w_list[st:end]\n nlp, out_dict, count = spacy.load('en_core_web_lg'), {}, 0\n for word in w_list:\n word_obj = nlp(word)\n if word_obj.has_vector:\n out_dict[word] = word_obj.vector\n count += 1\n return out_dict", "def extract_phrases_with_keywords(text, keyword):\n sentences = split_text(text)\n phrases = []\n keyword = word_process(keyword)\n for sentence in sentences:\n words = re.findall(r'\\w+', sentence)\n for i, word in enumerate(words):\n if word_process(word) == word_process(keyword): # Both word and keyword have been processed, so we can compare them directly\n start = sentence.index(words[max(0,i-2)])\n end = sentence.index(word) + len(word)\n phrases.append(sentence[start:end])\n return phrases", "def extract_phrase(self, src_text, tgt_text, alignment, max_phrase_len=0):\n def extract_from_range(tgt_start, tgt_end, src_start, src_end, max_phrase_len):\n \"\"\"Extract a set of possible phrase given the source, language ranges.\n\n \"\"\"\n # print(\"rages\", tgt_start, tgt_end, src_start, src_end)\n if tgt_end < 0:\n return \n # If `src_align_idx` out of the `src_start` and `src_target`.\n for src_align_idx, tgt_align_idx in alignment:\n # target align point\n # sorce align point out of range\n if ((tgt_start <= tgt_align_idx <= tgt_end) and \n (src_align_idx < src_start or src_align_idx > src_end)): \n return\n phrase_set = set()\n ts = tgt_start # For increment\n while True:\n te = min(tgt_end, ts+max_phrase_len-1) # For decrement\n # te = tgt_end \n while True:\n # Add phrase pair (src_start, src_end, tgt_start, tgt_end)\n src_phrase = \" \".join(src_sent[i] for i in range(src_start,src_end+1))\n tgt_phrase = \" \".join(tgt_sent[i] for i in range(ts,te+1))\n 
phrase_set.add(((src_start, src_end+1), src_phrase, tgt_phrase))\n te+= 1\n # Add phrase until `te` aligned or out of range\n if te in tgt_aligned or te == tgt_len:\n break\n ts-=1\n # Add phrase until `te` aligned or out of range\n if ts in tgt_aligned or ts < 0:\n break\n \n return phrase_set\n\n # List of words\n src_sent = src_text.split()\n tgt_sent = tgt_text.split()\n \n # Set ot collect hrases\n phrase_set = set()\n \n # Length of sentences \n src_len = len(src_sent)\n tgt_len = len(tgt_sent)\n\n # Target language's align points\n tgt_aligned = [tgt_idx for _,tgt_idx in alignment ]\n max_phrase_len = max_phrase_len or max(src_len, tgt_len)\n\n\n ### Extraction ##### \n # Two steps:\n # (1) Loop all possible soruce language phrases matching minimal target language phrases\n # (2) By finding shortest target language phrases that includes \n # all the foreign counterparts for the source words.\n #\n ### Extraction #####\n # Go over each source substring starting from begin \n for src_start in range(src_len):\n # Set maximal length for phrase length \n max_idx = min(src_len, src_start+max_phrase_len)\n for src_end in range(src_start, max_idx):\n # print('src_start, end', src_start, src_end)\n # Find the minimal matching of foreign phrase\n tgt_start, tgt_end = tgt_len-1, -1\n for src_align_idx, tgt_align_idx in alignment:\n # print('alignment', src_align_idx, tgt_align_idx)\n # Length of phrase is greater or equal to one\n if src_start <= src_align_idx <= src_end:\n # print(tgt_align_idx, tgt_start, tgt_end)\n # Longest substring in target langage phrase\n tgt_start = min(tgt_align_idx, tgt_start)\n tgt_end = max(tgt_align_idx, tgt_end)\n # print(tgt_start, tgt_end, end='\\n\\n')\n # print(src_start, src_end)\n # print(tgt_start, tgt_end, end='\\n\\n')\n # Extract a set of phrases \n phrase = extract_from_range(tgt_start, tgt_end, src_start, src_end,max_phrase_len)\n if phrase:\n phrase_set.update(phrase)\n\n\n return phrase_set" ]
[ "0.6296526", "0.6136638", "0.5852756", "0.5819079", "0.57264394", "0.5696509", "0.5664151", "0.559129", "0.55865973", "0.55777866", "0.5577783", "0.5568261", "0.553962", "0.54893243", "0.5487933", "0.54764163", "0.5456427", "0.5454422", "0.543709", "0.5425575", "0.5423253", "0.5414294", "0.54082394", "0.53754836", "0.5367155", "0.53640383", "0.53504807", "0.53503543", "0.534815", "0.53355217" ]
0.6505854
0
Compute log probability of sorucetarget phrase pairs. Note that we compute log p(f|e) due to the noise channel assumption.
def compute_log_probs(self, phrases, save_to_file=None): # Co-ocurrence for source and target phrase tgt_src_cnt = defaultdict(lambda: defaultdict(int)) # ocurrence for target (English) tgt_cnt = defaultdict(int) # Compute frequency and co-occurence for phrase_pair in phrases: assert len(phrase_pair) == 2 src_p, tgt_p = phrase_pair # defaultdict[target_phrase][source_phrase] tgt_src_cnt[tgt_p][src_p]+=1 # defaultdict[target_phrase] tgt_cnt[tgt_p]+=1 # Calculate log prob of translation model p(f|e) log_prob_lst = list() for tgt_phrase in tgt_src_cnt: for src_phrase in tgt_src_cnt[tgt_phrase]: # count(source, target) / count(target) prob = float(tgt_src_cnt[tgt_phrase][src_phrase])/tgt_cnt[tgt_phrase] log_prob = math.log(prob) log_prob_str = f"{src_phrase} ||| {tgt_phrase} ||| {str(log_prob)}" log_prob_lst.append(log_prob_str) # Save to file if save_to_file is not None: with Path(save_to_file).open("w") as wf: wf.write('\n'.join(log_prob_lst)) sys.stdout.write(f"Saving phrase log probability file to path: {save_to_file}.\n") # List of log probability for phrase pair return log_prob_lst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sentence_logprob(self, sentence):\n line = get_ngrams(sentence,3)\n log_por = 0.0\n for item in line:\n raw_por = self.smoothed_trigram_probability(item)\n log_por = log_por+math.log2(raw_por)\n\n return float(log_por)", "def sentence_logprob(self, sentence):\n grams = get_ngrams(sentence, 3)\n p = 1\n\n for gram in grams:\n p *= np.longfloat(self.smoothed_trigram_probability(gram))\n\n return np.log2(p)", "def log_prob(self):", "def log_prob(self, sents):\n log_prob = 0\n for sent in sents:\n log_prob += self.sent_log_prob(sent)\n return log_prob", "def sentence_logprob(self, sentence):\n sum_logprob = 0\n trigrams = get_ngrams(sentence, 3)\n for trigram in trigrams :\n sum_logprob += math.log2(self.smoothed_trigram_probability(trigram))\n\n return sum_logprob", "def log_probability(self, sequence):\n sequence = self._transform(sequence)\n\n T = len(sequence)\n\n if T > 0 and sequence[0][_TAG]:\n last_state = sequence[0][_TAG]\n p = self._priors.logprob(last_state) + self._output_logprob(\n last_state, sequence[0][_TEXT]\n )\n for t in range(1, T):\n state = sequence[t][_TAG]\n p += self._transitions[last_state].logprob(\n state\n ) + self._output_logprob(state, sequence[t][_TEXT])\n last_state = state\n return p\n else:\n alpha = self._forward_probability(sequence)\n p = logsumexp2(alpha[T - 1])\n return p", "def log_prob(self, scores : torch.Tensor, permutations):\n s = torch.log(select_indices(scores, permutations))\n n = len(scores)\n p = self.upto if self.upto is not None else n - 1\n return -sum(\n torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0))\n for k in range(p))", "def log_probability(self, samples):\n pass", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def log_probability(self, tokens):\n\n log_sum = 0\n for leader, token in generate_ngrams(tokens, self.size, include_terminator = self.include_terminator):\n if not leader in self.frequencies:\n return float(\"-inf\")\n\n word_frequency = self.frequencies[leader][token]\n leader_frequency = self.frequencies[leader].total\n\n probability = (word_frequency + self.smoothing) / (leader_frequency + len(self.vocab) * self.smoothing)\n\n if probability == 0:\n return float(\"-inf\")\n\n log_sum += math.log2(probability)\n \n return log_sum", "def compute_probability_of_state(state):\n p = compute_log_probability_of_text(state[\"text\"], state[\"char_to_ix\"], \n state[\"frequency_statistics\"], state[\"transition_matrix\"])\n \n return p", "def compute_log_probability_of_text(text, char_to_ix, frequency_statistics, transition_matrix):\n t = text\n cix = char_to_ix\n fr = frequency_statistics\n tm = transition_matrix\n \n i0 = cix[t[0]]\n p = np.log(fr[i0])\n i = 0\n while i < len(t)-1:\n i1 = cix[t[i+1]]\n p += np.log(tm[i0, i1])\n i0 = i1\n i += 1\n \n return p", "def log_prob(sentence, LM, smoothing=False, delta=0, vocabSize=0):\n word_list = sentence.split()\n log_prob = 0\n for i in range(len(word_list)-1):\n print(word_list[i], word_list[i+1])\n bi_count = LM['bi'][word_list[i]][word_list[i+1]]\n uni_count = LM['uni'][word_list[i]]\n if uni_count == 0 and smoothing:\n return float('-inf')\n log_prob += log(((bi_count + delta)/(uni_count + delta * vocabSize)))\n return log_prob", "def sent_log_prob(self, sent):\n\n prob = 0\n sent = ['<s>']*(self.n-1)+sent+['</s>']\n\n for i in range(self.n-1, len(sent)):\n c_p = self.cond_prob(sent[i], tuple(sent[i-self.n+1:i]))\n # to catch a math error\n if not c_p:\n return 
float('-inf')\n prob += log(c_p, 2)\n\n return prob", "def _logp(self, trace, **inputs):\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'],\n loc=phen_pred,\n scale=step['phenotype_sigma'])\n return phen_prob\n\n phen_probs = [calc_log(trace[idx])\n for idx in np.random.randint(0, len(self.trace), 500)]\n phen_probs = np.asmatrix(phen_probs)\n mc_logp = phen_probs.sum(axis=1).mean()\n return mc_logp", "def perplexity(self, corpus):\n l = 0\n total_word_count = 0\n for sentence in corpus :\n l += self.sentence_logprob(sentence)\n # 2 extra START tokens and 1 extra STOP token\n total_word_count += len(sentence)\n l /= total_word_count\n return math.pow(2, -l)", "def action_log_probs(self, state):\n dist = self.action_distribution(state)\n raw_action = dist.rsample() # reparametrization trick\n\n # enforcing action bounds\n tanh_action = torch.tanh(raw_action) # prevent recomputation later.\n action = tanh_action * self.action_scale + self.action_bias\n\n # change of variables for log prob\n raw_log_prob = dist.log_prob(raw_action)\n log_prob = raw_log_prob - torch.log(\n self.action_scale * (1 - tanh_action.pow(2)) + FEPS\n )\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob", "def calculate_entropy(prob):\n return -(prob * math.log(prob,2))", "def log_prob(target_distribution, x0, xs, accepteds):\n return np.mean([target_distribution.log_probability(x) for x in xs])", "def perplexity(self, corpus):\n sum_pro = 0.0\n total_words = 0\n for sentence in corpus:\n sen_pro = self.sentence_logprob(sentence)\n sum_pro += sen_pro\n total_words += len(sentence)\n\n \n\n l = sum_pro/total_words\n w = 0.0\n w = 2**(-l)\n\n return w", "def probability(self, tokens):\n\n return 2 ** self.log_probability(tokens)", "def sumLogProb(a, b):\n if a > b:\n return a + log1p(exp(b - a))\n else:\n return b + log1p(exp(a - b))", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def logprob(predictions, labels):\n # prevent negative probability\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]", "def sent_log_prob(self, sent):\n n = self._n\n sent.insert(0, '<s>')\n sent.append('</s>')\n prob = 0\n for i in range(len(sent) - n):\n segment = sent[i:i + n]\n cond = self.cond_prob(segment.pop(), segment)\n if cond == 0.0:\n return -float(\"inf\")\n prob = prob + math.log(cond, 2)\n return prob", "def log_prob(self, x, y):\n p = self.tag_log_prob(y)\n for i in range(len(y)):\n if self.out_prob(x[i], y[i]) == 0:\n return -math.inf\n\n p += math.log2(self.out_prob(x[i], y[i]))\n\n return p", "def log_probability_of_sentence(self, tokens: Sequence[str]):\n if isinstance(tokens, str):\n raise ValueError(\"Input to log_probability_of_sentence is a sequence of token strings,\"\n \" not a single string\")\n # these don't matter when we are running the model in inference mode\n targets = np.zeros([_BATCH_SIZE, _NUM_TIMESTEPS], np.int32)\n weights = np.ones([_BATCH_SIZE, _NUM_TIMESTEPS], np.float32)\n\n # these contain information about the previous word\n # we initialize them with the beginning-of-sentence marker\n inputs = np.zeros([_BATCH_SIZE, 
_NUM_TIMESTEPS], np.int32)\n inputs[0, 0] = self._vocab.word_to_id(_START_SENTENCE_SYMBOL)\n\n char_ids_inputs = np.zeros(\n [_BATCH_SIZE, _NUM_TIMESTEPS, self._vocab.max_word_length], np.int32)\n char_ids_inputs[0, 0, :] = self._vocab.word_to_char_ids(_START_SENTENCE_SYMBOL)\n\n # we take the log probability of a token sequence to be the sum of the log-probs\n # of each of its tokens given the preceding context\n log_prob_sum = 0.0\n for token in tokens:\n with contexttimer.Timer() as token_timer:\n dist_over_next_words = self._session.run(\n self._name_to_node['softmax_out'],\n feed_dict={\n self._name_to_node['char_inputs_in']: char_ids_inputs,\n self._name_to_node['inputs_in']: inputs,\n self._name_to_node['targets_in']: targets,\n self._name_to_node['target_weights_in']: weights})\n token_idx = self._vocab.word_to_id(token)\n log_prob_sum += math.log(dist_over_next_words[0][token_idx])\n\n # prepare this word to be the context for the next word\n inputs[0, 0] = token_idx\n char_ids_inputs[0, 0, :] = self._vocab.word_to_char_ids(token)\n\n # restore original state so that future calls to log_probability_of_sentence\n # are not affected by past calls\n self._reset_state()\n\n return log_prob_sum", "def logp(self, args):\n mean, stddev, action = args\n dist = tfp.distributions.Normal(loc=mean, scale=stddev)\n logp = dist.log_prob(action)\n return logp", "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions)))" ]
[ "0.74536216", "0.72376466", "0.6940813", "0.69313174", "0.6906261", "0.6710182", "0.6671943", "0.66348374", "0.66269255", "0.66226804", "0.658965", "0.6519465", "0.6512543", "0.6429427", "0.6417837", "0.6415285", "0.63980013", "0.63869506", "0.6386877", "0.63799185", "0.63781476", "0.6376572", "0.6359355", "0.6359355", "0.63415045", "0.6332147", "0.6328631", "0.6326105", "0.6310959", "0.6309043" ]
0.7457257
0
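All of the negatives in the row above share one pattern: a sentence score is the sum of per-n-gram log probabilities under some language model. A minimal runnable sketch of that pattern — the toy trigram table below is an illustrative assumption, not taken from the dataset:

import math

def sentence_logprob(ngrams, prob):
    # Sum of base-2 log probabilities over the sentence's n-grams;
    # assumes prob(g) > 0 for every n-gram g.
    return sum(math.log2(prob(g)) for g in ngrams)

# Toy usage: a fixed table stands in for a smoothed trigram model.
table = {
    ("START", "START", "the"): 0.2,
    ("START", "the", "cat"): 0.1,
    ("the", "cat", "STOP"): 0.05,
}
print(sentence_logprob(table, lambda g: table[g]))  # ~ -9.97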
Flatten a nested list into a list of source-target phrase pairs.
def flatten_phrase_collect(phrase_collect):
    flatten_phrse_collect = list()
    for phrase_lst in phrase_collect:
        pass
    return flatten_phrse_collect
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flatten(nested_list):\r\n return list(chain.from_iterable(nested_list))", "def _flatten(phrases):\n return [' '.join(phrase) for phrase in phrases]", "def flatten(nested_list):\n for elt in nested_list:\n if isinstance(elt, collections.Iterable) and not isinstance(elt, six.string_types):\n for sub in flatten(elt):\n yield sub\n else:\n yield elt", "def flatten(nested_list):\n return [item for a_list in nested_list for item in a_list]", "def flatten(nested_list):\n t_l = []\n for i in nested_list:\n if not isinstance(i, list):\n t_l.append(i)\n else:\n t_l.extend(flatten(i))\n return t_l", "def flattenList(input_list):\r\n return [item for sublist in input_list for item in sublist]", "def flatten(nested_list: List[List[T]]) -> List[T]:\n return [item for sublist in nested_list for item in sublist]", "def flatten(ls):\r\n return [item for sublist in ls for item in sublist]", "def flatten(src):\n return [item for sublist in src for item in sublist]", "def flatten(list):\n\n if isinstance(list, collections.Iterable) and not isinstance(list, (str, bytes)):\n return [a for i in list for a in flatten(i)]\n else:\n return [list]", "def flatten(t: typing.Iterable[typing.List[FlattenItem]]) -> typing.List[FlattenItem]:\n return list(itertools.chain.from_iterable(t))", "def flatten_list(in_list):\n return [item for sublist in in_list for item in sublist]", "def flatten(nested_lst):\r\n if not isinstance(nested_lst, list):\r\n return(nested_lst)\r\n\r\n res = []\r\n for l in nested_lst:\r\n if not isinstance(l, list):\r\n res += [l]\r\n else:\r\n res += flatten(l)\r\n\r\n\r\n return(res)", "def flatten(list_to_flatten): \n flattened_list = []\n for item in list_to_flatten:\n if isinstance(item, list) or isinstance(item, tuple):\n flattened_list += flatten(item)\n else:\n flattened_list.append(item)\n return flattened_list", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def flatten(nested):\n result_list = []\n\n for element in nested:\n if hasattr(element, \"__iter__\") and not\\\n isinstance(element, basestring):\n result_list.extend(flatten(element))\n else:\n result_list.append(element)\n\n return result_list", "def flatten(t):\n flat_list = []\n for sublist in t:\n for item in sublist:\n flat_list.append(item)\n return flat_list", "def flatten(nested_list):\n result = []\n for item in nested_list:\n if type(item) == type([]):\n result.extend(flatten(item))\n else:\n result.append(item)\n return result", "def flatten(xss):\n return chain.from_iterable(xss)", "def flatten_list(alist):\n return list(flatten_list_gen(alist))", "def flatten_list(lol):\n return list(itertools.chain.from_iterable(lol))", "def _flatten(x: Sequence) ->list:\n return [item for sublist in x for item in sublist]", "def flatten_list(nested_list):\n nested_list = deepcopy(nested_list)\n while nested_list:\n sublist = nested_list.pop(0)\n if isinstance(sublist, list):\n nested_list = sublist + nested_list\n else:\n yield sublist", "def _flatten_list(x):\n return list(chain.from_iterable(x))", "def flatten(lst):\n \n for x in lst:\n if isinstance(x, list):\n for x in flatten(x):\n yield x\n else:\n yield x", "def flatten(lol ):\n return [item for sublist in lol for item in sublist]", "def flatten_list(x):\n result = []\n for el in x:\n if hasattr(el, \"__iter__\") and not isinstance(el, basestring):\n result.extend(flatten_list(el))\n else:\n result.append(el)\n return result", "def flatten_list(l):\n return [item for sublist 
in l for item in sublist]", "def flatten(x):\n result = []\n for el in x:\n if hasattr(el, \"__iter__\") and not isinstance(el, basestring):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result" ]
[ "0.68353754", "0.6703909", "0.6512857", "0.65088475", "0.64734936", "0.640399", "0.618579", "0.6086874", "0.6073905", "0.60664827", "0.60623664", "0.604358", "0.60098505", "0.5991506", "0.5989105", "0.5989105", "0.5974142", "0.5966722", "0.5949544", "0.5914635", "0.59093", "0.5908747", "0.59002703", "0.5856789", "0.5851991", "0.5844155", "0.5836143", "0.58290863", "0.58072317", "0.58046883" ]
0.69660723
0
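The positive document above still has a pass placeholder in its loop, so it returns an empty list. A hedged guess at the completed version, assuming each element of phrase_collect is itself a list of (source, target) phrase pairs:

def flatten_phrase_collect(phrase_collect):
    # Flatten a nested list of phrase lists into one flat list of
    # (source, target) phrase pairs.
    flat_phrase_collect = []
    for phrase_lst in phrase_collect:
        flat_phrase_collect.extend(phrase_lst)
    return flat_phrase_collect

# Toy usage with made-up phrase pairs.
nested = [[("ein", "one")], [("zwei", "two"), ("drei", "three")]]
print(flatten_phrase_collect(nested))
# [('ein', 'one'), ('zwei', 'two'), ('drei', 'three')]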
Split data into nq blocks.
def split_data(data, nq, flag=1):
    if flag == 1:
        quantiles = np.linspace(data.min(), data.max(), nq + 1)
    elif flag == 2:
        segs = np.linspace(0, 100, nq + 1)
        quantiles = np.percentile(data, segs)
    quantiles[0] = quantiles[0] - 1e-15
    quantiles[-1] = quantiles[-1] + 1e-15
    grp_names = np.digitize(data, quantiles) - 1
    return grp_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_chunk_size_priority_over_n_splits(self):\n with self.subTest(input='list', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=1, n_splits=6, n_jobs=None), 13)\n with self.subTest(input='numpy', chunk_size=1, n_splits=6):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=1, n_splits=6,\n n_jobs=None), 100)\n\n with self.subTest(input='list', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=3, n_splits=3, n_jobs=None), 5)\n with self.subTest(input='numpy', chunk_size=3, n_splits=3):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=3, n_splits=3,\n n_jobs=None), 34)", "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r", "def disaggregate_chunk(self, test_mains):\n raise NotImplementedError()", "def test_n_splits(self):\n for n_splits, n_jobs in product([1, 6], [None, 2, 8]):\n with self.subTest(input='list', n_splits=n_splits, n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=n_splits,\n n_jobs=n_jobs), n_splits)\n\n with self.subTest(input='numpy', n_splits=n_splits, n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None,\n n_splits=n_splits, n_jobs=n_jobs), n_splits)", "def basic_summary(data):\n headers = [\"Split\", \"Samples\", \"Height\", \"Width\", \"Channels\", \"Classes\"]\n print(table_format(headers, header = True))\n for split in [\"train\", \"valid\", \"test\"]:\n X, y = data[split]\n n, h, w, c = X.shape\n n_classes = np.unique(y).shape[0]\n row = [split, n, h, w, c, n_classes]\n print(table_format(row))", "def total_chunks(self) -> global___Expression:", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n 
assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def chunks(data, n):\n newn = int(len(data) / n) # chunk size \n \n for i in range(0, n-1):\n test_chunk = data[i*newn:i*newn+newn]\n train_chunk = [el for el in data if el not in test_chunk]\n yield train_chunk, test_chunk\n \n test_chunk = data[n*newn-newn:]\n train_chunk = [el for el in data if el not in test_chunk]\n \n yield train_chunk, test_chunk", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def buckets(data, n):\n # Shuffle all datasets to get a more consistent workload for all threads.\n random.shuffle(data)\n\n for i in range(0, len(data), n):\n yield data[i:i + n]", "def get_split_summary_table(all_df, train_df, test_df):\n table = PrettyTable()\n table.field_names = ['set', 'N total', 'N non-ICH', 'N ICH', 'frac non-ICH', 'frac ICH']\n for df, name in zip([all_df, train_df, test_df],['All', 'Train', 'Test']):\n table.add_row([name, len(df), len(df[df.Hemorrhage == 0]), len(df[df.Hemorrhage == 1]),\n f'{len(df[df.Hemorrhage == 0])/len(df):.3%}', f'{len(df[df.Hemorrhage == 1])/len(df):.3%}'])\n return table", "def test_everything_none(self):\n with self.subTest(input='list'):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=None), min(13, cpu_count() * 4))\n with self.subTest(input='numpy'):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=None), min(100, cpu_count() * 4))", "def test_n_jobs(self):\n for n_jobs in [1, 6]:\n with self.subTest(input='list', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data)))\n\n with self.subTest(input='numpy', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data_numpy)))", "def getChunks():", "def _get_data_chunk(self):\n if self._start_pos < self.max_pos:\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n elif self._current_forc < self._num_forcs - 1:\n # Resest for next FORC\n self._current_forc += 1\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n 
self._get_dc_offset()\n\n self._start_pos = 0\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n\n else:\n self.data = None\n\n return", "def summarize(data, verbal=False, using_files=True):\n\n if using_files:\n for file_name in tqdm(data):\n fill_table(pd.read_csv(file_name))\n else:\n for table in tqdm(data):\n fill_table(table)\n\n for cluster in table_summary:\n #total_genes = sum(table_summary[cluster][\"phylum\"].values) # number of genes\n #total_genes = table_summary[cluster][\"N\"] # number of samples\n total_genes = table_summary[cluster][\"eggNOG\"].eggNOG.sum() # number of genes in COGs with duplicates\n \n phylum_percent = table_summary[cluster][\"phylum\"].apply(lambda x: x/total_genes * 100)\n phylum_percent.columns = [\"percent\"]\n table_summary[cluster][\"phylum\"] = pd.concat([table_summary[cluster][\"phylum\"],phylum_percent],axis=1)\n\n #Read above for fix\n genus_percent = table_summary[cluster][\"genus\"].apply(lambda x: x/total_genes * 100)\n genus_percent.columns = [\"percent\"]\n table_summary[cluster][\"genus\"] = pd.concat([table_summary[cluster][\"genus\"],genus_percent],axis=1)\n\n #read above for fix\n cog_percent = table_summary[cluster][\"eggNOG\"].apply(lambda x: x/table_summary[cluster][\"gene_cog\"] * 100)\n cog_percent.columns = [\"percent\"]\n table_summary[cluster][\"eggNOG\"] = pd.concat([table_summary[cluster][\"eggNOG\"],cog_percent],axis=1)\n\n #Print the data\n if verbal:\n print \"Cluster %s:\\n\" % cluster\n print \"Number of Samples: %d\\n\" % table_summary[cluster][\"N\"]\n print \"Taxonomy:\"\n print table_summary[cluster][\"phylum\"].sort(\"percent\", ascending=False)\n print \"----------------------------------\"\n print table_summary[cluster][\"genus\"].sort(\"percent\", ascending=False)\n print \"-----------------------------------\"\n print \"COGS:\"\n print table_summary[cluster][\"eggNOG\"].sort(\"percent\", ascending=False)\n print \"------------------------------------\"\n print \"End Summary\"", "def five_num_summary(items):\n \n for num in items:\n s_list = sorted(items)\n n_max = np.max(s_list)\n n_med = np.median(s_list) \n n_q1 = np.percentile(s_list, 25)\n n_min = np.min(s_list)\n n_q3 = np.percentile(s_list, 75)\n \n D = { \"max\":n_max, \"median\":n_med, \"min\":n_min, \"q1\":n_q1, \"q3\":n_q3}\n return D", "def split_test_data():\n outputvis = ROOT_DIR + 'test_imaging/test_split_1eb.ms'\n targ = TARGETS['NGC1333IRAS4A']\n spw = '{0}:236~276'.format(SPWS[targ.name]['NH3_11'].spw_id)\n split(\n vis=get_vis_name(targ),\n outputvis=outputvis,\n field=targ.name,\n spw=spw,\n )", "def analyze_all(q: int = 100, n: int = 75000):\n total_start_time = time.time()\n sort_correct, sort_results = bucket_sort_general(q, n)\n print('sort_correct')\n sort_sorted_list = bucket_sort_sorted_list(q, n)\n print('sort_sorted_list')\n sort_reversed_list = bucket_sort_reversed_list(q, n)\n print('sort_reversed_list')\n sort_unique_list = bucket_sort_unique_list(q, n)\n\n headers = ['Type', 'Avg', 'Min', 'Max', 'Std']\n table = [['Bucket sort normal', sum(sort_results) / len(sort_results), min(sort_results), max(sort_results),\n pstdev(sort_results)],\n ['Bucket sort sorted list', sum(sort_sorted_list) / len(sort_sorted_list), min(sort_sorted_list),\n max(sort_sorted_list), pstdev(sort_sorted_list)],\n ['bucket sort reversed list', sum(sort_reversed_list) / len(sort_reversed_list), min(sort_reversed_list),\n 
max(sort_reversed_list), pstdev(sort_reversed_list)],\n ['bucket sort unique values', sum(sort_unique_list) / len(sort_unique_list), min(sort_unique_list),\n max(sort_unique_list), pstdev(sort_unique_list)]]\n\n print(f'Running all the metrics took {time.time() - total_start_time} seconds')\n print(f'Bucket sort correct = {sort_correct}')\n print(f'Each metric is calculated with a population of {q} and a list length of {n}')\n print(tabulate(table, headers=headers))\n return table", "def finalize(self):\n self.total_priors = np.sum(list(self.priors.values()))\n self.total_blocks = np.sum(list(self.nblocks.values()))\n self.total_fitness = np.sum(list(self.fitness.values()))\n self.blocks = BedTool.from_dataframe(self.df)", "def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = [self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()", "def test_n_jobs(self):\n for n_jobs, expected_n_chunks in [(1, 4), (3, 12), (40, 100), (150, 100)]:\n with self.subTest(n_jobs=n_jobs):\n iterable_of_args, iterable_len, chunk_size, n_splits_ = apply_numpy_chunking(\n self.test_data_numpy, n_jobs=n_jobs\n )\n\n # Materialize generator and test contents. 
We simply test if every row of the original input occurs in\n # the chunks\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), expected_n_chunks)\n offset = 0\n for chunk in iterable_of_args:\n self.assertIsInstance(chunk[0], np.ndarray)\n np.testing.assert_array_equal(chunk[0], self.test_data_numpy[offset:offset + len(chunk[0])])\n offset += len(chunk[0])\n self.assertEqual(offset, 100)\n\n # Test other output\n self.assertEqual(iterable_len, expected_n_chunks)\n self.assertEqual(chunk_size, 1)\n self.assertIsNone(n_splits_)", "def hf_summarizer(sentences):\n\n max_chunk = 512\n current_chunk = 0\n chunks = []\n\n for sentence in sentences:\n if len(chunks) == current_chunk +1 :\n if len(chunks[current_chunk]) + len(sentence.split()) <= max_chunk:\n chunks[current_chunk].extend(sentence.split())\n else:\n current_chunk += 1\n chunks.append(sentence.split())\n else:\n print(current_chunk)\n chunks.append(sentence.split())\n\n # print(chunks[0])\n\n for chunk_id in range(len(chunks)):\n chunks[chunk_id] = ' '.join(chunks[chunk_id])\n\n #print(len(chunks[0].split()))\n\n summarizer = pipeline(\"summarization\")\n summarized = summarizer(chunks, min_length = 50, max_length = 100, do_sample=False)\n\n text = ''.join([sum[\"summary_text\"] for sum in summarized])\n\n with open(\"static/files/book.txt\", \"w\",encoding=\"utf-8\") as f:\n f.write(text)\n \n return summarized", "def calculate_data_split(self, n_train=2, n_vali=0, n_test=1, n_splits=1,\n num_percentiles=4, random_state=87, verbosity=1,\n manual_split=False,train_indices=None, test_indices=None,\n train_redshift_indices=[0], test_redshift_indices=[0],\n interpolate_over_redshift_only=False, **kwargs):\n \n n_extrema=kwargs.get('n_extrema', 2)\n ind_extrema=kwargs.get('ind_extrema', [0,-1])\n self.data_separation(n_extrema=n_extrema, ind_extrema=ind_extrema)\n\n too.condprint(\"number of wanted training vectors\", n_train, level=2, verbosity=verbosity)\n too.condprint(\"number of wanted test vectors\", n_test, level=1, verbosity=verbosity)\n if n_train+n_test > (self.size_fullspace):\n print(\"Warning n_train is larger than total full sample space\")\n\n self.random_state = random_state\n self.num_percentiles = num_percentiles\n self.n_splits = n_splits\n\n stratif_labels = self.stratify_array(self.midspace, num_percentiles=self.num_percentiles)\n self.test_splitdict = dict()\n self.train_splitdict = dict()\n self.vali_splitdict = dict()\n\n if manual_split == False:\n n_vali = self.size_midspace-n_test-n_train\n if n_vali !=0 and len(self.ind_midspace)> 1:\n kf = StratifiedShuffleSplit(n_splits=self.n_splits, test_size=n_test, random_state=self.random_state)\n for ii, (trainvali, test) in enumerate(kf.split(self.midspace,stratif_labels)):\n #test = test[np.in1d(test, extspace_ind, invert=True)]\n\n test = self.ind_midspace[test]\n if n_train > 0:\n train, valitest = train_test_split(trainvali, test_size=n_vali, shuffle=True, random_state=self.random_state)\n train = self.ind_midspace[train]\n train = np.unique(np.concatenate([train,self.ind_extremaspace]))\n train = self.ind_fullspace[train]\n else:\n train = self.ind_extremaspace\n train = self.ind_fullspace[train]\n valitest=trainvali\n\n #valitest = valitest[np.in1d(valitest, extspace_ind, invert=True)]\n valitest = self.ind_midspace[valitest]\n #print(test, trr, \" s tr\", len(train)-2, \" tr: \", train, \" va: \", valitest)\n self.test_splitdict[ii] = test\n self.vali_splitdict[ii]= valitest\n self.train_splitdict[ii] = train\n elif 
len(self.ind_midspace)> 1 and n_vali == 0:\n kf = StratifiedShuffleSplit(n_splits=self.n_splits, test_size=n_test, random_state=self.random_state)\n for ii, (train, test) in enumerate(kf.split(self.midspace,stratif_labels)):\n test = self.ind_midspace[test]\n train = self.ind_midspace[train]\n train = np.unique(np.concatenate([train,self.ind_extremaspace]))\n train = self.ind_fullspace[train]\n self.test_splitdict[ii] = test\n self.train_splitdict[ii] = train\n\n else:\n test = self.ind_midspace\n train = self.ind_extremaspace\n self.test_splitdict[0] = test\n self.train_splitdict[0] = train\n \n ###/!\\ Warning /!\\ For now we always use manual split (which not really manual now...)\n elif manual_split == True:\n ### Determine the number of samples avaible with different values of parameters. e.g nb_param = 101 for MassiveNus\n nb_param = int(len(self.fullspace)/len(self.z_requested)) \n if len(self.z_requested)==1:\n nb_param = int(len(self.fullspace))\n \n\n for ii in range (n_splits):\n ###Here the user has chosen to provide the test indices\n if test_indices is not None:\n test_indices = np.atleast_2d(test_indices)\n test = test_indices[ii]\n ###We make sure that the indice lies into a correct space. e.g if we have nb_param = 101, and a indices i = 103 it will become i =2\n test_origin = [tt%nb_param for tt in test]\n \n ###Do we want to construct a interpolation only over the redshift ? /!\\ Warning /!\\ this is case is not really used....\n if interpolate_over_redshift_only == False and train_indices is None:\n train_origin = [ii for ii in range(1,nb_param-1) if ii not in test_origin ]\n\n elif interpolate_over_redshift_only == False and train_indices is not None:\n train_origin = [tt%nb_param for tt in train ]\n else :\n train_origin = test_origin\n ###Here the user has chosen not to provide the test indices\n ## so we first randomly generate them\n else:\n if train_indices is None:\n test_origin = [ii for ii in range(1,nb_param-1)]\n test_origin = shuffle(test_origin)[:n_test]\n if interpolate_over_redshift_only == False:\n train_origin = [ii for ii in range(1,nb_param-1) if ii not in test_origin ]\n else:\n train_origin = test_origin\n ###The user has specified train indices so must be sure that train and test do not overlap !\n else:\n train_indices = np.atleast_2d(train_indices)\n train = train_indices[ii]\n train_origin = [tt%nb_param for tt in train ]\n test_origin = [ii for ii in range(1,nb_param-1) if ii not in train_origin ] ####!!!\n \n train_origin = shuffle(train_origin)\n \n train_origin = train_origin[:n_train]\n test_origin = shuffle(test_origin)[:n_test]\n if train_indices is None:\n if [0] not in test_origin:\n train_origin +=[0]\n if [nb_param-1]not in test_origin:\n \n train_origin += [nb_param-1]\n if [0] in test_origin or [nb_param-1] in test_origin :\n print(\"Warning : trying to interpolate a extramal value\")\n \n\n train_redshift = self.z_requested[train_redshift_indices]\n test_redshift = self.z_requested[test_redshift_indices]\n self.train_redshift = train_redshift \n self.test_redshift = test_redshift\n too.condprint(\"redshift used for training\", train_redshift,level=1,verbosity=verbosity)\n too.condprint(\"redshfit used for testing\", test_redshift,level=1,verbosity=verbosity)\n train = []\n test = []\n ### looping over the redshift \n for zz in train_redshift_indices:\n train+= [ii + zz*nb_param for ii in train_origin ]\n\n for zz in test_redshift_indices: \n test += [ii + zz*nb_param for ii in test_origin ]\n \n\n self.train_splitdict[ii] = train\n 
self.test_splitdict[ii] = test\n shuffled = shuffle(train)\n self.train_splitdict[ii] = shuffled\n self.vali_splitdict[ii] = shuffled\n\n return None", "def split_data(self, verbose=False):\n # group sample by patient and body part\n tmp = self.data_info.groupby(['patientID', 'body_part']).max()\n # get the index (i.e. patient and bodypart) where none of the body part XR of a given patient are abnormal\n idx_list_normal = tmp[tmp.body_part_abnormal == 0].index.to_list()\n # get the index (i.e. patient and bodypart) where at least one but not all of the body part XR of a given patient are abnormal\n idx_list_mixt = tmp[tmp.body_part_abnormal == 0.5].index.to_list()\n # get the index (i.e. patient and bodypart) where all one of the body part XR of a given patient are abnormal\n idx_list_abnormal = tmp[tmp.body_part_abnormal == 1].index.to_list()\n total = len(idx_list_normal)+len(idx_list_mixt)+len(idx_list_abnormal)\n train_size = self.train_frac*total\n assert train_size < len(idx_list_normal), f'There are not enough normal sample for the given train_frac : {self.train_frac}. \\\n There are {len(idx_list_normal)} normal sample over {total} total samples.'\n valid_size = (1-self.train_frac)*0.5*total\n test_size = (1-self.train_frac)*0.5*total\n # randomly pick (1-ratio_known_abnormal)*train_frac*total from the normal index for the train set\n train_idx_normal, remain = train_test_split(idx_list_normal, \\\n train_size=int((1-self.ratio_known_abnormal)*train_size),\\\n random_state=self.random_state)\n # split the rest equally in the validation and test set\n valid_idx_normal, test_idx_normal = train_test_split(remain, test_size=0.5, random_state=self.random_state)\n # add ratio_known_abnormal*train_frac*total from the abnormal index\n if self.ratio_known_abnormal == 0.0:\n train_idx_abnormal, remain = [], idx_list_abnormal\n else:\n train_idx_abnormal, remain = train_test_split(idx_list_abnormal, \\\n train_size=int(self.ratio_known_abnormal*train_size),\\\n random_state=self.random_state)\n # split the rest equally in the validation and test set\n valid_idx_abnormal, test_idx_abnormal = train_test_split(remain, test_size=0.5, random_state=self.random_state)\n # split the mixt between test and validation and consider them as abnormal patients bodypart\n valid_idx_mixt, test_idx_mixt = train_test_split(idx_list_mixt, test_size=0.5, random_state=self.random_state)\n valid_idx_abnormal += valid_idx_mixt\n test_idx_abnormal += test_idx_mixt\n # get the known and unknown index for each sets\n # get a fraction of normal known\n if self.ratio_known_normal == 0.0:\n train_idx_known, train_idx_unknown = [], train_idx_normal\n valid_idx_known, valid_idx_unknown = [], valid_idx_normal\n test_idx_known, test_idx_unknown = [], test_idx_normal\n else:\n train_idx_known, train_idx_unknown = train_test_split(train_idx_normal, \\\n train_size=int(self.ratio_known_normal*train_size),\\\n random_state=self.random_state)\n valid_idx_known, valid_idx_unknown = train_test_split(valid_idx_normal, \\\n train_size=int(self.ratio_known_normal*valid_size),\\\n random_state=self.random_state)\n test_idx_known, test_idx_unknown = train_test_split(test_idx_normal, \\\n train_size=int(self.ratio_known_normal*test_size), \\\n random_state=self.random_state)\n # get the abnormal known\n # all abnormal in train are known\n train_idx_known += train_idx_abnormal\n if self.ratio_known_abnormal == 0.0:\n valid_idx_unknown += valid_idx_abnormal\n test_idx_unknown += test_idx_abnormal\n else:\n valid_idx_known_abnormal, 
valid_idx_unknown_abnormal = train_test_split(valid_idx_abnormal, \\\n train_size=int(self.ratio_known_abnormal*valid_size), \\\n random_state=self.random_state)\n valid_idx_known += valid_idx_known_abnormal\n valid_idx_unknown += valid_idx_unknown_abnormal\n test_idx_known_abnormal, test_idx_unknown_abnormal = train_test_split(test_idx_abnormal, \\\n train_size=int(self.ratio_known_abnormal*test_size),\\\n random_state=self.random_state)\n test_idx_known += test_idx_known_abnormal\n test_idx_unknown += test_idx_unknown_abnormal\n\n # get the subsample dataframe with semi-label\n train_df = self.generate_semisupervized_label(train_idx_known, train_idx_unknown)\n valid_df = self.generate_semisupervized_label(valid_idx_known, valid_idx_unknown)\n test_df = self.generate_semisupervized_label(test_idx_known, test_idx_unknown)\n # shuffle the dataframes\n self.subsets['train'] = train_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n self.subsets['valid'] = valid_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n self.subsets['test'] = test_df.sample(frac=1, random_state=self.random_state).reset_index(drop=True)\n # Print summary\n if verbose:\n self.print_stat()", "def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 
5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]", "def test_chunk_size(self):\n for chunk_size, expected_n_chunks in [(1, 100), (3, 34), (200, 1), (None, 1)]:\n with self.subTest(chunk_size=chunk_size):\n iterable_of_args, iterable_len, chunk_size_, n_splits = apply_numpy_chunking(\n self.test_data_numpy, chunk_size=chunk_size, n_splits=1\n )\n\n # Materialize generator and test contents. 
The chunks should be of size chunk_size (expect for the last\n # chunk which can be smaller)\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), expected_n_chunks)\n chunk_size = chunk_size or 100\n for chunk_idx, chunk in enumerate(iterable_of_args):\n self.assertIsInstance(chunk[0], np.ndarray)\n np.testing.assert_array_equal(chunk[0], self.test_data_numpy[chunk_idx * chunk_size:\n (chunk_idx + 1) * chunk_size])\n\n # Test other output\n self.assertEqual(iterable_len, expected_n_chunks)\n self.assertEqual(chunk_size_, 1)\n self.assertIsNone(n_splits)", "def doSummary(self):\n for name in self.stockList:\n tempVolume=0.\n for dateStr in self.listOfDates:\n rawTradeDataPath = FileNames.BinRTTradesDir + '/' + dateStr + '/' + name + '_trades.binRT'\n tradeReader = TAQTradesReader(rawTradeDataPath)\n tempVolume=tempVolume+np.nansum(tradeReader._s)/10000.0 # divide 10000 because otherwise the sum could exceed the range of int32\n self.dict[name]=tempVolume", "def run(self, dataset_size=4, n_jobs=-1, starting_block=0):\n data_files = sorted(self.input_directory.glob(\"**/*.txt\"))\n log.info(f\"Creating shape file based on {len(data_files)} samples.\")\n\n n_blocks = int(len(data_files) / dataset_size)\n data_file_blocks = split(data_files, n_blocks)\n dataset_blocks_ids = np.arange(len(data_file_blocks))\n\n if starting_block != 0:\n data_file_blocks = data_file_blocks[starting_block:]\n dataset_blocks_ids = dataset_blocks_ids[starting_block:]\n log.info(f\"Starting at a different block number: {starting_block}.\")\n n_blocks = int(len(data_file_blocks))\n\n log.info(f\"Going through {n_blocks} blocks in parallel.\")\n Parallel(n_jobs=n_jobs)(\n delayed(self.generate_single_block)(data_file_block, dataset_block_id)\n for (data_file_block, dataset_block_id) in tqdm(\n zip(data_file_blocks, dataset_blocks_ids)\n )\n )\n\n log.info(\"Combining the separate index files..\")\n index_floorplan = sorted(self.output_directory.glob(\"index_floorplans_*.csv\"))\n log.info(f\"Found {len(index_floorplan)} index block files.\")\n index_files = pd.concat([pd.read_csv(_file) for _file in index_floorplan])\n index_files = index_files.fillna(0)\n index_files.to_csv(self.output_directory / \"index_floorplans.csv\", index=False)", "def chunk_it(seq, n):\n\n avg = len(seq) / float(n)\n out = []\n last = 0.0\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n return out" ]
[ "0.57900053", "0.5749784", "0.55733985", "0.55712456", "0.55246603", "0.55168134", "0.5494334", "0.5402945", "0.5363317", "0.5348577", "0.532229", "0.52557427", "0.5247931", "0.5193257", "0.5171128", "0.51542133", "0.5153879", "0.51524353", "0.5149456", "0.51452446", "0.5134185", "0.5133972", "0.5121747", "0.51164794", "0.5114655", "0.505767", "0.5056234", "0.5053657", "0.50349367", "0.5033442" ]
0.5802561
0
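A short, self-contained usage sketch of the split_data document above (the sample array and nq=2 are illustrative; NumPy is assumed):

import numpy as np

def split_data(data, nq, flag=1):
    # Mirrors the document above: assign each value to one of nq bins,
    # using equal-width edges (flag=1) or percentile edges (flag=2).
    if flag == 1:
        quantiles = np.linspace(data.min(), data.max(), nq + 1)
    elif flag == 2:
        segs = np.linspace(0, 100, nq + 1)
        quantiles = np.percentile(data, segs)
    quantiles[0] = quantiles[0] - 1e-15
    quantiles[-1] = quantiles[-1] + 1e-15
    return np.digitize(data, quantiles) - 1

data = np.array([0.1, 0.4, 0.5, 0.9, 1.0])
print(split_data(data, nq=2))           # equal-width bins -> [0 0 0 1 1]
print(split_data(data, nq=2, flag=2))   # percentile bins  -> [0 0 1 1 1]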
Online max-min scale normalization using key basic statistics.
def update_maxminscale(stats_on_target, lastest_minmax):
    target_xss, target_xs, target_xct = stats_on_target
    xmn, xmx = lastest_minmax
    zss = (target_xss - 2 * xmn * target_xs + target_xct * xmn**2) / (xmx - xmn)**2
    zs = (target_xs - target_xct * xmn) / (xmx - xmn)
    zct = target_xct
    return zss, zs, zct
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1", "def scale(train, validate, test):\n train, validate, test = add_scaled_columns(\n train,\n validate,\n test,\n scaler=sklearn.preprocessing.MinMaxScaler(),\n columns_to_scale=['total_lines'],\n )\n return train, validate, test", "def minmax_normalize(X):\n # X -= X.min()\n # X /= X.max()\n # X -= 0.5\n X = (X-X.min()) / (X.max() - X.min())\n return X", "def own_MinMaxColumnScaler(df, columns):\n for col in columns:\n new_col_name = col + '_scld'\n col_min = df[col].min()\n col_max = df[col].max()\n df[new_col_name] = (df[col] - col_min) / (col_max - col_min)", "def min_max_scaler(X_train, X_validate, X_test):\n scaler = sklearn.preprocessing.MinMaxScaler().fit(X_train)\n X_train_scaled = pd.DataFrame(scaler.transform(X_train), index = X_train.index, columns = X_train.columns)\n X_validate_scaled = pd.DataFrame(scaler.transform(X_validate), index = X_validate.index, columns = X_validate.columns)\n X_test_scaled = pd.DataFrame(scaler.transform(X_test), index = X_test.index, columns = X_test.columns)\n \n return scaler, X_train_scaled, X_validate_scaled, X_test_scaled", "def normalize (a_data,a_column,b_method='MinMax') :\n if b_method == 'MinMax' :\n loc_scaler = __minmax()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])\n elif b_method == 'Standard' :\n loc_scaler = __standard()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])", "def mms_scale(values):\r\n mms = MinMaxScaler()\r\n return mms.fit_transform(values)", "def min_max_normalization(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the minimum and maximun values of the input numpy array along the axis \n Max = np.max(input_data, axis = 0)\n Min = np.min(input_data, axis = 0)\n\n # Min-max normalization \n normalized_input_data = (input_data - Min) / (Max - Min + sys.float_info.min)\n\n # Return normalized input data\n return normalized_input_data", "def feature_normalization(train, test):\n (N,p) = np.shape(train)\n mins = np.amin(train,axis=0)\n maxs = np.amax(train,axis=0) + mins\n train = (train + mins)/maxs\n test = (test + mins)/maxs\n return train, test", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n 
self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def test_scale_features_min_max_norm(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed with Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[1, 0.0106619], [0, 1], [0.969962, 0]])\n\n # perform min-max norm scaling on features and check answer\n cdata.scale_features('min-max norm')\n self.assertTrue(allclose(cdata.data, answer))", "def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):\n features = np.array(data.features, dtype=float)\n for i in process_cols_list:\n value = features[i]\n if value > max_value_list[i]:\n value = max_value_list[i]\n elif value < min_value_list[i]:\n value = min_value_list[i]\n\n features[i] = (value - min_value_list[i]) / scale_value_list[i]\n _data = copy.deepcopy(data)\n _data.features = features\n return _data", "def feature_normalization(train, test):\n mins_of_features = np.amin(train, axis=0)\n maxs_of_features = np.amax(train, axis=0)\n range_of_features = maxs_of_features-mins_of_features\n range_of_features[range_of_features==0] = 1\n \n train_normalized = (train - mins_of_features)/range_of_features\n test_normalized = (test - mins_of_features)/range_of_features\n \n return (train_normalized, test_normalized)", "def normalize_minmax(data):\n _min = np.float(np.min(data))\n _max = np.float(np.max(data))\n if (_max-_min)!=0:\n img = (data - _min) / (_max-_min)\n else:\n img = np.zeros_like(data) \n return img", "def normalization(x, x_min=-5.12, x_max=5.12):\n for i in range(len(x.vect)):\n x.vect[i] = x_min + x.vect[i]*(x_max-x_min)\n return x", "def feature_normalization(train, test):\n # TODO\n col_max = np.apply_along_axis(max, 0, train)\n col_min = np.apply_along_axis(min, 0, train)\n\n train_normalized = (train-col_min)/(col_max-col_min)\n test_normalized = (test-col_min)/(col_max-col_min)\n \n return train_normalized, test_normalized", "def scale_down_and_clean(self):\n self.load /= 10.0\n for pp in self.powerplants:\n pp[\"pmin\"] /= 10.0\n pp[\"pmax\"] /= 10.0\n if \"index\" in pp and \"vals\" in pp:\n pp[\"p\"] = pp[\"vals\"][pp[\"index\"]] / 10\n del (pp[\"vals\"])\n del (pp[\"index\"])\n else:\n pp[\"p\"] = 0", "def normalize(x):\n\n return (x - x.values.min()) / (x.values.max() - x.values.min())", "def normalize(self, factor):", "def min_max_normalization(x, min_x = None, max_x = None):\n if min_x is None:\n min_x = np.min(x, axis=0)\n if max_x is None:\n max_x = np.max(x, axis=0)\n return (x - (min_x)) / (max_x - min_x), min_x, max_x", "def _localNormalizeData(self,values,names,feat):\n self.muAndSigmaFeatures[feat] = (0.0,1.0)", "def normalize(data):\n min = np.min(data)\n if min:\n data = data + min\n return old_div(data,np.max(data))\n else: # if min is 0\n return old_div(data,np.max(data))", "def 
normalise_min_max(df):\n return (df - df.min()) / (df.max() - df.min())", "def normalise_min_max(df):\n return (df - df.min()) / (df.max() - df.min())", "def min_max_normalize_one_image(image):\n\n image = image.astype(np.float32)\n for i in range(len(image)):\n max_int = image[i].max()\n min_int = image[i].min()\n image[i] = (image[i] - min_int) / (max_int - min_int)\n\n return image", "def nudged_min_max_compute(min_broadcast, max_broadcast, num_bits, narrow_range):\n\n\n dtype = min_broadcast.dtype\n quant_min = 1 if narrow_range else 0\n quant_max = (2 ** num_bits) - 1\n\n # because of need compute each channel, so quant_min and quant_max need to broadcast.\n quant_min_float = topi.full(min_broadcast.shape, dtype, tvm.const(quant_min, dtype))\n quant_max_float = topi.full(min_broadcast.shape, dtype, tvm.const(quant_max, dtype))\n\n # caculate each channel max and min difference.\n max_sub_min = topi.subtract(max_broadcast, min_broadcast)\n quant_max_sub_quant_min = topi.subtract(quant_max_float, quant_min_float)\n # compute scale = (max_broadcast - min_broadcast) / (quant_max - quant_min)\n # and min_div_scale = min_broadcast / scale\n if product_is_mini():\n scale = mul(max_sub_min, reciprocal(quant_max_sub_quant_min), target=utils.CCE)\n min_div_scale = Mul(min_broadcast, reciprocal(scale), target=utils.CCE)\n else:\n scale = divide(max_sub_min, quant_max_sub_quant_min, target=utils.CCE)\n min_div_scale = divide(min_broadcast, scale, target=utils.CCE)\n\n # zero_point_from_min = quant_min_float - min_broadcast / scale\n zero_point_from_min = topi.subtract(quant_min_float, min_div_scale)\n # if zero_point_from_min < quant_min_float, bool_less_quant_min_float = 1 else 0\n bool_less_quant_min_float = less_compare_float32(zero_point_from_min, quant_min_float)\n # if quant_max_float < zero_point_from_min, bool_more_quant_max_float = 1 else 0\n bool_more_quant_max_float = less_compare_float32(quant_max_float, zero_point_from_min)\n\n # according to above bool param to select effective value\n less_quant_min_float = topi.multiply(quant_min_float, bool_less_quant_min_float)\n more_quant_max_float = topi.multiply(quant_max_float, bool_more_quant_max_float)\n\n # compute which num is not less than quant_min_float and not large than quant_max_float\n tensor_one = topi.full(min_broadcast.shape, dtype, dc.one_const(dtype))\n bool_not_less_quant_min_float = topi.subtract(tensor_one, bool_less_quant_min_float)\n bool_not_more_quant_max_float = topi.subtract(tensor_one, bool_more_quant_max_float)\n bool_between_min_max = topi.multiply(bool_not_less_quant_min_float, bool_not_more_quant_max_float)\n between_min_max_float = topi.multiply(zero_point_from_min, bool_between_min_max)\n # add 0.5 to num which min <= num <= max and then floor them.\n between_min_max_add_half_one = topi.add(between_min_max_float, dc.half_const(dtype))\n between_min_max_round = akg.lang.ascend.floor(between_min_max_add_half_one)\n if product_is_mini():\n between_min_max_round = topi.cast(between_min_max_round, \"float16\")\n\n between_min_max_round = topi.cast(between_min_max_round, \"float32\")\n\n # calculate the maximum and minimum values of the quantization\n nudged_zero_point_tmp = topi.add(less_quant_min_float, more_quant_max_float)\n nudged_zero_point = topi.add(nudged_zero_point_tmp, between_min_max_round)\n\n nudged_min_tmp = topi.subtract(quant_min_float, nudged_zero_point)\n nudged_max_tmp = topi.subtract(quant_max_float, nudged_zero_point)\n nudged_min = topi.multiply(nudged_min_tmp, scale)\n nudged_max = 
topi.multiply(nudged_max_tmp, scale)\n res = [nudged_min, nudged_max, scale]\n\n return res", "def normalize2(data):\n return old_div(data,np.max([np.max(data),-1.0*np.min(data)]))", "def normalize_matrix(matrix, min_val, max_val):\n return (max_val - min_val) * (matrix - np.min(matrix)) / (np.max(matrix) - np.min(matrix)) + min_val", "def set_physical_minmax(self, min, max):\n # This allows you to set the min and the max of the quantity that you want the MLP to measure. \n # Once you set this, you can pass in a physical number to get_mlp_value() and it will be mapped to an MLP value and returned\n pass\n \n # Maybe we should let the holder of the MLP determine these values and do the mapping? ", "def normalize_data(data, min=0, max=1):\r\n import numpy as np\r\n assert isinstance(data, np.ndarray)\r\n\r\n max_value = np.max(data)\r\n min_value = np.min(data)\r\n\r\n scaled = np.interp(data, [min_value, max_value], [min, max])\r\n # convert to float64\r\n scaled = scaled.astype(np.float64)\r\n\r\n return scaled" ]
[ "0.63708794", "0.62702864", "0.61845", "0.61675406", "0.6165542", "0.61498797", "0.61322546", "0.6095306", "0.60869277", "0.6003787", "0.6002606", "0.60010904", "0.5944249", "0.59147775", "0.5865857", "0.58601046", "0.58502215", "0.5845374", "0.58325875", "0.5817523", "0.5789236", "0.5787909", "0.5784515", "0.5784515", "0.576157", "0.57505745", "0.573425", "0.573341", "0.5721979", "0.5714376" ]
0.6510492
0
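A quick numeric check of the update_maxminscale document above (the sample values are illustrative): rescaling the running sum-of-squares, sum, and count should match computing the same statistics on the directly min-max-scaled values.

import numpy as np

def update_maxminscale(stats_on_target, latest_minmax):
    # Same algebra as the document above, applied to running
    # (sum of squares, sum, count) statistics.
    xss, xs, xct = stats_on_target
    xmn, xmx = latest_minmax
    zss = (xss - 2 * xmn * xs + xct * xmn**2) / (xmx - xmn)**2
    zs = (xs - xct * xmn) / (xmx - xmn)
    return zss, zs, xct

x = np.array([2.0, 3.0, 5.0, 7.0])
raw_stats = (np.sum(x**2), np.sum(x), len(x))
zss, zs, zct = update_maxminscale(raw_stats, (x.min(), x.max()))

z = (x - x.min()) / (x.max() - x.min())   # scale the raw values directly
print(np.isclose(zss, np.sum(z**2)), np.isclose(zs, np.sum(z)), zct == len(x))
# True True True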
Construct the flow graph by connecting this node to another node or a command. The predicate is a function that tells the flow executor whether the flow can enter the step automatically, without user intervention.
def connect(self,
            node_or_command: Union['FlowNode', str],
            predicate: Predicate = lambda _: False,
            hints: bool = True):
    node_to_connect_to = node_or_command if isinstance(node_or_command, FlowNode) else FlowNode(node_or_command, hints=hints)
    self.children.append((predicate, node_to_connect_to))
    return node_to_connect_to
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_new_flow(flow_root, requestor: Identifier, initial_command) \\\n -> Tuple[Optional[Flow], Optional[FlowNode]]:\n empty_context = {}\n flow = Flow(flow_root, requestor, empty_context)\n for possible_next_step in flow.next_steps():\n if possible_next_step.command == initial_command:\n # The predicate is good as we just executed manually the command.\n return flow, possible_next_step\n return None, None", "def build_graph_from_input(self, input_node):\n raise NotImplementedError", "def add_true_edge(source, sink):\n assert isinstance(source, Branch)\n source.add_outgoing_edge(sink, \"T\")\n source.true_edge = sink\n sink.add_incoming_edge(source, \"T\")", "def build_connection(self, src, tgt) -> NoReturn:\n # If src and tgt are the same node, src not in node_collection or\n # tgt not in node_collection,\n # then skip this edge.\n if src == tgt or src not in self._nodes_collection or tgt not in self._nodes_collection:\n if src.split(':')[0] not in self._nodes_collection:\n warnings.warn(f\"Graph construct a self-loop node {src}. Ignored.\")\n return\n\n if tgt not in self._nodes_collection[src.split(':')[0]].successor_nodes:\n self._nodes_collection[src.split(':')[0]].successor_nodes.append(tgt)\n if src not in self._nodes_collection[tgt].precursor_nodes:\n self._nodes_collection[tgt.split(':')[0]].precursor_nodes.append(src)", "def __init__(self, command: str = None, hints: bool = True):\n self.command = command\n self.children = [] # (predicate, node)\n self.hints = hints", "def _make_graph_action(graphtype):\n class X(ObtainGraphAction):\n def __init__(self, option_strings, dest, nargs=None, **kwargs):\n if nargs is not None:\n raise ValueError(\"nargs not allowed\")\n super(ObtainSimpleGraph, self).__init__(option_strings, dest,\n **kwargs)\n\n def __call__(self, parser, args, values, option_string=None):\n try:\n G = make_graph_from_spec(graphtype, values)\n setattr(args, self.dest, G)\n except ValueError as e:\n parser.error(str(e))\n except FileNotFoundError as e:\n parser.error(str(e))", "def check_and_process_graph(self, allow_empty=False):\n\n if self.is_empty() and allow_empty:\n self._start_steps = []\n return [], None, []\n\n def has_loop(step, previous):\n for next_step in step.after or []:\n if next_step in previous:\n return step.name\n downstream = has_loop(self[next_step], previous + [next_step])\n if downstream:\n return downstream\n return None\n\n start_steps = []\n for step in self._steps.values():\n step._next = None\n if step.after:\n loop_step = has_loop(step, [])\n if loop_step:\n raise GraphError(\n f\"Error, loop detected in step {loop_step}, graph must be acyclic (DAG)\"\n )\n else:\n start_steps.append(step.name)\n\n responders = []\n for step in self._steps.values():\n if hasattr(step, \"responder\") and step.responder:\n responders.append(step.name)\n if step.on_error and step.on_error in start_steps:\n start_steps.remove(step.on_error)\n if step.after:\n prev_step = step.after[0]\n self[prev_step].set_next(step.name)\n if self.on_error and self.on_error in start_steps:\n start_steps.remove(self.on_error)\n\n if (\n len(responders) > 1\n ): # should not have multiple steps which respond to request\n raise GraphError(\n f'there are more than one responder steps in the graph ({\",\".join(responders)})'\n )\n\n if self.from_step:\n if self.from_step not in self.steps:\n raise GraphError(\n f\"from_step ({self.from_step}) specified and not found in graph steps\"\n )\n start_steps = [self.from_step]\n\n self._start_steps = [self[name] for name in 
start_steps]\n\n def get_first_function_step(step, current_function):\n # find the first step which belongs to the function\n if (\n hasattr(step, \"function\")\n and step.function\n and step.function == current_function\n ):\n return step\n for item in step.next or []:\n next_step = self[item]\n returned_step = get_first_function_step(next_step, current_function)\n if returned_step:\n return returned_step\n\n current_function = get_current_function(self.context)\n if current_function and current_function != \"*\":\n new_start_steps = []\n for from_step in self._start_steps:\n step = get_first_function_step(from_step, current_function)\n if step:\n new_start_steps.append(step)\n if not new_start_steps:\n raise GraphError(\n f\"did not find steps pointing to current function ({current_function})\"\n )\n self._start_steps = new_start_steps\n\n if self.engine == \"sync\" and len(self._start_steps) > 1:\n raise GraphError(\n \"sync engine can only have one starting step (without .after)\"\n )\n\n default_final_step = None\n if self.final_step:\n if self.final_step not in self.steps:\n raise GraphError(\n f\"final_step ({self.final_step}) specified and not found in graph steps\"\n )\n default_final_step = self.final_step\n\n elif len(self._start_steps) == 1:\n # find the final step in case if a simple sequence of steps\n next_obj = self._start_steps[0]\n while next_obj:\n next = next_obj.next\n if not next:\n default_final_step = next_obj.name\n break\n next_obj = self[next[0]] if len(next) == 1 else None\n\n return self._start_steps, default_final_step, responders", "def _visit(self, node, pre_action=None, post_action=None):\n Q = Queue()\n self.parent[node] = None # before Q.put\n Q.put(node)\n if pre_action: # when Q.put\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if edge.target not in self.parent:\n self.parent[edge.target] = source # before Q.put\n self.dag.add_edge(edge)\n Q.put(edge.target)\n if pre_action: # when Q.put\n pre_action(edge.target)\n if post_action:\n post_action(source)", "def _visit(self, node, pre_action=None, post_action=None):\n self.color[node] = \"GREY\"\n self.distance[node] = 0\n self.parent[node] = None\n Q = Queue()\n Q.put(node) # node is GREY\n if pre_action: # when Q.put\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if self.color[edge.target] == \"WHITE\":\n self.color[edge.target] = \"GREY\"\n self.distance[edge.target] = self.distance[source] + 1\n self.parent[edge.target] = source\n self.dag.add_edge(edge)\n Q.put(edge.target) # target is GREY\n if pre_action: # when Q.put\n pre_action(edge.target)\n self.color[source] = \"BLACK\"\n if post_action: # source became BLACK\n post_action(source)", "def _step(self, action):\n\n reward = 0.0\n x, y = action\n\n if not Creator.add_edge(self.nxgraph, x+1, y+1):\n reward = 0.0\n # TODO: do we return here?\n raise NotImplementedError\n else:\n reward = 1.0\n new_state = EnvTools.get_state(self.nxgraph)\n EnvTools.calculate_reward(self.state, self.previous_state)\n raise NotImplementedError\n\n\n\n pass", "def _create_global_step(self, graph):\n return _create_global_step(graph)", "def _start_oef_node(self, network_node):", "def connect_to_node(self, vertex, where_to=OUTGOING):\n\n if not isinstance(vertex, Vertex):\n raise TypeError(\"Graph vertex can only connect to other Graph vertex\")\n\n if where_to == Vertex.OUTGOING:\n link = Edge(self, vertex)\n self.add_link(link, 
Vertex.OUTGOING)\n vertex.add_link(link, Vertex.INCOMING)\n\n elif where_to == Vertex.INCOMING:\n link = Edge(vertex, self)\n self.add_link(link, Vertex.INCOMING)\n vertex.add_link(link, Vertex.OUTGOING)", "def link_to(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph)\n else:\n target_node = criterion_or_node\n return self.send(target_node, 'accept_link',\n originating_node=self.id)", "def child(self, problem, action):\n result = problem.result(self.state, action)\n return Node(result, self, action,\n problem.step_cost(self.state, action, result))", "def connect(\n self, *, start: Node, verb: str, end: Node, data: dict = None\n ) -> Edge:", "def create_graph(self, global_step):\n if self._adapt is None:\n should_adapt = False\n else:\n should_adapt = self._adapt.should_compute(global_step)\n\n return self.should_compute(global_step) or should_adapt", "def _instantiate_graph(self, context=None):\n\n # Use to recursively traverse processes\n def build_dependency_sets_by_traversing_projections(sender_mech):\n\n # If sender is an ObjectiveMechanism being used for learning or control, or a LearningMechanism,\n # Assign as MONITORING and move on\n if ((isinstance(sender_mech, ObjectiveMechanism) and sender_mech.role) or\n isinstance(sender_mech, LearningMechanism)):\n sender_mech.systems[self] = MONITORING\n return\n\n # Delete any projections to mechanism from processes or mechanisms in processes not in current system\n for input_state in sender_mech.inputStates.values():\n for projection in input_state.receivesFromProjections:\n sender = projection.sender.owner\n system_processes = self.processes\n if isinstance(sender, Process):\n if not sender in system_processes:\n del projection\n elif not all(sender_process in system_processes for sender_process in sender.processes):\n del projection\n\n # If sender_mech has no projections left, raise exception\n if not any(any(projection for projection in input_state.receivesFromProjections)\n for input_state in sender_mech.inputStates.values()):\n raise SystemError(\"{} only receives projections from other processes or mechanisms not\"\n \" in the current system ({})\".format(sender_mech.name, self.name))\n\n # Assign as TERMINAL (or SINGLETON) if it:\n # - is not an Objective Mechanism used for Learning or Control and\n # - has no outgoing projections or\n # - only ones to ObjectiveMechanism(s) used for Learning or Control and\n # Note: SINGLETON is assigned if mechanism is already a TERMINAL; indicates that it is both\n # an ORIGIN AND A TERMINAL and thus must be the only mechanism in its process\n # It is not a ControlMechanism\n if (\n\n not (isinstance(sender_mech, ControlMechanism_Base) or\n # It is not an ObjectiveMechanism used for Learning or Control\n (isinstance(sender_mech, ObjectiveMechanism) and sender_mech.role in (LEARNING,CONTROL))) and\n # All of its projections\n all(\n all(\n # are to ControlMechanism(s)...\n isinstance(projection.receiver.owner, ControlMechanism_Base) or\n # or ObjectiveMechanism(s) used for Learning or Control\n (isinstance(projection.receiver.owner, ObjectiveMechanism) and\n projection.receiver.owner.role in (LEARNING, CONTROL))\n for projection in output_state.sendsToProjections)\n for output_state in sender_mech.outputStates.values())):\n try:\n if sender_mech.systems[self] is ORIGIN:\n sender_mech.systems[self] = SINGLETON\n else:\n sender_mech.systems[self] = TERMINAL\n except KeyError:\n sender_mech.systems[self] = TERMINAL\n return\n\n for outputState 
in sender_mech.outputStates.values():\n\n for projection in outputState.sendsToProjections:\n receiver = projection.receiver.owner\n receiver_tuple = self._allMechanisms._get_tuple_for_mech(receiver)\n\n # MODIFIED 2/8/17 NEW:\n # If receiver is not in system's list of mechanisms, must belong to a process that has\n # not been included in the system, so ignore it\n if not receiver_tuple:\n continue\n # MODIFIED 2/8/17 END\n\n try:\n self.graph[receiver_tuple].add(self._allMechanisms._get_tuple_for_mech(sender_mech))\n except KeyError:\n self.graph[receiver_tuple] = {self._allMechanisms._get_tuple_for_mech(sender_mech)}\n\n # Use toposort to test whether the added dependency produced a cycle (feedback loop)\n # Do not include dependency (or receiver on sender) in executionGraph for this projection\n # and end this branch of the traversal if the receiver has already been encountered,\n # but do mark for initialization\n # Notes:\n # * This is because it is a feedback connection, which introduces a cycle into the graph\n # that precludes use of toposort to determine order of execution;\n # however, the feedback projection will still be used during execution\n # so the sending mechanism should be designated as INITIALIZE_CYCLE\n # * Check for receiver mechanism and not its tuple,\n # since the same mechanism can appear in more than one tuple (e.g., with different phases)\n # and would introduce a cycle irrespective of the tuple in which it appears in the graph\n # FIX: MODIFY THIS TO (GO BACK TO) USING if receiver_tuple in self.executionGraph\n # FIX BUT CHECK THAT THEY ARE IN DIFFERENT PHASES\n if receiver in self.execution_graph_mechs:\n # Try assigning receiver as dependent of current mechanism and test toposort\n try:\n # If receiver_tuple already has dependencies in its set, add sender_mech to set\n if self.executionGraph[receiver_tuple]:\n self.executionGraph[receiver_tuple].\\\n add(self._allMechanisms._get_tuple_for_mech(sender_mech))\n # If receiver_tuple set is empty, assign sender_mech to set\n else:\n self.executionGraph[receiver_tuple] = \\\n {self._allMechanisms._get_tuple_for_mech(sender_mech)}\n # Use toposort to test whether the added dependency produced a cycle (feedback loop)\n list(toposort(self.executionGraph))\n # If making receiver dependent on sender produced a cycle (feedback loop), remove from graph\n except ValueError:\n self.executionGraph[receiver_tuple].\\\n remove(self._allMechanisms._get_tuple_for_mech(sender_mech))\n # Assign sender_mech INITIALIZE_CYCLE as system status if not ORIGIN or not yet assigned\n if not sender_mech.systems or not (sender_mech.systems[self] in {ORIGIN, SINGLETON}):\n sender_mech.systems[self] = INITIALIZE_CYCLE\n if not (receiver.systems[self] in {ORIGIN, SINGLETON}):\n receiver.systems[self] = CYCLE\n continue\n\n else:\n # Assign receiver as dependent on sender mechanism\n try:\n # FIX: THIS WILL ADD SENDER_MECH IF RECEIVER IS IN GRAPH BUT = set()\n # FIX: DOES THAT SCREW UP ORIGINS?\n self.executionGraph[receiver_tuple].\\\n add(self._allMechanisms._get_tuple_for_mech(sender_mech))\n except KeyError:\n self.executionGraph[receiver_tuple] = \\\n {self._allMechanisms._get_tuple_for_mech(sender_mech)}\n\n if not sender_mech.systems:\n sender_mech.systems[self] = INTERNAL\n\n # Traverse list of mechanisms in process recursively\n build_dependency_sets_by_traversing_projections(receiver)\n\n self.graph = OrderedDict()\n self.executionGraph = OrderedDict()\n\n\n # Sort for consistency of output\n sorted_processes = sorted(self.processes, 
key=lambda process : process.name)\n\n for process in sorted_processes:\n first_mech = process.firstMechanism\n\n # Treat as ORIGIN if ALL projections to the first mechanism in the process are from:\n # - the process itself (ProcessInputState)\n # - another mechanism in the in process (i.e., feedback projections from *within* the process)\n # - mechanisms from other process for which it is an origin\n # Notes:\n # * This precludes a mechanism that is an ORIGIN of a process from being an ORIGIN for the system\n # if it receives any projections from any other mechanisms in the system (including other processes)\n # other than ones in processes for which it is also their ORIGIN\n # * This does allow a mechanism to be the ORIGIN (but *only* the ORIGIN) for > 1 process in the system\n if all(\n all(\n # All projections must be from a process (i.e., ProcessInputState) to which it belongs\n # # MODIFIED 2/8/17 OLD:\n # # [THIS CHECKED FOR PROCESS IN SYSTEM'S LIST OF PROCESSES\n # # IT CRASHED IF first_mech WAS ASSIGNED TO ANY PROCESS THAT WAS NOT ALSO\n # # ASSIGNED TO THE SYSTEM TO WHICH THE first_mech BELONGS\n # projection.sender.owner in sorted_processes or\n # MODIFIED 2/8/17 NEW:\n # [THIS CHECKS THAT PROJECTION IS FROM A PROCESS IN first_mech's LIST OF PROCESSES]\n # PROBABLY ISN\"T NECESSARY, AS IT SHOULD BE COVERED BY INITIAL ASSIGNMENT OF PROJ]\n projection.sender.owner in first_mech.processes or\n # MODIFIED 2/8/17 END\n # or from mechanisms within its own process (e.g., [a, b, a])\n projection.sender.owner in list(process.mechanisms) or\n # or from mechanisms in other processes for which it is also an ORIGIN ([a,b,a], [a,c,a])\n all(ORIGIN in first_mech.processes[proc]\n for proc in projection.sender.owner.processes\n if isinstance(projection.sender.owner,Mechanism))\n # For all the projections to each inputState\n for projection in input_state.receivesFromProjections)\n # For all inputStates for the first_mech\n for input_state in first_mech.inputStates.values()):\n # Assign its set value as empty, marking it as a \"leaf\" in the graph\n mech_tuple = self._allMechanisms._get_tuple_for_mech(first_mech)\n self.graph[mech_tuple] = set()\n self.executionGraph[mech_tuple] = set()\n first_mech.systems[self] = ORIGIN\n\n build_dependency_sets_by_traversing_projections(first_mech)\n\n # MODIFIED 4/1/17 NEW:\n # HACK TO LABEL TERMINAL MECHS -- SHOULD HAVE BEEN HANDLED ABOVE\n # LABELS ANY MECH AS A TARGET THAT PROJECTION TO AN ObjectiveMechanism WITH LEARNING AS ITS role\n for mech in self.mechanisms:\n for output_state in mech.outputStates.values():\n for projection in output_state.sendsToProjections:\n receiver = projection.receiver.owner\n if isinstance(receiver, ObjectiveMechanism) and receiver.role == LEARNING:\n mech.systems[self] = TERMINAL\n break\n if mech.systems[self] == TERMINAL:\n break\n # MODIFIED 4/1/17 END\n\n # Print graph\n if self.verbosePref:\n warnings.warn(\"In the system graph for \\'{}\\':\".format(self.name))\n for receiver_mech_tuple, dep_set in self.executionGraph.items():\n mech = receiver_mech_tuple.mechanism\n if not dep_set:\n print(\"\\t\\'{}\\' is an {} mechanism\".\n format(mech.name, mech.systems[self]))\n else:\n status = mech.systems[self]\n if status is TERMINAL:\n status = 'a ' + status\n elif status in {INTERNAL, INITIALIZE_CYCLE}:\n status = 'an ' + status\n print(\"\\t\\'{}\\' is {} mechanism that receives projections from:\".format(mech.name, status))\n for sender_mech_tuple in dep_set:\n 
print(\"\\t\\t\\'{}\\'\".format(sender_mech_tuple.mechanism.name))\n\n # For each mechanism (represented by its tuple) in the graph, add entry to relevant list(s)\n # Note: ignore mechanisms belonging to controllerProcesses (e.g., instantiated by EVCMechanism)\n # as they are for internal use only;\n # this also ignored learning-related mechanisms (they are handled below)\n self._origin_mech_tuples = []\n self._terminal_mech_tuples = []\n self.recurrent_init_mech_tuples = []\n self._control_mech_tuple = []\n\n for mech_tuple in self.executionGraph:\n\n mech = mech_tuple.mechanism\n\n if mech.systems[self] in {ORIGIN, SINGLETON}:\n for process, status in mech.processes.items():\n if process._isControllerProcess:\n continue\n self._origin_mech_tuples.append(mech_tuple)\n break\n\n if mech_tuple.mechanism.systems[self] in {TERMINAL, SINGLETON}:\n for process, status in mech.processes.items():\n if process._isControllerProcess:\n continue\n self._terminal_mech_tuples.append(mech_tuple)\n break\n\n if mech_tuple.mechanism.systems[self] in {INITIALIZE_CYCLE}:\n for process, status in mech.processes.items():\n if process._isControllerProcess:\n continue\n self.recurrent_init_mech_tuples.append(mech_tuple)\n break\n\n if isinstance(mech_tuple.mechanism, ControlMechanism_Base):\n if not mech_tuple.mechanism in self._control_mech_tuple:\n self._control_mech_tuple.append(mech_tuple)\n\n self.originMechanisms = MechanismList(self, self._origin_mech_tuples)\n self.terminalMechanisms = MechanismList(self, self._terminal_mech_tuples)\n self.recurrentInitMechanisms = MechanismList(self, self.recurrent_init_mech_tuples)\n self.controlMechanism = MechanismList(self, self._control_mech_tuple)\n\n try:\n self.execution_sets = list(toposort(self.executionGraph))\n except ValueError as e:\n if 'Cyclic dependencies exist' in e.args[0]:\n # if self.verbosePref:\n # print('{} has feedback connections; be sure that the following items are properly initialized:'.\n # format(self.name))\n raise SystemError(\"PROGRAM ERROR: cycle (feedback loop) in {} not detected by _instantiate_graph \".\n format(self.name))\n\n # Create instance of sequential (execution) list:\n # MODIFIED 10/31/16 OLD:\n # self.executionList = toposort_flatten(self.executionGraph, sort=False)\n # MODIFIED 10/31/16 NEW:\n temp = toposort_flatten(self.executionGraph, sort=False)\n self.executionList = self._toposort_with_ordered_mech_tuples(self.executionGraph)\n # MODIFIED 10/31/16 END\n\n # MODIFIED 2/8/17 NEW:\n # Construct self.variable from inputs to ORIGIN mechanisms\n self.variable = []\n for mech in self.originMechanisms:\n orig_mech_input = []\n for input_state in mech.inputStates.values():\n orig_mech_input.extend(input_state.value)\n self.variable.append(orig_mech_input)\n self.variable = convert_to_np_array(self.variable, 2)\n # MODIFIED 2/8/17 END\n\n # Instantiate StimulusInputStates\n self._instantiate_stimulus_inputs()\n\n # Validate initial values\n # FIX: CHECK WHETHER ALL MECHANISMS DESIGNATED AS INITIALIZE HAVE AN INITIAL_VALUES ENTRY\n # FIX: ONLY CHECKS FIRST ITEM OF self._value_template (ASSUMES THAT IS ALL THAT WILL GET ASSIGNED)\n # FIX: ONLY CHECK ONES THAT RECEIVE PROJECTIONS\n for mech, value in self.initial_values.items():\n if not mech in self.execution_graph_mechs:\n raise SystemError(\"{} (entry in initial_values arg) is not a Mechanism in \\'{}\\'\".\n format(mech.name, self.name))\n mech._update_value\n if not iscompatible(value, mech._value_template[0]):\n raise SystemError(\"{} (in initial_values arg for \\'{}\\') 
is not a valid value for {}\".\n format(value, self.name, append_type_to_name(self)))", "def generateChild(problem, goal, node, action):\r\n # get the next state\r\n state = applyAction(node.state, action)\r\n # calculate hueristic cost\r\n estimateCost = evaluateCurrentPosition(problem, state)\r\n return Node(estimateCost, 0, state, node, action)", "def makeMove(self, movable_statement):\n ### Student code goes here\n # ToGO:\n # if not(self.isMovableLegal(movable_statement)):\n # pass\n currDisk = movable_statement.terms[0]\n prevPeg = movable_statement.terms[1]\n newPeg = movable_statement.terms[2]\n\n # On next peg\n newOnStatement = Statement()\n newOnStatement.predicate = 'on'\n newOnStatement.terms = [currDisk, newPeg]\n newOnFact = Fact(newOnStatement)\n self.kb.kb_assert(newOnFact) #1\n\n #Not on previous peg\n removeOnStatement = Statement()\n removeOnStatement.predicate = 'on'\n removeOnStatement.terms = [currDisk, prevPeg]\n removeOnFact = Fact(removeOnStatement)\n self.kb.kb_retract(removeOnFact) #2\n\n\n #If Prev Empty Logic\n ONStatement = Statement()\n ONTerm1 = Term('?x')\n ONTerm2 = Term(prevPeg)\n ONStatement.terms = (ONTerm1, ONTerm2)\n ONStatement.predicate = 'on'\n ONFact = Fact(ONStatement)\n if not(self.kb.kb_ask(ONFact)):\n prevEmptyStatement = Statement()\n prevEmptyStatement.terms = [prevPeg]\n prevEmptyStatement.predicate = 'empty'\n prevEmptyFact = Fact(prevEmptyStatement)\n self.kb.kb_assert(prevEmptyFact) #3\n else:\n # previous disk now on top\n # AND\n # Not above previous disk\n abovePrevStatement = Statement()\n aboveTerm = Term('?x')\n abovePrevStatement.terms = [currDisk, aboveTerm]\n abovePrevStatement.predicate = 'Above'\n for fact in self.kb.facts:\n if match(fact.statement, abovePrevStatement):\n prevDisk = fact.statement.terms[1]\n self.kb.kb_retract(fact) #7\n break\n prevonTopStatement = Statement()\n prevonTopStatement.predicate = 'onTop'\n prevonTopStatement.terms = [prevDisk, prevPeg]\n prevonTopFact = Fact(prevonTopStatement)\n self.kb.kb_assert(prevonTopFact) #8\n\n # Above next disk\n # If next not empty\n nextEmptyBool = False\n nextEmptyStatement = Statement()\n nextEmptyStatement.terms = [newPeg]\n nextEmptyStatement.predicate = 'empty'\n for fact in self.kb.facts:\n if match(fact.statement, nextEmptyStatement):\n nextEmptyBool = True\n self.kb.kb_retract(fact) #9\n break\n\n if nextEmptyBool == False:\n nextOnTopStatement = Statement()\n nextOnTopStatement.predicate = 'onTop'\n onTopTerm1 = Term('?x')\n nextOnTopStatement.terms = [onTopTerm1, newPeg]\n nextOnTopFact = Fact(nextOnTopStatement)\n for fact in self.kb.facts:\n if match(fact.statement, nextOnTopStatement):\n nextOnTop = fact.statement.terms[0]\n aboveNextStatement = Statement()\n aboveNextStatement.predicate = 'Above'\n aboveNextStatement.terms = [currDisk, nextOnTop]\n aboveNextFact = Fact(aboveNextStatement)\n self.kb.kb_assert(aboveNextFact) #6\n self.kb.kb_retract(nextOnTopFact)\n break\n\n\n\n #On top of new peg\n newonTopStatement = Statement()\n newonTopStatement.predicate = 'onTop'\n newonTopStatement.terms = [currDisk, newPeg]\n newonTopFact = Fact(newonTopStatement)\n self.kb.kb_assert(newonTopFact) #4\n\n #Not on top of previous peg\n removeonTopStatement = Statement()\n removeonTopStatement.predicate = 'onTop'\n removeonTopStatement.terms = [currDisk, prevPeg]\n removeonTopFact = Fact(removeonTopStatement)\n self.kb.kb_retract(removeonTopFact) #5\n\n\n\n\n\n #Destination not empty", "def add(self, node, arrow = None):\n## print(node)\n self.graph = 
addNode(self.graph, node, arrow)", "def create_graph(self, global_step):\n should_adapt = any(\n a.should_compute(global_step) for a in self._adapt if a is not None\n )\n\n return self.should_compute(global_step) or should_adapt", "def add_redirect(self, expr, node_host, node_port, openflow_host, openflow_port):\n pusher = self.StaticFlowEntryPusher(openflow_host, openflow_port)\n device = self.Device(openflow_host, openflow_port)\n try:\n (_, connected_dpid, node_mac, node_vlan) = device.get(node_host)\n except KeyError:\n raise\n request_hands_off = {\n \"switch\": connected_dpid,\n \"name\": \"request_hands_off-\" + node_host + \"-\" + node_port + \"-\" + expr,\n \"priority\": \"32767\",\n \"ether-type\": 0x0800,\n \"protocol\": 0x06,\n \"src-ip\": node_host,\n \"src-mac\": node_mac,\n \"dst-ip\": expr,\n \"dst-port\":\"80\",\n \"vlan-id\":node_vlan,\n \"active\":\"true\",\n \"actions\":\"output=normal\"\n }\n request_in = {\n \"switch\": connected_dpid,\n \"name\": \"request_in-\" + node_host + \"-\" + node_port + \"-\" + expr,\n \"priority\": \"32766\",\n \"ether-type\": 0x0800,\n \"protocol\": 0x06,\n \"dst-ip\": expr,\n \"dst-port\": \"80\",\n \"vlan-id\":node_vlan,\n \"active\": \"true\",\n \"actions\": \"set-dst-mac=\" + node_mac + \",set-dst-ip=\" + node_host +\n \",set-dst-port=\" + node_port +\",output=normal\"\n }\n request_out = {\n \"switch\": connected_dpid,\n \"name\": \"request_out-\" + node_host + \"-\" + node_port + \"-\" + expr,\n \"cookie\": \"0\",\n \"priority\": \"32766\",\n \"ether-type\": 0x0800,\n \"protocol\": 0x06,\n \"src-ip\": node_host,\n \"src-mac\": node_mac,\n \"src-port\": node_port,\n \"vlan-id\":node_vlan,\n \"active\": \"true\",\n \"actions\": \"set-src-port=80,set-src-ip=\" + expr + \",output=normal\"\n }\n pusher.remove({\"name\":\"request_hands_off-\" + node_host + \"-\" + node_port + \"-\" + expr})\n pusher.remove({\"name\":\"request_out-\" + node_host + \"-\" + node_port + \"-\" + expr})\n pusher.remove({\"name\":\"request_in-\" + node_host + \"-\" + node_port + \"-\" + expr})\n pusher.set(request_hands_off)\n pusher.set(request_out)\n pusher.set(request_in)", "def enterPredicates(self, **kwargs):\n try:\n precond = kwargs['precondition']\n except:\n precond = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n self.bl.isPrecondition(precond)\n \n members = self.bl.getCurrentPredicate().keys() \n entries={}\n\n for member in members:\n entries[member] = getattr(self.bl, 'predicateSelection') # save predicate names in entries\n entries['view selection'] = [getattr(self.bl, 'displayText'), str(self.bl.getAllPredicates())]\n entries['reset selection'] = getattr(self.bl, 'predicateReset')\n title = \"Select %s for the new operator\" % ('preconditions' if precond else 'effects')\n self.mm.addGenericMenu(\"predicate\", self.mm.cur_page, title, entries)\n self.mm.loadMenu(\"predicate\")", "def __follow_node(node, tree_graph, seed_space, seed):\n\n def node_has_filter(x):\n \"\"\"\n Check if a node is a pattern node and has an object filter\n \"\"\"\n p_node = list(self.__plan_graph.objects(subject=x, predicate=AGORA.byPattern))\n try:\n p_node = p_node.pop()\n return 'filter_object' in self.__patterns[p_node] or 'filter_subject' in self.__patterns[p_node]\n except IndexError:\n return False\n\n try:\n # Get the sorted list of current node's successors\n nxt = sorted(list(self.__plan_graph.objects(node, AGORA.next)),\n key=lambda x: node_has_filter(x), reverse=True)\n\n # Per each successor...\n for n in nxt:\n if seed_space in 
self.__node_spaces[n]:\n node_patterns = self.__node_patterns.get(n, [])\n\n # In case the node is not a leaf, 'onProperty' tells which is the next link to follow\n try:\n link = list(self.__plan_graph.objects(subject=n, predicate=AGORA.onProperty)).pop()\n except IndexError:\n link = None\n\n filter_next_seeds = set([])\n next_seeds = set([])\n # If the current node is a pattern node, it must search for triples to yield\n for pattern in node_patterns:\n pattern_space = self.__patterns[pattern].get('space', None)\n if pattern_space != seed_space or seed in self.__subjects_to_ignore[pattern_space]:\n continue\n\n subject_filter = self.__patterns[pattern].get('filter_subject', None)\n if subject_filter is not None and seed != subject_filter:\n self.__subjects_to_ignore[pattern_space].add(seed)\n continue\n\n pattern_link = self.__patterns[pattern].get('property', None)\n\n # If pattern is of type '?s prop O'...\n if pattern_link is not None:\n if (seed, pattern_link) not in self.__fragment:\n obj_filter = self.__patterns[pattern].get('filter_object', None)\n if on_plink is not None:\n on_plink(pattern_link, [seed], pattern_space)\n\n seed_was_filtered = True\n try:\n for seed_object in list(\n __process_pattern_link_seed(seed, tree_graph, pattern_link)):\n __check_stop()\n quad = (pattern, seed, pattern_link, seed_object)\n if obj_filter is None or u''.join(seed_object).encode(\n 'utf-8') == u''.join(obj_filter.toPython()).encode('utf-8'):\n self.__fragment.add((seed, pattern_link))\n __put_triple_in_queue(quad)\n seed_was_filtered = False\n if isinstance(obj_filter, URIRef):\n filter_next_seeds.add(obj_filter)\n if obj_filter is not None and seed_was_filtered:\n self.__subjects_to_ignore[pattern_space].add(seed)\n except AttributeError as e:\n log.warning('Trying to find {} objects of {}: {}'.format(link, seed, e.message))\n\n # If pattern is of type '?s a Concept'...\n obj_type = self.__patterns[pattern].get('type', None)\n if obj_type is not None:\n check_type = self.__patterns[pattern].get('check', False)\n if on_type is not None:\n on_type(obj_type, [seed], pattern_space)\n\n __dereference_uri(tree_graph, seed)\n try:\n seed_objects = list(tree_graph.objects(subject=seed, predicate=link))\n for seed_object in seed_objects:\n type_triple = (pattern, seed_object, RDF.type, obj_type)\n # In some cases, it is necessary to verify the type of the seed\n if (seed_object, obj_type) not in self.__fragment:\n if check_type:\n __dereference_uri(tree_graph, seed_object)\n types = list(\n tree_graph.objects(subject=seed_object, predicate=RDF.type))\n if obj_type in types:\n self.__fragment.add((seed_object, obj_type))\n __put_triple_in_queue(type_triple)\n else:\n self.__subjects_to_ignore[pattern_space].add(seed_object)\n else:\n self.__fragment.add((seed_object, obj_type))\n __put_triple_in_queue(type_triple)\n except AttributeError as e:\n log.warning('Trying to find {} objects of {}: {}'.format(link, seed, e.message))\n\n # If the current node is not a leaf... 
go on finding seeds for the successors\n if link is not None and seed not in self.__subjects_to_ignore[seed_space]:\n if on_link is not None:\n on_link(link, [seed], seed_space)\n __process_link_seed(seed, tree_graph, link, next_seeds)\n\n if filter_next_seeds:\n next_seeds = set.intersection(next_seeds, filter_next_seeds)\n\n chs = list(chunks(list(next_seeds), min(len(next_seeds), max(1, workers / 2))))\n next_seeds.clear()\n try:\n while True:\n __check_stop()\n chunk = chs.pop()\n threads = []\n for s in chunk:\n try:\n workers_queue.put_nowait(s)\n future = pool.submit(__follow_node, n, tree_graph, seed_space, s)\n threads.append(future)\n except Queue.Full:\n # If all threads are busy...I'll do it myself\n __follow_node(n, tree_graph, seed_space, s)\n except Queue.Empty:\n pass\n\n wait(threads)\n [(workers_queue.get_nowait(), workers_queue.task_done()) for _ in threads]\n except (IndexError, KeyError):\n pass\n except Queue.Full:\n stop_event.set()\n except Exception as e:\n traceback.print_exc()\n log.error(e.message)\n return", "def goalTest(node, goal):\r\n if node.state == goal:\r\n return node", "def connect(self, *, start: Node, verb: str, end: Node, data: dict = None):", "def install_node_instance_subgraph(ctx,instance, graph, hist=None):\n subgraph = graph.subgraph('install_{0}'.format(instance.id))\n\n ct=None\n if hist:\n #get completed tasks for instance\n ct=_completed_tasks(ctx,hist,instance.id)\n\n sequence = subgraph.sequence()\n\n #CREATE\n run=True\n if(hist and 'create' in ct):\n run=False\n\n ctx.logger.info(\"run={} CREATE {}\".format(str(run),instance.id))\n if(run):\n ctx.logger.info(\" hist={} ct={}\".format(str(hist),str(ct)))\n\n if(run):\n sequence.add(\n instance.set_state('initializing'),\n forkjoin(instance.send_event('Creating node'),\n instance.set_state('creating')),\n _add_es_log(ctx,instance,'create',instance.execute_operation('cloudify.interfaces.lifecycle.create')),\n instance.set_state('created'),\n forkjoin(*_relationships_operations(\n instance,\n 'cloudify.interfaces.relationship_lifecycle.preconfigure'\n )))\n\n #CONFIGURE\n run=True\n if(hist and 'configure' in ct):\n run=False\n\n ctx.logger.info(\"run={} CONFIGURE {}\".format(str(run),instance.id))\n\n if(run):\n sequence.add(\n forkjoin(instance.set_state('configuring'),\n instance.send_event('Configuring node')),\n _add_es_log(ctx,instance,'configure',instance.execute_operation('cloudify.interfaces.lifecycle.configure')),\n instance.set_state('configured'),\n forkjoin(*_relationships_operations(\n instance,\n 'cloudify.interfaces.relationship_lifecycle.postconfigure'\n )))\n\n # STARTING\n run=True\n if(hist and 'start' in ct):\n run=False\n\n ctx.logger.info(\"run={} START {}\".format(str(run),instance.id))\n\n if(run):\n sequence.add(\n forkjoin(instance.set_state('starting'),\n instance.send_event('Starting node')),\n instance.execute_operation('cloudify.interfaces.lifecycle.start'))\n\n # If this is a host node, we need to add specific host start\n # tasks such as waiting for it to start and installing the agent\n # worker (if necessary)\n if run and is_host_node(instance):\n sequence.add(*_host_post_start(instance))\n\n sequence.add(\n forkjoin(\n _add_es_log(ctx,instance,'start',instance.execute_operation('cloudify.interfaces.monitoring.start')),\n *_relationships_operations(\n instance,\n 'cloudify.interfaces.relationship_lifecycle.establish'\n )),\n instance.set_state('started'))\n\n subgraph.on_failure = get_install_subgraph_on_failure_handler(ctx,instance)\n return subgraph", 
"def parse_dataflow(self, dataflow: Dict) -> Tuple[bool, Union[Dict, str]]:\n\n try:\n interface_to_id = {}\n graph = dataflow['graph']\n for dataflow_node in graph['nodes']:\n kenning_node = self.nodes[dataflow_node['type']]\n parameters = dataflow_node['properties']\n parameters = {\n parameter['name']: parameter['value']\n for parameter in parameters\n }\n node_id = self.dataflow_graph.create_node(\n kenning_node,\n parameters\n )\n\n for interface in dataflow_node['interfaces']:\n interface_to_id[interface['id']] = node_id\n\n for conn in graph['connections']:\n self.dataflow_graph.create_connection(\n interface_to_id[conn['from']],\n interface_to_id[conn['to']]\n )\n\n return True, self.dataflow_graph.flush_graph()\n except RuntimeError as e:\n self.dataflow_graph.start_new_graph()\n return False, str(e)", "def forward(self, action, new_state):\n if self._hash_action(action) in self.root.children.keys():\n rnd_node = self.root.children[self._hash_action(action)]\n if len(rnd_node.children) > 1:\n self.root = DecisionNode(state=new_state, is_root=True)\n else:\n next_decision_node = np.random.choice(list(rnd_node.children.values()))\n if np.linalg.norm(next_decision_node.state-new_state) > 1e-3:\n raise RuntimeWarning(\"The env is probably stochastic\")\n else:\n next_decision_node.father = None\n self.root.children.pop(self._hash_action(action))\n self.root = next_decision_node\n self.root.is_root = True\n else:\n raise RuntimeWarning(\"Action taken: {} is not in the children of the root node.\".format(action))" ]
[ "0.6413768", "0.5331428", "0.52776724", "0.5124311", "0.51223505", "0.5108684", "0.50600624", "0.5043233", "0.5042275", "0.4964955", "0.49122655", "0.48988894", "0.48981482", "0.48565575", "0.484811", "0.4824072", "0.48154116", "0.4813976", "0.4800156", "0.4783596", "0.4780878", "0.4771647", "0.4756565", "0.47533873", "0.473991", "0.47392625", "0.47367057", "0.47112232", "0.47050697", "0.470352" ]
0.65801656
0
gets the predicate function for the specified child node.
def predicate_for_node(self, node: 'FlowNode'): for predicate, possible_node in self.children: if node == possible_node: return predicate return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_predicate(self):\n return self._predicate", "def predicate(f):\n wrapper = Predicate(f)\n update_wrapper(wrapper, f)\n return wrapper", "def predicate (self) :\n\n return self.__predicate__", "def to_predicate(self, node, f='predicate', map=None):\n\n if 'AK{}' == node.name:\n assert 1 == len(node.children), \"Error: AK{} had more than one child\"\n pred = self.to_predicate(node.children[0], f, map)\n pred.always_known = True\n return pred\n\n args = PDDL_Utils.read_type(node)\n\n # change the type if there is only 1 type\n if len (self.types) == 1:\n t_args = args\n t = list (self.types) [0]\n args = []\n for arg in t_args:\n if arg[1] != t:\n args.append ( (arg[0], t) )\n else:\n args.append (arg)\n\n # here is where the map comes in...\n if map is None:\n if 'predicate' == f:\n return Predicate(node.name, args)\n elif 'fluent' == f:\n return Predicate(node.name, args=None, ground_args=args)\n else:\n new_args = []\n for v, t in args:\n if v in map:\n new_args.append((v, map[v]))\n else:\n new_args.append((v, t))\n\n if 'predicate' == f:\n return Predicate(node.name, new_args)\n elif 'fluent' == f:\n return Predicate(node.name, args=None, ground_args=new_args)", "def find(self, predicate):\n return [d for d in self.iter_tree if predicate(d)]", "def iter_child_nodes(predicate, cursor):\n return (c for c in cursor.get_children() if predicate(c))", "def get_child_by(self, selector):\r\n for child in self.get_children():\r\n if selector(child):\r\n return child\r\n return None", "def __parse_predicate(self, predicate):\n try:\n position = int(predicate)\n if self.axis == AXIS_DESCENDANT:\n return PredicateFilter('position', value=position)\n else:\n # use the search limit feature instead of a checker\n self.soup_args['limit'] = position\n self.index = position - 1\n return None\n except ValueError:\n pass\n\n if predicate == \"last()\":\n self.index = -1\n return None\n\n negate = self._re_predicate_not.match(predicate)\n if negate:\n predicate = negate.group(1)\n\n function_match = self._re_predicate_function.match(predicate)\n if function_match:\n name = function_match.group(1)\n arguments = function_match.group(2)\n value = function_match.group(4)\n if value is not None:\n value = function_match.group(5)\n return PredicateFilter(name, arguments, value)\n\n axis_match = self._re_predicate_axis.match(predicate)\n if axis_match:\n axis = axis_match.group(1)\n if axis is None:\n axis = AXIS_CHILD\n elif axis == '@':\n axis = AXIS_ATTRIBUTE\n if axis == AXIS_ATTRIBUTE:\n # use the attribute search feature instead of a checker\n attribute_name = axis_match.group(3)\n if axis_match.group(5) is not None:\n attribute_value = axis_match.group(6)\n elif not negate:\n attribute_value = True\n else:\n attribute_value = None\n if not self.soup_args.has_key('attrs'):\n self.soup_args['attrs'] = {}\n self.soup_args['attrs'][attribute_name] = attribute_value\n return None\n elif axis == AXIS_CHILD:\n node_test = axis_match.group(3)\n node_value = axis_match.group(6)\n return PredicateFilter('axis', node_test, value=node_value,\n negate=negate)\n\n raise NotImplementedError(\"This predicate is not implemented\")", "def visit_bound_predicate(self, predicate) -> T:", "def __call__ (self, item, * args, ** kw) :\n return self.predicate (item, * args, ** kw)", "def _find(xs, predicate):\n for x in xs:\n if predicate(x):\n return x\n return None", "def fm_get_child(self, idx):\n return self._relation_lst[self.CHILD][idx]", "def filter(self, predicate):\n def _filter(iterator):\n while True:\n item = 
next(iterator)\n if predicate(item):\n return item\n return self.__class__(self, _filter)", "def predicate (self, qx) :\n hg = self.head_getter\n tg = self.tail_getter\n tn = self.type_name\n result = qx (hg) [tn]\n if tg is not None :\n result = tg (result)\n return result", "def __call__(self, parent):\n if self._predicate is not None and not self._predicate(parent):\n raise KeyError(parent)\n return self.node_cls(self._path, parent)", "def predicate (self, qx) :\n lhs = qx (self.lhs)\n op = self.op\n name = op.__name__\n op = _Op_Map_.get (name, op)\n return lhs._op_call (name, op, * self.args, ** self.kw)", "def get_child(self, test, expression):\n\n if test == 'equals':\n return self.children_dict.get(expression, None)\n else:\n try:\n return next(self.get_children(test, expression))\n except StopIteration:\n return None", "def predicate (self, X, * args, ** kw) :\n self.lhs = self.lhs.predicate (X, * args, ** kw)\n return self", "def predicate (self, X, * args, ** kw) :\n XR = X.REF (X.ETW, _polymorphic_x = self._polymorphic_x)\n self.predicates = list \\\n (p.predicate (XR, * args, ** kw) for p in self.predicates)\n return self", "def change_predicate(self, new_predicate):\n raise NotImplementedError", "def key_predicate(name, get_key=None, fallback=None, default=None):\n return Predicate(name, KeyIndex, get_key, fallback, default)", "def searchTreeF(node, d):\n if isinstance(node, DecisionTree):\n if node.i == 999: return node.mostCommon()\n if d[node.i] < node.v:\n return searchTreeF(node.lt, d)\n else:\n return searchTreeF(node.gt, d)\n else:\n return node", "def filter(self, predicate):\n self.children = [c for c in self.children if predicate(c)]\n for c in self.children:\n c.filter(predicate)", "def get_child(node, name):\r\n for child in node.childNodes:\r\n if child.localName == name:\r\n return child", "def get_child(self, value):\n for node in self.children:\n if node.value == value:\n return node\n\n return None", "def function(self):\n ret = libxml2mod.xmlXPathGetFunction(self._o)\n return ret", "def get_score_function(self, idattr):\n return self.get_node(\n '//ScoreFunctions/ScoreFunction[@id=\"%s\"]' % idattr)", "def _addPredicate(self, pid, chunks):\n parent = chunks[pid]\n sub = None\n obj = None\n aux = list()\n auxlabel = \"\"\n # 1st round find absolute subject & object\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # Process by categories.\n if child.func in SubDict:\n sub = child\n elif child.func in ObjDict:\n obj = child\n\n # 2nd round find potential subject & object with aux.\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # Process by categories.\n if child.func in SubDict or child.func in ObjDict:\n continue\n elif child.func in ObjPostDict:\n if not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n elif child.func in SubPassiveObjDict:\n if parent.passive == 1:\n if not obj and child.type in EntityTypeDict:\n obj = child\n elif not sub and child.type in EntityTypeDict:\n sub = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n if not sub and child.type in EntityTypeDict:\n sub = child\n elif not obj and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n elif child.func in ObjPassiveSubDict:\n if parent.passive == 1:\n if not sub and child.type in EntityTypeDict:\n sub = child\n elif not obj 
and child.type in EntityTypeDict:\n obj = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n if not obj and child.type in EntityTypeDict:\n obj = child\n elif not sub and child.type in EntityTypeDict:\n sub = child\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n else:\n aux.append(child.id)\n auxlabel += \"[{0}]\\n\".format(child.surface)\n\n if parent.passive == 0:\n # Add parent and subject.\n # if sub and obj:\n # parent.main = \"<{0}>[{2}]{1}\".format(sub.main, parent.main, obj.main)\n # elif sub:\n # parent.main = \"<{0}>[NONE]{1}\".format(sub.main, parent.main)\n # elif obj:\n # parent.main = \"<NONE>[{1}]{0}\".format(parent.main, obj.main)\n if sub:\n parent.main = \"<{0}>{1}\".format(sub.main, parent.main)\n self._addNode(parent, sub=sub.main)\n if not self.G.has_node(sub.main):\n self._addNode(sub)\n self._addEdge(sub.main, parent.main, label=\"主体\\n\", etype=\"sub\")\n else:\n self._addNode(parent)\n # Add object.\n if obj:\n if not self.G.has_node(obj.main):\n self._addNode(obj)\n self._addEdge(parent.main, obj.main, label=\"客体\\n\" + auxlabel, etype=\"obj\")\n else:\n # Add obj as sub\n # if sub and obj:\n # parent.main = \"<{0}>[{2}]{1}\".format(sub.main, parent.main, obj.main)\n # elif obj:\n # parent.main = \"<{0}>[NONE]{1}\".format(obj.main, parent.main)\n # elif sub:\n # parent.main = \"<NONE>[{1}]{0}\".format(parent.main, sub.main)\n if obj:\n parent.main = \"<{0}>{1}\".format(obj.main, parent.main)\n self._addNode(parent, sub=obj.main)\n if not self.G.has_node(obj.main):\n self._addNode(obj)\n self._addEdge(obj.main, parent.main, label=\"主体\\n\", etype=\"sub\")\n else:\n self._addNode(parent)\n # Add sub as obj\n if sub:\n if not self.G.has_node(sub.main):\n self._addNode(sub)\n self._addEdge(parent.main, sub.main, label=\"客体\\n\", etype=\"obj\")\n # # Add obj as aux.\n # if obj:\n # aux.append(obj.id)\n # auxlabel += \"[{0}]\\n\".format(obj.surface)\n self._processAux(aux, parent.main, chunks)", "def filter(\n self, predicate: Union[Callable, Iterable], columns: Optional[List[str]] = None\n ):\n if columns is None:\n return super().filter(predicate)\n\n self._check_columns(columns)\n\n if not isinstance(predicate, Iterable) and not callable(predicate):\n raise TypeError(\n \"predicate must be a unary boolean predicate or iterable of booleans\"\n )\n\n res = Scope._EmptyColumn(self._dtype)\n cols = []\n for n in columns:\n idx = self._data.type().get_child_idx(n)\n cols.append(\n ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[idx].dtype,\n self._data.child_at(idx),\n True,\n )\n )\n if callable(predicate):\n for i in range(len(self)):\n if predicate(*[col[i] for col in cols]):\n res._append(self[i])\n elif isinstance(predicate, Iterable):\n for x, p in zip(self, predicate):\n if p:\n res._append(x)\n else:\n pass\n return res._finalize()", "def check_predicate(self, predicate, x):\n if isinstance(predicate, dtype.TypeMeta):\n return issubclass(x, predicate)\n elif isinstance(predicate, type):\n return isinstance(x, predicate)\n elif callable(predicate):\n return predicate(self, x)\n else:\n raise ValueError(predicate) # pragma: no cover" ]
[ "0.60676956", "0.59117705", "0.5859783", "0.5716894", "0.5592004", "0.54098916", "0.5391563", "0.5362847", "0.52606004", "0.52374494", "0.51913834", "0.50643146", "0.5036615", "0.49944842", "0.4963765", "0.49336788", "0.4933024", "0.49324915", "0.49315336", "0.49314764", "0.4917601", "0.4881666", "0.48753065", "0.48216304", "0.48107475", "0.48077008", "0.47888118", "0.47604096", "0.4687045", "0.4669066" ]
0.69061804
0
Get all the possible next steps after this one (predicates satisfied or not).
def next_steps(self) -> List[FlowNode]: return [node for predicate, node in self._current_step.children]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSteps():", "def get_next_steps(self, steps):\n step_list = []\n\n steps_remaining = set(steps.keys())\n counter = 0\n max_counter = 10000\n next_steps = set()\n\n for step in steps_remaining:\n dependencies = steps[step]\n if len(dependencies) == 0:\n next_steps.add(step)\n\n # this is the list of things that can be take for work now\n return sorted(next_steps)", "def greedy(self):\n n_step_t = self.filter['n_step_t']\n n_traj = self.filter['n_traj']\n traj = self.filter['traj']\n steps = [0 for i in xrange(n_step_t)]\n for i in xrange(n_traj):\n n_step = traj[i]['n_step']\n for j in xrange(n_step):\n steps[j] += 1\n self.filter['steps'] = steps\n \n return", "def next_step(self):\n self.proceed()\n self.execute_current()", "def step(\n self,\n actions,\n ) -> Tuple[\"next_state\", \"reward\", \"done\", \"env_info\"]:\n env_info = self.env.step(actions)[self.brain_name]\n next_states = env_info.vector_observations\n rewards = env_info.rewards\n dones = env_info.local_done\n return (next_states, rewards, dones, env_info)", "def iterate_steps(steps):\n pop = None\n while steps:\n for step, depends in steps.items():\n if depends == []:\n pop = step\n if not pop:\n return\n pop_step(pop, steps)\n yield pop", "def _compute_next_all_q_values(self, next_time_steps, info):\n network_observation = next_time_steps.observation\n\n if self._observation_and_action_constraint_splitter is not None:\n network_observation, _ = self._observation_and_action_constraint_splitter(\n network_observation)\n\n next_target_q_values, _ = self._target_q_network(\n network_observation, step_type=next_time_steps.step_type)\n #batch_size = (\n # next_target_q_values.shape[0] or tf.shape(next_target_q_values)[0])\n #dummy_state = self._target_greedy_policy.get_initial_state(batch_size)\n # Find the greedy actions using our target greedy policy. This ensures that\n # action constraints are respected and helps centralize the greedy logic.\n #greedy_actions = self._target_greedy_policy.action(\n # next_time_steps, dummy_state).action\n\n return next_target_q_values", "def proceed(self):\n if self.current_step is None or self.step_position == StepPosition.Before:\n return\n\n for condition, transition in self.current_step.conditions:\n if condition.satisfied():\n new_proc = transition.procedure\n self.current_procedure_id = new_proc\n self.current_step = self._suite[new_proc].steps[transition.step]\n self.step_position = StepPosition.Before\n break", "def step_forward(self):", "def next_autosteps(self) -> List[FlowNode]:\n return [node for predicate, node in self._current_step.children if predicate(self.ctx)]", "def get_pipeline_steps(config):\n steps = []\n partial_execution = config.getboolean('General','partial_execution')\n if partial_execution:\n start_step = config.getint('General','start_step')\n end_step = config.getint('General','end_step')\n else:\n start_step = 1\n end_step = 6\n # Run batching and relation extraction steps?\n batching = True if start_step == 1 else False\n rel_extraction = True if end_step == 6 else False\n #nel = True if start_step<=4 and end_step>4 else False\n # Parallel pipeline steps, removed parsing.UnstParser(configmap) from list, \n parallel_step_list = [pre.Preprocessor(configmap), ner.Ner(configmap),nel.Nel(configmap)]\n parallel_steps = parallel_step_list[max(0,start_step-2):end_step-1]\n return parallel_steps, batching, rel_extraction", "def step(self):\n #1. Time progresses\n self.time_operator.step()\n \n #2. 
Form and dissolve relationships\"\n self.relationship_operator.step()\n\n #3. HIV transmission\n self.infection_operator.step()", "def generate_next_states_and_actions(self, custom_move_ordering = False) :\n actions = self.get_all_actions(custom_move_ordering = custom_move_ordering)\n next_states = [self.generate_next_state(a) for a in actions]\n return list(zip(next_states, actions))", "def _get_next_point(self):\n #Get the index of the current step in each dimension\n nparams = len(self.transform.get_params())\n indices = [0]*nparams\n #Get the number of steps in each dimension\n lengths = [len(self.steps[i]) for i in range(nparams)]\n\n end = False\n while not end:\n yield [self.steps[i][indices[i]] for i in range(nparams)]\n\n #Increment the index of the last paramenter and then check whether it goes over the end\n indices[-1] += 1\n for p in reversed(range(nparams)):\n if indices[p] == lengths[p]:\n indices[p] = 0\n if p > 0:\n indices[p-1] += 1\n else:\n end = True", "def _step(self, action):\n transitions = self.query_model(self.s, action)\n prob, next_s, r, is_terminal = transitions[categorical_sample(\n (t[0] for t in transitions), self.rng)]\n\n next_s = np.asarray(next_s)\n for i in range(len(self.s) - 1):\n if next_s[i+1] < self.observation_space.high[i+1]:\n p = self.p_add[i]\n if(categorical_sample([p, 1-p], self.rng) == 0):\n next_s[i+1] += 1\n\n self.s = tuple(next_s)\n self.lastaction = action\n return (next_s, r, is_terminal, {\"prob\": prob})", "def solve(self):\n return breadth_first_search(self) + [self.goal_url]", "def getCurrentStep():", "def search_possible_steps(self):\n if self.ended:\n return False\n possible_steps_turple = (self.board == 0)\n possible_steps = np.transpose(possible_steps_turple.nonzero())\n return possible_steps", "def __get_final_successor_and_start(actions):\n branch_start_actions = []\n final_successor_action = []\n for steps in actions:\n steps_action = get_action_type(action=steps)\n if \"StartAction\" in steps_action:\n branch_start_actions.append(steps)\n elif \"StopAction\" in steps_action:\n final_successor_action.append(steps)\n return branch_start_actions, final_successor_action", "def find_next_step(start, end, paths):\r\n def find_paths(start, current, distance, paths, choices):\r\n \"\"\"\r\n Given the start point, and the current point, builds a dictionary indicating the first step\r\n and the minimum distance to the end using that step. 
Distance indicates the distance from\r\n current to end.\r\n \"\"\"\r\n # Find all paths resulting in the minimum distance\r\n options = []\r\n min_distance = min(paths[current].values())\r\n for option, distance in paths[current].items():\r\n if distance == min_distance:\r\n\r\n # If we find the beginning, break out\r\n if option == start:\r\n if option not in choices or choices[current] < distance + min_distance:\r\n choices[current] = distance + min_distance\r\n return\r\n\r\n # Add to list of options\r\n options.append(option)\r\n\r\n # For each path, recursively find minimal paths\r\n for option in options:\r\n find_paths(start, option, min_distance, paths, choices)\r\n\r\n choices = {}\r\n find_paths(start, end, 0, paths, choices)\r\n choices = sorted(choices.keys())\r\n return choices[0]", "def check_and_process_graph(self, allow_empty=False):\n\n if self.is_empty() and allow_empty:\n self._start_steps = []\n return [], None, []\n\n def has_loop(step, previous):\n for next_step in step.after or []:\n if next_step in previous:\n return step.name\n downstream = has_loop(self[next_step], previous + [next_step])\n if downstream:\n return downstream\n return None\n\n start_steps = []\n for step in self._steps.values():\n step._next = None\n if step.after:\n loop_step = has_loop(step, [])\n if loop_step:\n raise GraphError(\n f\"Error, loop detected in step {loop_step}, graph must be acyclic (DAG)\"\n )\n else:\n start_steps.append(step.name)\n\n responders = []\n for step in self._steps.values():\n if hasattr(step, \"responder\") and step.responder:\n responders.append(step.name)\n if step.on_error and step.on_error in start_steps:\n start_steps.remove(step.on_error)\n if step.after:\n prev_step = step.after[0]\n self[prev_step].set_next(step.name)\n if self.on_error and self.on_error in start_steps:\n start_steps.remove(self.on_error)\n\n if (\n len(responders) > 1\n ): # should not have multiple steps which respond to request\n raise GraphError(\n f'there are more than one responder steps in the graph ({\",\".join(responders)})'\n )\n\n if self.from_step:\n if self.from_step not in self.steps:\n raise GraphError(\n f\"from_step ({self.from_step}) specified and not found in graph steps\"\n )\n start_steps = [self.from_step]\n\n self._start_steps = [self[name] for name in start_steps]\n\n def get_first_function_step(step, current_function):\n # find the first step which belongs to the function\n if (\n hasattr(step, \"function\")\n and step.function\n and step.function == current_function\n ):\n return step\n for item in step.next or []:\n next_step = self[item]\n returned_step = get_first_function_step(next_step, current_function)\n if returned_step:\n return returned_step\n\n current_function = get_current_function(self.context)\n if current_function and current_function != \"*\":\n new_start_steps = []\n for from_step in self._start_steps:\n step = get_first_function_step(from_step, current_function)\n if step:\n new_start_steps.append(step)\n if not new_start_steps:\n raise GraphError(\n f\"did not find steps pointing to current function ({current_function})\"\n )\n self._start_steps = new_start_steps\n\n if self.engine == \"sync\" and len(self._start_steps) > 1:\n raise GraphError(\n \"sync engine can only have one starting step (without .after)\"\n )\n\n default_final_step = None\n if self.final_step:\n if self.final_step not in self.steps:\n raise GraphError(\n f\"final_step ({self.final_step}) specified and not found in graph steps\"\n )\n default_final_step = self.final_step\n\n 
elif len(self._start_steps) == 1:\n # find the final step in case if a simple sequence of steps\n next_obj = self._start_steps[0]\n while next_obj:\n next = next_obj.next\n if not next:\n default_final_step = next_obj.name\n break\n next_obj = self[next[0]] if len(next) == 1 else None\n\n return self._start_steps, default_final_step, responders", "def step(self, action):\n\n # ==\n # Transition, reward and termination\n done = False\n reward = self.get_current_reward(self.state)\n\n # Leaf and absorbing nodes\n if self.state <= 1:\n done = True\n if self.state == 1:\n self.state = 0 # go to absorbing\n else:\n self.state = int(self.state // 2)\n\n # ==\n # Features\n phi = self.state_2_features(self.state)\n\n return phi, reward, done, {}", "def successor(self, state):\n for action in self.actions:\n nexts = state.move(action)\n if nexts is not None:\n yield (action,nexts)", "def step(self, action):\r\n s = self.get_state()\r\n\r\n elements = np.arange(self.S)\r\n # weights = np.squeeze(self.nextStateProbability[s,action])\r\n weights = self.nextStateProbability[s, action]\r\n nexts = choices(elements, weights, k=1)[0]\r\n\r\n # p = self.nextStateProbability[s,action]\r\n # reward = self.rewardsTable[s,action, nexts][0]\r\n reward = self.rewardsTable[s, action, nexts]\r\n\r\n # fully observable MDP: observation is the actual state\r\n self.currentObservation = nexts\r\n\r\n gameOver = False\r\n if self.currentIteration > np.Inf:\r\n ob = self.reset()\r\n gameOver = True # game ends\r\n else:\r\n ob = self.get_state()\r\n\r\n history = {\"time\": self.currentIteration, \"state_t\": s, \"action_t\": action,\r\n \"reward_tp1\": reward, \"state_tp1\": nexts}\r\n # history version with actions and states, not their indices\r\n # history = {\"time\": self.currentIteration, \"action_t\": self.actionListGivenIndex[action],\r\n # \"reward_tp1\": reward, \"observation_tp1\": self.stateListGivenIndex[self.get_state()]}\r\n self.currentIteration += 1\r\n return ob, reward, gameOver, history", "def successorStates(self, state):\n currentState = state[1]\n successors = []\n for action in Directions.CARDINAL:\n x, y = state[0] # currentPosition\n print(\"State: {}\".format(state[0]))\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n\n # Implement a successor discovery, check if any corners are satisfied\n # and update values as they are satisfied\n if (not hitsWall):\n successorsState = []\n nextxy = (nextx, nexty)\n if nextxy == self.corners[0]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[0])\n if nextxy == self.corners[1]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[1])\n if nextxy == self.corners[2]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[2])\n if nextxy == self.corners[3]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[3])\n # Put all updated values of 4 corners to a variable\n successorPost = (successorsState[0], successorsState[1],\n successorsState[2], successorsState[3])\n # Append to go to the next move\n successors.append(((nextxy, successorPost), action, 1))\n\n self._numExpanded += 1 # Count the number of nodes expanded\n return successors", "def trace_back(self, node):\n state_id = node.get_state_id()\n steps = [eval(state_id)]\n if state_id in self._history:\n previous = self._history[state_id]\n else:\n previous = False\n while previous:\n steps.append(eval(previous))\n if previous 
in self._history:\n previous = self._history[previous]\n else:\n previous = False\n return steps[::-1]", "def _step(self) -> None:", "def find_good_paths(self):\n return self.robot_step((0,0),[])", "def steps():\n global recorder\n if recorder is None:\n print \"nothing has been recorded\"\n return None\n result = []\n for r in recorder:\n result += [ r ]\n return result", "def _generate_steps(\n episode: Sequence[Any],\n step_metadata_skip_list: AbstractSet[str]) -> Dict[str, Any]:\n step_metadata = _empty_nested_list(\n get_step_metadata(episode[0], step_metadata_skip_list))\n\n steps = {\n 'observation':\n _empty_nested_list(episode[0].timestep.observation),\n 'action':\n _empty_nested_list(episode[0].action),\n 'reward': [],\n 'discount': [],\n 'is_terminal': [],\n 'is_first': [],\n 'is_last': [],\n }\n steps.update(step_metadata)\n\n prev_step = None\n for step in episode:\n if prev_step is not None:\n steps['is_first'].append(prev_step.timestep.first())\n steps['is_terminal'].append(False)\n steps['is_last'].append(prev_step.timestep.last())\n steps['observation'] = _append_nested(\n steps['observation'], prev_step.timestep.observation)\n steps['reward'].append(step.timestep.reward)\n steps['discount'].append(step.timestep.discount)\n steps['action'] = _append_nested(steps['action'], step.action)\n step_metadata = get_step_metadata(prev_step, step_metadata_skip_list)\n for k, v in step_metadata.items():\n steps[k] = _append_nested(steps[k], v)\n prev_step = step\n if prev_step is not None:\n # We append the observation of the final step (action and reward were\n # included in the previous step.\n # The terminal flag is inferred like in termination(), truncation()\n # from dm_env/_environment.py\n is_terminal = (\n prev_step.timestep.last() and prev_step.timestep.discount == 0.0)\n steps['is_first'].append(prev_step.timestep.first())\n steps['is_terminal'].append(is_terminal)\n steps['is_last'].append(True)\n steps['observation'] = _append_nested(\n steps['observation'], prev_step.timestep.observation)\n # Discount, action and reward are meaningless in the terminal step\n steps['reward'].append(np.zeros_like(prev_step.timestep.reward))\n steps['discount'].append(\n np.zeros_like(prev_step.timestep.discount))\n steps['action'] = _append_nested(\n steps['action'],\n tf.nest.map_structure(np.zeros_like, prev_step.action))\n step_metadata = get_step_metadata(prev_step, step_metadata_skip_list)\n for k, v in step_metadata.items():\n steps[k] = _append_nested(steps[k], v)\n return steps" ]
[ "0.6234041", "0.58860874", "0.58491534", "0.5800428", "0.5592601", "0.5583821", "0.5574898", "0.5521146", "0.5507032", "0.5441927", "0.5365685", "0.5339986", "0.5337542", "0.5333407", "0.5318446", "0.53063506", "0.5300591", "0.5297238", "0.5295074", "0.5279935", "0.5268858", "0.5267007", "0.5260512", "0.5260152", "0.5246016", "0.5243274", "0.52393585", "0.52231354", "0.52164096", "0.52082497" ]
0.6419919
0
The original flowroot of this flow.
def root(self) -> FlowRoot: return self._root
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def root_orig(self):\n if hasattr(self, \"orig\"):\n return self.orig.root_orig\n return self", "def original(self):\n return self._original", "def original(self):\n return self._original", "def getOriginal(self,):\n\t\treturn self.original;", "def root(self):\n\t\treturn self._root", "def root(self):\n\t\treturn self._root", "def clone_as_root(self) :\n clone = deepcopy(self)\n clone.parent = None\n clone.path_length = 0\n clone.previous_action = None\n return clone", "def root(self):\n\n return self.parent.root", "def _get_root(self):\n return repr(str(self.src_root))", "def root(self):\n return self if self.is_root else self.__parent.root", "def root(self):\n return self._root", "def root(self):\n return self._root", "def root(self):\n return self._root", "def root(self):\n return self._root", "def getRoot(self):\n return self.__root", "def get_root(self):\n return self.__root", "def get_root(self):\n return self.__root", "def get_original_tree(self, tree):\n if not tree:\n return\n tree = copy.deepcopy(tree)\n PCFG.__revert_step_4(tree.root)\n PCFG.__revert_step_2(tree.root)\n # Get rid of step 1, namely get rid of S_0 -> S\n new_root = tree.root.children[0]\n new_tree = ParseTree(new_root, tree.probability)\n return new_tree", "def tmproot(self):\n return self._tmproot", "def get_root(self):\n return self.root", "def root(self):\n return self.__root__", "def original(self) -> Any:\n raise NotImplementedError", "def __copy__(self):\n logger.debug(\"Copying Flow() object.\")\n c = Flow()\n c.workingDir = self.workingDir \n c.cleanupTemp = self.cleanupTemp\n c.default_inputpaths = self.default_inputpaths\n c.default_outputpath = self.default_outputpath\n c.startNode = self.startNode\n c.lastNode = self.lastNode\n return c", "def root_node(self):\n return self.process_tree", "def get_root(self):\n return self._root", "def flow(self):\n return self._flow", "def get_root(self):\n return self._root", "def root(self):\n return self._root()", "def parent(self):\n return # Optional to overwrite", "def parent(self):\n return self if self.is_root else self.__parent" ]
[ "0.73445743", "0.6893548", "0.6893548", "0.6484647", "0.63805735", "0.63805735", "0.6353325", "0.6331243", "0.6286299", "0.62303126", "0.62067777", "0.62067777", "0.62067777", "0.6187001", "0.6182318", "0.6174468", "0.6174468", "0.6173917", "0.6116025", "0.60984105", "0.6096798", "0.60730076", "0.6064862", "0.60568494", "0.6051486", "0.6040705", "0.60286933", "0.60232717", "0.6006759", "0.599754" ]
0.6993407
1
Register a flow with this executor.
def add_flow(self, flow: FlowRoot): with self._lock: self.flow_roots[flow.name] = flow
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_flow(self, state, flow_id):\n self._flows[state] = flow_id\n _LOGGER.debug(\"Register state %s for flow_id %s\", state, flow_id)", "def flow(self, flow):\n\n self._flow = flow", "def add_flow_controller(cls, name, controller):\n cls.registered_controllers[name] = controller", "def add_flow(self, flow: \"Flow\") -> str:\n self.flows = {flow.name: flow.name} # type: Dict[str, str]\n self._flows = {flow.name: flow} # type: Dict[str, Flow]\n return flow.name", "def __init__(self, flow, register=True):\n\n self.flow = flow\n self.domain = flow.domain\n self.version = '1.0'\n self.activities = activity.find_workflow_activities(flow)\n self.task_list = flow.name\n super(DeciderWorker, self).__init__()\n\n if register:\n self.register()", "def addFlowLocal(self, flow=1):\n self.flow_local += flow", "def add(self, host, flow):\n if host not in list(self.__locusts__.keys()):\n self.__locusts__[host] = self.__locust_code(flow)\n else:\n tmp = self.__locusts__[host][:-100]\n tmp += self.__locust_task(flow)\n tmp += '\\n'\n tmp += self.__locusts__[host][-100:]\n self.__locusts__[host] = tmp\n return", "def update_flow(self, flow):\r\n self.flow = flow", "def register(self, execution_unit):\n self.execution_units[execution_unit.capability()].add(execution_unit)", "async def create_flow(self, flow: \"FlowObject\") -> UUID:\n return await self.create_flow_from_name(flow.name)", "def register_execution(in_progress, future, node):\n in_progress[future] = node", "def register_flow_implementation(hass, client_id, client_secret, base_url, profiles):\n if DATA_FLOW_IMPL not in hass.data:\n hass.data[DATA_FLOW_IMPL] = OrderedDict()\n\n hass.data[DATA_FLOW_IMPL] = {\n const.CLIENT_ID: client_id,\n const.CLIENT_SECRET: client_secret,\n const.BASE_URL: base_url,\n const.PROFILES: profiles,\n }", "def register_flow_implementation(hass, client_name, client_secret):\n hass.data.setdefault(DATA_AIS_WIFI_SERVICE_IMPL, {})\n\n hass.data[DATA_AIS_WIFI_SERVICE_IMPL] = {\n CONF_NAME: client_name,\n CONF_PASSWORD: client_secret,\n }", "def addFlowWithin(self, flow=1):\n self.flow_within_group += flow", "def register(self, task):\n self.registered.append(task)\n self.registered_dict[task.key_with_class()] = task", "def action_flow(self, action_flow):\n\n self._action_flow = action_flow", "async def _register(self, name, source):\n self._last[name] = {}\n\n self._srcTaskList[name] = asyncio.create_task(\n self._monitor(name, source)\n )", "def add(self, flow_detail):\n self._flowdetails.append(flow_detail)\n # When added the backend that the flow details (and any owned task\n # details) is using will be automatically switched to whatever backend\n # this logbook is using.\n if flow_detail.backend != self.backend:\n flow_detail.backend = self.backend\n for task_detail in flow_detail:\n if task_detail.backend != self.backend:\n task_detail.backend = self.backend", "def register(self, ar):\n\n # state_field = self._meta.get_field(self.workflow_state_field)\n state_field = self.workflow_state_field\n target_state = state_field.choicelist.registered\n self.set_workflow_state(ar, state_field, target_state)", "def register(self):\n raise NotImplementedError()", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def set_cur_flow(self, flow):\n self.cur_flow = flow", "def register(self):\n raise NotImplementedError(\"Should have implemented this\")", "def add_flow(self, priority, match, instructions, actions, buffer_id=None):\n\n parser = 
self._datapath.ofproto_parser\n\n # Create the flow mod message\n if buffer_id:\n mod = parser.OFPFlowMod(\n datapath=self._datapath,\n buffer_id=buffer_id,\n priority=priority,\n match=match,\n instructions=instructions)\n else:\n mod = parser.OFPFlowMod(\n datapath=self._datapath,\n priority=priority,\n match=match,\n instructions=instructions)\n\n self._datapath.send_msg(mod)", "def register(cls):\n register(cls, cls.provided_class)", "def register(blk):\n pass", "def add_workflow(self, workflow):\n self.workflow_manager.add_workflow(workflow)", "def register(self, fd):\n if fd not in self._fds:\n self._fds.append(fd)", "def flows(self, flows):\n\n self._flows = flows" ]
[ "0.69882387", "0.67626894", "0.6249088", "0.61617434", "0.6143314", "0.5989838", "0.58785754", "0.5878573", "0.58688176", "0.5865792", "0.5640574", "0.56151783", "0.5605409", "0.56050324", "0.553389", "0.5516025", "0.5483355", "0.5407733", "0.53915477", "0.53600454", "0.53564143", "0.53564143", "0.5328493", "0.5287452", "0.5270512", "0.52415633", "0.52391785", "0.52080786", "0.5205104", "0.5198079" ]
0.6806603
1
Trigger workflows that may have command cmd as an auto_trigger or an in-flight flow waiting for the command. This assumes cmd has been correctly executed.
def trigger(self, cmd: str, requestor: Identifier, extra_context=None) -> Optional[Flow]: flow, next_step = self.check_inflight_flow_triggered(cmd, requestor) if not flow: flow, next_step = self._check_if_new_flow_is_triggered(cmd, requestor) if not flow: return None flow.advance(next_step, enforce_predicate=False) if extra_context: flow.ctx = dict(extra_context) self._enqueue_flow(flow) return flow
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_if_new_flow_is_triggered(self, cmd: str, user: Identifier) -> Tuple[Optional[Flow], Optional[FlowNode]]:\n log.debug(\"Test if the command %s is an auto-trigger for any flow ...\", cmd)\n with self._lock:\n for name, flow_root in self.flow_roots.items():\n if cmd in flow_root.auto_triggers and not self.check_inflight_already_running(user):\n log.debug(\"Flow %s has been auto-triggered by the command %s by user %s\", name, cmd, user)\n return self._create_new_flow(flow_root, user, cmd)\n return None, None", "def check_inflight_flow_triggered(self, cmd: str, user: Identifier) -> Tuple[Optional[Flow], Optional[FlowNode]]:\n log.debug(\"Test if the command %s is a trigger for an inflight flow ...\", cmd)\n # TODO: What if 2 flows wait for the same command ?\n with self._lock:\n for flow in self.in_flight:\n if flow.check_identifier(user):\n log.debug(\"Requestor has a flow %s in flight\", flow.name)\n for next_step in flow.next_steps():\n if next_step.command == cmd:\n log.debug(\"Requestor has a flow in flight waiting for this command !\")\n return flow, next_step\n log.debug(\"None matched.\")\n return None, None", "def _run_trigger_inference_task(self):\n\n if self._running_task is None and self._running_thread is None:\n\n self._running_task = self._system.trigger_inference_task\n self._running_thread = threading.Thread(target=self._running_task.start)\n self._status_text.set('TriggerBot - Running')\n self._running_thread.start()", "def execute(self, flow: Flow):\n while True:\n autosteps = flow.next_autosteps()\n steps = flow.next_steps()\n\n if not steps:\n log.debug(\"Flow ended correctly.Nothing left to do.\")\n with self._lock:\n self.in_flight.remove(flow)\n break\n\n if not autosteps and flow.current_step.hints:\n possible_next_steps = [f'You are in the flow **{flow.name}**, you can continue with:\\n\\n']\n for step in steps:\n cmd = step.command\n cmd_fnc = self._bot.all_commands[cmd]\n reg_cmd = cmd_fnc._err_re_command\n syntax_args = cmd_fnc._err_command_syntax\n reg_prefixed = cmd_fnc._err_command_prefix_required if reg_cmd else True\n syntax = self._bot.prefix if reg_prefixed else ''\n if not reg_cmd:\n syntax += cmd.replace('_', ' ')\n if syntax_args:\n syntax += syntax_args\n possible_next_steps.append(f'- {syntax}')\n self._bot.send(flow.requestor, '\\n'.join(possible_next_steps))\n break\n\n log.debug('Steps triggered automatically %s.', ', '.join(str(node) for node in autosteps))\n log.debug('All possible next steps: %s.', ', '.join(str(node) for node in steps))\n\n for autostep in autosteps:\n log.debug(\"Proceeding automatically with step %s\", autostep)\n if autostep == FLOW_END:\n log.debug('This flow ENDED.')\n with self._lock:\n self.in_flight.remove(flow)\n return\n try:\n msg = Message(frm=flow.requestor, flow=flow)\n result = self._bot.commands[autostep.command](msg, None)\n log.debug('Step result %s: %s', flow.requestor, result)\n\n except Exception as e:\n log.exception('%s errored at %s', flow, autostep)\n self._bot.send(flow.requestor, f'{flow} errored at {autostep} with \"{e}\"')\n flow.advance(autostep) # TODO: this is only true for a single step, make it forkable.\n log.debug('Flow execution suspended/ended normally.')", "def consider_trigger(self):\n if self.trigger_nav_task_active and self.trigger_local_path_published:\n self.trigger_local_path_published = False\n self.trigger_nav_task_active = False\n if self.monitor:\n self.trigger_plp_task()\n if self.capture:\n self.capture_params()", "def test_execute_xia_automated_workflow(self, 
mock_run):\n self.assert_(execute_xia_automated_workflow.run())\n\n self.assert_(execute_xia_automated_workflow.run())\n self.assertEqual(mock_run.call_count, 2)\n\n self.assert_(execute_xia_automated_workflow.run())\n self.assertEqual(mock_run.call_count, 3)", "def trigger(self):\n\t\tself.setAquireState(\"RUN\")\n\t\tself.setStopAfter(\"SEQ\")\n\t\tself.osc.trigger()", "def call_in_sequence(self, cmds, shell=True):\n for cmd in cmds:\n if subprocess.call(cmd, shell=shell) == 1:\n sys.exit(1)", "def _invoke_cmd(self, cmd):\n if cmd in self.COMMANDS:\n self.COMMANDS[cmd]()\n else:\n print(ERROR_UNKNOWN_COMMAND.format(cmd=cmd))", "def do_workflow(self, arg=None):\n\n def add_steps_to_workflow(curr_flow):\n while True:\n cmd_call = simple_input('Please choose a command to add to the workflow.', cmds, True)\n if cmd_call not in ['DONE', 'EXIT']:\n if self.is_output_cmd(cmd_call):\n curr_flow.add_output(cmd_call)\n else:\n curr_flow.add_step(cmd_call)\n cmds.pop(cmds.index(cmd_call))\n\n _conf = simple_input('Do you want to configure this command?', ['Y','N'], True) if self.is_configureable(cmd) else None\n if _conf == 'Y':\n curr_flow.configure_step(cmd_call)\n\n elif cmd_call == 'DONE':\n break\n else:\n return\n return curr_flow.has_steps()\n\n def confirm_workflow(curr_flow):\n checks = [('START', 'Start workflow?'), ('ADD', 'Do you want to add more steps?'),\n ('RESTART', 'Do you want to start over?')]\n curr_flow.draw_steps()\n for check in checks:\n _continue = simple_input(check[1], ['Y', 'N', 'EXIT'])\n if _continue == 'Y':\n return check[0]\n if _continue == 'EXIT':\n return 'EXIT'\n return 'INVALID'\n\n print('Preparing Workflow Wizard...')\n options = sorted(self.cmds + self.output_cmds)\n from smores.workflow import Workflow\n workflow = Workflow(self)\n target, load_type = self.validate_args('', 'file')\n if target:\n _l = True if target in self.inputs['files'].keys() else False\n workflow.add_target(target, load_type, _l)\n print('Please choose the commands you would like to add to the workflow.'\n '\\nCommands will be executed in the order in which they are added.'\n '\\n\\nPlease note that some commands have dependencies that must be satisfied. 
An overview of '\n 'command dependencies is available on the main SMOREs wiki on Github')\n print('\\nAvailable Commands for WorkFlow')\n cmds = []\n for i, _o in enumerate(options):\n print('{1}'.format(i, _o))\n cmds.append(_o)\n cmds.append('DONE')\n steps_added = add_steps_to_workflow(workflow)\n while steps_added:\n _run = confirm_workflow(workflow)\n if _run == 'START':\n break\n elif _run == 'ADD':\n _ = add_steps_to_workflow(workflow)\n elif _run == 'RESTART':\n self.do_workflow('')\n else:\n return\n workflow.run()\n print('Workflow has completed.')\n return\n\n else:\n print('Workflows currently have to be setup without the file already being loaded.')\n return", "def __do_trigger(self, request):\n dmp_trigger.DmpTrigger().trigger(request)\n return defines.ReturnCode.SUCC", "def issue(self, cmd):\n self.send([cmd])\n return self.read_until_prompt()[1:] # drop the echo", "def run_command_check(self):\n pass", "def _run_trigger_training_task(self):\n\n if self._running_task is None and self._running_thread is None:\n\n self._running_task = self._system.trigger_training_task\n self._running_thread = threading.Thread(target=self._running_task.start)\n self._status_text.set('TriggerBot - Training')\n self._running_thread.start()", "def LaunchAndWait(cmd):\n call(cmd)", "def tick(self, cmd=None):\n if cmd and isinstance(cmd, str):\n parts = cmd.split()\n if parts[0] in self.commands:\n try:\n self.commands[parts[0]](parts[1:])\n except Exception as e:\n print(\"Exception processing command\", e)", "def os_call( self, cmd_arg, ):\n while True: # will exit when it works or run out of editors\n a_command = self.working_command\n if a_command is None:\n a_command = self.get_next_command( )\n\n if a_command is None: # no commands left to try\n msg = \"Run out of editors to try\"\n# AppGlobal.__logger.error( msg )\n raise RuntimeError( msg ) # or fail in some other where\n break # we are aread done\n try:\n if cmd_arg is None:\n proc = Popen( [ a_command, ] )\n else:\n proc = Popen( [ a_command, cmd_arg ] )\n self.working_command = a_command\n break # do not get here if exception so command \"worked \"\n except Exception as excpt: # this should let us loop ignoring exception\n pass\n msg = ( f\"os_call exception trying to use >{a_command}< with cmd_arg >{cmd_arg}< exception: {excpt}\" )\n # if exception proc not returned f\"\\npopen returned {proc}\" )\n AppGlobal.logger.debug( msg )", "def test_change_trigger_carefully(self):\n self._test_change_trigger(True)", "def run_cmd(self, cmd, timeout,\n force_execution=False,\n wait_for_response=True,\n silent_mode=False):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def run_cmd(self):\r\n self.run = True", "async def module_commanded(self, cmd_msg: abc.Message, ctx: Context, delay: float = None):\n\n async def delay_wrapper(seconds: float, coro):\n try:\n await asyncio.sleep(seconds)\n await coro\n except asyncio.CancelledError:\n coro.close()\n\n invoker = cmd_msg.source\n dest = cmd_msg.destination\n cmd_str = cmd_msg.clean_content\n if not cmd_str.startswith(self.cmdprefix):\n raise NotACommand(f\"Not a command string: {cmd_str}\")\n try:\n name, *args = shellish_split(cmd_str)\n except ValueError:\n await self.module_send_event(\"invalid_command\", ctx, cmd_msg, CmdErrorType.BadSyntax)\n return\n name = name.lstrip(self.cmdprefix)\n if delay:\n self.logger.debug(\n f\"Received delayed command from '{invoker}' at '{dest}': \"\n f\"{name=}, {args=}. 
Executing in {delay} seconds.\"\n )\n else:\n self.logger.debug(f\"Received command from '{invoker}' at '{dest}': {name=}, {args=}\")\n try:\n cmd = self._commands[name]\n namespace = cmd.parse_args(args)\n except KeyError:\n await self.module_send_event(\"invalid_command\", ctx, cmd_msg, CmdErrorType.NotFound)\n return\n except (ArgumentError, ArgumentTypeError, CommandParseError):\n await self.module_send_event(\"invalid_command\", ctx, cmd_msg, CmdErrorType.BadSyntax)\n return\n method = getattr(cmd.module.handle, f\"module_command_{name}\", None)\n if callable(method):\n parsed = ParsedCommand(name, vars(namespace), cmd, cmd_msg)\n if delay:\n self._delayed_command_count += 1\n wait_id = self._delayed_command_count\n info = WaitingCmdInfo(wait_id, cmd_str, delay, invoker, dest, time.time())\n task = self.eventloop.create_task(\n delay_wrapper(delay, method(ctx, parsed)),\n name=f\"ZeroBot_Wait_Cmd_{wait_id}\",\n )\n self._delayed_commands[wait_id] = (task, info)\n await task\n del self._delayed_commands[wait_id]\n else:\n await method(ctx, parsed)", "def execute_workflow(self):\n self._gdb_interface.execute_workflow()", "def run(cmd):\n print('running', cmd)\n proc = sp.Popen([cmd], shell=True)\n proc.wait()\n assert proc.poll() == 0", "def run_task(self, cmd):\n # Not initialized here...must be overloaded from outside\n raise NotImplementedError()", "def run_task(self, cmd):\n # Not initialized here...must be overloaded from outside\n raise NotImplementedError()", "def wait_for_trigger(self):\n # Read the status only to determine when the trigger occurs.\n is_running = True\n is_triggered = False\n while is_running and not is_triggered:\n status = self.hat.a_in_scan_status()\n is_running = status.running\n is_triggered = status.triggered", "def _execute_act(self,exec_act,new_time_dt):\n\n # note that currently the ground station is not responsible for initiating any activities\n\n pass", "def test_applying_vim_strategy_fails_apply_immediately(self):\n\n # first api query is before the apply\n self.vim_client.get_strategy.return_value = STRATEGY_READY_TO_APPLY\n\n # return a failed strategy\n self.vim_client.apply_strategy.return_value = STRATEGY_APPLY_FAILED\n\n # invoke the strategy state operation on the orch thread\n self.worker.perform_state_action(self.strategy_step)\n\n # Failure case\n self.assert_step_updated(self.strategy_step.subcloud_id,\n consts.STRATEGY_STATE_FAILED)", "def test_execute(self):\n context = dict()\n cmd = pycell.python_cell(\n source='print(2+2)',\n validate=True\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(3)\n self.assertEqual(controller.task_id, '000')\n self.assertEqual(controller.state, 'SUCCESS')\n self.assertEqual(controller.outputs.stdout[0].value, '4')", "def complete_cmd(self):\r\n if self.select_cmd is not None:\r\n self.do_cmd()" ]
[ "0.715326", "0.6214702", "0.5785957", "0.5547166", "0.54532874", "0.545297", "0.5442159", "0.5426565", "0.54126877", "0.54040956", "0.5393063", "0.5381227", "0.53534704", "0.5316226", "0.5302364", "0.5268394", "0.5265039", "0.5208793", "0.5185608", "0.51715356", "0.5159973", "0.514008", "0.5135851", "0.51330256", "0.51330256", "0.5118858", "0.50788176", "0.50767255", "0.5071926", "0.50604033" ]
0.6354978
1
Check if the user is already running a flow.
def check_inflight_already_running(self, user: Identifier) -> bool: with self._lock: for flow in self.in_flight: if flow.requestor == user: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsRunning(self):\n current_urn = self.Get(self.Schema.CURRENT_FLOW_URN)\n if current_urn:\n current_flow = aff4.FACTORY.Open(urn=current_urn,\n token=self.token, mode=\"r\")\n runner = current_flow.GetRunner()\n return runner.context.state == rdfvalue.Flow.State.RUNNING\n return False", "def _check_if_new_flow_is_triggered(self, cmd: str, user: Identifier) -> Tuple[Optional[Flow], Optional[FlowNode]]:\n log.debug(\"Test if the command %s is an auto-trigger for any flow ...\", cmd)\n with self._lock:\n for name, flow_root in self.flow_roots.items():\n if cmd in flow_root.auto_triggers and not self.check_inflight_already_running(user):\n log.debug(\"Flow %s has been auto-triggered by the command %s by user %s\", name, cmd, user)\n return self._create_new_flow(flow_root, user, cmd)\n return None, None", "def is_running(self):\n\t\treturn self in _running", "def should_keep_running(self):\n return len(self.party.active_users())", "async def is_running(self, **kwargs: Any) -> bool:\n ...", "async def is_running(self, **kwargs: Any) -> bool:\n return True", "def is_running(program):\n return program in get_running()", "def _is_running(self):\n return self._run_state.is_running()", "def can_run(self):\n\t\treturn self._start is None", "def isFlow(self) -> bool:\n ...", "async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff or lhRole in member.roles: return True", "def is_instantiated(self):\n if Session().checkSession(self.isp_id):\n session_id = Session().get_active_user_session(self.isp_id).id\n self.current_domain_id = Graph().get_last_graph(session_id).domain_id\n\n # TODO substitute this with a controller.get when the get status will be implemented on the UN\n # ask to orchestrator if the isp graph is instantiated\n user_nffg_file = User().getServiceGraph(self.isp_name)\n nffg = NFFG_Manager.getNF_FGFromFile(user_nffg_file)\n user_data = UserData(self.isp_name, self.isp_password, self.isp_tenant)\n orchestrator = GlobalOrchestrator(user_data, self.orchestrator_ip, self.orchestrator_port)\n return UserSession(self.isp_id, None).checkSession(nffg.id, orchestrator)", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def _isrunning(self):\n return self.dp.state()==PyTango.DevState.RUNNING", "def is_running(self) -> bool:\n return False", "def isonce(self, user, id, period=None):\n if period is None:\n period_from = datetime.datetime.min\n else:\n period_from = datetime.datetime.utcnow() - period\n return (History.objects(user=user).filter(\n scenario__attributes__id=id,\n created_at__gte=period_from).count() == 0)", "def running(self):\n return self._lifetime_state in {\"starting\",\"running\",\"finishing\"}", "def running(self):\n\t\treturn self._start is not None", "def user_in_session():\n return 'user_id' in login_session", "def test_user_is_sender(self):\n sender = self.create_user()\n thread = self.create_thread(sender=sender, status='pending')\n self.assertTrue(thread.first_message.visible_to_user(sender))", "def is_running(self):\n return self.action_thread and self.action_thread.is_alive()", "def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True", "def running(request):\r\n return request.session.get('partial_pipeline') is not None # Avoid False for {}.\r", "def is_running(self):\n return self._task.running()", "def is_running(self):\n return self._task.running()", "def 
check_flow_is_pending_or_running(self, state: State) -> State:\n\n # the flow run is already finished\n if state.is_finished() is True:\n self.logger.info(\"Flow run has already finished.\")\n raise ENDRUN(state)\n\n # the flow run must be either pending or running (possibly redundant with above)\n elif not (state.is_pending() or state.is_running()):\n self.logger.info(\"Flow is not ready to run.\")\n raise ENDRUN(state)\n\n return state", "def has_run(self, path):\n\n try:\n data = [d for d in self.calls if path in d][0]\n except IndexError:\n return False\n else:\n return data[path]", "def is_running(self) -> Awaitable[bool]:\n return self.instance.is_running()", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def running(self) -> bool:" ]
[ "0.69948125", "0.6222682", "0.61390924", "0.60672396", "0.6056235", "0.5998055", "0.59039176", "0.5898025", "0.58831185", "0.5882401", "0.58729315", "0.58365375", "0.58026636", "0.5799237", "0.5794511", "0.57941985", "0.57290274", "0.57271695", "0.5711765", "0.57040274", "0.5694725", "0.5684001", "0.5664874", "0.5661528", "0.5661528", "0.5654919", "0.56325793", "0.5617191", "0.56122243", "0.5604968" ]
0.7122003
0
Trigger workflows that may have command cmd as an auto_trigger. This assumes cmd has been correctly executed.
def _check_if_new_flow_is_triggered(self, cmd: str, user: Identifier) -> Tuple[Optional[Flow], Optional[FlowNode]]: log.debug("Test if the command %s is an auto-trigger for any flow ...", cmd) with self._lock: for name, flow_root in self.flow_roots.items(): if cmd in flow_root.auto_triggers and not self.check_inflight_already_running(user): log.debug("Flow %s has been auto-triggered by the command %s by user %s", name, cmd, user) return self._create_new_flow(flow_root, user, cmd) return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger(self, cmd: str, requestor: Identifier, extra_context=None) -> Optional[Flow]:\n flow, next_step = self.check_inflight_flow_triggered(cmd, requestor)\n if not flow:\n flow, next_step = self._check_if_new_flow_is_triggered(cmd, requestor)\n if not flow:\n return None\n\n flow.advance(next_step, enforce_predicate=False)\n if extra_context:\n flow.ctx = dict(extra_context)\n self._enqueue_flow(flow)\n return flow", "def cli(ctx):\n if not ctx.invoked_subcommand:\n auto()", "def _run_trigger_inference_task(self):\n\n if self._running_task is None and self._running_thread is None:\n\n self._running_task = self._system.trigger_inference_task\n self._running_thread = threading.Thread(target=self._running_task.start)\n self._status_text.set('TriggerBot - Running')\n self._running_thread.start()", "def run_command_check(self):\n pass", "def __do_trigger(self, request):\n dmp_trigger.DmpTrigger().trigger(request)\n return defines.ReturnCode.SUCC", "def run_cmd(self):\r\n self.run = True", "def trigger(self):\n\t\tself.setAquireState(\"RUN\")\n\t\tself.setStopAfter(\"SEQ\")\n\t\tself.osc.trigger()", "def test_change_trigger_carefully(self):\n self._test_change_trigger(True)", "def _invoke_cmd(self, cmd):\n if cmd in self.COMMANDS:\n self.COMMANDS[cmd]()\n else:\n print(ERROR_UNKNOWN_COMMAND.format(cmd=cmd))", "def test_execute_xia_automated_workflow(self, mock_run):\n self.assert_(execute_xia_automated_workflow.run())\n\n self.assert_(execute_xia_automated_workflow.run())\n self.assertEqual(mock_run.call_count, 2)\n\n self.assert_(execute_xia_automated_workflow.run())\n self.assertEqual(mock_run.call_count, 3)", "def manual_trigger(pg):\n\tpg.write('trig:seq:imm') #execute\n\treturn", "def consider_trigger(self):\n if self.trigger_nav_task_active and self.trigger_local_path_published:\n self.trigger_local_path_published = False\n self.trigger_nav_task_active = False\n if self.monitor:\n self.trigger_plp_task()\n if self.capture:\n self.capture_params()", "def test_change_trigger(self):\n self._test_change_trigger(False)", "def fire_trigger(self, trigger):\n if not self.exists():\n return\n if trigger in self.events:\n for action in self.events[trigger]:\n action(requestor=self)", "def check_inflight_flow_triggered(self, cmd: str, user: Identifier) -> Tuple[Optional[Flow], Optional[FlowNode]]:\n log.debug(\"Test if the command %s is a trigger for an inflight flow ...\", cmd)\n # TODO: What if 2 flows wait for the same command ?\n with self._lock:\n for flow in self.in_flight:\n if flow.check_identifier(user):\n log.debug(\"Requestor has a flow %s in flight\", flow.name)\n for next_step in flow.next_steps():\n if next_step.command == cmd:\n log.debug(\"Requestor has a flow in flight waiting for this command !\")\n return flow, next_step\n log.debug(\"None matched.\")\n return None, None", "def execute(self):\n\t\tself.drivetrain.bad_auto_drive()\n\t\tself.drivetrain.stop_robot()", "def __trigger(self, toBeExecuted, args = []):\n\n self.__connect()\n [ f(args) for f in toBeExecuted ]\n self.__disconnect()", "def execute(self, flow: Flow):\n while True:\n autosteps = flow.next_autosteps()\n steps = flow.next_steps()\n\n if not steps:\n log.debug(\"Flow ended correctly.Nothing left to do.\")\n with self._lock:\n self.in_flight.remove(flow)\n break\n\n if not autosteps and flow.current_step.hints:\n possible_next_steps = [f'You are in the flow **{flow.name}**, you can continue with:\\n\\n']\n for step in steps:\n cmd = step.command\n cmd_fnc = self._bot.all_commands[cmd]\n reg_cmd = 
cmd_fnc._err_re_command\n syntax_args = cmd_fnc._err_command_syntax\n reg_prefixed = cmd_fnc._err_command_prefix_required if reg_cmd else True\n syntax = self._bot.prefix if reg_prefixed else ''\n if not reg_cmd:\n syntax += cmd.replace('_', ' ')\n if syntax_args:\n syntax += syntax_args\n possible_next_steps.append(f'- {syntax}')\n self._bot.send(flow.requestor, '\\n'.join(possible_next_steps))\n break\n\n log.debug('Steps triggered automatically %s.', ', '.join(str(node) for node in autosteps))\n log.debug('All possible next steps: %s.', ', '.join(str(node) for node in steps))\n\n for autostep in autosteps:\n log.debug(\"Proceeding automatically with step %s\", autostep)\n if autostep == FLOW_END:\n log.debug('This flow ENDED.')\n with self._lock:\n self.in_flight.remove(flow)\n return\n try:\n msg = Message(frm=flow.requestor, flow=flow)\n result = self._bot.commands[autostep.command](msg, None)\n log.debug('Step result %s: %s', flow.requestor, result)\n\n except Exception as e:\n log.exception('%s errored at %s', flow, autostep)\n self._bot.send(flow.requestor, f'{flow} errored at {autostep} with \"{e}\"')\n flow.advance(autostep) # TODO: this is only true for a single step, make it forkable.\n log.debug('Flow execution suspended/ended normally.')", "def _run_trigger_training_task(self):\n\n if self._running_task is None and self._running_thread is None:\n\n self._running_task = self._system.trigger_training_task\n self._running_thread = threading.Thread(target=self._running_task.start)\n self._status_text.set('TriggerBot - Training')\n self._running_thread.start()", "def airflow_commands():\n pass", "def at_pre_cmd(self):\n pass", "def call_in_sequence(self, cmds, shell=True):\n for cmd in cmds:\n if subprocess.call(cmd, shell=shell) == 1:\n sys.exit(1)", "def os_call( self, cmd_arg, ):\n while True: # will exit when it works or run out of editors\n a_command = self.working_command\n if a_command is None:\n a_command = self.get_next_command( )\n\n if a_command is None: # no commands left to try\n msg = \"Run out of editors to try\"\n# AppGlobal.__logger.error( msg )\n raise RuntimeError( msg ) # or fail in some other where\n break # we are aread done\n try:\n if cmd_arg is None:\n proc = Popen( [ a_command, ] )\n else:\n proc = Popen( [ a_command, cmd_arg ] )\n self.working_command = a_command\n break # do not get here if exception so command \"worked \"\n except Exception as excpt: # this should let us loop ignoring exception\n pass\n msg = ( f\"os_call exception trying to use >{a_command}< with cmd_arg >{cmd_arg}< exception: {excpt}\" )\n # if exception proc not returned f\"\\npopen returned {proc}\" )\n AppGlobal.logger.debug( msg )", "def _get_trigger(self, cursor):\n raise NotImplementedError", "def trigger(self):\n self._kill = False\n self._trigger.set()", "def tick(self, cmd=None):\n if cmd and isinstance(cmd, str):\n parts = cmd.split()\n if parts[0] in self.commands:\n try:\n self.commands[parts[0]](parts[1:])\n except Exception as e:\n print(\"Exception processing command\", e)", "def run_task(self, cmd):\n # Not initialized here...must be overloaded from outside\n raise NotImplementedError()", "def run_task(self, cmd):\n # Not initialized here...must be overloaded from outside\n raise NotImplementedError()", "def execute_workflow(self):\n self._gdb_interface.execute_workflow()", "def _execute_act(self,exec_act,new_time_dt):\n\n # note that currently the ground station is not responsible for initiating any activities\n\n pass" ]
[ "0.5883584", "0.58415097", "0.5791433", "0.5771063", "0.5762324", "0.576105", "0.5728582", "0.567646", "0.5645227", "0.5643713", "0.556337", "0.5523932", "0.54966164", "0.5453627", "0.54442376", "0.54439944", "0.5407391", "0.5404491", "0.53985775", "0.53704196", "0.53561866", "0.53353304", "0.5326245", "0.5323104", "0.5282735", "0.52824503", "0.52785015", "0.52785015", "0.52613336", "0.5260829" ]
0.7032899
0
Stops a specific flow. It is a no-op if the flow doesn't exist. Returns the stopped flow if found.
def stop_flow(self, name: str, requestor: Identifier) -> Optional[Flow]: with self._lock: for flow in self.in_flight: if flow.name == name and flow.check_identifier(requestor): log.debug(f'Removing flow {str(flow)}.') self.in_flight.remove(flow) return flow return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self, **kwargs):\n return self.client.api.stop(self.id, **kwargs)", "def del_flow(self, flow_name):\n\n try:\n of_response = requests.delete(self.url + \"restconf/config/opendaylight-inventory:nodes/node/\" + self.id +\n \"/table/0/flow/\" + flow_name, headers=self.headers)\n error_text = \"Openflow response {}: {}\".format(of_response.status_code, of_response.text)\n if of_response.status_code != 200:\n self.logger.warning(\"del_flow \" + error_text)\n raise OpenflowConnUnexpectedResponse(error_text)\n self.logger.debug(\"del_flow OK \" + error_text)\n return None\n except requests.exceptions.RequestException as e:\n # raise an exception in case of contection error\n error_text = type(e).__name__ + \": \" + str(e)\n self.logger.error(\"del_flow \" + error_text)\n raise OpenflowConnConnectionException(error_text)", "def stop_workflow(self, state, message=None):\n if state not in [states.SUCCESS, states.ERROR]:\n msg = (\"Illegal state %s: provided while stopping workflow \"\n \"execution id=%s. State can be %s or %s. \"\n \"Stop request IGNORED.\" %\n (state, self.wf_ex.id, states.SUCCESS, states.ERROR))\n raise exc.WorkflowException(msg)\n\n self._set_execution_state(state, message)\n\n return self.wf_ex", "def stop_workflow_execution(self, cntx, **kwargs):\n execution_id = kwargs.get('execution_id')\n\n return db_api.execution_update(execution_id,\n {\"state\": states.STOPPED})", "def stop(self):\n return self._send_command(\"stop\")", "def stop(self):\n with self._lock:\n if not self.stopped():\n self._started = None\n getattr(self.factory, 'stop_' + self.class_name())(self)", "def Stop(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('stop', payload=payload, response_object=None)", "def stop(self):\n self._context.state = STOPPED", "def stop(self):\n return _spacegrant_swig.hdlc_framer_sptr_stop(self)", "def stop_device(self):\n\n self.state = 'stopped'", "def stop_video(self):\n stopped_video = self.video_id\n if stopped_video == None:\n print(\"Cannot stop video: No video is currently playing\")\n else:\n print(f\"Stopping video: {stopped_video}\")", "def stop(self, session, params=None):\n session.set_status('stopping')\n self._run = False", "def stop_stream(self, stream_id, **kwargs):\n stream = self.streams.pop(stream_id, None)\n if not stream:\n return\n\n iface = stream.get('iface')\n if iface:\n self.used_ifaces.remove(iface)\n\n # instance could have already been stopped in send_stream\n return self._stop_and_parse_instance(stream.get('instance_id'), **kwargs)", "def stop(self, sessionId):\n self.send_stop(sessionId)\n self.recv_stop()", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def test_stopService(self):\n port = self.port(store=self.store, portNumber=self.lowPortNumber, factory=self.factory)\n port._listen = self.listen\n port.startService()\n stopped = port.stopService()\n stopping = self.ports[0].stopping\n self.failIfIdentical(stopping, None)\n self.assertIdentical(stopped, stopping)", "def stop(self):\n if self._stop is not None:\n LOGGER.info(\n \"Stopping schedule[%s], index[%s]\",\n self._description,\n self._index)\n self._stop.set()\n instruction = self._instructions[self._index]\n instruction.stop()", "def stop_fleet(Name=None):\n pass", "def stop(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"stop\"\n run_command_with_services(context, user, remote, instance, stack, 
command, services)", "def stop(self):\n self.requested_state = 'Stopped'\n self.ml_interface.stop()", "def stop(self, _id):\n self.trafficLights.get(int(_id)).stop()", "def stop(self):\n return self.Stop(self._handle)", "def stop(self) -> str:\n return self.rpc_call(\"stop\")", "def stop(self):\n return _uhd_swig.usrp_source_sptr_stop(self)", "async def stop(self):\n await self.pause()\n return await self.send_command_and_read_reply(\n Protocol1Command(command=\"\", execution_command=\"V\")\n )", "def stop_router_transport(self, id, details=None):\n self.log.debug(\"{}.stop_router_transport\".format(self.__class__.__name__), id=id)\n\n # FIXME\n if id not in self.transports:\n # if not id in self.transports or self.transports[id].status != 'started':\n emsg = \"Cannot stop transport: no transport with ID '{}' or transport is already stopping\".format(id)\n self.log.error(emsg)\n raise ApplicationError(u'crossbar.error.not_running', emsg)\n\n self.log.debug(\"Stopping transport with ID '{}'\".format(id))\n\n d = self.transports[id].port.stopListening()\n\n def ok(_):\n del self.transports[id]\n\n def fail(err):\n raise ApplicationError(u\"crossbar.error.cannot_stop\", \"Failed to stop transport: {}\".format(str(err.value)))\n\n d.addCallbacks(ok, fail)\n return d", "async def delete_flow_run(\n self,\n flow_run_id: UUID,\n ) -> None:\n try:\n await self._client.delete(f\"/flow_runs/{flow_run_id}\"),\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_404_NOT_FOUND:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise", "def stop_run(id_, **kwargs):\n run = get_run_object(id_)\n check_run_permission(run, kwargs[\"token_info\"])\n\n stub = get_runs_services_stub()\n response = stub.Stop(job_pb2.ID(id=id_))\n\n if response.status != 200:\n return ErrorSerializer(status=response.status, title=\"Api Error\",\n detail=response.message), response.status\n\n return StatusSerializer.from_dict(util.deserialize_protobuf(response))", "def stop(self):\n schedule = self._schedules[self._index]\n schedule.stop()\n self._stopped.set()\n self._started.clear()" ]
[ "0.5746109", "0.5730475", "0.56689626", "0.5514384", "0.5421373", "0.5382266", "0.5365279", "0.5311939", "0.53013855", "0.5270593", "0.5240544", "0.5230865", "0.5225166", "0.52059144", "0.5201256", "0.5201256", "0.51601064", "0.51247495", "0.5124382", "0.511578", "0.5107241", "0.5104129", "0.5101935", "0.509174", "0.5088328", "0.50695187", "0.50649273", "0.50623167", "0.50555915", "0.5028696" ]
0.72481436
0
Convert byte data for a Point to a GeoJSON `dict`.
def __load_point(big_endian, type_bytes, data_bytes): endian_token = '>' if big_endian else '<' if type_bytes == WKB_2D['Point']: coords = struct.unpack('%sdd' % endian_token, data_bytes) elif type_bytes == WKB_Z['Point']: coords = struct.unpack('%sddd' % endian_token, data_bytes) elif type_bytes == WKB_M['Point']: # NOTE: The use of XYM types geometries is quite rare. In the interest # of removing ambiguity, we will treat all XYM geometries as XYZM when # generate the GeoJSON. A default Z value of `0.0` will be given in # this case. coords = list(struct.unpack('%sddd' % endian_token, data_bytes)) coords.insert(2, 0.0) elif type_bytes == WKB_ZM['Point']: coords = struct.unpack('%sdddd' % endian_token, data_bytes) return dict(type='Point', coordinates=list(coords))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_geom(data: dict) -> dict:\n geom = geom_from_geojson(data)\n validate_geom(geom)\n return geom", "def parse_point(line):\n return json.loads(line)", "def to_data(self):\n point = {\n 'point': [self.pt[0], self.pt[1], self.pt[2]],\n 'layer_height': self.layer_height,\n\n 'mesh_normal': self.mesh_normal.to_data(),\n 'up_vector': self.up_vector.to_data(),\n 'frame': self.frame.to_data(),\n\n 'extruder_toggle': self.extruder_toggle,\n 'velocity': self.velocity,\n 'wait_time': self.wait_time,\n 'blend_radius': self.blend_radius,\n\n 'closest_support_pt': self.closest_support_pt.to_data() if self.closest_support_pt else None,\n 'distance_to_support': self.distance_to_support,\n\n 'is_feasible': self.is_feasible,\n\n 'attributes': utils.get_jsonable_attributes(self.attributes)\n }\n return point", "def preprocess_point(point):\n # TODO: Replace IDs with actual information available at the\n # reference schema\n \n return {\n 'uuid': point['UUID'],\n 'operator_id': point['OperatorID'],\n 'usage_type_id': point['UsageTypeID'],\n 'country': point['AddressInfo']['Country']['Title'],\n 'address': ', '.join(filter(lambda item: item is not None, [\n point['AddressInfo']['Title'],\n point['AddressInfo']['AddressLine1'],\n point['AddressInfo']['AddressLine2'],\n point['AddressInfo']['Town'],\n ])),\n 'latitude': point['AddressInfo']['Latitude'],\n 'longitude': point['AddressInfo']['Longitude'],\n 'num_points': point['NumberOfPoints'],\n **aggregate_points(point),\n }", "def convert_coord_data_to_dict(data):\r\n coord_header = data['coord'][0]\r\n coords = data['coord'][1]\r\n pct_var = data['coord'][3]\r\n coords_dict = {}\r\n pct_var_dict = {}\r\n coords_dict['pc vector number'] = coord_header\r\n for x in range(len(coords)):\r\n coords_dict[str(x + 1)] = coords[0:, x]\r\n pct_var_dict[str(x + 1)] = pct_var[x]\r\n\r\n return coords_dict, pct_var_dict", "def _zk_data_to_dict(data):\n if not isinstance(data, str):\n data = data.decode('utf-8')\n return json.loads(data)", "def ip_to_geojson(ipaddress, name=\"Point\"):\n\n geo = ip_to_geo(ipaddress)\n\n point = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {\n \"name\": name\n },\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n geo[\"longitude\"],\n geo[\"latitude\"]\n ]\n }\n }\n ]\n }\n\n return point", "def geojson(self, feature_id):\n lat, lon = self.lat_lon\n return {\n 'type': 'Feature',\n 'id': feature_id,\n 'geometry': {\n 'type': 'Point',\n 'coordinates': (lon, lat),\n },\n }", "def point_to_GeoJSON(point_geometry, point_crs, out_path=None):\n\n # convert postgis bbox to corners array\n lng, lat = point_to_lng_lat(point_geometry)\n\n # convert point to json\n geojson = {\n \"type\": \"FeatureCollection\",\n \"crs\": {\n \"type\": \"name\",\n \"properties\": {\n \"name\": f\"urn:ogc:def:crs:EPSG::{point_crs}\"\n }\n },\n \"features\": [\n {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [\n [lng, lat]\n ]\n }\n }\n ]\n }\n\n # save geojson json\n if(out_path is not None):\n with open(out_path, 'w') as outfile:\n json.dump(geojson, outfile)\n\n return geojson", "def GEOJsonToEWKT(dict): \n if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition \n return dict['__GEOSGeometry__'][1][0]\n \n return dict", "def parse_bytes_to_dict(bytes_to_parse):\n return ast.literal_eval(bytes_to_parse.decode(\"utf-8\"))", "def create_zip_dict() -> dict:\n with open('zip_coordinates.json', 'r') as zip_map:\n return 
json.loads(zip_map.read())", "def readPointFile(filename):\n pointInfo = {}\n f = open(filename, 'r')\n for data in f.readlines():\n point, info = data.split(\"==\")\n lng, lat = [float(p) for p in point.split(\",\")]\n pointInfo[(lng, lat)] = parseInfoToDict(info)\n f.close()\n\n return pointInfo", "def wkt_to_geojson(wkt_data: str) -> dict:\n parsed_wkt = wkt.loads(wkt_data)\n\n geo = geometry.mapping(parsed_wkt)\n\n if geo[\"type\"] == \"GeometryCollection\":\n feature_collection = []\n for g in geo[\"geometries\"]:\n feature = geojson.Feature(geometry=g)\n feature_collection.append(feature)\n return geojson.FeatureCollection(feature_collection)\n else:\n return geojson.Feature(geometry=geo)", "def geojson2postgis(self, filepath, table_name, geo_type):\n map_data = gpd.GeoDataFrame.from_file(filepath)\n # Maybe you want to change link address\n link = \"postgresql://{0}:{1}@{3}:5432/{2}\".format(self.username, self.password, self.dbname, self.host)\n engine = create_engine(link, encoding='utf-8')\n map_data = self.dict_to_json(map_data)\n map_data['geometry'] = map_data['geometry'].apply(lambda x: WKTElement(x.wkt, 4326))\n # Maybe you want to change 'replace' to 'append' in the future\n map_data.to_sql(\n name=table_name,\n con=engine,\n if_exists='replace',\n dtype={'geometry': Geometry(geometry_type=geo_type, srid=4326)}\n )", "def to_dict(waypoint):\n if isinstance(waypoint, tuple):\n return {\"lat\": waypoint[0], \"lng\": waypoint[1]}\n else:\n return waypoint", "def geom_from_geojson(data: dict) -> dict:\n if set(('coordinates', 'type')).issubset(set(data.keys())):\n # already a geom\n ret = data\n else:\n try:\n # feature\n ret = as_geom(data['geometry'])\n except KeyError:\n try:\n # FeatureCollection\n features = data['features']\n except KeyError:\n raise GeoJSONError(f'Invalid GeoJSON: {data}')\n\n if len(features) > 1:\n raise GeoJSONError(\n 'FeatureCollection has multiple features. 
Only one feature'\n ' can be used to get geometry.')\n\n ret = as_geom(features[0])\n return ret", "def __dump_point(obj, big_endian):\n wkb_string = b''\n\n if big_endian:\n wkb_string += BIG_ENDIAN\n else:\n wkb_string += LITTLE_ENDIAN\n\n coords = obj['coordinates']\n num_dims = len(coords)\n if num_dims == 2:\n type_byte_str = __WKB['2D']['Point']\n elif num_dims == 3:\n type_byte_str = __WKB['Z']['Point']\n elif num_dims == 4:\n type_byte_str = __WKB['ZM']['Point']\n else:\n pass\n # TODO: raise\n\n if not big_endian:\n # reverse the byte ordering for little endian\n type_byte_str = type_byte_str[::-1]\n wkb_string += type_byte_str\n\n if big_endian:\n byte_fmt = '>'\n else:\n byte_fmt = '<'\n byte_fmt += 'd' * num_dims\n\n wkb_string += struct.pack(byte_fmt, *coords)\n return wkb_string", "def to_dict(self, data):\n return json.loads(json.dumps(data))", "def as_dictionary(self) -> dict:\n dict_point = {key: vars(self)[key] for key in vars(self).keys() if not key.startswith('_')}\n dict_point['SHAPE'] = self.geometry\n return dict_point", "def data_xy(position) -> dict:\n\n return {\"x\": position[0], \"y\": position[1]}", "def keypoints_to_json(datum):\n jsonDict = dict()\n jsonDict[\"pose_keypoints_2d\"] = datum.poseKeypoints.tolist()\n if datum.faceKeypoints.size > 0 :\n jsonDict[\"face_keypoints_2d\"] = []\n else : \n jsonDict[\"face_keypoints_2d\"] = datum.faceKeypoints.tolist()\n jsonDict[\"hand_left_keypoints_2d\"] = datum.handKeypoints[0].tolist()\n jsonDict[\"hand_right_keypoints_2d\"] = datum.handKeypoints[1].tolist()\n return jsonDict", "def map_data_to_dict(map_data: MapData) -> Dict[str, Any]:\n properties = map_data.serialize_init_args(obj=map_data)\n properties[\"__type\"] = map_data.__class__.__name__\n return properties", "def json2polygon(geojson_str):\n geojson_object = geojson.loads(geojson_str)\n return geometry.shape(geojson_object)", "def parse_mapzen_response(txt):\n geoDict = {}\n receivedJSONDict = json.loads(txt)\n if receivedJSONDict['features']:\n geoDict['status'] = \"OK\"\n geoDict['label'] = receivedJSONDict['features'][0]['properties']['label']\n geoDict['confidence'] = receivedJSONDict['features'][0]['properties']['confidence']\n geoDict['latitude'] = receivedJSONDict['features'][0]['geometry']['coordinates'][1]\n geoDict['longitude'] = receivedJSONDict['features'][0]['geometry']['coordinates'][0]\n else:\n \tgeoDict['status'] = None\n return geoDict", "def string_to_json_position(x):\n\n s = x.split(',')\n return {'lat': float(s[0]), 'lng': float(s[1])}", "def bbox_to_geom(bbox: Tuple[float, float, float, float]) -> Dict:\n # TODO: Handle dateline crossing geometry\n return {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [bbox[0], bbox[3]],\n [bbox[0], bbox[1]],\n [bbox[2], bbox[1]],\n [bbox[2], bbox[3]],\n [bbox[0], bbox[3]],\n ]\n ],\n }", "def default(self, o): \n if isinstance(o, GEOSGeometry):\n dictval = json.loads(o.geojson)\n #raise Exception(o.ewkt)\n dictval['__GEOSGeometry__'] = ['__init__', [o.ewkt]] #json class hint; see http://json-rpc.org/wiki/specification\n return dictval\n else:\n super(DjangoGEOJSONEncoder, self).default(o)", "def from_bytes(buf: bytes) -> 'ProposalInfo':\n proposal_info_in_dict: dict = json_loads(buf.decode())\n proposal_info_in_dict[\"id\"] = bytes.fromhex(proposal_info_in_dict[\"id\"])\n proposal_info_in_dict[\"proposer\"] = Address.from_string(proposal_info_in_dict[\"proposer\"])\n return ProposalInfo(**proposal_info_in_dict)", "def get_geojson_feature(id, raw_bbox_string, properties_dict):\n coords = 
raw_bbox_string.split()\n \n # Tesseract uses ints, but allow floats\n for i, val in enumerate(coords):\n coords[i] = float(val)\n # bbox order = # x0 y0 x1 y1\n \n bbox_json_obj = geojson.Polygon([[\n (coords[0], coords[1]), \n (coords[0], coords[3]), \n (coords[2], coords[3]), \n (coords[2], coords[1]),\n (coords[0], coords[1])\n ]])\n return geojson.Feature(id, bbox_json_obj, properties=properties_dict)" ]
[ "0.61113554", "0.60749865", "0.59794545", "0.58607936", "0.58315086", "0.57933426", "0.5684637", "0.5668959", "0.56447315", "0.55123854", "0.5489289", "0.54598", "0.5451788", "0.54298854", "0.54289055", "0.5410997", "0.5388101", "0.5378883", "0.5308924", "0.5280801", "0.5275772", "0.52756923", "0.5274192", "0.52440804", "0.5241474", "0.5229321", "0.5212802", "0.5212125", "0.5201794", "0.5199889" ]
0.6990642
0
Cluster prey sequences according to mapped RefSeq
def refseq_based_clustering(self): self.refseq_based = NonRedSetDict() for prey in self.ivv_info.Prey_info().preys(): refseqid = self.get_refseq(prey) if refseqid: self.refseq_based.append_Dict(refseqid, prey)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refseq_based_clustering(self):\n self.refseq_based = Usefuls.NonRedSet.NonRedSetDict()\n for prey in self.ivv_info.Prey_info().preys():\n refseqid = self.get_refseq(prey)\n if refseqid:\n self.refseq_based.append_Dict(refseqid, prey)", "def test_first_sequence_placed_in_first_cluster(self):\n seq_len = 20\n num_seqs = 20\n bases = list(standard_bases)\n # Function has different behaviour at below and above seq_len\n for seq_len in [seq_len - 1, seq_len + 1]:\n with self.subTest(min_match_len=seq_len):\n for _ in range(20): # Run on a number of random alignments\n records = []\n for i in range(num_seqs):\n rand_seq = \"\".join(\n [random.choice(bases) for _ in range(seq_len)]\n )\n records.append(SeqRecord(Seq(rand_seq), id=f\"s{i}\"))\n alignment = MultipleSeqAlignment(records)\n result = AlignedSeq.kmeans_cluster_seqs_in_interval(\n [0, seq_len - 1], alignment, 1\n )\n self.assertTrue(result[0][0] == \"s0\")", "def prepareParrallelize(self,segs):\n\n angles = numpy.array([s.angle for s in segs ])\n angles[numpy.where(angles<0)] += _pi # we care about direction, not angle orientation\n clList = clusterValues(angles, 0.15, refScaleAbs='abs')\n\n for cl in clList:\n meanA = angles[list(cl)].mean()\n for i in cl:\n seg = segs[i]\n seg.newAngle = meanA if seg.angle>=0. else meanA-_pi", "def _collapse_exact_matches(self, seqs, prefix_length, suffix_length):\r\n cluster_map = {}\r\n for seq_id, seq in seqs:\r\n seq_id = seq_id.split()[0]\r\n seq_hash = self._build_seq_hash(seq, prefix_length, suffix_length)\r\n try:\r\n cluster_map[seq_hash].append(seq_id)\r\n except KeyError:\r\n cluster_map[seq_hash] = [seq_id]\r\n\r\n return cluster_map.values()", "def self_affinity(self, sequence):\n pass", "def sort_seqs_by_clustersize(seqs, mapping):\r\n ids = []\r\n seqs_cache = {}\r\n for header, seq in seqs:\r\n id = header.split(\"|\")[0]\r\n id = id.rstrip(\" \")\r\n ids.append(id)\r\n seqs_cache[id] = (header, seq)\r\n\r\n for id in sort_ids(ids, mapping):\r\n yield seqs_cache[id]", "def test_fastq_map():\n cluster = clust.Clustering.from_fastq(TMP + 'map.fastq', 4, 'ACGT',\n threshold=2, prefix=1)\n uid1_expect = 'AAAACCCC'\n uid2_expect = 'CCCCAAAA'\n seq1_expect = 'ACCTCTCCCTGTGGGTCATGTGACT'\n seq2_expect = 'TTGTTTGAAAAACCTCGAAAGTAAC'\n\n assert uid1_expect in cluster, \"%r not in %r\" % (uid1_expect, list(cluster.keys()))\n assert uid2_expect in cluster, \"%r not in %r\" % (uid2_expect, list(cluster.keys()))\n assert cluster[uid1_expect].sequence.sequence == seq1_expect, \\\n \"%r != %r\" % (cluster[uid1_expect].sequence.sequence, seq1_expect)\n assert cluster[uid2_expect].sequence.sequence == seq2_expect, \\\n \"%r != %r\" % (cluster[uid2_expect].sequence.sequence, seq2_expect)\n assert cluster[uid1_expect].size == 5, \"%r != %r\" % (cluster[uid1_expect].size, 5)\n assert cluster[uid2_expect].size == 5, \"%r != %r\" % (cluster[uid2_expect].size, 5)", "def find_linked_clusters(self, data, index, cluster, linked_indices, re = False):\n\n if np.size(linked_indices) != 0.0:\n linked_clusters = [self.cluster_arr[1,ID] for ID in linked_indices \\\n if self.cluster_arr[1,ID] != -1]\n else:\n linked_clusters = []\n\n if len(linked_clusters) != 0:\n # Initial clustering\n\n if re is False:\n\n # Identify largest common ancestor of the linked clusters - the\n # antecessor.\n linked_clusters = [self.clusters[ID].antecessor for ID in linked_clusters]\n linked_clusters = remdup_preserve_order(linked_clusters)\n\n # Check to see if the data point satisfies the local conditions\n if self.method==1:\n 
linked_clusters = local_links(self, index, data, cluster, linked_clusters, re=re)\n\n # Check to see if the data point satisfied the global conditions\n if len(linked_clusters)>1:\n var = []\n for link in linked_clusters:\n var = get_var(self, data, cluster, link, var)\n linked_clusters, var = remove_outliers(self, data, cluster, linked_clusters, var, 5., 7.)\n\n else:\n # Relax phase\n\n # Get the linked clusters\n linked_clusters = [self.clusters[ID] for ID in linked_clusters]\n linked_clusters = remdup_preserve_order(linked_clusters)\n\n # Check to see if the data point satisfied the local conditions\n if self.method==1:\n linked_clusters = local_links(self, index, data, cluster, linked_clusters, re=re)\n\n if len(linked_clusters) >= 1:\n # Check to see if the data point satisfied the global conditions\n var = []\n for link in linked_clusters:\n var = get_var(self, data, cluster, link, var)\n linked_clusters, var = remove_outliers(self, data, cluster, linked_clusters, var, 5., 7.)\n\n # Now identify where the data point can be slotted into an already\n # established hierarchy\n antecessors = [link.antecessor for link in linked_clusters]\n antecessors = remdup_preserve_order(antecessors)\n antecessors = sorted(antecessors, key=get_cluster_idx, reverse=True)\n\n # Find out where the data point can be slotted in to an already\n # established hierarchy. This is based on the floor/ceiling\n # intensity level of the cluster in question. If you link\n # incorrectly then the hierarchy makes no sense.\n if len(antecessors)==1:\n linked_clusters = find_linked_clusters_single_antecessor(self, data, cluster, linked_clusters)\n else:\n linked_clusters = find_linked_clusters_multiple_antecessors(self, data, cluster, linked_clusters, antecessors)\n\n # If method = PPV then we need to check the linked clusters to prevent\n # velocity components from the same position from being linked to the\n # same cluster\n if len(linked_clusters) >= 1.0:\n if self.method == 1:\n if re is False:\n linked_clusters = multi_component_check(self, data, cluster, linked_clusters)\n else:\n linked_clusters = multi_component_check(self, data, cluster, linked_clusters, re = re )\n\n linked_clusters = sorted(linked_clusters, key=get_cluster_idx, reverse=True)\n\n return linked_clusters", "def get_clustered_bins(seqids,ali,delimiter=None,rename=None,outname=None):\n if delimiter!=None and rename!=None:\n raise ValueError('ERROR: both options not compatible either delimiter or rename') \n seq_dict={}\n pats=[None]*len(seqids)\n for i,c in enumerate(seqids):\n if rename!=None: pats[i]=_rename(c,rename)\n elif delimiter!=None: pats[i]=c.split(delimiter)[0]\n else: pats[i]=c\n for seq in ali.ali:\n maxscore=None\n assigned=None\n for seqid in seqids:\n if rename!=None: pat=_rename(seqid,rename)\n elif delimiter!=None: pat=seqid.split(delimiter)[0]\n else: pat=seqid\n if seqid == seq:\n assigned=pat\n maxscore=get_subscore(ali.ali[seqid], ali.ali[seq])\n break\n if not maxscore:\n maxscore=get_subscore(ali.ali[seqid], ali.ali[seq])\n assigned=pat\n else:\n score=get_subscore(ali.ali[seqid], ali.ali[seq])\n if score>maxscore:\n maxscore=score\n assigned=pat\n if assigned not in seq_dict:\n seq_dict[assigned]=[(seq,ali.ali[seq],maxscore)]\n else:\n seq_dict[assigned].append((seq,ali.ali[seq],maxscore))\n for key in seq_dict:\n seq_dict[key].sort(key=lambda x: x[2],reverse=True)\n totlen=len(seq_dict[key])\n seq_dict[key]=seq_dict[key][0:int(0.75*totlen+1)]\n print key, \"Cluster with %d total sequences and with %d when 0.75 
best\"%(totlen,len(seq_dict[key]))\n if outname!=None:\n with open(outname+\".pkl\",\"w\") as f:\n pickle.dump(seq_dict,f)\n return pats,seq_dict", "def atlas_clusters():\n pass", "def run(self,\n seq_fasta_fn,\n subst_matrix_fn,\n cost_gap_open,\n clustering):", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def rmsd_cluster(input, ref, output, clusters):\n ifs = oemolistream()\n if not ifs.open(input):\n OEThrow.Fatal(\"Unable to open %s for reading\" % input)\n poses = list()\n mol = OEMol()\n while OEReadMolecule(ifs, mol):\n mol_copy = OEMol(mol)\n #print(dir(mol_copy))\n #print(mol_copy.NumConfs())\n for conf in mol_copy.GetConfs():\n poses.append(conf)\n ifs.close()\n print(\"%d poses read\" % len(poses))\n\n # Create a list of centroids, starting with first molecule.\n centroids = list()\n\n # Make first pose our first centroid.\n centroids.append(poses.pop(0))\n if int(clusters) < len(poses):\n print(\"Will return %s poses...\" % clusters)\n else:\n print(\"Will return %s poses...\" % (len(poses)+1))\n while len(centroids) < int(clusters) and len(poses)>0:\n print(len(centroids))\n # Compute distance from all poses to closest centroid.\n min_rmsd = numpy.zeros([len(poses)])\n for (pose_index, pose) in enumerate(poses):\n centroids_rmsds = [OERMSD(pose, centroid) for centroid in centroids]\n min_rmsd[pose_index] = min(centroids_rmsds)\n # Find pose that is farthest away from all current centroids.\n farthest_pose_index = min_rmsd.argmax()\n print(\"Farthest pose is %d at %f A away from centroids\" % (farthest_pose_index, min_rmsd[farthest_pose_index]))\n # Move farthest pose to centroids.\n centroids.append(poses.pop(farthest_pose_index))\n # Write out all centroids.\n ofs=oemolostream()\n if not ofs.open(output):\n OEThrow.Fatal(\"Unable to open %s for writing\" % itf.GetString(\"-o\"))\n for mol in centroids:\n #OEWritePDBFile(ofs, mol)\n OEWriteMolecule(ofs, mol)\n\n print(\"Done!\")\n\n return 0", "def clusterValues( values, relS=0.1 , refScaleAbs='range' ):\n if len(values)==0:\n return []\n if len(values.shape)==1:\n sortedV = numpy.stack([ values , numpy.arange(len(values))] ,1)\n else:\n # Assume value.shape = (N,2) and index are ok\n sortedV = values \n sortedV = sortedV[ numpy.argsort(sortedV[:,0]) ]\n\n sortedVV = sortedV[:,0]\n refScale = sortedVV[-1]-sortedVV[0]\n #sortedVV += 2*min(sortedVV)) # shift to avoid numerical issues around 0\n\n #print sortedVV\n class Cluster:\n def __init__(self, delta, sum, indices):\n self.delta = delta\n self.sum = sum\n self.N=len(indices)\n self.indices = indices\n def size(self):\n return self.delta/refScale\n \n def combine(self, c):\n #print ' combine ', self.indices[0], c.indices[-1], ' -> ', sortedVV[c.indices[-1]] - sortedVV[self.indices[0]]\n newC = Cluster(sortedVV[c.indices[-1]] - sortedVV[self.indices[0]],\n self.sum+c.sum,\n self.indices+c.indices)\n return newC\n\n def originIndices(self):\n return tuple(int(sortedV[i][1]) for i in self.indices)\n\n def size_local(self):\n return self.delta / sum( sortedVV[i] for i in self.indices) *len(self.indices)\n def size_range(self):\n return self.delta/refScale\n def size_abs(self):\n return self.delta\n\n if refScaleAbs=='range':\n Cluster.size = size_range\n elif refScaleAbs=='local':\n Cluster.size = 
size_local\n elif refScaleAbs=='abs':\n Cluster.size = size_abs\n \n class ClusterPair:\n next=None\n prev=None\n def __init__(self, c1, c2 ):\n self.c1=c1\n self.c2=c2\n self.refresh()\n def refresh(self):\n self.potentialC =self.c1.combine(self.c2)\n self.size = self.potentialC.size()\n def setC1(self, c1):\n self.c1=c1\n self.refresh()\n def setC2(self, c2):\n self.c2=c2\n self.refresh()\n \n #ave = 0.5*(sortedVV[1:,0]+sortedV[:-1,0])\n #deltaR = (sortedV[1:,0]-sortedV[:-1,0])/ave\n\n cList = [Cluster(0,v,(i,)) for (i,v) in enumerate(sortedVV) ]\n cpList = [ ClusterPair( c, cList[i+1] ) for (i,c) in enumerate(cList[:-1]) ]\n resetPrevNextSegment( cpList )\n\n #print cpList\n def reduceCL( cList ):\n if len(cList)<=1:\n return cList\n cp = min(cList, key=lambda cp:cp.size) \n #print '==', cp.size , relS, cp.c1.indices , cp.c2.indices, cp.potentialC.indices\n\n while cp.size < relS:\n if cp.next:\n cp.next.setC1(cp.potentialC)\n cp.next.prev = cp.prev\n if cp.prev:\n cp.prev.setC2(cp.potentialC)\n cp.prev.next = cp.next\n cList.remove(cp)\n if len(cList)<2:\n break\n cp = min(cList, key=lambda cp:cp.size) \n #print ' -----> ', [ (cp.c1.indices , cp.c2.indices) for cp in cList]\n return cList\n\n cpList = reduceCL(cpList)\n if len(cpList)==1:\n cp = cpList[0]\n if cp.potentialC.size()<relS:\n return [ cp.potentialC.originIndices() ]\n #print cpList\n if cpList==[]:\n return []\n finalCL = [ cp.c1.originIndices() for cp in cpList ]+[ cpList[-1].c2.originIndices() ]\n return finalCL", "def motif_report(chrom, cluster):\n\tsmallest_ref_pos = min([x[0] for x in cluster])\n\tupper_bound_ref = max([x[0] + len(x[1]) - 1 for x in cluster])\n\toriginal_region = get_region_from_das(assembly, chrom, smallest_ref_pos, upper_bound_ref).upper()\n\trefseq = original_region\n\tif not refseq or len(refseq)==0:\n\t\tprint \" problem obtaining region on the reference sequence:\"\n\t\tprint assembly, chrom, smallest_ref_pos, upper_bound_ref\n\t\treturn None, None\n\n\tseqinfo = []\n\tmotifs = []\n\talignment = []\n\t# I don't really need the alignment here, but since I have it implemented ...\n\talt = [smallest_ref_pos, refseq[0], refseq[0], 1, 1, 1, None]\n\talignment.append(refseq)\n\tseqinfo.append(alt)\n\tfor v in cluster:\n\t\t[pos, ref, alts, var_counts, total_count, max_reach] = v\n\t\tif alts=='' or var_counts=='': continue\n\t\trelative_pos = pos - smallest_ref_pos\n\t\tcounts = var_counts.split(\",\")\n\t\talternatives = alts.split(\",\")\n\t\ta = -1 # index of the alternative sequence fo this position\n\t\tfor sequence in alternatives:\n\t\t\ta += 1\n\t\t\tmotif = find_motif_in_pair(sequence, ref)\n\t\t\tif motif and not motif in motifs: motifs.append(motif)\n\t\t\talt = [pos, ref, sequence, int(counts[a]), total_count, len(ref), motif]\n\t\t\tseqinfo.append(alt)\n\t\t\tadd_modified_seq(alignment, sequence, ref, relative_pos)\n\traw_seqs = [original_region]\n\tfreq = {}\n\tfreq[original_region] = \"1:1\"\n\tfor s in range(1,len(alignment)):\n\t\tmodified_seq = alignment[s]\n\t\t[pos, ref, sequence, count, total_count, max_reach, motif] = seqinfo[s]\n\t\traw_seq = modified_seq.replace('-','')\n\t\t# it seemed to me at one point that eac might have duplicates, but they seem\n\t\t# to have caught them and assigned them a frequency of 0\n\t\tif count==0: continue\n\t\tif raw_seq in raw_seqs:\n\t\t\tpass\n\t\telse:\n\t\t\traw_seqs.append(raw_seq)\n\t\tfreq[raw_seq] = \"%d:%d\" % (count,total_count)\n\n\t########### REPORT/STORE TO DB ##########################\n\t# never mind the clustering - I am not 
sure any more that it helps\n\t# motif counting though should protect me from counting as diffent\n\t# variants when a motif indel is assigned to a different place in the repeat expansion\n\t# as different a variant\n\t#for cluster in find_comparable_patterns(raw_seqs, motifs):\n\treport_items= []\n\tfor seq in raw_seqs:\n\t\tpattern = decomposition(seq, motifs)\n\t\treport_items.append(prettyprint(pattern) + \",\" + freq[to_string(pattern)])\n\n\n\treturn \",\".join(motifs), \";\".join(report_items)", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def compute_clusters(self, documents):\n ###TODO\n for d in range(0, len(documents)):\n maxi = 999999999\n for cid in range(0, len(self.means)):\n dist = self.distance(documents[d], self.means[cid], self.norms[cid])\n if dist < maxi:\n maxi = dist\n clust = cid \n self.cluster[d] = clust", "def _relocate_clusters(self, cluster_labels):\n for cluster_label in range(self.k):\n if cluster_labels[cluster_label] is not None:\n # mean of the pixels assigned to cluster\n p_sum, p_count = np.asarray(\n cluster_labels[\n cluster_label\n ]).sum(axis=0), len(cluster_labels[cluster_label])\n self._clusters[cluster_label] = p_sum / p_count", "def map_clusters(labels, rows):\r\n counts = Counter(labels)\r\n mappings = {c + 1: ((counts[c] / rows) * 100) for c in sorted(counts)}\r\n\r\n return mappings", "def centerStar_align(refName, dictofSeq):\n dictofFinalStr = {}\n refString = dictofSeq.pop(refName)\n #remove the center sequence from the list of sequence so it won't align to itself\n centerString = refString\n #construct a pointer to center squence\n for name in dictofSeq:\n alignment = sequence_align(centerString, dictofSeq.get(name))\n centerString = alignment[0]\n #print(centerString)\n strAligned = alignment[1]\n #print(strAligned)\n dictofFinalStr[name] = strAligned\n #print(len(listofFinalStr))\n\n for seq in dictofFinalStr:\n #Aligns all the sequence to the final center sequence with all the gaps inserted\n finalScore = gap_align(centerString, dictofFinalStr[seq])\n finalStr = finalScore\n dictofFinalStr[seq] = finalStr\n\n dictofFinalStr[refName] = (centerString)\n return dictofFinalStr", "def recluster(input_jet, alpha=None):\n\n def _rec(jet, parent, node_id, outers_list):\n \"\"\"\n Recursive function to get a list of the leaves\n \"\"\"\n if jet[\"tree\"][node_id, 0] == -1:\n outers_list.append(jet[\"content\"][node_id])\n else:\n _rec(jet, node_id, jet[\"tree\"][node_id, 0], outers_list)\n _rec(jet, node_id, jet[\"tree\"][node_id, 1], outers_list)\n\n return outers_list\n\n outers = []\n jet_const = 
np.asarray(_rec(input_jet, -1, input_jet[\"root_id\"], outers))\n\n raw_tree, \\\n idx, \\\n jet_content, \\\n root_node, \\\n Nconst, \\\n N_leaves_list, \\\n linkage_list = ktAntiktCA(jet_const, alpha=alpha)\n\n tree, \\\n content, \\\n node_id, \\\n tree_ancestors = _traverse(root_node,\n jet_content,\n tree_dic=raw_tree,\n root_idx=None,\n Nleaves=Nconst,\n )\n\n jet = {}\n jet[\"root_id\"] = 0\n jet[\"tree\"] = np.asarray(tree).reshape(-1, 2)\n jet[\"content\"] = np.asarray([np.asarray(c) for c in content]).reshape(-1, 2)\n\n # print(jet)\n\n # Save reclustered tree\n out_dir = 'data/'\n algo = str(jet_dic[\"name\"]) + '_' + str(alpha)\n out_filename = out_dir + str(algo) + '.pkl'\n print('out_filename=', out_filename)\n with open(out_filename, \"wb\") as f:\n pickle.dump(jet, f, protocol=2)\n\n return node_id, linkage_list, Nconst, tree_ancestors", "def _cluster_segments_all_way(self, segmented_instances, labels, \\\n end_points, stats, cluster_thresh=0.5):\n\n #self.showme(segmented_instances, 'main img')\n segment_association_list = []\n max_num_end_points= 0\n\n # for each stem segment\n for i in range(0, len(labels)):\n # each end point in the current segment i\n if max_num_end_points < len(end_points[i]):\n max_num_end_points = len(end_points[i])\n for k in range(0, len(end_points[i])):\n angle_list=[]\n # find the segment that is most likely connected to segment i at end point[i][k]\n for j in range(0, len(labels)):\n # make sure we are not trying to connect the segment to itself\n if i!= j:\n # angle calculates the angle between the line stats['centroid'][i]-end_points[i][k]\n # and stats['centroid'][i]-stats['centroid'][j]\n\n angle = self._ang([stats['centroid'][i],end_points[i][k]], \\\n [stats['centroid'][i], stats['centroid'][j]] )\n # if the angle value is within the acceptable range of +/- angle_thresh\n if angle<=self.angle_thresh or angle>=360-self.angle_thresh:\n other_angle, other_seg_section, end_point_dist = self._get_best_fit(segmented_instances, \\\n len(labels), \\\n stats, end_points,\\\n i, j, k, pos_angle=angle<=self.angle_thresh)\n # if the best fit segment also has a small angle between its\n # end point-centroid line and centroid-centroid line,\n # add it to segments connected to segment i\n if other_angle!=None and other_angle<=self.angle_thresh:\n angle_list.append((j, other_seg_section, other_angle, end_point_dist, angle))\n #Sort the list of stem segments connected to i by end_point_dist\n angle_list = sorted(angle_list, key=lambda x:x[3])\n #Sorting by the Euclidian distance of the end_point_dist and the other_angle does not change end result\n #angle_list = sorted(angle_list, key=lambda x:(math.sqrt(x[3]**2.0+x[2]**2.0)))\n # the angle value reflects how far segment k is from the straight line\n # going through the centroids\n if len(angle_list)>0:\n # (i, j, k, l, angle between i and centroid line, angle between j and centroid line, distance between closest end points k in seg i and l in seg j)\n segment_association_list.append((i,angle_list[0][0],k, angle_list[0][1], angle_list[0][4], angle_list[0][2], angle_list[0][3]))\n\n\n # sort slope differences in an increasing order\n segment_association_list = sorted(segment_association_list,key=lambda x:(x[6]))\n\n # find best match by iteretively selecting the smallest difference\n # and adding it to the ith cluster\n cluster_list = []\n cluster = np.full(len(labels),None)\n colored_clusterImg = np.zeros(segmented_instances.shape).astype(np.uint8)\n #clusterImg = 
np.zeros(segmented_instances.shape).astype(np.uint8)\n\n # initialize cluster list to single clusters contianing only each individual segment\n for i in range(0, len(labels)):\n cluster[i]=i\n cluster_list.append([i])\n #self.showme(clusterImg, str(i))\n\n visited=np.full((len(labels),max_num_end_points), False)\n\n #cluster=np.frompyfunc(list,1,1)(cluster) # allows us to append to only the specified list end_points[i]\n new_cluster_num=0\n color_offset=len(labels)\n\n # for each pair of segments in our list of best fit segments\n for curr_tuple in segment_association_list:\n img = np.zeros(segmented_instances.shape)\n i = curr_tuple[0] # index of first segment\n j = curr_tuple[1] # index of second segment in the tuple\n i_section = curr_tuple[2] #end point number in segment i\n j_section = curr_tuple[3] #end point number in segment j\n angle = curr_tuple[4]\n other_angle = curr_tuple[5]\n end_point_dist = curr_tuple[6] #distance between the connecting end points of segments i and j\n img[segmented_instances== i]= 255\n img[segmented_instances== j]= 255\n if (visited[i][i_section]==False)and(visited[j][j_section]==False):\n #cv2.line(clusterImg,(end_points[i][i_section][0],end_points[i][i_section][1]),\\\n # (end_points[j][j_section][0], end_points[j][j_section][1]),150,2)\n #self.showme(clusterImg, str(i))\n visited[i][i_section]=True\n visited[j][j_section]=True\n cluster_num = cluster[i]\n if cluster[i]!=cluster[j]:\n other_cluster_num = cluster[j]\n cluster_list[cluster_num] = list(set(cluster_list[cluster_num]+\\\n copy.deepcopy(cluster_list[other_cluster_num])))\n # update cluster numbers for all segments moved into new cluster\n for seg in cluster_list[other_cluster_num]:\n cluster[seg]=cluster_num\n # update cluster numbers for clusters larger than cluster to be removed\n for idx in range(0, len(cluster)):\n if (cluster[idx]>other_cluster_num):\n cluster[idx]= cluster[idx]-1\n del cluster_list[other_cluster_num]\n\n\n #show clustered segments\n color = 0\n cluster_num = 0\n cluster_mask=[]\n\n for c in cluster_list:\n color = color+0.1\n cluster_mask.append(np.zeros(segmented_instances.shape).astype(np.uint8))\n\n for i in c:\n cluster_mask[cluster_num][(segmented_instances == labels[i])]=1\n colored_clusterImg[(segmented_instances == labels[i])]= int(color*255)\n \"\"\"if self.key in ['../data/images/image1672', '../data/images/image1289']:\n self.showme(colored_clusterImg)\"\"\"\n cluster_num +=1\n\n return cluster_mask, colored_clusterImg", "def findCenterSeq(dictofSeq):\n seqLen = len(dictofSeq)\n pwMatrix = [[\"-\"]*seqLen for i in range(seqLen)]\n listofSeq = []\n for key in dictofSeq:\n listofSeq.append(dictofSeq.get(key))\n \n findMin = []\n acc = 0\n for seq in listofSeq:\n for seq2 in listofSeq:\n # in1 gives row, in2 gives column \n in1 = listofSeq.index(seq)\n in2 = listofSeq.index(seq2)\n pwMatrix[in1][in2] = pairwise(seq, seq2)\n acc += pwMatrix[in1][in2]\n #TypeError: 'int' object is not subscriptable\n findMin.append(acc)\n acc = 0\n posSeq = findMin.index(min(findMin))\n refString = listofSeq[posSeq]\n refName = \"\"\n \n for name, seq in dictofSeq.items():\n if seq == refString:\n refName = name\n \n print(refName)\n \n return refName", "def cross_cluster_timeseries(data1, data2, roi_mask_nparray, n_clusters, similarity_metric, affinity_threshold, cluster_method = 'ward'):\n \n \n \n import scipy as sp\n import time\n import sklearn as sk\n from sklearn import cluster, datasets, preprocessing\n from sklearn.cluster import FeatureAgglomeration\n from 
sklearn.feature_extraction import image\n\n \n \n print(\"Calculating Cross-clustering\")\n print(\"Calculating pairwise distances between areas\")\n \n dist_btwn_data_1_2 = np.array(sp.spatial.distance.cdist(data1.T, data2.T, metric = similarity_metric))\n sim_btwn_data_1_2=1-dist_btwn_data_1_2\n sim_btwn_data_1_2[np.isnan(sim_btwn_data_1_2)]=0\n sim_btwn_data_1_2[sim_btwn_data_1_2<affinity_threshold]=0\n\n print(\"Calculating pairwise distances between voxels in ROI 1 \")\n dist_of_1 = sp.spatial.distance.pdist(sim_btwn_data_1_2, metric = 'euclidean')\n dist_matrix = sp.spatial.distance.squareform(dist_of_1)\n sim_matrix=1-sk.preprocessing.normalize(dist_matrix, norm='max')\n sim_matrix[sim_matrix<affinity_threshold]=0\n\n\n if cluster_method == 'ward':\n # ## BEGIN WARD CLUSTERING CODE \n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n print(\"ward\")\n if roi_mask_nparray!='empty':\n #import pdb; pdb.set_trace()\n shape = roi_mask_nparray.shape\n connectivity = image.grid_to_graph(n_x=shape[0], n_y=shape[1],\n n_z=shape[2], mask=roi_mask_nparray)\n \n ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity,\n linkage='ward')\n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n else:\n print(\"Calculating Hierarchical Cross-clustering\")\n ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n ward.fit(sim_matrix)\n y_pred = ward.labels_.astype(np.int)\n \n # # END WARD CLUSTERING CODE \n else:\n \n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n print(\"spectral\")\n #cluster_method== 'spectral':\n #Spectral method\n spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n spectral.fit(sim_matrix)\n y_pred = spectral.labels_.astype(np.int) \n\n# \n # BEGIN SPECTRAL CLUSTERING CODE \n \n # END SPECTRAL CLUSTERING CODE \n\n\n\n# sim_matrix[np.isnan((sim_matrix))]=0\n# sim_matrix[sim_matrix<0]=0\n# sim_matrix[sim_matrix>1]=1\n\n ## BEGIN WARD CLUSTERING CODE \n# print(\"Calculating Hierarchical Cross-clustering\")\n# ward = FeatureAgglomeration(n_clusters=n_clusters, affinity='euclidean', linkage='ward') \n# ward.fit(sim_matrix)\n# y_pred = ward.labels_.astype(np.int)\n# \n ## END WARD CLUSTERING CODE \n \n# # BEGIN SPECTRAL CLUSTERING CODE \n# spectral = cluster.SpectralClustering(n_clusters, eigen_solver='arpack', random_state = 5, affinity=\"precomputed\", assign_labels='discretize') \n# spectral.fit(sim_matrix)\n# y_pred = spectral.labels_.astype(np.int)\n# # END SPECTRAL CLUSTERING CODE \n \n return y_pred", "def process_cds(cfs, ref):\n # unpack the tuple\n feat, scaffold, phase = cfs\n # First, extract the sequence of the CDS from the scaffold. 
This should\n # respect the strand, so we won't have to reverse-complement\n featseq = feat.extract(ref[scaffold])\n return featseq", "def mapRev2Cluster(self):\n\n # For each condition, operating on the side effect matching file to reduce down into\n # the more general categories\n clusterMapping = pd.read_csv('ClusteredSideEffects.csv', sep='$', index_col=0)\n for condition in self.conditions:\n print(\"I'm working on {:s}\".format(condition))\n files = glob.glob('ReviewsMatched2SideEffects/{:s}*csv'.format(condition))\n files = np.sort(files)\n\n for i,f in enumerate(files):\n df = pd.read_csv(f, sep='$', index_col=0)\n\n for cluster in np.unique(clusterMapping['Cluster']):\n # Finding the relevant SEs for the cluster\n SEs = clusterMapping[clusterMapping['Cluster'].eq(cluster)]['Side effect']\n\n # Summing across all those SEs in the dataframe and creating a new column\n match = [SE for SE in SEs if SE in df.columns]\n df[cluster] = (df[match].sum(axis=1) > 0)\n \n if not match:\n df[cluster] = [0]*len(df)\n \n # Stacking to allow for the depression split\n if i == 0:\n master_df = df.copy()\n else:\n master_df = master_df.append(df, ignore_index=0, sort=False)\n\n\n # Dropping all columns not in clusters\n clusters = list(np.unique(clusterMapping['Cluster']))\n keepers = ['Medication','Positive polarity','Negative polarity','Medication mentions','Effectiveness']\n keepers += clusters\n master_df = master_df[keepers]\n \n # Writing the stack to a file to load on to AWS\n master_df.to_csv('FinalProcessedReviews/{:s}_processed.csv'.format(condition), sep='$')\n print(\"I've saved the clustered file\\n\")", "def calculate_mapping(self, mask):\n K, F, _ = mask.shape\n\n # (K, F, T)\n features = mask / np.linalg.norm(mask, axis=-1, keepdims=True)\n\n mapping = np.repeat(np.arange(K)[:, None], F, axis=1)\n\n for iterations, start, end in self.alignment_plan:\n for _ in range(iterations):\n # (K, T)\n centroid = np.sum(features[:, start:end, :], axis=1)\n centroid /= np.linalg.norm(centroid, axis=-1, keepdims=True)\n\n break_flag = False\n for f in range(start, end):\n reverse_permutation = self._align_segment(\n features[:, f, :], centroid,\n )\n if not (reverse_permutation == list(range(K))).all():\n break_flag = True\n features[:, f, :] = features[reverse_permutation, f, :]\n mapping[:, f] = mapping[reverse_permutation, f]\n if break_flag:\n break\n\n return mapping", "def new_resolve_unique_contigs(scaffold_list, unique_contigs_list):\n \n contig_location = {}\n s_l = copy.deepcopy(scaffold_list)\n \n #first deal with any scaffolds that have more than one copy of a unique contig\n to_remove = []\n for scaf in s_l: \n for contig in unique_contigs_list:\n if scaf.count(contig) > 1:\n scaffold_parts = split_siamese(contig, scaf)\n to_remove.append(scaf)\n s_l.extend(scaffold_parts)\n break \n for scaf in to_remove:\n s_l.remove(scaf) \n\n\n for contig in unique_contigs_list:\n #if contig[:4] == \"five\": \n finds = find_unique_contig(contig, s_l)\n\n if len(finds) > 1:\n contig_location[contig] = finds\n\n sc_ov = {}\n sc_ov = make_scaff_overlap_dict(contig_location)\n\n #This is the new bit that takes just the first conflicted contig \n first_k = list(sc_ov.items())[0:1]\n first_sc_ov = dict(first_k)\n new_scaffold_list = combine_overlapping_contigs(first_sc_ov, s_l)\n\n #Split off unique scaffolds attached by their 3' ends to multiple scaffolds\n \n for contig in contig_location:\n if contig[:5] == \"three\":\n for scaf in contig_location[contig]:\n conflict = False\n if scaf.index(contig) 
== 1:\n conflict = True\n new_left_scaf = scaf[:3]\n new_right_scaf = scaf[3:]\n if scaf.index(contig) == len(scaf) - 2:\n conflict = True\n new_left_scaf = scaf[:-3]\n new_right_scaf = scaf[-3:]\n if conflict:\n new_left_scaf.append(\"link_conflict6\")\n new_right_scaf.insert(0,\"link_conflict6\")\n if len(new_left_scaf) >= 4: \n new_scaffold_list.append(new_left_scaf)\n if len(new_right_scaf) >= 4:\n new_scaffold_list.append(new_right_scaf)\n if scaf in new_scaffold_list:\n new_scaffold_list.remove(scaf)\n\n return new_scaffold_list", "def isoformAln(aln, o):\n\n logger = logging.getLogger(\"main.alignment\")\n logger.info(\"Clustering isoforms.\")\n\n dRem={} #for remaining sequences\n dId2Seq={} #for remaining sequences\n laln=0 #alignement length\n for fasta in SeqIO.parse(open(aln),'fasta'):\n post=fasta.id.find(\"_\")\n if post!=-1: #regular format\n sp=fasta.id[:post]\n tag=fasta.id[post+1:]\n if not sp in dId2Seq:\n dId2Seq[sp]={}\n dId2Seq[sp][tag]=str(fasta.seq)\n if laln==0:\n laln=len(fasta.seq)\n else:\n dRem[fasta.id]=str(fasta.seq)\n\n \n outCov = o+aln.split(\"/\")[-1].split(\".\")[0]+\"_clustiso.fasta\"\n clustok=False #flag to check if a cluster has occured\n for sp,dtagseq in dId2Seq.items():\n lclust=[list(dtagseq)] #list of clusters of tags to be split\n for pos in range(laln):\n lclust2=[]\n for clust in lclust:\n dlet={tag:dtagseq[tag][pos] for tag in clust}\n llet=set([x for x in dlet.values() if x!=\"-\"])\n if len(llet)<=1: #one letter at most, keep all\n lclust2.append(clust)\n continue\n else:\n for x in llet:\n lclust2.append([tag for tag in clust if dlet[tag]==x])\n lind=[tag for tag in clust if dlet[tag]==\"-\"] #conservative, do not know wether to merge, may be improved\n if len(lind)!=0:\n lclust2.append(lind)\n lclust=lclust2\n \n #now merge sequences in each cluster\n for clust in lclust:\n if len(clust)==1:\n dRem[sp+\"_\"+clust[0]]=dtagseq[clust[0]]\n else:\n clustok=True\n ntag=clust[-1]+\"_clust\"\n logger.info(\"Clustered sequences \" + sp+\"_\" + (\", %s_\"%(sp)).join(clust) + \" into %s_\"%(sp)+ntag)\n nseq=\"\".join([max([dtagseq[tag][pos] for tag in clust]) for pos in range(laln)])\n dRem[sp+\"_\"+ntag]=nseq\n\n if clustok:\n with open(outCov, \"w\") as outC:\n \t outC.write(FastaResFunc.dict2fasta(dRem))\n \t outC.close()\n\t\n return(outCov)\n else:\n return(aln)", "def linkage(self, other):\n \n distance = 0\n list1 = self.get_cluster_elements\n list2 = other.get_cluster_elements\n lenght = 0\n t = 0\n \n #first it \"de-clusterize\" the cluster into a simple list\n #of Sequence.\n done = False\n while done == False:\n done = True\n try:\n for i in range(0,len(list1)):\n if type(list1[i]) != Sequence:\n for j in range(0,len(list1[i])):\n list1.append(list1[i][j])\n list1.pop(i)\n done = False\n except:\n\n pass\n \n try:\n lenght = len(list1)\n except:\n lenght += 1\n empty_list = []\n empty_list.append(list1)\n list1 = empty_list\n\n \n #it do the same for the second cluster\n done = False\n while done == False:\n done = True\n try:\n for i in range(0,len(list2)):\n if type(list2[i]) != Sequence:\n for j in range(0,len(list2[i])):\n list2.append(list2[i][j])\n list2.pop(i)\n done = False\n except:\n pass\n\n try:\n lenght += len(list2)\n except:\n lenght += 1\n empty_list = []\n empty_list.append(list2)\n list2 = empty_list\n\n #then it calculate the total of all the distance...\n for i in range(0,len(list1)):\n for j in range(0,len(list2)):\n t += list1[i].distance_to(list2[j])\n\n #...and get the average distance (lenght is the sum of the 
lenght of)\n #the two cluster.\n distance = t/lenght \n return distance" ]
[ "0.70106786", "0.6068481", "0.5995936", "0.5906224", "0.58543235", "0.58283967", "0.57719815", "0.5765596", "0.5720791", "0.56829274", "0.5669235", "0.5604671", "0.55728793", "0.5560698", "0.5556891", "0.5496786", "0.547497", "0.540696", "0.5392086", "0.537271", "0.5355906", "0.53524125", "0.5351251", "0.5322449", "0.53171533", "0.53132534", "0.53121656", "0.5309081", "0.5307491", "0.5292881" ]
0.70810395
0
Returns all RefSeqs. refseq_based_clustering() must be precalled.
def get_all_refseq(self): return self.refseq_based.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def references(self) -> \"IterableList[Reference]\":\n return Reference.list_items(self)", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def references(self):\n ref_nodes = self.root.xpath(\".//bib-reference\")\n return list(\n itertools.chain.from_iterable(\n self.get_reference_iter(node) for node in ref_nodes\n )\n )", "def get_refs(self): \n for row in self._get_references_node():\n yield row.fetch_all_fields()", "def list_all_refs(self):\n self.list_refs()\n self.list_ref0s()\n self.list_defect_refs()", "def find_references(self):\n cls = self.__class__\n nodes = []\n for sobj in self._std.FindDependances(self.get_sobj()):\n nodes.append(cls(self._std, self._bld, sobj.GetID()))\n return nodes", "def references_list( self, theWeaver ):\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]", "def get_references(self):\n\n return self._refs", "def list_refs(self):\n pass", "def get_all_references(alignedsegment):\n \n references = [alignedsegment.reference_name]\n \n # Some reads don't have secondary hits\n if not alignedsegment.has_tag('XA'):\n return references\n \n # XA is a string contigname1,<other info>;contigname2,<other info>; ...\n secondary_alignment_string = alignedsegment.get_tag('XA')\n secondary_alignments = secondary_alignment_string.split(';')[:-1]\n \n for secondary_alignment in secondary_alignments:\n references.append(secondary_alignment.partition(',')[0])\n \n return references", "def refseq_based_clustering(self):\n self.refseq_based = NonRedSetDict()\n for prey in self.ivv_info.Prey_info().preys():\n refseqid = self.get_refseq(prey)\n if refseqid:\n self.refseq_based.append_Dict(refseqid, prey)", "def get_all_children_seq(self):\n results = []\n queue = []\n children = self.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = node.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n return results", "def refseq_based_clustering(self):\n self.refseq_based = Usefuls.NonRedSet.NonRedSetDict()\n for prey in self.ivv_info.Prey_info().preys():\n refseqid = self.get_refseq(prey)\n if refseqid:\n self.refseq_based.append_Dict(refseqid, prey)", "def get_references(self):\n return self._references", "def get_refs(genome_build, aligner, config):\n find_fn = _find_file(config[CONFIG_KEY], startswith=True)\n ref_prefix = sret.find_ref_prefix(genome_build, find_fn)\n return sret.standard_genome_refs(genome_build, aligner, ref_prefix, _list(config[CONFIG_KEY]))", "def _get_all_sequences(self):\n if self._sequences is None:\n q = \"SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'\"\n self._sequences = set([c.relname for c in self.query(q)])\n return self._sequences", "def get_refs(*args, **kwargs):\n return get_refs_async(*args, **kwargs).get_result()", "def list_refs(self):\n print('----\\nREFs\\n----')\n self._print_dict(self.refs)", "def references(self):\n return self._get_related_resources(False)", "def getRefs( self, par, path ):\n\n return self.db.getRefsPar( par, path )", "def list_reference_genomes(self, params, context=None):\n return self._client.call_method(\n 'ReferenceDataManager.list_reference_genomes',\n [params], self._service_ver, context)", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for reference in self.references:\n if isinstance(reference.ref_cell, Cell):\n if recursive:\n dependencies.update(reference.ref_cell.get_dependencies(True))\n 
dependencies.add(reference.ref_cell)\n return dependencies", "def getLinks(self):\n refbrains = self.refcat._queryFor(relationship=self.relation,\n tid=self.suid, sid=None)\n if refbrains:\n uids = [brain.sourceUID for brain in refbrains]\n ## XXX non-orthogonal\n return self.resolver.queryUIDs(uids)\n return []", "def _getSequentialRoms(self):\n # Always returns the first cluster currently. Could be done differently.\n return list(self._roms[l] for l in self._clusterInfo['labels'])", "def get_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def refs(self):\n return self._refs", "def getListOfCompartmentReferences(self, *args):\n return _libsbml.MultiCompartmentPlugin_getListOfCompartmentReferences(self, *args)", "def list_ref0s(self):\n print('-----\\nREF0s\\n-----')\n self._print_dict(self.ref0s)", "def references(self):\n return tuple(self.__references)", "def get(self, *args):\n return _libsbml.ListOfCompartmentReferences_get(self, *args)" ]
[ "0.67854905", "0.64765537", "0.6383024", "0.6376954", "0.6328095", "0.61806107", "0.61366177", "0.61037624", "0.60236764", "0.60163116", "0.59973496", "0.599216", "0.59824604", "0.59700936", "0.59552693", "0.59420055", "0.5773383", "0.57720006", "0.5713108", "0.5703122", "0.5688902", "0.5647649", "0.56454337", "0.56373537", "0.5636493", "0.56055236", "0.5588578", "0.5583499", "0.556955", "0.556917" ]
0.77024484
1
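Note on the row that ends here: its document field is the one-line get_all_refseq accessor, and its query string records an ordering constraint — refseq_based_clustering() must run first so that self.refseq_based exists (the clustering step itself appears verbatim as the first negative of the previous row). Below is a minimal, self-contained Python sketch of that calling order; the RefseqClusters class, the plain-dict stand-in for NonRedSetDict, and the hard-coded prey/RefSeq ids are illustrative assumptions, not part of the dataset.

# Sketch under stated assumptions: a plain dict of lists stands in for NonRedSetDict.
class RefseqClusters(object):
    def __init__(self, prey_to_refseq):
        # prey id -> RefSeq id (or None when no RefSeq mapping exists)
        self.prey_to_refseq = prey_to_refseq

    def refseq_based_clustering(self):
        # Builds self.refseq_based, which get_all_refseq() reads.
        self.refseq_based = {}
        for prey, refseqid in self.prey_to_refseq.items():
            if refseqid:
                self.refseq_based.setdefault(refseqid, []).append(prey)

    def get_all_refseq(self):
        # Matches the document field above: just the clustered keys.
        return self.refseq_based.keys()

clusters = RefseqClusters({"prey1": "NM_0001", "prey2": "NM_0001", "prey3": None})
clusters.refseq_based_clustering()          # must be called before get_all_refseq()
print(sorted(clusters.get_all_refseq()))    # ['NM_0001']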
This creates the symbols out of a stylegallery
def create_symbols(self, style_gallery, style_gallery_name, class_to_export): try: symbols_element = self.xml_document.getElementsByTagName("symbols")[0] except IndexError: symbols_element = self.xml_document.createElement("symbols") root_element = self.xml_document.getElementsByTagName("qgis_style")[0] root_element.appendChild(symbols_element) style_gallery_items = StyleGalleryItemProvider.get_style_gallery_items(style_gallery, class_to_export, style_gallery_name ) if len(style_gallery_items) > 0: arcpy.AddMessage("Export {}".format(class_to_export)) for item in style_gallery_items: try: i_symbol = change_interface(item.Item, ArcGisModules.module_display.ISymbol) symbol_properties = {} SymbolPropertiesProvider.get_symbol_properties_by_symbol_class( symbol_properties, i_symbol, class_to_export ) tags = change_interface(item, ArcGisModules.module_display.IStyleGalleryItem2)\ .Tags\ .replace(";", " ,") SimpleSymbol.create_simple_symbol(self.xml_document, symbols_element, symbol_properties, item.Name, "1", tags ) except (ValueError, Exception): arcpy.AddMessage("Error while Exporting {}".format(item.name)) continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_image_caption_pairs(self):", "def singleglyph(x):\n return [glyph(x)]", "def build_schematic(self, bg=None):", "def get_stylesheet():\n\n #ss_dict\n ss_dict = {'header_image' : HEADER_IMAGE,\n 'icon_true' : ICON_TRUE,\n 'icon_false' : ICON_FALSE,\n 'futura_lt_light' : FUTURA_LT_LIGHT,\n 'bright_orange' : BRIGHT_ORANGE.name(),\n 'bright_orange_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_ORANGE.red(), BRIGHT_ORANGE.green(), BRIGHT_ORANGE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'dark_orange' : DARK_ORANGE.name(),\n 'dark_orange_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_ORANGE.red(), DARK_ORANGE.green(), DARK_ORANGE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'bright_blue' : BRIGHT_BLUE.name(),\n 'bright_blue_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_BLUE.red(), BRIGHT_BLUE.green(), BRIGHT_BLUE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'dark_blue' : DARK_BLUE.name(),\n 'dark_blue_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_BLUE.red(), DARK_BLUE.green(), DARK_BLUE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'bright_green' : BRIGHT_GREEN.name(),\n 'bright_green_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_GREEN.red(), BRIGHT_GREEN.green(), BRIGHT_GREEN.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'dark_green' : DARK_GREEN.name(),\n 'dark_green_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_GREEN.red(), DARK_GREEN.green(), DARK_GREEN.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n\t\t\t 'bright_grey' : BRIGHT_GREY.name(),\n 'bright_grey_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_GREY.red(), BRIGHT_GREY.green(), BRIGHT_GREY.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n\t\t\t 'grey' : GREY.name(),\n 'grey_transparent' : 'rgba({0},{1},{2},{3})'.format(GREY.red(), GREY.green(), GREY.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n\t\t\t 'dark_grey' : DARK_GREY.name(),\n 'dark_grey_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_GREY.red(), DARK_GREY.green(), DARK_GREY.blue(), TABLEVIEW_EDITOR_TRANSPARENCY)}\n\n\n #str_stylesheet\n str_stylesheet = \" \\\n\\\n\\\n/* QWidget */\\\nQWidget { background-color: %(dark_grey)s; \\\n font-family: \\\"%(futura_lt_light)s\\\"; \\\n font-size: 14pt; \\\n selection-background-color: %(bright_blue)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_header_icon */\\\nQWidget#wdgt_header_icon { border-image: url(%(header_image)s); } \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QToolTip */\\\nQToolTip { background-color: %(dark_grey)s; \\\n font-size: 14pt; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QLabel */\\\nQLabel { background-color: transparent; \\\n} \\\n\\\n\\\n/* QLabel - lbl_explanation_header */\\\nQLabel#lbl_explanation_header { font-weight: bold; \\\n font-size: 20pt; \\\n color: %(bright_grey)s; \\\n margin-top: 10; \\\n margin-left: 10; \\\n margin-bottom: 4; \\\n margin-right: 10; \\\n} \\\n\\\n\\\n/* QLabel - lbl_explanation_text */\\\nQLabel#lbl_explanation_text { color: %(bright_grey)s; \\\n margin-top: 4; \\\n margin-left: 10; \\\n margin-bottom: 4; \\\n margin-right: 10; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QProgressBar */\\\nQProgressBar { border: none;\\\n background-color: %(dark_grey)s;\\\n text-align: center;\\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QLineEdit */\\\nQLineEdit { border: none;\\\n background-color: %(grey)s;\\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QMenuBar - mnubar_menu */\\\nQMenuBar#mnubar_menu { background-color: transparent;\\\n border-left: none; 
\\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenuBar - mnubar_menu - item */\\\nQMenuBar#mnubar_menu::item { background: transparent;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenuBar - mnubar_menu - item - selected */\\\nQMenuBar#mnubar_menu::item:selected { background: transparent;\\\n color: %(bright_orange)s; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenuBar - mnubar_menu - item - pressed */\\\nQMenuBar#mnubar_menu::item:pressed { background: transparent;\\\n color: %(dark_orange)s; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QMenu - separator */\\\nQMenu::separator { background: %(bright_orange)s;\\\n height: 1px; \\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_threads */\\\nQMenu#mnu_threads { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_threads - item */\\\nQMenu#mnu_threads::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_threads - item - selected */\\\nQMenu#mnu_threads::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_threads_logging */\\\nQMenu#mnu_threads_logging { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_threads_logging - item */\\\nQMenu#mnu_threads_logging::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_threads_logging - item - selected */\\\nQMenu#mnu_threads_logging::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_gui */\\\nQMenu#mnu_gui { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_gui - item */\\\nQMenu#mnu_gui::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_gui - item - selected */\\\nQMenu#mnu_gui::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_shot_metatada_view */\\\nQMenu#mnu_shot_metatada_view { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_shot_metatada_view - item */\\\nQMenu#mnu_shot_metatada_view::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_shot_metatada_view - item - selected */\\\nQMenu#mnu_shot_metatada_view::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_char_metatada_view */\\\nQMenu#mnu_char_metatada_view { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_char_metatada_view - item */\\\nQMenu#mnu_char_metatada_view::item { 
background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_char_metatada_view - item - selected */\\\nQMenu#mnu_char_metatada_view::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_prop_metatada_view */\\\nQMenu#mnu_prop_metatada_view { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_prop_metatada_view - item */\\\nQMenu#mnu_prop_metatada_view::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_prop_metatada_view - item - selected */\\\nQMenu#mnu_prop_metatada_view::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_alembic */\\\nQMenu#mnu_alembic { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_alembic - item */\\\nQMenu#mnu_alembic::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_alembic - item - selected */\\\nQMenu#mnu_alembic::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_assets */\\\nQMenu#mnu_assets { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_assets - item */\\\nQMenu#mnu_assets::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_assets - item - selected */\\\nQMenu#mnu_assets::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_attributes */\\\nQMenu#mnu_attributes { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_attributes - item */\\\nQMenu#mnu_attributes::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_attributes - item - selected */\\\nQMenu#mnu_attributes::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QScrollBar */\\\nQScrollBar { background: %(dark_grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QTableCornerButton */\\\nQTableCornerButton { background-color: %(grey)s; \\\n border: none; \\\n}\\\n\\\n\\\n/* QTableCornerButton - section */\\\nQTableCornerButton::section { background-color: %(grey)s; \\\n border: none; \\\n}\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* ShotMetadataView */\\\nShotMetadataView { background-color: %(grey)s; \\\n selection-background-color: %(bright_orange)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QHeaderView - shot_metadata_view_hor_header*/\\\nQHeaderView#shot_metadata_view_hor_header{ background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - shot_metadata_view_hor_header - section */\\\nQHeaderView#shot_metadata_view_hor_header::section { background-color: qlineargradient(spread:reflect, x1:0.06, y1:0.04, x2:0, y2:0, \\\n stop:0.8 %(grey)s, \\\n stop:1 %(dark_orange)s); \\\n font-weight: bold; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: 1px solid 
%(bright_grey)s; \\\n} \\\n\\\n\\\n/* QHeaderView - shot_metadata_view_ver_header */\\\nQHeaderView#shot_metadata_view_ver_header { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - shot_metadata_view_ver_header - section */\\\nQHeaderView#shot_metadata_view_ver_header::section { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* ShotMetadataContextMenu */\\\nShotMetadataContextMenu { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* ShotMetadataContextMenu -item - selected */\\\nShotMetadataContextMenu::item:selected { background-color: %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* PropMetadataView */\\\nPropMetadataView { background-color: %(grey)s; \\\n selection-background-color: %(bright_blue)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QHeaderView - prop_metadata_view_hor_header*/\\\nQHeaderView#prop_metadata_view_hor_header{ background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - prop_metadata_view_hor_header - section */\\\nQHeaderView#prop_metadata_view_hor_header::section { background-color: qlineargradient(spread:reflect, x1:0.06, y1:0.04, x2:0, y2:0, \\\n stop:0.8 %(grey)s, \\\n stop:1 %(bright_blue)s); \\\n font-weight: bold; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: 1px solid %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QHeaderView - prop_metadata_view_ver_header */\\\nQHeaderView#prop_metadata_view_ver_header { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - prop_metadata_view_ver_header - section */\\\nQHeaderView#prop_metadata_view_ver_header::section { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n/* PropMetadataContextMenu */\\\n/* Here is the above mentioned menu but also its sub menus. 
*/\\\n/* mnu_metadata, mnu_geometry, mnu_visibility, mnu_selection */\\\n\\\n\\\n/* PropMetadataContextMenu */\\\nPropMetadataContextMenu { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* PropMetadataContextMenu -item - selected */\\\nPropMetadataContextMenu::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata */\\\nQMenu#PropMetadataContextMenu_mnu_metadata { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_metadata::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry */\\\nQMenu#PropMetadataContextMenu_mnu_geometry { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_geometry::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility */\\\nQMenu#PropMetadataContextMenu_mnu_visibility { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_visibility::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection */\\\nQMenu#PropMetadataContextMenu_mnu_selection { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_selection::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* CharMetadataView */\\\nCharMetadataView { background-color: %(grey)s; \\\n selection-background-color: %(dark_green)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QHeaderView - char_metadata_view_hor_header*/\\\nQHeaderView#char_metadata_view_hor_header{ background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - char_metadata_view_hor_header - section */\\\nQHeaderView#char_metadata_view_hor_header::section { background-color: qlineargradient(spread:reflect, x1:0.06, y1:0.04, x2:0, y2:0, \\\n stop:0.8 %(grey)s, \\\n stop:1 %(bright_green)s); \\\n font-weight: bold; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: 1px solid %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QHeaderView - char_metadata_view_ver_header */\\\nQHeaderView#char_metadata_view_ver_header { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - 
char_metadata_view_ver_header - section */\\\nQHeaderView#char_metadata_view_ver_header::section { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n/* CharMetadataContextMenu */\\\n/* Here is the above mentioned menu but also its sub menus. */\\\n/* mnu_metadata, mnu_geometry, mnu_visibility, mnu_selection */\\\n\\\n\\\n/* CharMetadataContextMenu */\\\nCharMetadataContextMenu { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* CharMetadataContextMenu -item - selected */\\\nCharMetadataContextMenu::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata */\\\nQMenu#CharMetadataContextMenu_mnu_metadata { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_metadata::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry */\\\nQMenu#CharMetadataContextMenu_mnu_geometry { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_geometry::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility */\\\nQMenu#CharMetadataContextMenu_mnu_visibility { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_visibility::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection */\\\nQMenu#CharMetadataContextMenu_mnu_selection { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_selection::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorFramerange */\\\n/* This widget has a transparent background. Below are the stylesheets for the */\\\n/* children of this widget. 
*/\\\n\\\n\\\n/* QSpinBox - spnbx_frame */\\\nQSpinBox#spnbx_frame { background-color: transparent; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange_transparent)s; \\\n border-bottom: 1px solid %(bright_orange_transparent)s; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_table_view_editor_framerange_main */\\\nQWidget#wdgt_table_view_editor_framerange_main { background-color: %(grey_transparent)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame */\\\nQWidget#wdgt_frame { background-color: transparent; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_and_time_slider */\\\nQWidget#wdgt_range_and_time_slider { background-color: transparent; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame_slider */\\\nQWidget#wdgt_frame_slider { background-color: transparent; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame_slider_left */\\\nQWidget#wdgt_frame_slider_left { background-color: qlineargradient(spread:reflect, x1:0.3, y1:0, x2:0, y2:0, \\\n stop:0.45 transparent, \\\n stop:0.5 %(dark_orange_transparent)s, \\\n stop:0.55 transparent); \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame_slider_right */\\\nQWidget#wdgt_frame_slider_right { background-color: qlineargradient(spread:reflect, x1:0.1, y1:0, x2:0, y2:0, \\\n stop:0.45 transparent, \\\n stop:0.5 %(dark_orange_transparent)s, \\\n stop:0.55 transparent); \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_get_current_frame*/\\\nAssetManagerHoverButton#btn_get_current_frame { background-color: %(bright_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider */\\\nQWidget#wdgt_range_slider { background-color: transparent; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_scrollbar */\\\nQWidget#wdgt_range_scrollbar { background-color: transparent; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider_left */\\\nQWidget#wdgt_range_slider_left { background-color: %(dark_grey_transparent)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider_middle */\\\nQWidget#wdgt_range_slider_middle { background-color: %(bright_grey_transparent)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider_right */\\\nQWidget#wdgt_range_slider_right { background-color: %(dark_grey_transparent)s; \\\n} \\\n\\\n\\\n/* QLabel - lbl_framesource */\\\nQLabel#lbl_framesource { background-color: transparent; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_complete_range_start*/\\\nAssetManagerHoverButton#btn_complete_range_start { background-color: %(grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_current_range_start*/\\\nAssetManagerHoverButton#btn_current_range_start { background-color: %(dark_orange_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_complete_range_end*/\\\nAssetManagerHoverButton#btn_complete_range_end { background-color: %(grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_current_range_end*/\\\nAssetManagerHoverButton#btn_current_range_end { background-color: %(dark_orange_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid 
%(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorNodepicker */\\\n/* This widget has a transparent background. Below are the stylesheets for the */\\\n/* children of this widget. */\\\n\\\n\\\n/* QWidget - wdgt_table_view_editor_nodepicker_main */\\\nQWidget#wdgt_table_view_editor_nodepicker_main { background-color: %(grey_transparent)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QLabel - lbl_nodetype */\\\nQLabel#lbl_nodetype { background-color: %(grey_transparent)s; \\\n} \\\n\\\n\\\n/* QLineEdit - le_filter */\\\nQLineEdit#le_filter { background-color: %(dark_grey_transparent)s; \\\n border: 1px solid %(dark_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QListView - node_view */\\\nQListView#node_view { background-color: %(grey_transparent)s; \\\n alternate-background-color: %(dark_grey_transparent)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QListView - node_view - item selected */\\\nQListView#node_view::item:selected { background-color: %(bright_orange)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorPathpicker */\\\n/* This widget has a transparent background. Below are the stylesheets for the */\\\n/* children of this widget. */\\\n\\\n\\\n/* QWidget - wdgt_table_view_editor_pathpicker_main */\\\nQWidget#wdgt_table_view_editor_pathpicker_main { background-color: %(grey_transparent)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QLabel - lbl_base_path */\\\nQLabel#lbl_base_path { background-color: %(grey_transparent)s; \\\n} \\\n\\\n\\\n/* QLineEdit - le_path_filter */\\\nQLineEdit#le_path_filter { background-color: %(dark_grey_transparent)s; \\\n border: 1px solid %(dark_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QListView - path_view */\\\nQListView#path_view { background-color: %(grey_transparent)s; \\\n alternate-background-color: %(dark_grey_transparent)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QListView - path_view - item selected */\\\nQListView#path_view::item:selected { background-color: %(bright_orange)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* AssetManagerSliderAction */\\\n/* QWidgetAction that draws a slider and an LCD Display */\\\n\\\n\\\n/* AssetManagerSliderAction - QLabel */\\\nQLabel#AssetManagerSliderActionQLabel { background-color: transparent; \\\n margin-left: 8; \\\n margin-right: 8; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QWidget */\\\nQWidget#AssetManagerSliderActionQWidget { background-color: transparent; \\\n margin-left: 8; \\\n margin-right: 8; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QSlider - groove - horizontal */\\\nQSlider#AssetManagerSliderActionQSlider::groove:horizontal { background: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, \\\n stop:0 transparent, \\\n stop:1 %(bright_orange)s); \\\n height: 1px; \\\n margin-left: 8; \\\n margin-right: 8; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QSlider - handle - horizontal */\\\nQSlider#AssetManagerSliderActionQSlider::handle:horizontal { background: %(bright_grey)s; \\\n width: 20px; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QLCDNumber */\\\nQLCDNumber#AssetManagerSliderActionQLCDNumber { background: transparent; \\\n color: 
%(bright_orange)s; \\\n border: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QWidget - wdgt_asset_manager_pre_export_dialog_main */\\\nQWidget#wdgt_asset_manager_pre_export_dialog_main { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_wdgt_asset_manager_pre_export_dialog_main_options */\\\nQWidget#wdgt_wdgt_asset_manager_pre_export_dialog_main_options { background-color: transparent; } \\\n\\\n\\\n/* QLabel - lbl_question */\\\nQLabel#lbl_question { background-color: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_accept */\\\nQPushButton#btn_accept { background-color: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_accept - pressed */\\\nQPushButton#btn_accept:pressed { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_accept - hover */\\\nQPushButton#btn_accept:hover { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_reject */\\\nQPushButton#btn_reject { background-color: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_reject - pressed */\\\nQPushButton#btn_reject:pressed { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_reject - hover */\\\nQPushButton#btn_reject:hover { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice */\\\nQCheckBox#chkbx_remember_choice { background: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator */\\\nQCheckBox#chkbx_remember_choice::indicator { background: transparent; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator - hover */\\\nQCheckBox#chkbx_remember_choice::indicator:hover { background: %(dark_grey)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator - checked */\\\nQCheckBox#chkbx_remember_choice::indicator:checked { background: %(bright_grey)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator - pressed */\\\nQCheckBox#chkbx_remember_choice::indicator:pressed { background: %(dark_orange)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorBool */\\\n/* Below are the stylesheets for the children of this widget. 
*/\\\n\\\n\\\n/* TableViewEditorBool */\\\nTableViewEditorBool { background-color: %(dark_grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_true */\\\nQPushButton#TableViewEditorBool_btn_true { background-color: transparent; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_true - hover */\\\nQPushButton#TableViewEditorBool_btn_true:hover { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_true - pressed */\\\nQPushButton#TableViewEditorBool_btn_true:pressed { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_false */\\\nQPushButton#TableViewEditorBool_btn_false { background-color: transparent; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_false - hover */\\\nQPushButton#TableViewEditorBool_btn_false:hover { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_false - pressed */\\\nQPushButton#TableViewEditorBool_btn_false:pressed { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* AssetManagerDockWidget */\\\nAssetManagerDockWidget { background: %(dark_grey)s; \\\n font-size: 14pt; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* AssetManagerDockWidget - title */\\\nAssetManagerDockWidget::title { background: %(dark_grey)s; \\\n text-align: left; \\\n font-size: 14pt; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\nAssetManagerDockWidget::close-button, AssetManagerDockWidget::float-button {background: %(bright_orange)s; \\\n border: none; \\\n} \\\n\\\n\\\nAssetManagerDockWidget::close-button:hover, AssetManagerDockWidget::float-button:hover { background: %(dark_orange)s; \\\n} \\\n\\\n\\\nAssetManagerDockWidget::close-button:pressed, AssetManagerDockWidget::float-button:pressed { background: %(dark_orange)s; \\\n} \\\n\\\n\\\n\"%ss_dict\n\n return str_stylesheet", "def make_layered_psd_from_images():\n\n\t\n\tdoc = open_document(FILEPATHS[0], show=False)\n\tdoc_root = doc.rootNode()\n\t\n\tdocs = []\n\tdocs.append(doc)\n\n\tall_layers = get_layers(doc)\n\tfor i in range(1, len(FILEPATHS)):\n\t\tdocx = open_document(FILEPATHS[i], show=False)\n\t\tdocs.append(docx)\n\t\tdocx_layers = get_layers(docx)\n\t\tfor layer in docx_layers:\n\t\t\tall_layers.append(layer.clone())\n\t\t\t# doc.rootNode().addChildNode(layer, parent_node)\n\tdoc_root.setChildNodes(all_layers)\n\n\tprint('Debug: all nodes: %s' % doc.rootNode().childNodes())\n\t# doc.refreshProjection()\n\n\tsave_filepath = filepath = QtWidgets.QFileDialog.getSaveFileName()[0]\n\tr = doc.saveAs(save_filepath)\n\tprint('Debug: saved: %s' % save_filepath)\n\t\n\tfor doc in docs:\n\t\tprint('Debug: closing %s' % doc)\n\t\tdoc.close()\n\n\tprint('Debug: Script done')", "def CreateNorthSymbols(color1=(0.0,0.0,0.0,1.0),color2=(1.0,1.0,1.0,1.0),\n scale=1.0,symbol_manager=None):\n\n if symbol_manager is None:\n sm=gview.GvSymbolManager()\n else:\n sm=symbol_manager\n\n refnames=[]\n for item in [GVNORTHSYM1]:\n rname,junk=CreateNorthSymbol(color1,color2,scale,sm)\n refnames.append(rname)\n\n return (refnames,sm)", "def parse_and_construct_graphic_layer(ds):\r\n graphic_layers = list()\r\n for item in ds.SegmentSequence:\r\n layer = {\r\n \"GraphicLayer\": str(item.SegmentDescription).upper(),\r\n \"GraphicLayerOrder\": item.SegmentNumber,\r\n 
\"GraphicLayerRecommendedDisplayCIELabValue\": [49512, 38656, 52736]\r\n }\r\n graphic_layers.append(layer)\r\n return graphic_layers", "def paintTags(self):\n imagesTagOrder = [\"gender\", \"skin\", \"head\", \"body\", \"mask\", \"hair\", \"shirt\", \"trousers\", \"skirt\", \"shoes\"]\n pos = 0\n for img in imagesTagOrder:\n self.imagesTag[img].topleft = 296, pos * 76\n self.imagesTag[img].connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.paintCustomizeZone, img)\n self.window.add_child(self.imagesTag[img])\n pos += 1", "def create_simple_symbol(xml_document, symbols_element, properties, count, alpha, tags=None):\n symbol_element = xml_document.createElement(\"symbol\")\n symbol_element.setAttribute(\"alpha\", alpha)\n symbol_element.setAttribute(\"clip_to_extent\", \"1\")\n symbol_element.setAttribute(\"type\", properties['symbol_type'])\n symbol_element.setAttribute(\"name\", unicode(count))\n if tags and len(tags) > 0:\n symbol_element.setAttribute(\"tags\", tags)\n symbols_element.appendChild(symbol_element)\n\n for layer in reversed(properties['layer']):\n renderer_layer_element = xml_document.createElement(\"layer\")\n renderer_layer_element.setAttribute(\"pass\", \"0\")\n renderer_layer_element.setAttribute(\"enabled\", \"1\")\n renderer_layer_element.setAttribute(\"locked\", \"0\")\n renderer_layer_element.setAttribute(\"class\", layer['simpleSymbolClass'])\n symbol_element.appendChild(renderer_layer_element)\n\n for key, value in layer['dict_symbols'].items():\n\n symbol_properties_element = xml_document.createElement(\"prop\")\n symbol_properties_element.setAttribute(\"k\", unicode(key))\n symbol_properties_element.setAttribute(\"v\", unicode(value))\n renderer_layer_element.appendChild(symbol_properties_element)\n\n data_defined_properties_element = xml_document.createElement(\"data_defined_properties\")\n renderer_layer_element.appendChild(data_defined_properties_element)\n\n data_defined_option_element = xml_document.createElement(\"Option\")\n data_defined_option_element.setAttribute(\"type\", \"Map\")\n data_defined_properties_element.appendChild(data_defined_option_element)\n\n data_defined_option_value_element = xml_document.createElement(\"Option\")\n data_defined_option_value_element.setAttribute(\"value\", \"\")\n data_defined_option_value_element.setAttribute(\"type\", \"QString\")\n data_defined_option_value_element.setAttribute(\"name\", \"name\")\n data_defined_option_element.appendChild(data_defined_option_value_element)\n\n data_defined_option_name_element = xml_document.createElement(\"Option\")\n data_defined_option_name_element.setAttribute(\"name\", \"properties\")\n data_defined_option_element.appendChild(data_defined_option_name_element)\n\n data_defined_option_collection_element = xml_document.createElement(\"Option\")\n data_defined_option_collection_element.setAttribute(\"value\", \"collection\")\n data_defined_option_collection_element.setAttribute(\"type\", \"QString\")\n data_defined_option_collection_element.setAttribute(\"name\", \"type\")\n data_defined_option_element.appendChild(data_defined_option_collection_element)\n\n if 'subSymbol' in layer:\n SimpleSymbol.create_simple_symbol(xml_document, renderer_layer_element, layer['subSymbol'], \"@0@0\", '1')", "def populateGallery():\n\n # Set the UI parent to be the scroll layout\n global objectScroll\n cmds.setParent(objectScroll)\n\n # List all assets in the direcoty\n assetList = [directory for directory in os.listdir(AC.ASSETS_PATH) if os.path.isdir(os.path.join(AC.ASSETS_PATH, 
directory))]\n\n # Create a ButtonIcon for each asset\n for asset in assetList:\n addButtonIcon(asset)", "def gen_symbols(path, strip):\n\n symbols = ''\n svg_namespace = 'http://www.w3.org/2000/svg'\n etree.register_namespace('', svg_namespace)\n\n for root, dirs, files in os.walk(os.path.abspath(path)):\n for wwsfile in files:\n basename, extension = os.path.splitext(wwsfile)\n if extension == '.svg':\n filepath = os.path.join(root, wwsfile)\n try:\n svg = etree.parse(filepath)\n svg_root = svg.getroot()\n\n attribs = svg_root.attrib\n desc = svg.find('{'+svg_namespace+'}desc')\n svg_root.remove(desc)\n title = svg.find('{'+svg_namespace+'}title')\n svg_root.remove(title)\n metadata = svg.find('{'+svg_namespace+'}metadata')\n svg_root.remove(metadata)\n\n viewbox_attrib = 'viewBox'\n if viewbox_attrib in attribs:\n viewbox = attribs[viewbox_attrib]\n else:\n viewbox = f\"0 0 {attribs['width']} {attribs['height']}\"\n\n basename2 = basename.replace(strip, '')\n symbols += f'<symbol id=\"{basename2}\" viewBox=\"{viewbox}\">'\n\n for element in svg_root:\n symbols += etree.tostring(element).decode('utf-8')\n symbols += '</symbol>'\n\n except Exception as err:\n warnings.warn(f'Could not parse file {filepath}: {err}')\n\n return symbols", "def my_phantomgallery( phantom_type ):\n\n if phantom_type == 'ellipses' or phantom_type == 'shepp_logan':\n # [semiaxis 1, semiaxis 2, x center, y center, phi=angle (degrees), greyscale=attenuation]\n M = np.array([[ .69, .92, 0, 0, 0, 1.],\n [ .6624, .8740, 0, -.0184, 0, -0.8],\n [ .1100, .3100, .22, 0, -18, -.2],\n [ .1600, .4100, -.22, 0, 18, -.2],\n [ .2100, .2500, 0, .35, 0, .1],\n [ .0460, .0460, 0, .1, 0, .1],\n [ .0460, .0460, 0, -.1, 0, .1],\n [ .0460, .0230, -.08, -.605, 0, .1],\n [ .0230, .0230, 0, -.605, 0, .1],\n [ .0230, .0460, .06, -.605, 0, .1]])\n\n\n elif phantom_type == 'modified_shepp_logan':\n # [semiaxis 1, semiaxis 2, x center, y center, phi=angle (degrees), greyscale=attenuation]\n p1 = [.7, .8, 0, 0, 0, 1]\n p2 = [.65,.75,0,0,0,-.9]\n p3 = [.15,.2,0,.4,0,.5]\n p4 = [.25,.15,-.25,.25,135.79,.2]\n p5 = [.25,.15,.25,.25,45.26,.2]\n p6 = [.08,.25,0,-.3,28.65,.65]\n p7 = [.05,.05,.5,-.3,0,.8]\n # combine into a matrix with one ellipse in each row\n M = np.array([p1, p2, p3, p4, p5, p6, p7]);\n \n\n\n elif phantom_type == 'squares':\n # [x center, y center, edge length ,phi=angle (degrees), greyscale=attenuation]\n s1 = [0,0,1.3,0,1]\n s2 = [0,0,1.1,0,-.9]\n s3 = [.1,-.1,.5,180/6,.4]\n s4 = [-.25,.15,.25,180/4,.2]\n s5 = [-.2,.25,.3,180/3,.4]\n #combine into a matrix with one square in each row\n M = np.array([s1, s2, s3, s4, s5]);\n\n elif (phantom_type == 'rectangles'):\n # [x center, y center, dimension 1, dimension 2, phi=angle (degrees), greyscale=attenuation]\n r1 = [0,0,1.3,1.1,0,1]\n r2 = [0,0,1.2,1,0,-.9]\n r3 = [0.25,.15,.25,.6,180/6,.4]\n r4 = [-.2,.1,.25,.20,180/4,.2]\n r5 = [-.3,.2,.3,.2,180/6,.4]\n #combine into a matrix with one square in each row\n M = np.array([r1, r2, r3, r4, r5])\n else:\n print('Unknown phantom_type')\n M = None\n\n return M", "def draw_schem(self):\n schem_cursor = DrawerCursor(self)\n while schem_cursor.draw_comps_here():\n pass\n while self._state: # TODO Add real stack ability\n self.pop()\n schem_cursor.step_back()\n schem_cursor.draw_comps_here('down')\n self.draw()\n self.save('resources/khiri.png')", "def display_images(symbol_dict, color=False):\n \n images = []\n\n for s in symbol_dict.values():\n for symb_img in s:\n images.append(symb_img.img)\n\n show_images(images, color=color)", "def 
CreateNorthSymbol(ntype=GVNORTHSYM1,color1=(0.0,0.0,0.0,1.0),\n color2=(1.0,1.0,1.0,1.0),scale=1.0,symbol_manager=None):\n\n if symbol_manager is None:\n sm=gview.GvSymbolManager()\n else:\n sm=symbol_manager\n\n cstr1=gvogrfs.gv_to_ogr_color(color1)\n if len(cstr1) < 9:\n cstr1=cstr1+\"FF\"\n\n cstr2=gvogrfs.gv_to_ogr_color(color2)\n if len(cstr2) < 9:\n cstr2=cstr2+\"FF\"\n\n sstr = str(scale).replace('.','_')\n\n refname=ntype+cstr1[1:]+cstr2[1:]+sstr\n if ntype==GVNORTHSYM1: \n shape=gview.GvShape(type=gview.GVSHAPE_AREA)\n shape.set_node(1.0*scale,-2.6*scale,node=0)\n shape.set_node(0.0,-0.8*scale,node=1)\n shape.set_node(-1.0*scale,-2.6*scale,node=2)\n shape.set_node(0.0,2.6*scale,node=3)\n shape.set_node(1.0*scale,-2.6*scale,node=4)\n shape.set_property('_gv_ogrfs','PEN(c:'+cstr1+');BRUSH(c:'+\\\n cstr2+')')\n sm.inject_vector_symbol(refname,shape)\n\n return (refname,sm)", "def generate_style(self, data):\n\n s = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<sld:StyledLayerDescriptor xmlns=\"http://www.opengis.net/sld\" xmlns:sld=\"http://www.opengis.net/sld\" xmlns:ogc=\"http://www.opengis.net/ogc\" xmlns:gml=\"http://www.opengis.net/gml\" version=\"1.0.0\">\n <sld:NamedLayer>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:UserStyle>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:Title>People Affected By More Than 1m Of Inundation</sld:Title>\n <sld:Abstract>People Affected By More Than 1m Of Inundation</sld:Abstract>\n <sld:FeatureTypeStyle>\n <sld:Name>People affected by more than 1m of inundation</sld:Name>\n <sld:Rule>\n <sld:RasterSymbolizer>\n <sld:Geometry>\n <ogc:PropertyName>geom</ogc:PropertyName>\n </sld:Geometry>\n <sld:ChannelSelection>\n <sld:GrayChannel>\n <sld:SourceChannelName>1</sld:SourceChannelName>\n </sld:GrayChannel>\n </sld:ChannelSelection>\n <sld:ColorMap>\n <sld:ColorMapEntry color=\"#ffffff\" opacity=\"0\" quantity=\"-9999.0\"/>\n <sld:ColorMapEntry color=\"#38A800\" opacity=\"0\" quantity=\"2\"/>\n <sld:ColorMapEntry color=\"#38A800\" quantity=\"5\"/>\n <sld:ColorMapEntry color=\"#79C900\" quantity=\"10\"/>\n <sld:ColorMapEntry color=\"#CEED00\" quantity=\"20\"/>\n <sld:ColorMapEntry color=\"#FFCC00\" quantity=\"50\"/>\n <sld:ColorMapEntry color=\"#FF6600\" quantity=\"100\"/>\n <sld:ColorMapEntry color=\"#FF0000\" quantity=\"200\"/>\n <sld:ColorMapEntry color=\"#7A0000\" quantity=\"300\"/>\n </sld:ColorMap>\n </sld:RasterSymbolizer>\n </sld:Rule>\n </sld:FeatureTypeStyle>\n </sld:UserStyle>\n </sld:NamedLayer>\n</sld:StyledLayerDescriptor>\n\n \"\"\"\n\n return s", "def setupStyling(self):\n\n\t\tfaces = {\n\t\t\t'times': 'Times New Roman',\n\t\t\t'mono' : 'Courier New',\n\t\t\t'helv' : 'Arial',\n\t\t\t'other': 'Comic Sans MS',\n\t\t\t'size' : 10,\n\t\t\t'size2': 8,\n\t\t}\n\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_DEFAULT, \"back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleClearAll()\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_LINENUMBER, \"fore:#928374,back:#212121,face:%(mono)s,size:%(size2)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEXT, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HEADING, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HIDDEN, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODE, \"fore:#b8bb26,back:#282828,face:%(mono)s,size:%(size)d\" % 
faces)\n\t\tself.edit.StyleSetSpec(STYLE.SYMBOL, \"fore:#81ac71,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEST, \"fore:#ff00ff,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.STRIKE, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.BOLD, \"fore:#d9a62e,bold,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.UNDERLINE, \"fore:#d9a62e,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.ITALIC, \"fore:#7d9d90,italic,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML_ATTRIBUTE, \"fore:#d9a62e,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.FORMAT, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.COMMENT, \"fore:#928372,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_KEYWORD, \"fore:#569cd6,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_SYMBOL, \"fore:#9cdcfe,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TEXT, \"fore:#F9FFE0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_STRING, \"fore:#d69d73,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_COMMENT, \"fore:#57a64a,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FUNCTION, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_CLASS, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TYPE, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FLOW, \"fore:#d8a0df,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_DIGIT, \"fore:#b5ce92,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.IndicatorSetStyle(0, stc.STC_INDIC_SQUIGGLE)\n\t\tself.edit.IndicatorSetForeground(0, wx.RED)", "def construct_svg_smash_commands(files, ids, cmd_format, cmd_args):\n commands = []\n for f in files:\n if not f.startswith('Figure'):\n continue\n\n prefix, remainder = f.split('.', 1)\n\n try:\n id_, remainder = remainder.rsplit('_', 1)\n except:\n # GLOBAL SVG for each figure\n assert remainder == 'GLOBAL'\n continue\n\n # ignore svgs for non-AG points\n if id_ not in ids:\n continue\n\n args = cmd_args.copy()\n args['sample_id'] = id_\n args['prefix'] = prefix\n commands.append(cmd_format % args)\n return commands", "def getcolorcode1527(ssize=5):\n\n iA = 29\n iB1 = 932\n iB2 = 566\n colInd = list()\n marker = list()\n patchlist = list()\n\n marker.append('o')\n marker.append('v')\n marker.append('x')\n\n for i in range(0, iA):\n colInd.append('k')\n for i in range(0, iB1):\n colInd.append('b')\n for i in range(0, iB2):\n colInd.append('r')\n\n\n import 
matplotlib.patches as mpatches\n patchlist.append(mpatches.Patch(color='black', label=r'$\\alpha$'))\n patchlist.append(mpatches.Patch(color='blue', label=r'$\\beta$-1'))\n patchlist.append(mpatches.Patch(color='red', label=r'$\\beta$-2'))\n\n alpha = plt.scatter(0, 1, c='k', marker=marker[0], s=ssize, label=r'$\\alpha$')\n beta1 = plt.scatter(0, 1, c='b', marker=marker[1], s=ssize, label=r'$\\beta\\textnormal{-}1$')\n beta2 = plt.scatter(0, 1, c='r', marker=marker[2], s=ssize, label=r'$\\beta\\textnormal{-}2$')\n plt.close()\n\n patchlist = [alpha, beta1, beta2]\n\n return colInd, marker, patchlist", "def process():\r\n st.title(\"Process in GauGAN\")\r\n st.subheader(\"Now choose the styles you wish to process with the paintings.\")\r\n\r\n # Styles dictionary\r\n styles_dict = {\"Afternoon 1\": 1, \"Afternoon 2\": 2, \"Sunset 1\": 3,\r\n \"Sunset 2 Red Sun\": 4, \"Afternoon 3\": 5, \"Afternoon 4\": 6, \"Sunset 3\": 7,\r\n \"Sunset 4\": 8, \"Sunset 5\": 9, \"Sunset 6\": 10}\r\n\r\n # Allow the user to choose the keys from the styles dictionary\r\n styles = st.multiselect(\"Styles: \",list(styles_dict.keys()),\"Afternoon 1\")\r\n\r\n # set the directory where the pictures will be imported from\r\n DIR = 'tmp/'\r\n # Calculate the number of files that are going to be processed\r\n number_of_files = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])\r\n # Show it in the sidebar\r\n st.sidebar.subheader(\"Total pictures to process: %s\"%number_of_files)\r\n # If the user has chosen to process them:\r\n if st.button(\"Start processing with GauGAN\"):\r\n if number_of_files > 0:\r\n # Then process it: take the directory where the going-to-be-imported pictures exist,\r\n # Process them, and save them in 'files' directory.\r\n make_nature(styles_dict,DIR[:-1],'files',styles)\r\n else: # the number of files is zero\r\n st.warning(\"There are no files to process.\")", "def get_symbol_images_and_positions(im):\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n _, threshold = cv2.threshold(gray, 100, 255, 0)\n threshold = 255 - threshold\n # show(threshold)\n contours, hierarchy = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n contours = [cv2.approxPolyDP(contour, 8, True) for contour in contours]\n contours = [c for c in contours if c.shape[0] == 4 and cv2.isContourConvex(c)]\n contours = sorted(contours, key=cv2.contourArea)\n contour = contours[-1]\n\n offset_x, offset_y, _, _ = cv2.boundingRect(contour)\n symbols_im = trim_to_contour_bounding_box(im, contour)\n half_height = symbols_im.shape[0] / 2\n half_width = symbols_im.shape[1] / 2\n symbols = (\n symbols_im[:half_height, :half_width],\n symbols_im[:half_height, half_width:],\n symbols_im[half_height:, :half_width],\n symbols_im[half_height:, half_width:],\n\n )\n symbols = (_process_button_im(symbol_im) for symbol_im in symbols)\n\n positions = (\n (offset_x + half_width / 2, offset_y + half_height / 2),\n (offset_x + half_width * 3 / 2, offset_y + half_height / 2),\n (offset_x + half_width / 2, offset_y + half_height * 3 / 2),\n (offset_x + half_width * 3 / 2, offset_y + half_height * 3 / 2),\n )\n\n return symbols, positions", "def risePen(gcode):\r\n gcode.append(\"M300 S46\")\r\n #gcode.append(\"G0 Z0.1000\")\r", "def create_graphics(self, n):\n f = Frame(n)\n f.pack(side=TOP, fill=BOTH, expand=Y)\n\n change_graphics = Labelframe(f, text='Change Graphics')\n change_graphics.pack(side=TOP, fill=BOTH, expand=Y)\n Grid.columnconfigure(change_graphics, 0, weight=1)\n 
Grid.columnconfigure(change_graphics, 1, weight=1)\n\n graphicpacks = Listbox(\n change_graphics, height=10, listvariable=self.graphics,\n activestyle='dotbox')\n graphicpacks.grid(column=0, row=0, columnspan=2, sticky=\"nsew\", pady=4)\n\n install = Button(\n change_graphics, text='Install Graphics',\n command=lambda: self.install_graphics(graphicpacks))\n create_tooltip(install, 'Install selected graphics pack')\n install.grid(column=0, row=1, sticky=\"nsew\")\n update_saves = Button(\n change_graphics, text='Update Savegames',\n command=self.update_savegames)\n create_tooltip(\n update_saves, 'Install current graphics pack in all savegames')\n update_saves.grid(column=1, row=1, sticky=\"nsew\")\n truetype = Button(\n change_graphics, text='TrueType Fonts',\n command=lambda: self.cycle_option('truetype'))\n create_tooltip(\n truetype,\n 'Toggles whether to use TrueType fonts or tileset for text')\n truetype.grid(column=0, row=2, columnspan=2, sticky=\"nsew\")\n self.controls['truetype'] = truetype\n\n advanced = Labelframe(f, text='Advanced')\n advanced.pack(side=BOTTOM, fill=X, expand=Y)\n Grid.columnconfigure(advanced, 0, weight=1)\n Grid.columnconfigure(advanced, 1, weight=1)\n\n openfolder = Button(\n advanced, text='Open Graphics Folder',\n command=self.lnp.open_graphics)\n openfolder.grid(column=0, row=0, columnspan=2, sticky=\"nsew\")\n create_tooltip(openfolder, 'Add your own graphics packs here!')\n refresh = Button(\n advanced, text='Refresh List', command=self.read_graphics)\n create_tooltip(refresh, 'Refresh list of graphics packs')\n refresh.grid(column=0, row=1, sticky=\"nsew\")\n simplify = Button(\n advanced, text='Simplify Graphic Folders',\n command=self.simplify_graphics)\n create_tooltip(\n simplify, 'Deletes unnecessary files from graphics packs '\n '(saves space, useful for re-packaging)')\n simplify.grid(column=1, row=1, sticky=\"nsew\")\n return f", "def symbols(self):\n # get the names(identifiers) of all curves in the graph:\n curvenames = self.g.element_show()\n # foreach curve, add a diamond symbol, filled with the\n # color of the curve ('defcolor') and with a size of 2:\n for curvename in curvenames:\n self.g.element_configure(curvename, symbol='diamond',\n outlinewidth=2, fill='defcolor')", "def tokenize_graphics(raw_graphics_content):\n arg_stack = []\n for token in raw_graphics_content.decode(\"utf-8\").split():\n if token in PATH_OPS:\n yield (token, arg_stack.copy())\n arg_stack.clear()\n else:\n arg_stack.append(token)", "def draw(font_string,font_size,lang,alphabets,outdir=\".\"): # language, font file name, font full path, font size, characters\n \n \n image_dir=lang+\".\"+\"images\"\n if(os.path.exists(image_dir)):\n pass\n else:\n os.mkdir(image_dir)\n \n #Using a font\n #font= ImageFont.truetype(font,fsz)\n boxfile=image_dir+\"/\"+\"bigimage.box\"\n f=open(boxfile,\"w\")\n wt = 4000\n ht = 4000 #modified later using a separate script\n\t\n bigimage=Image.new(\"L\",(wt,ht),255)\t#change here for inverting\n bigdraw=ImageDraw.Draw(bigimage)\n x=y=10\n count=0\n for akshar in alphabets:\n akshar.strip() #remove nasty characters\n \n #I shall now create an image with black bgc and white font color. One\n #getbbox() determines the bounding box values I shall invert the image.\n #This has to be done since getbbox() only finds bounding box values for\n #non-zero pixels (read as white), but tesseract-ocr runs on the exact\n #opposite bgc fgc combination. 
Contact [email protected].\n \n \n #The lines below are pango/cairo code \n surface = cairo.ImageSurface(cairo.FORMAT_A8, font_size*4, font_size*3)\n context = cairo.Context(surface)\n\n pc = pangocairo.CairoContext(context)\n\n layout = pc.create_layout()\n layout.set_font_description(pango.FontDescription(font_string))\n layout.set_text(akshar)\n print akshar\n\n # lines take care of centering the text.\n width, height = surface.get_width(), surface.get_height()\n w, h = layout.get_pixel_size()\n position = (10,10) #most likely this part messes up when you try to change the size within this script. It is suggested to use the separate script.\n context.move_to(*position)\n pc.show_layout(layout)\n surface.write_to_png(\"pango.png\")\n\t\n #Here we open the generated image using PIL functions\n temp_image=Image.open(\"pango.png\") #black background, white text\n draw = ImageDraw.Draw(temp_image)\n bbox = temp_image.getbbox()\n deltax=bbox[2]-bbox[0]\n deltay=bbox[3]-bbox[1]\n\n \n print bbox\n new_image=temp_image.crop(bbox)\n temp_image=temp_image.load()\n inverted_image = ImageChops.invert(new_image) #White background, black text\n\t\n\tinverted_image.save(image_dir+\"/\"+str(count)+\".png\")\n\tbigimage.paste(inverted_image,(x,y))\n\tos.unlink(image_dir+\"/\"+str(count)+\".png\")\n\tcount = count+1\n\t#bigimage.load()\n bigbox=(x,y,x+deltax,y+deltay)\n print bigbox\n draw=ImageDraw.Draw(bigimage)\n\t#draw.rectangle(bigbox,None,100)\n x=bigbox[2]+5\n if x>(wt-10):\n x=10; y=y+40\n\n os.unlink(\"pango.png\") #delete the pango generated png\n\n line=akshar+\" \"+str(bigbox[0]-1)+\" \"+str(ht-(bigbox[1]+deltay)-1)+\" \"+str(bigbox[2]+1)+\" \"+str(ht-(bigbox[3]-deltay)+1) # this is the line to be added to the box file\n\tf.write(line+'\\n')\n\n\t#degrade code starts\n\tstrip=[deltax*.2,deltax*.4,deltax*.7]\n\tfor values in range(0,2):\n\t\tdistort2=inverted_image\n\t\tfor wai in range(0,deltay):\n\t\t\tfor ex in range(strip[values],strip[values]+1):\n\t\t\t\tdistort2.putpixel((ex,wai),255)\n\t\tbigbox=(x,y,x+deltax,y+deltay)\n\t\t#draw.rectangle(bigbox,None,10)\n\t\tline=akshar+\" \"+str(bigbox[0]-1)+\" \"+str(ht-(bigbox[1]+deltay)-1)+\" \"+str(bigbox[2]+1)+\" \"+str(ht-(bigbox[3]-deltay)+1) # this is the line to be added to the box file\n \tf.write(line+'\\n')\n\t\tbigimage.paste(distort2,(x,y))\n\t\tx=bigbox[2]+5\n \tif x>(wt-10):\n \t\tx=10; y=y+40\n\t\t\n\t\t\t\n\t#degrade code ends\n \n #distort.distort(filename2,bbox,fsz,akshar)\n \n \n \n #bigimage.save(image_dir+\"/\"+\"bigimage.tif\",\"TIFF\") #useful to generate merged file for all images when using default sizes.\n f.close()\n train.train(lang,outdir)", "def insertSymbol(name, image, title, string, text ='', num=0):\n onclick = \"insertSymbol('%s', '%s', %d);\" % (name, string, num)\n html = u'<a onclick=\"%s\" ' % onclick\n html += u'title=\"%s\">' % title\n html += text\n if image <> \"\":\n html += u'<img alt=\"%s\" src=\"%s\"/>' % ('symbol', image)\n html += u'</a>\\n' \n return html", "def symbols(self):\n pass", "def make_images(self, text_on_button):\n text_color = pg.Color('black')\n idle_color = pg.Color('#FCF0C8')\n hover_color = pg.Color('#E84A5F')\n blocked_color = pg.Color('#DCDADA')\n blocked_text_color = pg.Color('#A79E8B')\n # idle_image = self.font.render(text, True, idle_color)\n # blocked_image = self.font.render(text, True, blocked_color)\n # hover_image = self.font.render(text, True, hover_color, hover_fill)\n text = self.font.render(text_on_button, True, text_color)\n blocked_text = self.font.render(\n 
text_on_button, True, blocked_text_color)\n width = text.get_width() + 30\n height = text.get_height() + 20\n\n image = pg.Surface((width, height)).convert_alpha()\n image.fill((0, 0, 0, 0))\n\n idle_image = image.copy()\n small_rect = pg.rect.Rect(3, 3, width - 6, height - 6)\n pg.draw.ellipse(idle_image, idle_color, small_rect)\n\n blocked_image = image.copy()\n small_rect = pg.rect.Rect(3, 3, width - 6, height - 6)\n pg.draw.ellipse(blocked_image, blocked_color, small_rect)\n\n hover_image = image.copy()\n big_rect = pg.rect.Rect(0, 0, width, height)\n pg.draw.ellipse(hover_image, hover_color, big_rect)\n small_rect = pg.rect.Rect(3, 3, width - 6, height - 6)\n pg.draw.ellipse(hover_image, idle_color, small_rect)\n\n text_rect = pg.rect.Rect(0, 0, text.get_width(), text.get_height())\n text_rect.center = small_rect.center\n idle_image.blit(text, text_rect)\n blocked_image.blit(blocked_text, text_rect)\n hover_image.blit(text, text_rect)\n\n return idle_image, blocked_image, hover_image", "def ps2svg_simple(sPostscript):\n\n def group_numbers(result, times = 1):\n nums = []\n for sNum in result.groups():\n if re.match(r'[a-zA-Z]+', sNum):\n # This is just a string\n nums.append(sNum)\n else:\n # This must be a floating point number\n nums.append(\"{:.6f}\".format(times * float(sNum) ))\n return nums\n\n sBack = \"\"\n lst_out = []\n oErr = ErrHandle()\n line_style = 'stroke:black;stroke-width:1'\n point_style = \"fill:blue;font-family:Times\"\n offset_y = 18 # Adding 18px to compensate for double mirroring\n min_y = width_simple\n min_x = height_simple\n max_y = 0\n max_x = 0\n try:\n # Recognize the initial lines we are looking for\n re_Line = re.compile( r'^\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+l$')\n re_point = re.compile(r'^([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+translate\\s+([0-9]+\\.?[0-9]*)\\s+rotate$')\n re_label = re.compile(r'^\\(([a-zA-Z]+)\\)\\s+show$')\n\n lst_out.append(sIntroSimple)\n\n # Split into lines\n lines = sPostscript.split(\"\\n\")\n section = \"pre\"\n idx = 14\n bFirstPoint = True\n oorsprong = dict(x=0.0, y=0.0)\n for line in lines:\n # Check if we have a line \n if section == \"pre\":\n result = re_Line.search(line)\n if result:\n section = \"lines\"\n else:\n # We are not in a lines section\n pass\n if section == \"lines\":\n result = re_Line.search(line)\n if result:\n nums = group_numbers(result, 1)\n # Convert into <line> element\n sLine = '<g id=line{}><line x1=\"{}\" y1=\"{}\" x2=\"{}\" y2=\"{}\" style=\"{}\" stroke-linecap=\"round\" /></g>'.format(\n idx, nums[0], nums[1], nums[2], nums[3], line_style)\n idx += 2\n lst_out.append(sLine)\n\n # Keep track of min_y and min_x\n min_x = min(min_x, float(nums[0]), float(nums[2]))\n min_y = min(min_y, float(nums[1]), float(nums[3]))\n max_x = max(max_x, float(nums[0]), float(nums[2]))\n max_y = max(max_y, float(nums[1]), float(nums[3]))\n else:\n # We have exited the lines section\n section = \"point\"\n\n elif section == \"point\":\n # Look for a point\n result = re_point.search(line)\n if result:\n # We have found a point: get it in\n nums = group_numbers(result, 1)\n pos_x = \"{:.6f}\".format(float(nums[0])) \n pos_y = \"{:.6f}\".format(float(nums[1]) + offset_y )\n\n # Keep track of min_y and min_x\n min_x = min(min_x, float(nums[0]))\n min_y = min(min_y, float(nums[1]))\n max_x = max(max_x, float(nums[0]))\n max_y = max(max_y, float(nums[1]))\n\n section = \"label\"\n elif section == \"label\":\n # Look for a label\n result = 
re_label.search(line)\n if result:\n # we have found a label: get it\n sLabel = result.groups()[0]\n\n # Output this label\n sLabel = '<g id=\"text{}\"><text y=\"{}\" x=\"{}\" style=\"{}\">{}</text></g>'.format(\n idx, pos_y, pos_x, point_style, sLabel)\n idx += 2\n lst_out.append(sLabel)\n\n section = \"point\"\n\n # Finish up the svg nicely\n lst_out.append(\"</svg>\")\n # Convert the list into a string\n sBack = \"\\n\".join(lst_out)\n\n # Adapt w.r.t. min_x and min_y, max_x, max_y\n fHeight = height_simple - 2 * min_y + offset_y\n sViewbox = 'viewBox=\"{} {} {} {}\" width=\"{}\" height=\"{}\"'.format(\n 0, min_y, width_simple, fHeight, width_simple, fHeight\n )\n sBack = sBack.replace('@viewbox', sViewbox)\n\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack" ]
[ "0.5846963", "0.5831491", "0.57929367", "0.5631854", "0.56144685", "0.55986845", "0.5543508", "0.55282354", "0.54610837", "0.5445795", "0.544179", "0.5433599", "0.54209745", "0.54185355", "0.53752816", "0.5288781", "0.5242281", "0.52400696", "0.5225346", "0.51927286", "0.51873726", "0.51864034", "0.5175348", "0.5174489", "0.5159743", "0.5150197", "0.5141812", "0.51395625", "0.5132592", "0.5129004" ]
0.69908535
0
check if given args are new
def __is_args_new(self, *args, **kwargs): # if input size is different if len(args) != len(self.__cached_args) or len(kwargs) != len(self.__cached_kwargs): return True # check args and kwargs for a, ca in zip(args, self.__cached_args): if a != (ca() if isinstance(ca, wr.ReferenceType) else ca): return True for k in kwargs: if k not in self.__cached_kwargs: return True a = self.__cached_kwargs[k] if kwargs[k] != (a() if isinstance(a, wr.ReferenceType) else a): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_args(self, args_):\n\n pass", "def _check_args(self, args):\n if len(args) == 0:\n print(\"No parameters provided.\")\n return False\n else:\n return True", "def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()", "def valid_args(args):\n return args is not None and len(args) > 0", "def test_args_count_equal(args: list, target: int) -> bool:\n\n\treturn (args_count(args) == target)", "def __getnewargs__(self):\n return ()", "def check_args(name, arg_str):\n if len(arg_str) < 1:\n raise gdb.GdbError(\"ERROR: '%s' requires an argument.\"\n % name)\n return False\n else:\n return True", "def validate_args(args):\n command = args[0]\n args_length = len(args) - 1\n return VALID_COMMANDS[command] == args_length", "def outdated(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def __check_arg_count(self):\n if len(self.args) > 6 or len(self.args) == 0:\n raise ArgError(\"Unsupported amount of arguments! (\" + str(len(self.args)) + \")\")", "def valid_args(args):\n is_valid = True\n\n # valid date format?\n try:\n datetime.datetime(year=args.year, month=args.month, day=args.day)\n except Exception:\n traceback.print_exc()\n is_valid = False\n\n print(f\"Arguments: {args}\")\n return is_valid", "def check_diff_as_arg(self):\n if self.args.diff is True:\n if (\n self.args.pre_snapfile is not None\n and os.path.isfile(self.args.pre_snapfile)\n ) and (\n self.args.post_snapfile is not None\n and os.path.isfile(self.args.post_snapfile)\n ):\n comp = Comparator()\n comp.compare_diff(self.args.pre_snapfile, self.args.post_snapfile, None)\n sys.exit(1)", "def _validate_add_command(args):\n res = _check_entry_name(args)\n if res != 0:\n return res\n\n return _check_property_arguments(args, args.type)", "def check_arguments(self):\n ## only four test operation is permitted, if given anything apart from this, then it should print error message\n if (self.args.snap is False and self.args.snapcheck is False and self.args.check is False and self.args.diff is False and self.args.version is False):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n\n if(((self.args.snap is True and (self.args.pre_snapfile is None or self.args.file is None)) or\n (self.args.snapcheck is True and self.args.file is None) or\n (self.args.check is True and self.args.file is None)) and \n (self.args.testfiles is None or self.args.hostname is None)\n ):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n if self.args.diff is True:\n if (self.args.pre_snapfile is not None and os.path.isfile(self.args.pre_snapfile)) and (\n self.args.post_snapfile is not None and os.path.isfile(self.args.post_snapfile)):\n comp = Comparator()\n comp.compare_diff(\n self.args.pre_snapfile,\n self.args.post_snapfile,\n None)\n sys.exit(1)\n else:\n if (self.args.file is None) and (\n self.args.testfiles is None or self.args.hostname is None):\n self.parser.print_help()\n sys.exit(1)", "def check_args(args):\n for arg in vars(args):\n if getattr(args, arg):\n return True\n return False", "def _validate_edit_command(args):\n res = _check_entry_name(args)\n if res != 0:\n return res\n\n # If no new type is specified on the command line then leave validation of\n # property arguments to _process_edit_command() when a type of the existing\n # entry is determined.\n if 
args.type is None:\n return 0\n\n return _check_property_arguments(args, args.type)", "def is_reachable_mut(self, mut, prev_args):\n mut_args = mut.args()\n for arg in prev_args:\n if arg not in mut_args:\n return False\n return True", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def check_args(self):\n parser = get_base_arguments(get_parser())\n parser = get_tc_arguments(parser)\n # Disable \"Do not use len(SEQ) as condition value\"\n # pylint: disable=C1801\n if len(sys.argv) < 2:\n self.logger.error(\"Icetea called with no arguments! \")\n parser.print_help()\n return False\n elif not self.args.ignore_invalid_params and self.unknown:\n self.logger.error(\"Unknown parameters received, exiting. \"\n \"To ignore this add --ignore_invalid_params flag.\")\n self.logger.error(\"Following parameters were unknown: {}\".format(self.unknown))\n parser.print_help()\n return False\n return True", "def check_args(args, iam='gfind', allow_no_coords=False):\n\n args = gargs.check_common_args(args, iam, allow_no_coords=allow_no_coords)\n\n return args", "def check_arguments(self):\n self.check_num_arguments()\n self.are_readable_files(self.args)", "def args_ok(inoptions, pos_args):\n\n if inoptions.factory_properties:\n for key in inoptions.factory_properties:\n setattr(inoptions, key, inoptions.factory_properties[key])\n\n if inoptions.list_masters:\n return True\n\n if inoptions.build_properties and not inoptions.master_dir:\n if inoptions.build_properties['mastername']:\n inoptions.mastername = inoptions.build_properties['mastername']\n else:\n print >>sys.stderr, 'error: build properties did not specify a ',\n print >>sys.stderr, 'mastername'\n return False\n else:\n if not (inoptions.master_dir or pos_args):\n print >>sys.stderr, 'error: you must provide a mastername or ',\n print >>sys.stderr, 'directory!'\n return False\n else:\n if not inoptions.master_dir:\n inoptions.mastername = pos_args.pop(0)\n\n if inoptions.stepfilter:\n if inoptions.stepreject:\n print >>sys.stderr, ('Error: can\\'t specify both stepfilter and '\n 'stepreject at the same time!')\n return False\n\n try:\n inoptions.step_regex = re.compile(inoptions.stepfilter)\n except re.error as e:\n print >>sys.stderr, 'Error compiling stepfilter regex \\'%s\\': %s' % (\n inoptions.stepfilter, e)\n return False\n if inoptions.stepreject:\n if inoptions.stepfilter:\n print >>sys.stderr, ('Error: can\\'t specify both stepfilter and '\n 'stepreject at the same time!')\n return False\n try:\n inoptions.stepreject_regex = re.compile(inoptions.stepreject)\n except re.error as e:\n print >>sys.stderr, 'Error compiling stepreject regex \\'%s\\': %s' % (\n inoptions.stepfilter, e)\n return False\n\n if inoptions.list_builders:\n return True\n\n if inoptions.build_properties and not (inoptions.slavehost or\n inoptions.builder):\n if inoptions.build_properties['buildername']:\n inoptions.builder = inoptions.build_properties['buildername']\n else:\n print >>sys.stderr, 'error: build properties did not specify a '\n print >>sys.stderr, 'buildername!'\n return False\n else:\n if not (pos_args or inoptions.slavehost or inoptions.builder):\n print >>sys.stderr, 'Error: you must provide a builder or slave hostname!'\n return False\n\n inoptions.spec = {}\n if inoptions.builder:\n inoptions.spec['builder'] = inoptions.builder\n elif inoptions.slavehost:\n 
inoptions.spec['hostname'] = inoptions.slavehost\n else:\n inoptions.spec['either'] = pos_args.pop(0)\n\n if inoptions.list_steps:\n return True\n\n if inoptions.logfile == '-' or inoptions.annotate:\n inoptions.log = sys.stdout\n else:\n try:\n inoptions.log = open(inoptions.logfile, 'w')\n except IOError as err:\n errno, strerror = err\n print >>sys.stderr, 'Error %d opening logfile %s: %s' % (\n inoptions.logfile, errno, strerror)\n return False\n\n if hasattr(inoptions, 'build_properties') and not hasattr(\n inoptions, 'svn_rev'):\n if inoptions.build_properties['revision']:\n try:\n setattr(inoptions, 'revision', int(\n inoptions.build_properties['revision']))\n except ValueError:\n setattr(inoptions, 'revision', None)\n\n if not (hasattr(inoptions, 'revision') and inoptions.revision) and (\n inoptions.build_properties['got_revision']):\n try:\n setattr(inoptions, 'revision', int(\n inoptions.build_properties['got_revision']))\n except ValueError:\n setattr(inoptions, 'revision', None)\n\n if not inoptions.revision or inoptions.revision < 1:\n print >>sys.stderr, 'Error: revision must be a non-negative integer!'\n return False\n else:\n print >>sys.stderr, 'error: build properties did not specify a revision!'\n return False\n\n print >>sys.stderr, 'using revision: %d' % inoptions.revision\n inoptions.build_properties['revision'] = '%d' % inoptions.revision\n else:\n if inoptions.svn_rev:\n try:\n inoptions.revision = int(inoptions.svn_rev)\n except ValueError:\n inoptions.revision = None\n\n if not inoptions.revision or inoptions.revision < 1:\n print >>sys.stderr, 'Error: svn rev must be a non-negative integer!'\n return False\n\n if not inoptions.annotate:\n print >>sys.stderr, 'using revision: %d' % inoptions.revision\n else: # nothing specified on command line, let's check LKGR\n inoptions.revision, errmsg = get_lkgr()\n if not inoptions.revision:\n print >>sys.stderr, errmsg\n return False\n if not inoptions.annotate:\n print >>sys.stderr, 'using LKGR: %d' % inoptions.revision\n\n return True", "def test_args(self):\n args = forge.args\n assert isinstance(args, forge._signature.VarPositional)\n assert args.name == 'args'\n assert args.converter is None\n assert args.validator is None", "def _is_args_added(parser: CoreParser, custom_args: List[str]) -> bool:\n namespace, _ = parser.parser.parse_known_args()\n namespace_args = vars(namespace).keys()\n\n for arg in custom_args:\n if arg not in namespace_args:\n return False\n\n return True", "def validate_input(self, *args):\n return", "def valid_args(args):\n is_valid = True\n if not args.ts_url or not args.username or not args.password or not args.from_user or not args.to_user:\n eprint(\"Missing required parameters.\")\n is_valid = False\n\n return is_valid", "def ok(*args):", "def validate_params(cls, args):\n if not (len(args) == 3 or len(args) == 5 or len(args) == 7):\n sys.exit(\n 'Execute o script passando o caminho do diretório das'\n ' imagens, ou apenas o path de uma imagem e decida se'\n ' deseja mover ou não'\n )\n args_dict = cls.__make_params(args)\n keys_args_set = set(args_dict.keys())\n if keys_args_set.difference(KEYS_DEFAULT_AS_SET) != set():\n sys.exit(\n 'Verifique a passagem de parâmetros.'\n ' Foi encontrado parâmetros desconhecidos.'\n )\n\n return cls.__check_args(args_dict)", "def args_is_good(arg_list: list) -> bool:\n usage_msg = (\n \"Usage: python operations.py <number1> <number2>\\n\"\n \"Example:\\n\"\n \" python operations.py 10 3\\n\"\n )\n too_many_msg = \"InputError: too many arguments\\n\"\n 
only_numbers_msg = \"InputError: only numbers\\n\"\n if len(arg_list) == 1:\n print(usage_msg)\n return False\n if len(arg_list) > 3:\n print(too_many_msg, usage_msg)\n return False\n try:\n a, b = int(arg_list[1]), int(arg_list[2])\n # discarding floats here, even those like 5.0\n # use float.is_integer() if need to keep those\n # keeping only 42 or \"42\" (ints with or without quotes)\n if arg_list[1] == str(a) and arg_list[2] == str(b):\n return True\n except TypeError:\n print(only_numbers_msg, usage_msg)\n return False", "def hasInputsChanged(self):\n return False\n\n # XXX\n _parameters = None\n lastConfigChange = self.findLastConfigureOperation()\n if lastConfigChange:\n changeset = self._manifest.loadConfigChange(lastConfigChange)\n _parameters = changeset.inputs\n if not _parameters:\n return not not self.inputs\n\n # isn't it too early for this??\n inputs = self.getCurrentInputs(lastConfigChange)\n if set(inputs.keys()) != set(_parameters.keys()):\n return True # params were added or removed\n\n # XXX calculate and compare digests\n return False" ]
[ "0.7180964", "0.67476994", "0.6550825", "0.64482", "0.61821944", "0.6121256", "0.6078874", "0.60775244", "0.5957351", "0.59326947", "0.59292126", "0.585978", "0.5825051", "0.57995284", "0.57905746", "0.5777149", "0.57604843", "0.5759606", "0.5752128", "0.5742403", "0.5735094", "0.57057124", "0.57007", "0.56933355", "0.5674712", "0.5668681", "0.56647813", "0.5627276", "0.56124616", "0.56017536" ]
0.76295835
0
Return the two base images needed to create a lighthouse animation. base_img is either a full/relative path from the run context, or the name of a directory under lighthouses here
def load_base_images(base_img): if base_img is not None: if not os.path.exists(base_img): base_img = os.path.join(LIGHTHOUSES_DIR, base_img) return ( Image.open(os.path.join(base_img, 'on.gif')).convert('RGBA'), Image.open(os.path.join(base_img, 'off.gif')) ) return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_imagebase(self):\n pass", "def get_random_base():\n\n n_base = count_raw_img('base')\n img = \"{}.jpg\".format(random.randint(1, n_base + 1))\n return Image.open(RAW_DIR_PATH['base'] + img)", "def propose_image_path():\n image_name = \"image_{}.png\".format(''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(8)))\n image_path = os.path.join(_IMAGE_BASE_PATH, image_name)\n image_url = '/images/' + image_name\n return image_path, image_url", "def base_image(self) -> Optional[pulumi.Input['BasisArgs']]:\n return pulumi.get(self, \"base_image\")", "def get_images():\n return _IMAGES", "def images(name):\n return static_file(name, root=os.path.join(BASEDIR, \"images\"))", "def getimgs():", "def get_image_url():", "def base_image(self) -> pulumi.Output['outputs.BasisResponse']:\n return pulumi.get(self, \"base_image\")", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def read_base_folder(base_path: str) -> dict:\n\tfour_x_images: dict = {}\n\tfor file in listdir(four_x_file_path):\n\t\tfile_extension = os.path.splitext(file)[1]\n\t\tif file_extension in image_extensions:\n\t\t\tfour_x_images[file] = join(base_path, file)\n\treturn four_x_images", "def create_base_image(self, builder, template, parameters):", "def images_steps(login, horizon):\n return ImagesSteps(horizon)", "def baseimage(self, new_image):\n images = []\n for instr in self.structure:\n if instr['instruction'] == 'FROM':\n image, _ = image_from(instr['value'])\n if image is not None:\n images.append(image)\n if not images:\n raise RuntimeError('No stage defined to set base image on')\n images[-1] = new_image\n self.parent_images = images", "def getImagePath():\n currentPath = os.path.dirname(__file__)\n resourcesPath = os.path.join(currentPath, \"Resources\")\n imagesPath = os.path.join(resourcesPath, \"Images\")\n return imagesPath", "def image(name):\n\n # the path where all the images area\n if getattr(sys, 'frozen', False):\n # The application is frozen\n datadir = os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n datadir = os.path.dirname(__file__)\n return str(os.path.join(os.path.abspath(datadir), \"icons\", name))", "def __make_icon():\n icon = pygame.image.load(str(PurePath(\"res/Images/bird_wing_down.png\")))\n return icon", "def _get_image(runtime):\n return \"{}:{}\".format(LambdaContainer._IMAGE_REPO_NAME, runtime)", "def _get_base(**kwargs):\n profile = get_container_profile(copy.deepcopy(kwargs.get(\"profile\")))\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, _marker)\n profile_match = profile.pop(key, default)\n # let kwarg overrides be the preferred choice\n if kw_overrides_match is _marker:\n return profile_match\n return kw_overrides_match\n\n template = select(\"template\")\n image = select(\"image\")\n vgname = select(\"vgname\")\n path = kwargs.get(\"path\", None)\n # remove the above three variables from kwargs, if they exist, to avoid\n # duplicates if create() is invoked below.\n for param in (\"path\", \"image\", \"vgname\", \"template\"):\n kwargs.pop(param, None)\n\n if image:\n proto = urllib.parse.urlparse(image).scheme\n img_tar = __salt__[\"cp.cache_file\"](image)\n img_name = os.path.basename(img_tar)\n hash_ = salt.utils.hashutils.get_hash(\n img_tar, 
__salt__[\"config.get\"](\"hash_type\")\n )\n name = f\"__base_{proto}_{img_name}_{hash_}\"\n if not exists(name, path=path):\n create(\n name, template=template, image=image, path=path, vgname=vgname, **kwargs\n )\n if vgname:\n rootfs = os.path.join(\"/dev\", vgname, name)\n edit_conf(\n info(name, path=path)[\"config\"],\n out_format=\"commented\",\n **{\"lxc.rootfs\": rootfs},\n )\n return name\n elif template:\n name = f\"__base_{template}\"\n if not exists(name, path=path):\n create(\n name, template=template, image=image, path=path, vgname=vgname, **kwargs\n )\n if vgname:\n rootfs = os.path.join(\"/dev\", vgname, name)\n edit_conf(\n info(name, path=path)[\"config\"],\n out_format=\"commented\",\n **{\"lxc.rootfs\": rootfs},\n )\n return name\n return \"\"", "def getImagePath(self)->str:\n\n returnStr = '../../../../assets/image/{}.png'.format(randint(1,15))\n return returnStr", "def base():\n wheels()\n build_base()\n push_base()", "def make_image_path(raw_img, input_base, base_path):\n path = os.path.dirname(raw_img)\n relpath = os.path.relpath(path, input_base)\n if relpath == '.':\n dest_folder = base_path\n else:\n dest_folder = os.path.join(base_path, relpath)\n return os.path.normpath(dest_folder)\n # return dest_folder", "def imagePath(self):\n if self.use_dic:\n if self.imlist:\n paths = []\n for img in self.allimgs:\n paths.append(join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(img)+'.jpg'))\n return paths\n else:\n path = join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(self.img)+'.jpg')\n else:\n path = self.img\n return path", "def main():\n images = Images()\n #print images.create_image_urls()\n print images.get_image_random()\n print images.get_image(12)", "def load_images():\n\n def load_image(img_file_name):\n \"\"\"Return the loaded pygame image with the specified file name.\n\n This function looks for images in the game's images folder\n (./images/). 
All images are converted before being returned to\n speed up blitting.\n\n Arguments:\n img_file_name: The file name (including its extension, e.g.\n '.png') of the required image, without a file path.\n \"\"\"\n file_name = os.path.join('.', 'images', img_file_name)\n img = pygame.image.load(file_name)\n img.convert()\n return img\n\n return {'background': load_image('background.png'),\n 'pipe-end': load_image('pipe_end.png'),\n 'pipe-body': load_image('pipe_body.png'),\n # images for animating the flapping bird -- animated GIFs are\n # not supported in pygame\n 'bird-wingup': load_image('bird_wing_up.png'),\n 'bird-wingdown': load_image('bird_wing_down.png')}", "def create_gif(base_folder):\n img_list = []\n search_path = glob.glob(os.path.join(base_folder, '*.png'))\n search_path.sort()\n for f in search_path:\n im = Image.open(f)\n img_list.append(im)\n save_file = os.path.join(base_folder, 'animated_gif.gif')\n img_list[0].save(save_file,\n save_all=True, append_images=img_list[1:], optimize=False, duration=180, loop=0)", "def standard_image(img_name):\n clout = CommandLine(\n \"which afni\",\n ignore_exception=True,\n resource_monitor=False,\n terminal_output=\"allatonce\",\n ).run()\n if clout.runtime.returncode != 0:\n return None\n\n out = clout.runtime.stdout\n basedir = os.path.split(out)[0]\n return os.path.join(basedir, img_name)", "def get_images(stage=0):\n return get_files(stage)[0]", "def get_icons():\n ICONS = {\n \"http://files.heuritech.com/raw_files/surfrider/bottle.png\" : \".mot/resources/bottle.png\",\n \"http://files.heuritech.com/raw_files/surfrider/fragment.png\" : \".mot/resources/fragment.png\",\n \"http://files.heuritech.com/raw_files/surfrider/other.png\" : \".mot/resources/other.png\"\n }\n\n home = os.path.expanduser(\"~\")\n if not os.path.isdir(os.path.join(home, \".mot/\")):\n os.mkdir(os.path.join(home, \".mot/\"))\n if not os.path.isdir(os.path.join(home, \".mot/resources\")):\n os.mkdir(os.path.join(home, \".mot/resources\"))\n\n for k,v in ICONS.items():\n path = os.path.join(home, v)\n if not os.path.isfile(path):\n wget.download(k, path)\n print(\"\\ndownloaded to \", path)\n return [cv2.imread(filename,-1) for filename in [os.path.join(home, \".mot/resources/bottle.png\"),\n os.path.join(home, \".mot/resources/fragment.png\"),\n os.path.join(home, \".mot/resources/other.png\")]]", "def environmentImagesPath():\n # A recursion counter to make sure that the loop ends.\n count = 0\n # Get the path to the Blender executable.\n filePath = os.path.dirname(bpy.app.binary_path)\n # Find the lowest path level which contains Blender.\n while \"blender\" not in os.path.basename(filePath).lower():\n filePath = os.path.dirname(filePath)\n if not filePath or count == 20:\n break\n count += 1\n\n # Search all subpaths for the datafiles folder. Based on this folder\n # the path can be completed.\n for dirPath, dirs, fileList in os.walk(filePath):\n if os.path.basename(dirPath) == \"datafiles\":\n return os.path.join(os.path.join(dirPath, \"studiolights\"), \"world\")" ]
[ "0.62116516", "0.6163408", "0.5916963", "0.57784426", "0.5775673", "0.5691343", "0.5683936", "0.5591003", "0.5576489", "0.55682087", "0.5541228", "0.548597", "0.5453294", "0.54221994", "0.54105973", "0.5399216", "0.535957", "0.5359305", "0.5342906", "0.531411", "0.52954346", "0.5290477", "0.5283491", "0.5282739", "0.5266804", "0.5253816", "0.5236814", "0.5227043", "0.5216022", "0.51966715" ]
0.7206143
0
Given a light characteristic, return a list of 2-tuples representing the state of light at any given time.
A fixed light is the given colour, permanently
>>> characteristic_to_light_states('F. R')
[('R', 1)]
def characteristic_to_light_states(description):
    fragments = description.split()
    pattern_type, groups = parse_pattern(fragments.pop(0))
    colour, fragments = get_colour_code(fragments)
    try:
        period = parse_period(fragments)
    except IndexError:
        if must_have_period(pattern_type, groups):
            raise
        period = None
    if period is not None and cannot_have_period(pattern_type, groups):
        raise ValueError('Period is not allowed in this type of light')
    return TYPES[pattern_type](groups, colour, period)
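The helper names referenced above (parse_pattern, get_colour_code, parse_period, must_have_period, cannot_have_period, TYPES) come from the record itself, but their definitions are not included. A minimal sketch of what the fixed-light branch of the TYPES dispatch might look like, assuming a key of 'f' and the [('R', 1)] shape shown in the doctest; this is illustrative only, not the project's actual code:

def fixed_states(groups, colour, period):
    # A fixed light shows the one colour for the whole cycle.
    return [(colour, 1)]

# Hypothetical dispatch table; other keys ('fl', 'lfl', 'oc', ...) would map
# to their own state builders.
TYPES = {
    'f': fixed_states,
}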
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_states(crime):\n statelist = []\n for i in range(len(crime)-1):\n statelist.append(crime[i][0])\n return statelist", "def get_rgb_light():\n return list(light.rgb())", "def lights(self):\n return list(self.GetLights())", "def lights_on(self) -> list:\n return [\n entity for entity in self.all_lights if self.hass.get_state(entity) == \"on\"\n ]", "def lights(self) -> List[dict]:\n return self.items_by_domain(\"light\")", "def get_switch_states(self):\n switches_states = []\n for connection in self.connections:\n if connection.start.is_switch_output():\n switches_states.append((connection.start.switch,\n connection.start.output_nr))\n if connection.end.is_switch_output():\n switches_states.append((connection.end.switch,\n connection.end.output_nr))\n return switches_states", "def node_list_to_state_list(self, node_list):\n return [self.nodes[node].state for node in node_list]", "def find_dark_states(excited_state, ground_states):", "def light_palette(color, n_colors=6, reverse=False, as_cmap=False,\n input=\"rgb\"):\n color = _color_to_rgb(color, input)\n light = set_hls_values(color, l=.95)\n colors = [color, light] if reverse else [light, color]\n return blend_palette(colors, n_colors, as_cmap)", "def get_light_sensors(self):\n x=self.send_packet_check_response('\\x50')\n LS=[]\n for i in range(8):\n a=bytearray(x[i*3:(i+1)*3])\n LS.append(a[0]|(a[1]&0xf)<<8)\n LS.append(a[1]>>4|a[2]<<4)\n return LS", "def get_lights():\n _lifxlan.devices = None # forces a refresh\n _lifxlan.num_lights = None\n try:\n raw_lights = _lifxlan.get_devices() # get devices\n except OSError as err:\n log.warning('could not get lifx lights: %s', err)\n return []\n\n log.debug('discovered %i lifx lights: %s', len(raw_lights), ','.join([l.get_label() for l in raw_lights]))\n\n for raw_light in raw_lights:\n name = raw_light.get_label()\n\n # create state\n hue, saturation, brightness, kelvin = raw_light.get_color()\n rgb_color = colorsys.hsv_to_rgb(\n hue / 65535,\n saturation / 65535,\n brightness / 65535)\n\n state = LightState(\n power=raw_light.get_power() != 0,\n color=[\n int(rgb_color[0] * 255),\n int(rgb_color[1] * 255),\n int(rgb_color[2] * 255)\n ],\n kelvin=kelvin,\n brightness=int(brightness * 100 / 65523)\n )\n\n if name not in _cache:\n _cache[name] = LifxLight(name, raw_light, state=state)\n else:\n _cache[name].update_state(state)\n\n return list(_cache.values())", "def get_light_state(self, light):\n \tif (not self.has_image):\n return light.state, 1.0, None if light else TrafficLight.UNKNOWN, 0, None\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"rgb8\")\n #Get classification\n \treturn self.light_classifier.get_classification(cv_image)", "async def get_switches(self):\n return await self.get_states_by_tag_prefix(\"led\")", "def get_state_colors():\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit(\n \"colors\", {\"colors\": state_colors, \"cases\": state_cases, \"active\": state_active}\n )", "def get_light_list(self):\n return self.light_array", "def map_value_to_led_states(on, off, num_leds, value):\n assert -1.0 <= value <= 1, u'The input value must be in the range [-1, 1]'\n assert num_leds % 2 != 0, u'There must be an odd number of leds'\n assert num_leds >= 3, u'There must be at least 3 leds'\n mid_index = int(math.floor(num_leds / 2))\n active_length = mid_index + 1\n active_led_states = map(lambda i: 
(on if i / active_length <= abs(value) else off), map(float, range(active_length)))\n inactive_led_states = repeat(0, num_leds - active_length)\n if value < 0:\n return chain(reversed(active_led_states), inactive_led_states)\n return chain(inactive_led_states, active_led_states)", "def get_list_of_states(self):\n return self.states", "def state_sequence(node):\n states = [node.state]\n while node.previous:\n node = node.previous\n states.append(node.state)\n return states[::-1]", "def apply_brightness(self, colorArray, brightness=1):\n return [[max(0, min(x * brightness, 255)) for x in led] for led in colorArray]", "def light(color, dist):\n return tuple( float(x*dist*dist) for x in color )", "async def Turn_On_Lights() -> Dict[str, Any]:\n busylightapi.manager.light_on(ALL_LIGHTS)\n return {\n \"action\": \"on\",\n \"light_id\": \"all\",\n \"color\": \"green\",\n }", "def getStates(self):\n feature_states = []\n for i, sim in enumerate(self.sims):\n state = sim.getState()\n\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n return feature_states", "def getStates(self):\n feature_states = []\n for i, sim in enumerate(self.sims):\n state = sim.getState()\n\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n return feature_states", "def light_bumps():\n return [Bump.LIGHT_BUMP_L, Bump.LIGHT_BUMP_FL, Bump.LIGHT_BUMP_CL,\n Bump.LIGHT_BUMP_CR, Bump.LIGHT_BUMP_FR, Bump.LIGHT_BUMP_R]", "def ControlLights(state):\n for led in (RED,YELLOW,GREEN):\n GPIO.output(LED[led],state[led])\n time.sleep(FLASH_TIME)", "def control_lights(state):\n for led in (RED, AMBER, GREEN):\n GPIO.output(LED[led],state[led])", "def _get_brightness_component(self, color):\n brightness = self._get_color_brightness(color)\n if brightness == 0:\n return tuple(map(lambda x: -x, color))\n result = [0] * len(color)\n for i in range(len(color)):\n result[i] = color[i] - int(round(color[i]/brightness))\n return tuple(result)", "def relief_colors(color_or_ink: ColorOrInk = (0, 0, 0), darken_factors: ReliefBrightness = (0.6, 0.3)) -> ReliefColors:\n if len(color_or_ink) > 3 and not color_or_ink[3]:\n return ()\n max_col_part = max(color_or_ink[:3])\n if max_col_part == 0: # prevent zero division if color_or_ink is black/default\n lightened_color = (1.0, 1.0, 1.0)\n else:\n brighten_factor = 1 / max_col_part\n lightened_color = tuple([(col * brighten_factor) for col in color_or_ink[:3]])\n return tuple([tuple([col_part * darken for col_part in lightened_color]) for darken in darken_factors])", "def get_light_state(self, light):\n if(not self.has_image):\n self.prev_light_loc = None\n return TrafficLight.RED\n\n # fixing convoluted camera encoding...\n if hasattr(self.camera_image, 'encoding'):\n self.attribute = self.camera_image.encoding\n if self.camera_image.encoding == '8UC3':\n self.camera_image.encoding = \"rgb8\"\n else:\n self.camera_image.encoding = 'rgb8'\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"rgb8\")\n\n #Get classification\n if self.light_classifier is not None:\n classification = self.light_classifier.get_classification(cv_image)\n else:\n classification = TrafficLight.UNKNOWN\n print \"traffic light: \", label[classification]\n return 
classification", "def get_light_state(self, light):\n\treturn light.state \n\n\t#if(not self.has_image):\n # self.prev_light_loc = None\n # return False" ]
[ "0.5880032", "0.5774206", "0.5655911", "0.55833024", "0.5447408", "0.5406236", "0.54036015", "0.537056", "0.53520906", "0.5329592", "0.5319485", "0.5250559", "0.5245645", "0.5213831", "0.51771307", "0.51550233", "0.50853217", "0.507346", "0.50687844", "0.50391555", "0.50137216", "0.50026834", "0.50026834", "0.49972907", "0.49930874", "0.49766776", "0.49517578", "0.49430355", "0.49384093", "0.49358767" ]
0.6722805
0
Given the split up characteristic, return the period in milliseconds
The period is specified in seconds
>>> parse_period(['2'])
2000
The letter 's' to mark the units may be present
>>> parse_period(['3s'])
3000
It may be separated from the number by a space
>>> parse_period(['4', 's'])
4000
A Quick flash can only have a period if it has groups
>>> parse_period(['3s'])
3000
def parse_period(fragments):
    period_spec = fragments[-1]
    # The last term is the cycle period,
    # it may or may not have 's' for seconds
    # The 's' may or may not be attached to the number
    if period_spec == 's':
        period_spec = fragments[-2]
    if period_spec[-1] == 's':
        period_spec = period_spec[:-1]
    return int(float(period_spec) * 1000)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def period(self):\n period_ns = int(utils.readstr_all(os.path.join(_CHANNEL_PATH(self._chip,\n self._channel),\n 'period')))\n return int(period_ns / 1000)", "def get_period_seconds(period):\n if isinstance(period, str):\n try:\n name = 'seconds_per_' + period.lower()\n result = globals()[name]\n except KeyError:\n msg = \"period not in (second, minute, hour, day, month, year)\"\n raise ValueError(msg)\n elif isinstance(period, numbers.Number):\n result = period\n elif isinstance(period, datetime.timedelta):\n result = period.days * get_period_seconds('day') + period.seconds\n else:\n raise TypeError('period must be a string or integer')\n return result", "def period(self):\n return float(self._period) / 1000", "def parse_period(self, default=None): # TODO: this method is absolutly useless here...\n\t\tcfg_period = self.cfg_root.find('period')\n\t\tif cfg_period and cfg_period.text.isnumeric():\n\t\t\tcfg_period = int(cfg_period.text)\n\t\telse: # period not specified or not numerical value\n\t\t\tcfg_period = default\n\n\t\treturn cfg_period", "def period(self) -> int:", "def period(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[0]", "def period(self, value: int, /) -> None:", "def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):\n return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)", "def parse_interval(val):\n re_intv = re.compile(r\"(?P<count>\\d+)(?P<unit>\\w+)\")\n match = re_intv.match(val.strip())\n if not match:\n raise ValueError(\"invalid interval string '%s'\" % val)\n unit = match.group('unit')\n count = float(match.group('count'))\n if unit == 's':\n return count\n if unit == 'm':\n return count * 60\n if unit == 'ms':\n return count / 1000\n if unit == 'h':\n return count * 3600\n if unit == 'd':\n return count * 86400\n\n raise ValueError(\"unknown unit from interval string '%s'\" % val)", "def _calculate_period(vals):\n\tif len(vals) < 4:\n\t\treturn None\n\t# if self.firmware['major'] < 16:\n\t# \treturn ((vals[3] << 24) | (vals[2] << 16) | (vals[1] << 8) | vals[0]) / 12e6\n\t# else:\n\treturn self._calculate_float(vals)", "def period_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period_seconds\")", "def period_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period_seconds\")", "def to_length_secs(self):\n return (self.bpm / 60.0) / self.period", "def test_str_time_period_sec(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n value_type=\"time_period_sec\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xE0,\n 0xF5,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 57589)\n self.assertEqual(sensor.unit_of_measurement(), \"s\")\n self.assertEqual(sensor.ha_device_class(), None)", "def period_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"period_seconds\")", "def period_seconds(self) -> Optional[int]:\n return pulumi.get(self, \"period_seconds\")", "def getProbationPeriod(probationPercent, fileLength):\n return min(\n math.floor(probationPercent * fileLength),\n probationPercent * 5000)", "def parse_duration_string_ms(duration):\n pattern = r'(?P<value>[0-9]+\\.?[0-9]*?)(?P<units>\\D+)'\n matches = list(re.finditer(pattern, duration))\n assert matches, 'Failed to parse duration string %s' % duration\n\n times = {'h': 0, 'm': 0, 's': 0, 'ms': 0}\n for match in matches:\n parsed = match.groupdict()\n times[parsed['units']] = float(parsed['value'])\n\n return (times['h'] * 60 * 60 + times['m'] * 
60 + times['s']) * 1000 + times['ms']", "def ParseDurationValue(self, allowFraction=True):\n value = self.ParseDIGITRepeat()\n if value is None:\n return None, None\n if self.the_char in \".,\":\n if not allowFraction:\n raise DateTimeError(\n \"fractional component in duration must have lowest order\")\n format = \"n\" + self.the_char + \"n\"\n value = value + self.ParseFraction()\n else:\n format = \"n\"\n return value, format", "def test_str_time_period_msec(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n value_type=\"time_period_msec\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x93,\n 0xC7,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 37831)\n self.assertEqual(sensor.unit_of_measurement(), \"ms\")\n self.assertEqual(sensor.ha_device_class(), None)", "def period(pspec):\n\tw = '|'.join(TimeUnit.names)\n\tm = re.match('^(\\d+)(\\.\\d+)? (%s)s?$' % w, pspec)\n\tif m:\n\t\tistimerange = False\n\telif len(pspec.split(':')) == 2:\n\t\t(start, end) = pspec.split(':')\n\t\tistimerange = True\n\telse:\n\t\traise Exception('Invalid period specification')\n\t\n\tif istimerange:\n\t\tdates = (datetime.strptime(start, '%Y-%m-%d').date(), \n\t\t\tdatetime.strptime(end, '%Y-%m-%d').date())\n\t\treturn DateRangePeriod(dates, 'day')\n\telse:\n\t\tg = m.groups()\n\t\treturn FixedTimePeriod(float(g[0] + (g[1] or '.0')), g[2])", "def get_time_period(value):\n\t\tfor time_period in TimePeriod:\n\t\t\tif time_period.period == value:\n\t\t\t\treturn time_period\n\t\traise ValueError('{} is not a valid TimePeriod'.format(value))", "def parse_duration_string_ns(duration):\n pattern = r'(?P<value>[0-9]+\\.?[0-9]*?)(?P<units>\\D+)'\n matches = list(re.finditer(pattern, duration))\n assert matches, 'Failed to parse duration string %s' % duration\n\n times = {'h': 0, 'm': 0, 's': 0, 'ms': 0, 'us': 0, 'ns': 0}\n for match in matches:\n parsed = match.groupdict()\n times[parsed['units']] = float(parsed['value'])\n\n value_ns = (times['h'] * 60 * 60 + times['m'] * 60 + times['s']) * 1000000000\n value_ns += times['ms'] * 1000000 + times['us'] * 1000 + times['ns']\n\n return value_ns", "def _getDuration(v, line, text):\n if \"/\" in v:\n try:\n return eval(v + \".\")\n except:\n raise ValueError(\"invalid duration value '%s' on line %d: %s\" %\n (v, line, text))\n return float(v)", "def parse_duration(duration):\n command_parse = re.compile(r\"(!mute|/mute) ?(\\d+)? 
?([\\w+\\D]+)?\")\n parsed = command_parse.match(duration.text)\n time = parsed.group(2)\n reason = parsed.group(3)\n\n if not time:\n time = 5\n time = int(time)\n\n if not reason:\n reason = 'for no reason'\n\n until_date = datetime.now() + timedelta(minutes=time)\n return until_date, reason, time", "def parse_duration(duration):\n duration = str(duration).upper().strip()\n\n elements = ELEMENTS.copy()\n\n for pattern in (SIMPLE_DURATION, COMBINED_DURATION):\n if pattern.match(duration):\n found = pattern.match(duration).groupdict()\n del found['time']\n\n elements.update(dict((k, int(v or 0))\n for k, v\n in found.items()))\n\n return datetime.timedelta(days=(elements['days'] +\n _months_to_days(elements['months']) +\n _years_to_days(elements['years'])),\n hours=elements['hours'],\n minutes=elements['minutes'],\n seconds=elements['seconds']) \n \n return ParseError()", "def parse_duration_level(f):\n stem = Path(f).stem\n return stem.split(\"_\")[2]", "def parse_time_ms(time_string):\n try:\n return int(1000 * parse_duration(time_string))\n except:\n logging.exception('Unable to extract seconds from {}'.format(time_string))\n logging.info('Defaulting time to 1 second.')\n return 1000", "def token_period(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"token_period\")", "def period(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"period\")" ]
[ "0.6479936", "0.644268", "0.6255988", "0.6126181", "0.59293336", "0.5887166", "0.5848696", "0.581168", "0.5801859", "0.57852525", "0.57155305", "0.57155305", "0.56989634", "0.5696296", "0.55982697", "0.55982697", "0.5581507", "0.55336004", "0.5522052", "0.55193275", "0.55144316", "0.5496971", "0.54927856", "0.5447475", "0.54466826", "0.54045725", "0.5397136", "0.5367687", "0.53643066", "0.5360508" ]
0.75614196
0
Crack a pattern definition into its type and any grouping.
A pattern consists of the pattern type (e.g. flashing, occulting) and optionally a group designation in parentheses.
The pattern definition could just be the type
>>> parse_pattern('Fl')
('fl', [1])
It could have optional dots marking the abbreviation, these can be discarded
>>> parse_pattern('L.Fl.')
('lfl', [1])
It could have grouping information in parentheses
>>> parse_pattern('Fl(2)')
('fl', [2])
The group could be a composite group.
>>> parse_pattern('Oc(2+1)')
('oc', [2, 1])
def parse_pattern(pattern):
    pattern_type, _, group_spec = pattern.partition('(')
    # Groups are separated by '+' in a composite pattern.
    groups = [
        int(group) for group in group_spec[:-1].split('+')
    ] if group_spec else [1]
    # Some light lists use dots, some don't, just throw them away
    return pattern_type.lower().replace('.', ''), groups
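parse_pattern and parse_period (both shown in this record) together handle the head and tail of a characteristic string. A small illustrative run, using 'Fl(2) R 10s' as a made-up input in the format the docstrings imply:

fragments = 'Fl(2) R 10s'.split()       # ['Fl(2)', 'R', '10s']
print(parse_pattern(fragments.pop(0)))  # ('fl', [2])
print(parse_period(fragments))          # 10000 (milliseconds)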
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_pattern(pattern: NumberPattern | str) -> NumberPattern:\n if isinstance(pattern, NumberPattern):\n return pattern\n\n def _match_number(pattern):\n rv = number_re.search(pattern)\n if rv is None:\n raise ValueError(f\"Invalid number pattern {pattern!r}\")\n return rv.groups()\n\n pos_pattern = pattern\n\n # Do we have a negative subpattern?\n if ';' in pattern:\n pos_pattern, neg_pattern = pattern.split(';', 1)\n pos_prefix, number, pos_suffix = _match_number(pos_pattern)\n neg_prefix, _, neg_suffix = _match_number(neg_pattern)\n else:\n pos_prefix, number, pos_suffix = _match_number(pos_pattern)\n neg_prefix = f\"-{pos_prefix}\"\n neg_suffix = pos_suffix\n if 'E' in number:\n number, exp = number.split('E', 1)\n else:\n exp = None\n if '@' in number and '.' in number and '0' in number:\n raise ValueError('Significant digit patterns can not contain \"@\" or \"0\"')\n if '.' in number:\n integer, fraction = number.rsplit('.', 1)\n else:\n integer = number\n fraction = ''\n\n def parse_precision(p):\n \"\"\"Calculate the min and max allowed digits\"\"\"\n min = max = 0\n for c in p:\n if c in '@0':\n min += 1\n max += 1\n elif c == '#':\n max += 1\n elif c == ',':\n continue\n else:\n break\n return min, max\n\n int_prec = parse_precision(integer)\n frac_prec = parse_precision(fraction)\n if exp:\n exp_plus = exp.startswith('+')\n exp = exp.lstrip('+')\n exp_prec = parse_precision(exp)\n else:\n exp_plus = None\n exp_prec = None\n grouping = parse_grouping(integer)\n return NumberPattern(pattern, (pos_prefix, neg_prefix),\n (pos_suffix, neg_suffix), grouping,\n int_prec, frac_prec,\n exp_prec, exp_plus, number)", "def parse(self, pattern):\n phrasal_pattern = self.convert_parse_tree_to_phrasal_pattern(\n self.parse_tree(pattern))\n return phrasal_pattern", "def parse_argument_group_pattern(content: str) -> (ArgumentGroupPattern, int):\n\n if len(content) == 0:\n raise PatternError(\"content may not be empty\")\n\n if content[0] != \"(\" or content[-1] != \")\":\n raise PatternError(\"argument group pattern must be wrapped '( )'\")\n\n if content[1] == \"!\":\n is_required = True\n offset = 2\n else:\n is_required = False\n offset = 1\n\n arguments = list()\n while offset < len(content) - 1:\n if content[offset].isspace():\n offset += 1\n continue\n\n arg_match = re.match(r\"([\\[<].*[]>])\",\n content[offset::])\n\n if arg_match is not None:\n arg, size = parse_argument_pattern(arg_match.group(1))\n offset += size\n\n if arg.is_required:\n raise PatternError(f\"argument group patterns may not contain required arguments\")\n\n arguments.append(arg)\n continue\n else:\n raise PatternError(f\"could not parse arguments in group '{content}'\")\n\n offset += 1\n\n return ArgumentGroupPattern(is_required, arguments), offset", "def _parse_pattern(cls, pattern, default_pattern: str = \"*\") -> Pattern:\n pattern = pattern or default_pattern\n if pattern is None:\n return None\n\n return Pattern(pattern)", "def parse_pattern_str(pattern: str, parent_layer: nn.Layer) -> Union[None, List[Dict[str, Union[nn.Layer, str, None]]]]:\n\n pattern_list = pattern.split(\".\")\n if not pattern_list:\n msg = f\"The pattern('{pattern}') is illegal. 
Please check and retry.\"\n return None\n\n layer_list = []\n while len(pattern_list) > 0:\n if '[' in pattern_list[0]:\n target_layer_name = pattern_list[0].split('[')[0]\n target_layer_index = pattern_list[0].split('[')[1].split(']')[0]\n else:\n target_layer_name = pattern_list[0]\n target_layer_index = None\n\n target_layer = getattr(parent_layer, target_layer_name, None)\n\n if target_layer is None:\n msg = f\"Not found layer named('{target_layer_name}') specifed in pattern('{pattern}').\"\n return None\n\n if target_layer_index and target_layer:\n if int(target_layer_index) < 0 or int(target_layer_index) >= len(target_layer):\n msg = f\"Not found layer by index('{target_layer_index}') specifed in pattern('{pattern}'). The index should < {len(target_layer)} and > 0.\"\n return None\n\n target_layer = target_layer[target_layer_index]\n\n layer_list.append({\"layer\": target_layer, \"name\": target_layer_name, \"index\": target_layer_index})\n\n pattern_list = pattern_list[1:]\n parent_layer = target_layer\n return layer_list", "def parse(s):\n\n if not s:\n # edge case: null value\n raise hxl.HXLException('Attempt to parse empty tag pattern')\n elif isinstance(s, TagPattern):\n # edge case: already parsed\n return s\n\n result = re.match(TagPattern.PATTERN, s)\n if result:\n tag = '#' + result.group(1).lower()\n include_attributes = set()\n exclude_attributes = set()\n attribute_specs = re.split(r'\\s*([+-])', result.group(2))\n for i in range(1, len(attribute_specs), 2):\n if attribute_specs[i] == '+':\n include_attributes.add(attribute_specs[i + 1].lower())\n else:\n exclude_attributes.add(attribute_specs[i + 1].lower())\n if result.group(3) == '!':\n is_absolute = True\n if exclude_attributes:\n raise ValueError('Exclusions not allowed in absolute patterns')\n else:\n is_absolute = False\n return TagPattern(\n tag,\n include_attributes=include_attributes,\n exclude_attributes=exclude_attributes,\n is_absolute=is_absolute\n )\n else:\n raise hxl.HXLException('Malformed tag: ' + s)", "def _get_regex_for_pattern(self, pattern: bytes):\n # TODO: should blacksheep support \":\" in routes (using escape chars)?\n for c in _escaped_chars:\n if c in pattern:\n pattern = pattern.replace(c, b\"\\\\\" + c)\n\n if b\"*\" in pattern:\n # throw exception if a star appears more than once\n if pattern.count(b\"*\") > 1:\n raise RouteException(\n \"A route pattern cannot contain more than one star sign *. \"\n \"Multiple star signs are not supported.\"\n )\n\n if b\"/*\" in pattern:\n pattern = _route_all_rx.sub(br\"?(?P<tail>.*)\", pattern)\n else:\n pattern = _route_all_rx.sub(br\"(?P<tail>.*)\", pattern)\n\n # support for < > patterns, e.g. /api/cats/<cat_id>\n # but also: /api/cats/<int:cat_id> or /api/cats/<uuid:cat_id> for more\n # granular control on the generated pattern\n if b\"<\" in pattern:\n pattern = _angle_bracket_route_param_rx.sub(\n self._handle_rich_parameter, pattern\n )\n\n # support for mustache patterns, e.g. 
/api/cats/{cat_id}\n # but also: /api/cats/{int:cat_id} or /api/cats/{uuid:cat_id} for more\n # granular control on the generated pattern\n if b\"{\" in pattern:\n pattern = _mustache_route_param_rx.sub(self._handle_rich_parameter, pattern)\n\n # route parameters defined using /:name syntax\n if b\"/:\" in pattern:\n pattern = _route_param_rx.sub(br\"/(?P<\\1>[^\\/]+)\", pattern)\n\n # NB: following code is just to throw user friendly errors;\n # regex would fail anyway, but with a more complex message\n # 'sre_constants.error: redefinition of group name'\n # we only return param names as they are useful for other things\n param_names = []\n for p in _named_group_rx.finditer(pattern):\n param_name = p.group(1)\n if param_name in param_names:\n raise ValueError(\n f\"cannot have multiple parameters with name: \" f\"{param_name}\"\n )\n\n param_names.append(param_name)\n\n if len(pattern) > 1 and not pattern.endswith(b\"*\"):\n # NB: the /? at the end ensures that a route is matched both with\n # a trailing slash or not\n pattern = pattern + b\"/?\"\n return re.compile(b\"^\" + pattern + b\"$\", re.IGNORECASE), param_names", "def load_pattern(pattern, content_type=CONTENT_TYPE_YAML, node_id=None):\n try:\n if not isinstance(pattern, Mapping):\n pattern = load_file(pattern, content_type, node_id)\n if \"config-handler\" in pattern:\n pattern[\"config_handler\"] = pattern[\"config-handler\"]\n del pattern[\"config-handler\"]\n\n # add dummy values to pass validation\n for dummy in [\"definition\", \"name\", \"config_handler\"]:\n if dummy not in pattern:\n pattern[dummy] = dummy\n\n if not validate_pattern(pattern, node_id):\n log.error(\"%s: failed to validate pattern attributes\", node_id)\n return None\n\n pattern[\"node_id\"] = node_id\n return Pattern(**pattern)\n except TypeError as exc:\n log.error(\"%s: failed to load pattern '%s' (%s)\", node_id, pattern, exc)\n\n return None", "def convert_pattern(pattern, pattern_type=None):\n\tif pattern_type == 'regex':\n\t\treturn re.compile(pattern)\n\telif pattern_type == 'wildcard':\n\t\treturn re.compile(fnmatch.translate(pattern))\n\treturn re.compile(re.escape(pattern))", "def _parseStage1(pattern):\n result = []\n counter = 0\n pattern =\\\n pattern.replace('(', \" ( \").replace(')', \" ) \").replace('|', \" | \")\n pattern =\\\n pattern.replace('[', \" [ \").replace(']', \" ] \").replace('*', \" * \")\n pattern = pattern.strip().split()\n if pattern[0] != '(':\n pattern = ['('] + pattern\n pattern = [')'] + pattern\n\n bPattern, _ = closeBrackets(pattern)\n return bPattern", "def parse_argument_pattern(content: str) -> (ArgumentPattern, int):\n if len(content) == 0:\n raise PatternError(\"content may not be empty\")\n\n if content[0] not in \"[<\" or content[-1] not in \"]>\":\n raise PatternError(\"argument pattern must be wrapped in '[ ]' or '< >'\")\n\n open_brace = content[0]\n\n is_required = open_brace == \"<\"\n\n offset = 1\n\n names, size = __parse_names(content[offset::])\n offset += size\n\n is_positional = len(names) == 0\n\n ident, arg_num, size = __parse_var(content[offset::], is_positional)\n offset += size\n\n delim, size = __parse_delim(content[offset::])\n offset += size\n\n if (delim is not None and not (arg_num.quantifier == Quantifier.N and arg_num.count == 1\n or arg_num.quantifier == Quantifier.OPTIONAL)):\n raise PatternError(f\"Only arguments taking 1 or optional values may specify a delimiter\")\n\n try:\n if (close_brace := content[offset]) in \"]>\":\n if open_brace == \"<\" and close_brace != \">\" or 
open_brace == \"[\" and close_brace != \"]\":\n raise PatternError(f\"mismatching brace types, found '{open_brace}' and '{close_brace}'\")\n\n offset += 1\n else:\n raise PatternError(f\"expected '{']' if open_brace == '[' else '>'}' but found '{content[offset]}\")\n except IndexError as err:\n raise PatternError(f\"error parsing arguments pattern: {err}\")\n\n if is_positional and not is_required:\n raise PatternError(\"a positional argument may not be optional, you may specify either '?' or '*' as quantifiers\")\n\n if ident is None and len(names) > 0:\n ident = (max(names, key=lambda l: len(l)).lstrip('-')\n .upper().replace(\"-\", \"_\"))\n\n return ArgumentPattern(ident, arg_num, names, is_positional, is_required, delim), offset", "def __init__(self, pattern, use_regex=False, pid=None, cpu=None):\n parts = bytes(pattern).split(b':')\n if len(parts) == 1:\n parts = [b\"p\", b\"\", parts[0]]\n elif len(parts) == 2:\n parts = [b\"p\", parts[0], parts[1]]\n elif len(parts) == 3:\n if parts[0] == b\"t\":\n parts = [b\"t\", b\"\", b\"%s:%s\" % tuple(parts[1:])]\n if parts[0] not in [b\"p\", b\"t\", b\"u\"]:\n raise Exception(\"Type must be 'p', 't', or 'u', but got %s\" %\n parts[0])\n else:\n raise Exception(\"Too many ':'-separated components in pattern %s\" %\n pattern)\n\n (self.type, self.library, self.pattern) = parts\n if not use_regex:\n self.pattern = self.pattern.replace(b'*', b'.*')\n self.pattern = b'^' + self.pattern + b'$'\n\n if (self.type == b\"p\" and self.library) or self.type == b\"u\":\n libpath = BPF.find_library(self.library)\n if libpath is None:\n # This might be an executable (e.g. 'bash')\n libpath = BPF.find_exe(str(self.library))\n if libpath is None or len(libpath) == 0:\n raise Exception(\"unable to find library %s\" % self.library)\n self.library = libpath\n\n self.pid = pid\n self.cpu = cpu\n self.matched = 0\n self.trace_functions = {} # map location number to function name", "def __init__(self, pattern_type, experimental_scenario, pattern):\n self.pattern_type = pattern_type # if pattern_type=1 --> experimental group, otherwise control group\n self.experimental_scenario = experimental_scenario\n self.pattern = pattern", "def parse_command_pattern(content: str) -> CommandPattern:\n if len(content) == 0:\n raise PatternError(\"content may not be empty\")\n\n command, sub_commands, offset = parse_commands(content)\n\n arguments = list()\n groups = list()\n\n while offset < len(content):\n if content[offset].isspace():\n offset += 1\n continue\n\n # check for an argument\n arg_match = re.match(r\"([\\[<].*[]>])\",\n content[offset::])\n\n if arg_match is not None:\n arg, size = parse_argument_pattern(arg_match.group(1))\n offset += size\n\n arguments.append(arg)\n continue\n\n group_match = re.match(r\"(\\(.*?\\))\",\n content[offset::])\n\n if group_match is not None:\n group, size = parse_argument_group_pattern(group_match.group(1))\n offset += size\n\n groups.append(group)\n continue\n\n raise PatternError(f\"unexpected value '{content[offset]}'\")\n\n return CommandPattern(command, sub_commands, arguments, groups)", "def pattern(self):\n return self.get_data(\"pattern\")", "def add_pattern(self, name, pattern=None):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(pattern, (list, np.ndarray, Pattern)), \"pattern must be a list or Pattern\"\n \n if not isinstance(pattern, Pattern):\n pattern = Pattern(name, multipliers=pattern, 
time_options=self._options.time) \n else: #elif pattern.time_options is None:\n pattern.time_options = self._options.time\n if pattern.name in self._data.keys():\n raise ValueError('Pattern name already exists')\n self[name] = pattern", "def pattern_factory(self):\n\t\treturn self.args[1]", "def parse_from_regex(string,pattern,fields):\n\n string = string.replace('\\\\','/') # USE POSIX PLEASE\n num_groups = flat_paren_counter(pattern)\n if isinstance(fields,str):\n fields = [fields]\n num_fields = len(fields)\n if not num_fields == num_groups:\n return {}\n match = re.search(pattern,string)\n if not num_groups == len(match.groups()):\n return {}\n \n l = []\n \n for field,value in zip(fields,list(match.groups())):\n d = nested_notation_to_tree(field,value)\n l.append(d)\n return deep_merge_N(l)", "def convertPattern(pattern, sign):\r\n\r\n # Check for include vs exclude patterns.\r\n if pattern[:2] == \"+ \":\r\n pattern = pattern[2:]\r\n sign = \"+\"\r\n elif pattern[:2] == \"- \":\r\n pattern = pattern[2:]\r\n sign = \"-\"\r\n\r\n # Express windows, mac patterns in unix patterns (rsync.py extension).\r\n separator = os.path.normpath(\"/\")\r\n if separator != \"/\":\r\n pattern = re.sub(re.escape(separator), \"/\", pattern)\r\n\r\n # If pattern contains '/' it should match from the start.\r\n temp = pattern\r\n if pattern[0] == \"/\":\r\n pattern = pattern[1:]\r\n if temp[-1] == \"/\":\r\n temp = temp[:-1]\r\n\r\n # Convert pattern rules: ** * ? to regexp rules.\r\n pattern = re.escape(pattern)\r\n pattern = string.replace(pattern, \"\\\\*\\\\*\", \".*\")\r\n pattern = string.replace(pattern, \"\\\\*\", \"[^/]*\")\r\n pattern = string.replace(pattern, \"\\\\*\", \".*\")\r\n\r\n if \"/\" in temp:\r\n # If pattern contains '/' it should match from the start.\r\n pattern = \"^\\\\/\" + pattern\r\n else:\r\n # Else the pattern should match the all file or folder name.\r\n pattern = \"\\\\/\" + pattern\r\n\r\n if pattern[-2:] != \"\\\\/\" and pattern[-2:] != \".*\":\r\n # File patterns should match also folders.\r\n pattern = pattern + \"\\\\/?\"\r\n\r\n # Pattern should match till the end.\r\n pattern = pattern + \"$\"\r\n return (sign, pattern)", "def _match_fn_name_pattern(\n self, fn_name, pattern\n ) -> Union[str, Tuple[str, str], None]:\n if isinstance(pattern, RegexPattern):\n m = pattern.search(fn_name)\n groups = m and m.groups()\n if groups:\n if len(groups) == 1:\n return groups[0]\n if len(groups) > 2:\n raise ValueError(\n f\"The `out_pattern` {pattern} matched on '{fn_name}' >2 groups: {groups}\"\n )\n return sfxed(*reversed(groups))\n elif callable(pattern):\n return pattern(fn_name)\n elif fn_name.startswith(pattern):\n return fn_name[len(pattern) :]", "def compile(self, name, pattern):\n try:\n return self.get_pattern(name)\n except KeyError:\n return self.store_pattern(name, re.compile(pattern))", "def pattern(self, pattern):\n if pattern is None:\n raise ValueError(\"Invalid value for `pattern`, must not be `None`\") # noqa: E501\n\n self._pattern = pattern", "def create_pattern_function(self):\n\n type_regex = \"(?:\\w+(?:\\:\\:)?)+\"\n regex = \"^(?P<indent>\\s*)(?P<virtual>virtual )?(?P<function_return>(?:const )?\" + type_regex + \"(?P<subtype><?\" + type_regex + \">?)?) 
(?P<function_name>.*)\\((?P<args>.*)\\)(?P<const_qualifier> const)?(?: = 0)?;\\n$\"\n return regex", "def pattern(self):\n return self[\"pattern\"]", "def pattern(self):\n return self[\"pattern\"]", "def pattern(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"pattern\")", "def compile_pattern(self, input, debug=False, with_tree=False):\n tokens = tokenize_wrapper(input)\n try:\n root = self.driver.parse_tokens(tokens, debug=debug)\n except parse.ParseError as e:\n raise PatternSyntaxError(str(e)) from None\n if with_tree:\n return self.compile_node(root), root\n else:\n return self.compile_node(root)", "def convert_pattern_format(text):\n parsed_text = []\n # parse text via Pattern's parser\n pattern_parsed_text = Text(parse(text, relations=True, lemmata=True))\n for sentence in pattern_parsed_text:\n s = Sentence()\n s.string = remove_blanks(sentence.string)\n for word in sentence:\n # Patterns tags for each word in the sentence are stored in a new Word-object\n w = Word()\n w.string = word.string\n w.lemma = word.lemma\n w.index = word.index\n w.tag = word.type\n w.entity = \"\"\n # each word is appended to a Sentence-object\n s.words.append(w)\n # each Sentence-object is appended to an array\n parsed_text.append(s)\n return parsed_text", "def pattern(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pattern\")", "def pattern(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pattern\")" ]
[ "0.6776147", "0.61842924", "0.6145672", "0.60469675", "0.582925", "0.576661", "0.5751908", "0.5672661", "0.564937", "0.55928665", "0.5582709", "0.54133475", "0.5398375", "0.53610086", "0.5338687", "0.5337617", "0.53008944", "0.5261952", "0.5254372", "0.52399373", "0.5239051", "0.52385855", "0.5235806", "0.52307326", "0.52307326", "0.52240825", "0.5202667", "0.5192095", "0.5186522", "0.5186522" ]
0.7442205
0
Given a list of light states, collapse any adjacent entries that have the same state.
If there are no adjacent matching states, there is no change to the output
>>> collapse_states([('R', 1), ('Y', 1), ('R', 1)])
[('R', 1), ('Y', 1), ('R', 1)]
Adjacent states are collapsed, summing their durations
>>> collapse_states([('R', 1), ('R', 1), ('Y', 1)])
[('R', 2), ('Y', 1)]
>>> collapse_states([('R', 1), ('R', 2), ('R', 3), ('Y', 1)])
[('R', 6), ('Y', 1)]
def collapse_states(states):
    new_states = states[:1]
    for state in states[1:]:
        last_state = new_states[-1]
        if state[0] == last_state[0]:
            new_states[-1] = (state[0], last_state[1] + state[1])
        else:
            new_states.append(state)
    return new_states
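A further illustration with arbitrary millisecond durations (not taken from the record): adjacent same-colour entries merge, everything else passes through unchanged.

raw = [('R', 500), ('D', 500), ('R', 500), ('R', 1000)]
print(collapse_states(raw))  # [('R', 500), ('D', 500), ('R', 1500)]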
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def states_filter(state):\n if state.count(0) < state.count(1) or state.count(1) < state.count(0) - 1:\n return False\n\n rows = [[i, i+1, i+2] for i in [0, 3, 6]]\n cols = [[i, i+3, i+6] for i in [0, 1, 2]]\n\n winners = set()\n\n for row_indexes in rows:\n row = [state[ind] for ind in row_indexes]\n if row[0] >= 0 and are_same(row):\n winners.add(row[0])\n\n for col_indexes in cols:\n col = [state[ind] for ind in col_indexes]\n if col[0] >= 0 and are_same(col):\n winners.add(col[0])\n\n # We don't look at diags\n return len(winners) <= 1", "def collapseRow(self, lst):\n collapsible = False\n length = len(lst)\n base = ecount = 0\n last_merge = None\n for i in range(length):\n if lst[i] == 0:\n ecount += 1\n elif base != i:\n if lst[i] == lst[base] and base != last_merge:\n lst[base] *= 2\n self.score += lst[base]\n lst[i] = 0\n last_merge = base\n collapsible = True\n elif lst[base] == 0:\n lst[base] = lst[i]\n lst[i] = 0\n collapsible = True\n elif i > base+1:\n lst[base+1] = lst[i]\n lst[i] = 0\n collapsible = True\n\n if lst[base+1] != 0:\n base += 1\n if ecount == length:\n collapsible = True\n\n return lst, collapsible", "def merged_transitions(self):\n from copy import deepcopy\n def key(transition):\n return (transition.to_state, transition.word_out)\n\n new = self.empty_copy()\n changed = False\n state_dict = {}\n memo = {}\n\n for state in self.states():\n new_state = deepcopy(state,memo)\n state_dict[state] = new_state\n new.add_state(new_state)\n\n for state in self.states():\n grouped_transitions = itertools.groupby(sorted(state.transitions, key=key), key=key)\n for (to_state, word_out), transitions in grouped_transitions:\n transition_list = list(transitions)\n changed = changed or len(transition_list) > 1\n word_in = 0\n for transition in transition_list:\n if hasattr(transition.word_in, '__iter__') and len(transition.word_in) == 1:\n word_in += transition.word_in[0]\n else:\n raise TypeError('%s does not have a list of length 1 as word_in' % transition)\n new.add_transition((state, to_state, word_in, word_out))\n\n if changed:\n return new\n else:\n return self", "def remove_unreachable_states(mona_data):\n\n # Remove all cases in the transition dict where the state is either a source or a destination\n def remove_state_from_transition_dict(transition_dict, state):\n return {(source, dest): label\n for (source, dest), label in transition_dict.items()\n if source != state and dest != state}\n\n # Decrements a state name if the value < filter state\n def filter_and_transform(state, filter_state):\n return str((int(state) - 1)) if int(state) > int(filter_state) else str(state)\n\n # decrement state name\n def decrement_state_name(transition_dict, state):\n return {(filter_and_transform(source, state), filter_and_transform(dest, state)): label\n for (source, dest), label in transition_dict.items()}\n\n states_to_remove = []\n\n # As per convention, only rejecting states can be unreachable\n for state in mona_data['rejecting_states']:\n\n # Check if this state cannot reach an accepting/reporting state\n # For now, we assume that only states that are not reporting and have\n # no outgoing (no self-referential) edges, cannot reach reporting\n unreachable = True\n for (source, dest), label in mona_data['transition_dict'].items():\n if source == state and dest != state:\n unreachable = False\n \n # If unreachable, remove the state\n if unreachable:\n states_to_remove.append(state)\n\n\n for state in states_to_remove:\n\n # Remove state from states\n assert state in 
mona_data['states']\n mona_data['states'].remove(state)\n mona_data['states'] = [filter_and_transform(state_i, state) for state_i in mona_data['states']]\n\n # Reduce num_states by one\n mona_data['num_states'] -= 1\n\n # Remove unreachable state and update rejecting states\n assert state in mona_data['rejecting_states']\n mona_data['rejecting_states'].remove(state)\n mona_data['rejecting_states'] = set([filter_and_transform(state_i, state) for state_i in mona_data['rejecting_states']])\n\n # Remove unreachable state and update relevant transitions\n mona_data['transition_dict'] = remove_state_from_transition_dict(mona_data['transition_dict'], state)\n mona_data['transition_dict'] = decrement_state_name(mona_data['transition_dict'], state)\n \n # Remove unreachable state and update dont_care transitions\n if state in mona_data['dont_care_states']:\n mona_data['dont_care_states'].remove(state)\n mona_data['dont_care_states'] = set([filter_and_transform(state_i, state) for state_i in mona_data['dont_care_states']])\n\n #Update Accepting states\n mona_data['accepting_states'] = [filter_and_transform(state_i, state) for state_i in mona_data['accepting_states']]", "def _verify_all_states_reachable(states_list):\n\n # This queue stores state names.\n processed_queue = []\n curr_queue = [states_list[0]['name']]\n\n while curr_queue:\n curr_state = curr_queue[0]\n curr_queue = curr_queue[1:]\n\n if curr_state in processed_queue:\n continue\n\n processed_queue.append(curr_state)\n\n curr_state_ind = next(ind for ind, state in enumerate(states_list)\n if state['name'] == curr_state)\n\n for handler in states_list[curr_state_ind]['widget']['handlers']:\n for rule in handler['rule_specs']:\n dest_state = rule['dest']\n if (dest_state not in curr_queue and\n dest_state not in processed_queue and\n dest_state != feconf.END_DEST):\n curr_queue.append(dest_state)\n\n if len(states_list) != len(processed_queue):\n unseen_states = list(\n set([s['name'] for s in states_list]) - set(processed_queue))\n raise Exception('The following states are not reachable from the '\n 'initial state: %s' % ', '.join(unseen_states))", "def collapse(self):\r\n qubits, _ = self._input_indices\r\n if len(qubits) > 0:\r\n raise ValueError(\"Currently only support collapsing states.\")\r\n qubits, _ = self._output_indices\r\n if self.is_pure:\r\n state_vector = self.tensor_pure.contract()\r\n self.operations_by_name = {}\r\n self.append(PureState(len(qubits), np.array(state_vector)), qubits)\r\n else:\r\n state_matrix = self.tensor_density.contract()\r\n self.operations_by_name = {}\r\n self.append(State(len(qubits), np.array(state_matrix)), qubits)\r\n return self", "def prune_states(state: PushState) -> PushState:\n if state and not state[-1]:\n return PushGame.prune_states(state[:-1])\n else:\n return state", "def collapse_date_change(self, list_of_dates, list_of_changes):\r\n # Get unique dates and sort them\r\n unique_dates = list(set(list_of_dates))\r\n sorted_unique_dates = list(np.flipud(np.sort(unique_dates)))\r\n\r\n # Zip date lists and changes lists to relate changes to dates\r\n zipped_date_change = list(zip(list_of_dates, list_of_changes))\r\n\r\n # Use sorted list of unique dates to find all account changes on each\r\n # date, and grab a total change for the date\r\n collapsed_dates = []\r\n collapsed_changes = []\r\n for date_temp in sorted_unique_dates:\r\n change_on_date = [\r\n tup[1] for tup in zipped_date_change if tup[0] == date_temp]\r\n collapsed_dates.append(date_temp)\r\n 
collapsed_changes.append(round(sum(change_on_date), 2))\r\n\r\n return(collapsed_dates, collapsed_changes)", "def getAbsorbingStates(m):\n\t\n a=[]\n for r in range(len(m)):\n if(sum(m[r])==0): a.append(r)\n return a", "def successorStates(self, state):\r\n\r\n successors = []\r\n\r\n for action in Directions.CARDINAL:\r\n x, y = state\r\n dx, dy = Actions.directionToVector(action)\r\n nextx, nexty = int(x + dx), int(y + dy)\r\n\r\n if (not self.walls[nextx][nexty]):\r\n nextState = (nextx, nexty)\r\n cost = self.costFn(nextState)\r\n\r\n successors.append((nextState, action, cost))\r\n\r\n # Bookkeeping for display purposes (the highlight in the GUI).\r\n self._numExpanded += 1\r\n if (state not in self._visitedLocations):\r\n self._visitedLocations.add(state)\r\n self._visitHistory.append(state)\r\n\r\n return successors", "def reduce_all(\n state: State,\n action: Action,\n next_state: State,\n *,\n terminating_functions: Sequence[TerminatingFunction],\n) -> bool:\n return reduce(\n state,\n action,\n next_state,\n terminating_functions=terminating_functions,\n reduction=all,\n )", "def moves(self) -> List[List[PushState]]:\n # seen_moves = set()\n possible_moves = [[], []]\n\n if state := self.prune_states(self.state): # Could return None\n size = len(self.prune_states(state))\n else:\n return possible_moves\n\n for count, each_slot in enumerate(state):\n # for count, each_slot in reversed(list(enumerate(state))):\n if each_slot == \"L\" and not possible_moves[0]:\n next_state = deepcopy(state)\n next_state = tuple(self.push(list(next_state), count))\n next_state = self.prune_states(next_state)\n\n # if next_state not in seen_moves:\n # seen_moves.add(next_state)\n possible_moves[0].append(next_state)\n\n elif each_slot == \"R\" and not possible_moves[1]:\n next_state = deepcopy(state)\n next_state = tuple(self.push(list(next_state), count))\n next_state = self.prune_states(next_state)\n\n # if next_state not in seen_moves:\n # seen_moves.add(next_state)\n possible_moves[1].append(next_state)\n\n if possible_moves[0] and possible_moves[1]:\n break\n\n return possible_moves", "def collapse_same_day_off(list_dt_dt):\n collapsed_list = []\n\n list_iter = iter(list_dt_dt)\n\n # read the first row\n previous = next(list_iter)\n\n collapsed_list.append(previous)\n\n count_rows_skipped = 0\n\n for row in list_iter:\n skipped = False\n while previous[1] + datetime.timedelta(seconds=1) == row[0]: # compare the end of the previous row with the\n # start of the current row and if they are one second apart, then enter the condition to allow the rows\n # to be collapsed (note that this has not been tested where the rows do not go over midnight)\n skipped = True\n previous = row\n row = next(list_iter)\n count_rows_skipped += 1\n\n if skipped:\n collapsed_list[-1][1] = previous[1] # change the end time of the previous row to be the end time of the\n # current row (this has already been appended to the list, so it needs to be changed there)\n collapsed_list.append([row[0], row[1]])\n else:\n collapsed_list.append([row[0], row[1]])\n\n previous = row\n\n print('Rows combined:', count_rows_skipped)\n\n return collapsed_list", "def strip_states(self, states):\n\n stripped_states_by_task_key = {}\n\n def strip_state(original_state):\n \"\"\"Returns a stripped copy of a TaskState.\"\"\"\n\n task_key = original_state.task_key\n if task_key in stripped_states_by_task_key:\n return stripped_states_by_task_key[task_key]\n\n assert original_state in self.all_states\n assert original_state not in 
self.non_serializable_states\n\n # Make a copy of the TaskState, which we'll strip down to make it\n # easier to serialize.\n # (This is a shallow copy, so we'll make sure to avoid mutating any of\n # its member variables.)\n stripped_state = copy.copy(original_state)\n stripped_states_by_task_key[task_key] = stripped_state\n\n # Strip out data cached in memory -- we can't necessarily pickle it, so\n # we need to get rid of it before trying to transmit this state to\n # another process.\n stripped_state._result = None\n\n # External dependency states are expected to be already completed, so we\n # don't need to include their task information or any of their dependencies.\n if original_state in self.external_dependency_states:\n stripped_state.task = None\n stripped_state.func_attrs = None\n stripped_state.dep_states = []\n\n # Otherwise, we'll recursively strip all the dependency states as well.\n else:\n stripped_state.dep_states = [\n strip_state(dep_state) for dep_state in original_state.dep_states\n ]\n\n # We also strip and include any followup states.\n stripped_state.followup_states = [\n strip_state(followup_state)\n for followup_state in original_state.followup_states\n ]\n\n return stripped_state\n\n return [strip_state(state) for state in states]", "def successorStates(self, state):\n currentState = state[1]\n successors = []\n for action in Directions.CARDINAL:\n x, y = state[0] # currentPosition\n print(\"State: {}\".format(state[0]))\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n hitsWall = self.walls[nextx][nexty]\n\n # Implement a successor discovery, check if any corners are satisfied\n # and update values as they are satisfied\n if (not hitsWall):\n successorsState = []\n nextxy = (nextx, nexty)\n if nextxy == self.corners[0]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[0])\n if nextxy == self.corners[1]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[1])\n if nextxy == self.corners[2]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[2])\n if nextxy == self.corners[3]:\n successorsState.append(True)\n else:\n successorsState.append(currentState[3])\n # Put all updated values of 4 corners to a variable\n successorPost = (successorsState[0], successorsState[1],\n successorsState[2], successorsState[3])\n # Append to go to the next move\n successors.append(((nextxy, successorPost), action, 1))\n\n self._numExpanded += 1 # Count the number of nodes expanded\n return successors", "def states_to_batch(previous_states_list, states_list, augseq, previous_states_distances, model_height, model_width, model_prev_height, model_prev_width):\n assert isinstance(previous_states_list, list)\n assert isinstance(states_list, list)\n assert isinstance(previous_states_list[0], list)\n assert isinstance(states_list[0], list)\n assert len(previous_states_list) == len(states_list)\n\n B = len(states_list)\n H, W = model_height, model_width\n Hp, Wp = model_prev_height, model_prev_width\n\n nb_prev_load = max(previous_states_distances)\n nb_future_states = len(states_list[0]) - 1\n nb_timesteps = nb_prev_load + 1 + nb_future_states\n #images = np.zeros((nb_timesteps, B, H, W, 3), dtype=np.uint8)\n #images_gray = np.zeros((nb_timesteps, B, Hp, Wp), dtype=np.float32)\n images_by_timestep = np.zeros((1+nb_future_states, B, H, W, 3), dtype=np.float32)\n images_gray = np.zeros((nb_timesteps, B, Hp, Wp), dtype=np.float32)\n multiactions = [[] for i in 
xrange(nb_timesteps)]\n rewards = np.zeros((nb_timesteps, B), dtype=np.float32)\n speeds = np.zeros((nb_timesteps, B), dtype=np.float32)\n is_reverse = np.zeros((nb_timesteps, B), dtype=np.float32)\n steering_wheel = np.zeros((nb_timesteps, B), dtype=np.float32)\n steering_wheel_raw = np.zeros((nb_timesteps, B), dtype=np.float32)\n\n augseqs_det = [augseq.to_deterministic() for _ in xrange(len(states_list))]\n\n for b, (previous_states, states) in enumerate(zip(previous_states_list, states_list)):\n augseq_det = augseqs_det[b]\n\n all_states = previous_states + states\n for t, state in enumerate(all_states):\n imgy = cv2.cvtColor(state.screenshot_rs, cv2.COLOR_RGB2GRAY)\n imgy_rs = downscale(imgy, Hp, Wp)\n imgy_rs_aug = augseq_det.augment_image(imgy_rs)\n images_gray[t, b, ...] = imgy_rs\n\n multiactions[t].append(state.multiaction)\n rewards[t, b] = state.reward\n if state.speed is not None:\n speeds[t, b] = state.speed\n if state.is_reverse is not None:\n is_reverse[t, b] = int(state.is_reverse)\n if state.steering_wheel_cnn is not None:\n steering_wheel[t, b] = state.steering_wheel_cnn\n if state.steering_wheel_raw_cnn is not None:\n steering_wheel_raw[t, b] = state.steering_wheel_raw_cnn\n images_gray = images_gray[..., np.newaxis]\n\n for b, states in enumerate(states_list):\n augseq_det = augseqs_det[b]\n\n for i, state in enumerate(states):\n state = states[i]\n images_by_timestep[i, b, ...] = augseq_det.augment_image(downscale(state.screenshot_rs, H, W))\n\n nb_prev_per_img = len(previous_states_distances)\n images_prev_by_timestep = np.zeros((1+nb_future_states, B, Hp, Wp, nb_prev_per_img), dtype=np.float32)\n for t in xrange(1 + nb_future_states):\n indices = [nb_prev_load+t-d for d in previous_states_distances]\n prev = images_gray[indices]\n prev = prev.transpose((1, 2, 3, 4, 0)).reshape((B, Hp, Wp, nb_prev_per_img))\n images_prev_by_timestep[t] = prev\n images_by_timestep = (images_by_timestep.astype(np.float32) / 255.0).transpose((0, 1, 4, 2, 3))\n images_prev_by_timestep = (images_prev_by_timestep.astype(np.float32) / 255.0).transpose((0, 1, 4, 2, 3))\n\n return BatchData(nb_prev_load, images_by_timestep, images_prev_by_timestep, multiactions, rewards, speeds, is_reverse, steering_wheel, steering_wheel_raw, previous_states_distances)", "def keep_only_positive_unique_sequences(self) -> None:\n\n already_visited_sequences = set()\n for s1, current_state_sequence in enumerate(self.mcts_statesequences):\n assert current_state_sequence.exctract_and_do_hash_analysis is False\n\n # Ignore empty sequences\n if current_state_sequence.status != -1:\n seq_key = \"_\".join([x.state_id for x in current_state_sequence.states])\n\n if seq_key not in already_visited_sequences:\n already_visited_sequences.add(seq_key)\n current_state_sequence.exctract_and_do_hash_analysis = True", "def next_possible_states(self, state):\n # For each node, generate all possible single moves\n def make_new_state(state, src, dest):\n if self._is_valid_dest(state, dest):\n new_state = BaggageState(state.configuration, state.moves+1,\n state.bins_per_cart)\n new_state.move_baggage(src, dest)\n return new_state\n return False\n\n for src, _ in enumerate(state):\n if self._is_valid_src(state, src):\n for dest, _ in enumerate(state):\n new_state = make_new_state(state, src, dest)\n if new_state:\n yield new_state", "def _collapse(cls, id_list):\n\n # XXX doesn't collapse expanded selectors such as /foo/xxx,/bar/yyy\n # properly\n raise NotImplemented('unfinished method - should eventually replace collapse()')\n\n # 
Can only collapse list identifiers that all have the same number of\n # levels:\n assert len(set(map(len, id_list))) == 1\n\n # Collect all tokens for each level:\n levels = [[] for i in range(max(map(len, id_list)))]\n for i in range(len(id_list)):\n for j in range(len(id_list[i])):\n if not(id_list[i][j] in levels[j]):\n levels[j].append(id_list[i][j])\n\n def collapse_level(level):\n \"\"\"\n Recursively called function to collapse all values in a single level.\n \"\"\"\n\n # type_set = set(map(type, level))\n # if type_set in set([int]):\n if all(map(lambda x: isinstance(x, (int, long)), level)):\n\n # If a level only contains consecutive integers, convert it into an\n # interval:\n level.sort()\n if cls.are_consecutive(level):\n return ['[%s:%s]' % (min(level), max(level)+1)]\n\n # If a level contains nonconsecutive integers, convert it into a\n # list:\n else:\n return ['['+','.join([str(i) for i in level])+']']\n\n elif all(map(lambda x: isinstance(x, basestring), level)):\n # elif type_set in set([str]):\n if len(level) == 1:\n return level\n else:\n return ['['+','.join([s for s in level])+']']\n else:\n level_int = sorted([x for x in level if isinstance(x, (int, long))])\n level_str = sorted([x for x in level if isinstance(x, basestring)])\n return collapse_level(level_int)+collapse_level(level_str)\n\n # If a level contains multiple string AND integer tokens, convert it to\n # a list:\n collapsed_list = []\n for level in levels:\n collapsed_list.append(collapse_level(sorted(level)))\n selector_list = []\n for t in itertools.product(*collapsed_list):\n selector = ''\n for s in t:\n if s[0] == '[':\n selector += s\n else:\n selector = selector + '/' + s\n selector_list.append(selector)\n return ','.join(selector_list)", "def remove_sites_from_seqs(\n model,\n seqs,\n states_to_remove,\n start_states\n):\n num_sites = 0\n removed = 0\n sites = [ ]\n for s, seq in enumerate( seqs ):\n log_P_star, q_star = model.viterbi( seq )\n for i, q in enumerate( q_star ):\n if q in start_states:\n num_sites += 1\n sites.append( ( s, i ) )\n if q in states_to_remove:\n removed += 1\n seq[ i ] = model.M #make this base unknown\n return num_sites, removed", "def collapse_short_edges(self,l_thresh=1.0):\n l = self.edges_length()\n to_collapse = np.nonzero(l<l_thresh)[0]\n\n for j in to_collapse:\n print( \"Collapsing edge\",j)\n self.collapse_edge(j)", "def clean_tmatrix(transition_matrix, rm_absorbing=True):\n t_matrix = deepcopy(transition_matrix)\n n_states = len(transition_matrix)\n\n # Removing the non-visited states and absorbing states\n removed_states = []\n for index in range(n_states - 1, -1, -1):\n if not any(t_matrix[index]): # non-visited\n t_matrix = np.delete(t_matrix, index, axis=1)\n t_matrix = np.delete(t_matrix, index, axis=0)\n removed_states.append(index)\n elif t_matrix[index, index] == 1.0: # absorbing state\n if not all([t_matrix[index, j] == 0.0 for j in range(n_states) if j != index]):\n raise ValueError(\n \"The sum of the elements in a row of the \\\n transition matrix must be one\"\n )\n t_matrix = np.delete(t_matrix, index, axis=1)\n t_matrix = np.delete(t_matrix, index, axis=0)\n removed_states.append(index)\n\n # Renormalizing just in case\n t_matrix = normalize_markov_matrix(t_matrix)\n\n return t_matrix, removed_states", "def collapse(L):\n output = \"\"\n for s in L:\n output = output + s\n return output", "def collapse(L):\n output = \"\"\n for s in L:\n output = output + s\n return output", "def collapse(L):\n output = \"\"\n for s in L:\n output = output + s\n 
return output", "def collapse(L):\n output = \"\"\n for s in L:\n output = output + s\n return output", "def get_successors(state): \n \n child_states = []\n \n size = len(state)\n i = 0\n j = 0\n for i in range (size):\n if 0 in state[i]:\n for j in range (size):\n if state[i][j] == 0:\n break \n break\n\n if j != size-1:\n child_states.append ((\"Left\", swap_cells(state, i, j, i, j+1)))\n if j != 0:\n child_states.append ((\"Right\", swap_cells(state, i, j, i, j-1)))\n if i != size-1:\n child_states.append ((\"Up\", swap_cells(state, i, j, i+1, j)))\n if i != 0:\n child_states.append ((\"Down\", swap_cells(state, i, j, i-1, j)))\n \n return child_states", "def null_closure(self, states):\n closure = list(states)\n unchecked = list(states)\n while unchecked:\n state = unchecked.pop()\n null_transitions = self.move([state], NULL)\n for transition in null_transitions:\n if transition not in closure:\n closure.append(transition)\n unchecked.append(transition)\n return sorted(closure)", "def transition_to_state(state):\n\n # Clear our \"time-in-state\" counter.\n m.d.ss += cycles_in_state.eq(0)\n\n # If we have any additional entry conditions for the given state, apply them.\n if state in tasks_on_entry:\n m.d.ss += tasks_on_entry[state]\n\n m.next = state", "def transitions(self, state):\n if len(set(state)) < len(state):\n yield self.STOP_STATE\n return\n for hidx in xrange(self.num_players):\n for lidx in xrange(hidx):\n (lower, higher) = (state[lidx], state[hidx])\n yield self.makestate(((2*lower) if (i == lidx) else ((higher - lower) if (i == hidx) else s)) for (i, s) in enumerate(state))" ]
[ "0.52173054", "0.5162994", "0.50791174", "0.50559324", "0.4989781", "0.4952426", "0.4899571", "0.48871356", "0.48810527", "0.48748165", "0.48466158", "0.47453317", "0.47329503", "0.47069013", "0.46448886", "0.46182868", "0.4606048", "0.4591518", "0.45846918", "0.45573312", "0.45447987", "0.45414752", "0.45170528", "0.45170528", "0.45170528", "0.45170528", "0.4503505", "0.44815755", "0.44666088", "0.44518787" ]
0.7746325
0
The Fixed pattern is simply an always-on light in the given colour. groups and period are irrelevant.
def fixed(_groups, colour, _period): return [(colour, 1)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_pattern(colors=('green', 'blue', 'red')): # (10)\n for i in range(0, int(ceil(float(NUM_LEDS)/float(len(colors))))):\n for color in colors:\n push_color(color)", "def fill(self, color):", "def fill(self, colour: int, /) -> None:", "def occulting(groups, colour, period):\n if groups == [1]:\n return [\n ('Off', 1000),\n (colour, period - 1000)\n ]\n return light_sequence(groups, 'Off', colour, period, 500, 1000)", "def long_flash(groups, colour, period):\n if groups == [1]:\n return [\n (colour, 2000),\n ('Off', period - 2000)\n ]\n return light_sequence(groups, colour, 'Off', period, 2000, 3000)", "def flash(groups, colour, period):\n\n if groups == [1]:\n if period <= 2000:\n raise ValueError(\n \"The cycle period for a flash must be longer than 2 seconds\"\n )\n\n return [\n (colour, 1000),\n ('Off', period-1000)\n ]\n\n return light_sequence(groups, colour, 'Off', period, 500, 1000)", "def seasonal_pattern(season_time):\n\treturn np.where(season_time < 0.4,\n\t\t\t\t\tnp.cos(season_time * 2 * np.pi),\n\t\t\t\t\t1 / np.exp(3* season_time))", "def colour(z, i):\n if abs(z) < self.threshold:\n return 0, 0, 0\n v = np.log2(i + self.threshold - np.log2(np.log2(abs(z)))) / self.threshold\n if v < 1.0:\n return v ** b1, v ** b2, v ** b3 # coloured tones\n else:\n v = max(0, 2 - v)\n return v ** r1, v ** r2, v ** r3 # sepia tones", "def colour(z, i):\n if abs(z) < self.threshold:\n return self.background\n v = np.log2(i + self.threshold - np.log2(np.log2(abs(z)))) / self.threshold\n if v < 1.0:\n return v ** b1, v ** b2, v ** b3 # background\n else:\n v = max(0, 2 - v)\n return v ** r1, v ** r2, v ** r3 # main tones", "def seasonal_pattern(season_time):\r\n return np.where(season_time < 0.4,\r\n np.cos(season_time * 2 * np.pi),\r\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\r\n return np.where(season_time < 0.4,\r\n np.cos(season_time * 2 * np.pi),\r\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))", "def lighter(clr, f=1/3):\n gaps = [f*(1 - val) for val in clr]\n new_clr = [val + gap for gap, val in zip(gaps, clr)]\n return new_clr", "def calculate_fg(color, alpha, mean_f , inv_cov_f, mean_b, inv_cov_b, sigma_d):\n\n a_11 = inv_cov_f + (alpha**2 / sigma_d**2) * np.identity(3)\n a_22 = inv_cov_b + ((1 - alpha)**2 / sigma_d**2) * np.identity(3)\n a_12 = a_21 = (alpha * (1 - alpha) / sigma_d**2) * np.identity(3)\n\n b_1 = np.dot(inv_cov_f, mean_f) + (alpha / sigma_d**2) * color\n b_2 = np.dot(inv_cov_b, mean_b) + ((1 - alpha) / sigma_d**2) * color\n\n l = np.empty([6,6])\n l[0] = np.append(a_11[0], a_12[0])\n l[1] = np.append(a_11[1], a_12[1])\n l[2] = np.append(a_11[2], a_12[2])\n l[3] = np.append(a_21[0], a_22[0])\n l[4] = np.append(a_21[1], a_22[1])\n l[5] = np.append(a_21[2], a_22[2])\n r = np.append(b_1, b_2)\n\n return np.split(np.linalg.solve(l, r), 2)", "def setShadingPattern(pattern,outline=1):\n dislin.shdpat(shadingdict[pattern])\n if not outline:\n dislin.noarln()", "def make_pattern(self):\n probability = random.SystemRandom().random()\n if probability < 0.1:\n _pattern = [0 for x in range(32)]\n elif probability > 0.5:\n pattern_num = 
SECURE_RANDOM.choice(CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.80:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n elif _probability < 0.40:\n _offset = random.SystemRandom().randint(2, 16)\n _pattern = [1 if (x == _offset) or (x % pattern_num == _offset) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n pattern_num = SECURE_RANDOM.choice(INNER_CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.50:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n\n if not self.global_swing:\n _probability = random.SystemRandom().random()\n if _probability > 0.3:\n _pattern.extend([random.SystemRandom().uniform(0.01, 0.5), random.SystemRandom().randint(1, 14), 0])\n else:\n _pattern.extend([0,1,0])\n else: \n _pattern.extend([0,1,1]) \n\n return _pattern", "def startColorLoop():\n b.set_group(1, 'on', True)\n b.set_group(1, 'bri', 254)\n b.set_group(1, 'hue', 255)\n b.set_group(1, 'sat', 255)\n b.set_group(1, 'effect', 'colorloop')", "def isophase(_groups, colour, period):\n # Whole numbers are required, so odd numbers are dealt with by loading\n # the spare into the off period.\n # As this is in milliseconds, this will be imperceptible.\n # It is also unlikely, as the top-level input is in seconds\n # and has been multiplied up to milliseconds before reaching this\n # function\n return [\n (colour, math.floor(period/2)),\n ('Off', math.ceil(period/2))\n ]", "def test_color(self):\n self._calibration_test(\"color_full\")", "def modulus_domaincol(w, s):\n \n H = compute_hue(w)\n modulus = np.absolute(w)\n c= np.log(2)\n Logm=np.log(modulus)/c#log base 2\n Logm=np.nan_to_num(Logm)\n\n V=Logm-np.floor(Logm)\n S = s*np.ones_like(H, float)\n\n HSV = np.dstack((H,S,V**0.2))# V**0.2>V for V in[0,1];this choice avoids too dark colors\n RGB=hsv_to_rgb(HSV) \n return RGB", "def _assign_colours_to_groups(self, groups):\n\n pass", "def find_image(grouped):\n for _i in grouped:\n _i[0] = _i[0] * 10 #increases value of red components\n if _i[0] > 225:\n _i[0] = 225\n _i[1] = _i[0] #sets green components equal to red\n _i[2] = _i[0] #sets blue components equal to red\n return grouped", "def proc_fill_color(self, tokens):\n\n self.pen.fill_color = self._proc_color(tokens)\n return []", "def do_run(self, pattern):\n pattern_hue = random.randint(0, 255)\n pattern_color = ColorConvert.hsv_to_rgb(pattern_hue, 255, 255)\n for step in pattern.stops:\n if not self.run:\n return\n step_hue = random.randint(0, 255)\n step_color = ColorConvert.hsv_to_rgb(step_hue, 255, 255)\n for ind, light in enumerate(step.lights):\n if light.random == 'single':\n hue = random.randint(0, 255)\n color = ColorConvert.hsv_to_rgb(hue, 255, 255)\n elif light.random == 'static':\n color = step_color\n elif light.random == 'pattern':\n color = pattern_color\n else:\n color = light.get_color()\n if self.ignoreColors:\n if not color == [0, 0, 0]:\n color = self.defaultColor\n\n self.main.single_change(ind, color)\n self.main.update()\n time.sleep(self.wait)\n self.main.all_off()\n self.main.update()", "def fill(self, color: int) -> None:\n red = (color >> 16) & 0xFF\n green = (color >> 8) & 0xFF\n blue = color & 0xFF\n for x in range(24):\n offset = unpack_from(\">HHH\", self.ledmap_bytes, x * 6)\n 
self._is31[offset[self.r_offset]] = red\n self._is31[offset[self.g_offset]] = green\n self._is31[offset[self.b_offset]] = blue", "def fill(self, color: int) -> None:\n red = (color >> 16) & 0xFF\n green = (color >> 8) & 0xFF\n blue = color & 0xFF\n for x in range(24):\n offset = unpack_from(\">HHH\", self.ledmap_bytes, x * 6)\n self._is31[offset[self.r_offset]] = red\n self._is31[offset[self.g_offset]] = green\n self._is31[offset[self.b_offset]] = blue", "def processframe(pilimage):\n # TODO: Idea on of overfilling\n # [[0,0,0],\n # [1,1,1],\n # [0,0,0]]\n # Keep this as template. aka pattern. use scipy measure and that s pattern to match all connecting\n # this gets all the fills. the rest is thrown into the pile of sets.\n # we assume index 0 as discarded (Can't really do much with black images.)\n numpyarrayfrompil = numpy.array(pilimage)\n # First we pass to regionprops\n props = createfillers(numpyarrayfrompil)\n # pass all the data we need now to the mapprops2color\n # returns a string which can be cerealised.\n return mapprops2color(props, numpyarrayfrompil, pilimage)", "def led(color: int, /) -> None:" ]
[ "0.57946724", "0.57644004", "0.5761895", "0.5750514", "0.5239501", "0.52209616", "0.5144774", "0.51025516", "0.5077008", "0.50707483", "0.50707483", "0.5058601", "0.5058601", "0.5058601", "0.50353426", "0.5011609", "0.50076145", "0.50035447", "0.49892506", "0.49738902", "0.49643943", "0.49043226", "0.4901966", "0.4901191", "0.4877879", "0.4876219", "0.48605362", "0.48605362", "0.48443314", "0.48438472" ]
0.72213733
0
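
The fixed document cell above is a single-line function; it is restated here as indented Python with the docstring from the row's query, plus a minimal illustrative check (the check itself is an addition, not part of the row):

def fixed(_groups, colour, _period):
    """The Fixed pattern is simply an always-on light in the given colour.

    groups and period are irrelevant.
    """
    return [(colour, 1)]


# Illustrative check: the pattern is a single state in the requested colour.
assert fixed(None, 'W', None) == [('W', 1)]
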
A flash is a single colour displayed for a short period, followed by a longer period of darkness A single flash of a given colour is a 1 second flash >>> flash([1], 'R', 5000) [('R', 1000), ('Off', 4000)] Grouped flashes have a shorter duration >>> flash([3], 'R', 10000) [('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),\ ('Off', 1000), ('Off', 5500)] Composite groups are separated by an even period of darkness >>> flash([3, 1], 'R', 10000) [('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),\ ('Off', 1000), ('Off', 2000), ('R', 500), ('Off', 1000), ('Off', 2000)] The total duration of all states matches the requested period >>> sum((state[1] for state in flash([1], 'R', 5000))) == 5000 True
def flash(groups, colour, period): if groups == [1]: if period <= 2000: raise ValueError( "The cycle period for a flash must be longer than 2 seconds" ) return [ (colour, 1000), ('Off', period-1000) ] return light_sequence(groups, colour, 'Off', period, 500, 1000)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def long_flash(groups, colour, period):\n if groups == [1]:\n return [\n (colour, 2000),\n ('Off', period - 2000)\n ]\n return light_sequence(groups, colour, 'Off', period, 2000, 3000)", "def quick(groups, colour, period):\n # The cycle period cannot be longer than 1.2s (60/50)\n # or shorter than 0.5s\n if groups == [1]:\n if period is not None:\n raise ValueError(\n \"Quick Flash cycle periods must be longer than 0.5 seconds\"\n )\n\n return [\n (colour, 250),\n ('Off', 750)\n ]\n\n return light_sequence(groups, 'Off', colour, period, 250, 500)", "def occulting(groups, colour, period):\n if groups == [1]:\n return [\n ('Off', 1000),\n (colour, period - 1000)\n ]\n return light_sequence(groups, 'Off', colour, period, 500, 1000)", "def flash_red(self, duration=0.2):\n self.pen_color = wx.RED\n self.Refresh(True)\n t = time.time()\n while time.time() - t < duration:\n time.sleep(0.001)\n self.pen_color = wx.WHITE\n self.Refresh(True)", "def ledFlash(strip, color, t = 1):\r\n utime.sleep(t)\r\n setStrip(strip, color)\r\n utime.sleep(t)\r\n setStrip(strip, LED_COLOR_OFF)", "def flash_all_leds(self, k):\n t_end = time.time() + k\n while time.time() < t_end:\n for i in range(300):\n self.light_led(0)\n self.light_led(1)\n self.light_led(2)\n self.light_led(3)\n self.light_led(4)\n self.light_led(5)\n self.light_led(6)\n time.sleep(0.5)\n self.light_led(6)", "def start_flash_timer(self):\r\n self.flashMillisecs = 1000\r\n self.flashTimer.start(50)", "async def flash(self, light: Light, num_times: int, delay=0.15) -> None:\n for _ in range(num_times):\n self.set_lights_off()\n await sleep(delay)\n self.set_lights(light)\n await sleep(delay)", "def find_flush(self, hands_list, cards_list):\n card_color_counts = Counter(map(lambda c: c.color, cards_list))\n for value in card_color_counts.values():\n if value == 5:\n hands_list.append(HandDescription('Flush', None, None))", "def LED_flash(self, state):\n command = bytearray(b\"l\")\n if state == \"on\":\n command += bytes([1])\n command += b\"\\n\"\n elif state == \"off\":\n command += bytes([0])\n command += b\"\\n\"\n self.send(command)", "def test_flash_status_autosample_mode(self):\n self.assert_initialize_driver(DriverProtocolState.AUTOSAMPLE)\n reply = self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.FLASH_STATUS)\n regex = re.compile('Compact Flash Card present - Compact Flash OK!\\r\\n\\r\\r\\nVolume in drive is .+ bytes free\\r\\r\\n', re.DOTALL)\n match = regex.match(reply[1])\n\n self.assertNotEqual(match, None, \"TestINT.test_flash_status: status response not correct\")", "def display(board, leds, delay=0.05, flashdelay=0.05):\n global i\n delay = float(delay)\n flashdelay = float(flashdelay)\n img = np.tile([i, 255, 255], board.shape).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n leds.draw(img, delay=delay)\n img = np.tile([0, 0, 0], board.shape).astype(np.uint8)\n if flashdelay > 0:\n leds.draw(img, delay=flashdelay)\n i += 5\n if i > 255:\n i = 0", "def test_blink(self):\n display = get_display(1)\n display.register_state(main.Fade)\n prev = display.strand[0]\n for i in range(1000):\n display.tick()\n assert(all(0 <= display.strand[0][i] <= 255 for i in range(3)))\n assert display.strand[0] != prev\n prev = display.strand[0]", "def brief_led_flash(self):\n self._ubx.send('CFG-TP5', pulseLenRatioLock=990000)", "def _colour_loop(self, colours, seconds=None, milliseconds=None, fade=True):\n colours = self.convert_to_colour_list(colours) #Forces a list of colours into an actual python list\n if 
len(colours)<2:\n colours.append(\"#000000\") #Blink between black and the specified colour if only one provided\n \n #Start with the first colour immediately:\n if fade:\n self.fade(colours[0])\n else:\n self.set(colours[0])\n step_time = self.clean_time_in_milliseconds(seconds, milliseconds, default_seconds=1, minimum_milliseconds=50)\n \n #Do the loop\n i = 1 #We're moving to the second colour now\n total_colours = len(colours)\n while not self._sequence_stop_signal:\n #Resolve our colour\n next_colour = colours[i]\n i = (i+1) % total_colours #ensures we are never asking for more colours than provided\n if fade: #Fading is a blocking process, thus we let the fade loop use up the time\n _latest_colour = self.fade(next_colour, fade_time=step_time, check=False)\n else: #Set is instant, so we need to consume the step time\n _latest_colour = self.set(next_colour, fade=False, check=False)\n self.sleep(step_time/1000) #NB fade uses milliseconds!!\n #Return the latest colour\n return self.sync_channels()", "def giveFlash(self, amount):\n self.fb = amount\n if amount > 0:\n es.give(self.userid, \"weapon_flashbang\")", "def do_light(self,count):\n if (count == ''):\n count=\"1\"\n for i in range(0,int(count)):\n light=RCtime(12)\n print \"*\"*(light/4000)+\": %d\" % light", "def flash_display_text(self):\r\n if self.flashMillisecs in self.flashTimes:\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : white; color : black; }\")\r\n self.talkInfoString.setStyleSheet(\"QLabel { background-color : white; color : black; }\")\r\n else:\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : black; color : white; }\")\r\n self.talkInfoString.setStyleSheet(\"QLabel { background-color : black; color : white; }\")\r\n self.flashMillisecs -= 50\r\n if self.flashMillisecs < 0:\r\n self.flashTimer.stop()", "def ControlLights(state):\n for led in (RED,YELLOW,GREEN):\n GPIO.output(LED[led],state[led])\n time.sleep(FLASH_TIME)", "def flash_status(text=\"\", duration=0.05):\n status_label.color = WHITE\n status_label.text = text\n time.sleep(duration)\n status_label.color = BLACK\n time.sleep(duration)\n status_label.text = \"\"", "def badmood(streak):\n if streak == 1:\n return \"#FFF8DC\"\n elif streak == 2:\n return \"#FFB5B5\"\n elif streak == 3:\n return \"#FF7373\"\n elif streak == 4:\n return \"#FF4848\" \n elif streak == 5:\n return \"#FF2626\"\n else:\n return \"#FF0000\"", "def goodmood(streak):\n if streak == 1:\n return \"#FFF8DC\"\n elif streak == 2:\n return \"#CCFFCC\"\n elif streak == 3:\n return \"#99FFCC\"\n elif streak == 4:\n return \"#99FF99\"\n elif streak == 5:\n return \"#66FF99\"\n else:\n return \"#00FF66\"", "def measure_darks(det, shutter, quantity):\n yield from set_dark_frame()\n yield from bps.mv(shutter, \"close\")\n yield from _acquire_n_frames(det, quantity)", "def fireworks():\n\n sleep_speed = 0.025\n\n # Turn on white\n PYGLOW.color(\"white\", 60)\n sleep(sleep_speed)\n # Turn on blue\n PYGLOW.color(\"blue\", 60)\n sleep(sleep_speed)\n # Fade white\n PYGLOW.color(\"white\", 50)\n sleep(sleep_speed)\n # Turn on green\n PYGLOW.color(\"green\", 60)\n sleep(sleep_speed)\n # Fade white and blue\n PYGLOW.color(\"white\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 50)\n sleep(sleep_speed)\n # Turn on yellow\n PYGLOW.color(\"yellow\", 60)\n sleep(sleep_speed)\n # Fade white, blue, and green\n PYGLOW.color(\"white\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 50)\n sleep(sleep_speed)\n # 
Turn on orange\n PYGLOW.color(\"orange\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, and yellow\n PYGLOW.color(\"white\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 50)\n sleep(sleep_speed)\n # Turn on red\n PYGLOW.color(\"red\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, yellow, and orange\n PYGLOW.color(\"white\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 50)\n sleep(sleep_speed)\n # Fade all\n PYGLOW.color(\"white\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 50)\n sleep(sleep_speed)\n # Fade blue, green, yellow, orange, and red\n PYGLOW.color(\"blue\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 40)\n sleep(sleep_speed)\n # Fade green, yellow, orange, and red\n PYGLOW.color(\"green\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 30)\n sleep(sleep_speed)\n # Fade yellow, orange, and red\n PYGLOW.color(\"yellow\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 20)\n sleep(sleep_speed)\n # Fade orange, and red\n PYGLOW.color(\"orange\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 10)\n sleep(sleep_speed)\n # Fade red\n PYGLOW.color(\"red\", 0)\n sleep(sleep_speed)\n # Pause 1 second before the next one\n sleep(1)", "def flashLed(self, times=5, start=False, stop=False):\n if start:\n times = -1\n if stop:\n times = 0\n\n self._lowLevelFlashLed(times)", "def play_war(deck):\n a_cards = deck[:int(len(deck)/2)]\n b_cards = deck[int(len(deck)/2):]\n a_stash = []\n b_stash = []\n print(\"\\na_cards: %s, a_stash: %s, \\nb_cards: %s, b_stash: %s\" % (a_cards, a_stash, b_cards, b_stash))\n round = 1\n while a_cards and b_cards:\n # The pop() here means we play with the card that is at the end of the list\n a_card = a_cards.pop()\n b_card = b_cards.pop()\n\n # This is the case if the drawn cards are of equal value\n if a_card[list(a_card.keys())[0]] == b_card[list(b_card.keys())[0]]:\n if len(a_cards) > 0 and len(b_cards) > 0:\n a_stash.extend([a_card]+[a_cards.pop()])\n b_stash.extend([b_card]+[b_cards.pop()])\n print(\"\\n-----------------IT'S A WAR!!!!!!!-----------------\")\n print(\"\\na_cards: %s, a_stash: %s, \\nb_cards: %s, b_stash: %s\" % (a_cards, a_stash, b_cards, b_stash))\n continue\n else:\n continue\n \n # This is the case when a_card wins over the b_card\n elif a_card[list(a_card.keys())[0]] > b_card[list(b_card.keys())[0]]:\n a_cards = [a_card, b_card] + a_stash + b_stash + a_cards\n a_stash = []\n b_stash = []\n\n # This is the case when b_card wins over the a_card\n elif b_card[list(b_card.keys())[0]] > a_card[list(a_card.keys())[0]]:\n b_cards = [b_card, a_card] + b_stash + a_stash + b_cards\n a_stash = []\n b_stash = []\n\n print(\"\\na_cards: %s, a_stash: %s, \\nb_cards: %s, b_stash: %s\" % (a_cards, a_stash, b_cards, b_stash))\n\n print(\"After round %s: 
\\na_cards_count: %s, a_stash_count: %s, b_cards_count: %s, b_stash_count: %s\" %\n (round, len(a_cards), len(a_stash), len(b_cards), len(b_stash)))\n round += 1\n\n if(len(a_cards) > len(b_cards)):\n print(\"A_cards wins!!!\")\n elif(len(b_cards) > len(a_cards)):\n print(\"B_cards wins!!!\")\n else:\n print(\"Both the set of cards are empty! It's a tie!\")", "async def Pulse_Lights():\n busylightapi.manager.apply_effect_to_light(ALL_LIGHTS, pulse)\n return {\n \"action\": \"effect\",\n \"name\": \"pulse\",\n \"light_id\": \"all\",\n \"color\": \"red\",\n }", "def step(self, amt=1):\n \n # For checking if all the animations have their framse looked at\n #activewormind = [i for i, x in enumerate(self._idlelist) if x == False]\n #print \"Worm {} at {:5g}\".format(activewormind, 1000*(time.time() - starttime))\n # save times activated for each worm \n [self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]\n \n #self._led.buffer = [0] * 480\n self._led.pixheights = [-100] * self._led.numLEDs\n #print type(self._led.buffer)\n for ledcopy in self._ledcopies:\n # self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)\n # use pixheights but assume all buffers same size\n # print ledcopy.driver[0].pixheights\n for pix in range(self._led.numLEDs):\n #for ledcopy in self._ledcopies:\n if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]\n elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:\n for i in range(3):\n self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]\n self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix] \n self._step += 1", "def create_wave(self, state):\n\n pulses = []\n for row in Panel.Rows:\n (turn_on, dim_off, factor) = self.row_mask(row, state)\n turn_off = ~turn_on & (self.all_rows_mask | self.all_cols_mask)\n\n if factor is None:\n # no dimmed LEDs\n pulses.append(pigpio.pulse(turn_on, turn_off, 1500))\n else:\n # single dimmed LED\n pulses.extend([\n pigpio.pulse(turn_on, turn_off, int(1500 * factor)),\n pigpio.pulse(0, dim_off, int(1500 * (1.0 - factor)))\n ])\n\n self._pi.wave_add_generic(pulses)\n return self._pi.wave_create()", "def writeFlash(self, hex_):\n Flash, EEPROM,IDlocs,Config = self.__formatHex(hex_)\n print(\"Erasing Flash...\")\n self.__writeRegion(op.BootloaderMemoryRegions.Flash,op.BootloaderCommands.EraseFlash,0x0800,Flash,None)\n print(\"Writing Flash...\")\n if(self.__writeRegion(op.BootloaderMemoryRegions.Flash,op.BootloaderCommands.WriteFlash,0x0800,Flash,op.BootloaderCommands.ReadFlash)):\n print(\"Flash written OK\")\n #Don't actually erase the EEPROM, this would wipe out all of the calibration data.\n #if(self.writeRegion(op.BootloaderMemoryRegions.EEPROM,op.BootloaderCommands.WriteEEPROM,0x0000,EEPROM,op.BootloaderCommands.ReadEEPROM)):\n # print(\"EEPROM written OK\")\n if(self.__writeChunk(op.BootloaderMemoryRegions.IDLocs,op.BootloaderCommands.WriteFlash,0x0000,IDlocs,op.BootloaderCommands.ReadFlash)):\n print(\"IDLocs written OK\")\n if(self.__writeChunk(op.BootloaderMemoryRegions.Config,op.BootloaderCommands.WriteConfig,0x0000,Config,op.BootloaderCommands.ReadConfig)):\n print(\"Config written OK\")" ]
[ "0.6906268", "0.63640183", "0.53384686", "0.5311426", "0.52867997", "0.5262986", "0.5233251", "0.52132744", "0.5064614", "0.50232327", "0.4976142", "0.49747515", "0.49659762", "0.49345678", "0.4889537", "0.4873445", "0.4861576", "0.48529956", "0.48520216", "0.48484832", "0.4823681", "0.47934783", "0.47504562", "0.4743432", "0.47056893", "0.46648934", "0.46607664", "0.4638538", "0.46229413", "0.46221158" ]
0.730446
0
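
The flash document cell above loses its indentation in this flattened view, and the light_sequence helper it calls is not defined in any of these rows. Below is the cell re-indented, followed by a guessed reconstruction of light_sequence; its argument names and logic are assumptions, checked only against the doctest outputs in the row's query:

def flash(groups, colour, period):
    if groups == [1]:
        if period <= 2000:
            raise ValueError(
                "The cycle period for a flash must be longer than 2 seconds"
            )
        return [
            (colour, 1000),
            ('Off', period - 1000)
        ]
    return light_sequence(groups, colour, 'Off', period, 500, 1000)


def light_sequence(groups, pulse, rest, period, pulse_ms, gap_ms):
    # Hypothetical helper (not present in the rows): each group contributes
    # `group` repetitions of (pulse, pulse_ms) followed by (rest, gap_ms),
    # and the unused remainder of the cycle is split evenly into a trailing
    # (rest, ...) separator after each group. This reproduces the flash
    # doctests above but is only a sketch of one plausible implementation.
    used = sum(groups) * (pulse_ms + gap_ms)
    separator = (period - used) // len(groups)
    out = []
    for group in groups:
        out.extend([(pulse, pulse_ms), (rest, gap_ms)] * group)
        out.append((rest, separator))
    return out


# Checks reproducing the doctests from the query above.
assert flash([1], 'R', 5000) == [('R', 1000), ('Off', 4000)]
assert flash([3], 'R', 10000) == [
    ('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),
    ('Off', 1000), ('Off', 5500)]
assert flash([3, 1], 'R', 10000) == [
    ('R', 500), ('Off', 1000), ('R', 500), ('Off', 1000), ('R', 500),
    ('Off', 1000), ('Off', 2000), ('R', 500), ('Off', 1000), ('Off', 2000)]
assert sum(state[1] for state in flash([1], 'R', 5000)) == 5000
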
isophase is a pattern with equal dark and light. There are no groups.
def isophase(_groups, colour, period): # Whole numbers are required, so odd numbers are dealt with by loading # the spare into the off period. # As this is in milliseconds, this will be imperceptible. # It is also unlikely, as the top-level input is in seconds # and has been multiplied up to milliseconds before reaching this # function return [ (colour, math.floor(period/2)), ('Off', math.ceil(period/2)) ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_monochromatic(self):\n return equal(s.color for s in self.iter_states())", "def keep_measures(mosaiced_img, pattern):\n h, w = mosaiced_img.shape\n imout = np.zeros((h, w, 3))\n mask = np.zeros((h, w, 3))\n for i in range(2):\n for j in range(2):\n imout[i::2, j::2, pattern[i][j]] = mosaiced_img[i::2,j::2]\n mask[i::2, j::2, pattern[i][j]] = 1\n \n return imout, mask", "def find_dark_states(excited_state, ground_states):", "def is_dark(self):\n\n return self.red() < 125 and self.green() < 125 and self.blue() < 125", "def darkText(img):\n kernel = np.ones((30, 30), np.uint8) \n img_orig = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)\n \n TH = 150\n img_orig[(img_orig[:,:,0] < TH) | (img_orig[:,:,1] < TH) | (img_orig[:,:,2] < TH)] = (0,0,0)\n \n img_orig = closing(img_orig, size=(1, int(img.shape[1] / 8)))\n \n return (cv2.cvtColor(img_orig, cv2.COLOR_BGR2GRAY) != 0).astype(np.uint8)", "def seasonal_pattern(season_time):\n\treturn np.where(season_time < 0.4,\n\t\t\t\t\tnp.cos(season_time * 2 * np.pi),\n\t\t\t\t\t1 / np.exp(3* season_time))", "def candle_pattern(ohlc_df,ohlc_day):\r\n pattern = None\r\n signi = \"low\"\r\n avg_candle_size = abs(ohlc_df[\"close\"] - ohlc_df[\"open\"]).median()\r\n sup, res = res_sup(ohlc_df,ohlc_day)\r\n \r\n if (sup - 1.5*avg_candle_size) < ohlc_df[\"close\"][-1] < (sup + 1.5*avg_candle_size):\r\n signi = \"HIGH\"\r\n \r\n if (res - 1.5*avg_candle_size) < ohlc_df[\"close\"][-1] < (res + 1.5*avg_candle_size):\r\n signi = \"HIGH\"\r\n \r\n if candle_type(ohlc_df) == 'doji' \\\r\n and ohlc_df[\"close\"][-1] > ohlc_df[\"close\"][-2] \\\r\n and ohlc_df[\"close\"][-1] > ohlc_df[\"open\"][-1]:\r\n pattern = \"doji_bullish\"\r\n \r\n if candle_type(ohlc_df) == 'doji' \\\r\n and ohlc_df[\"close\"][-1] < ohlc_df[\"close\"][-2] \\\r\n and ohlc_df[\"close\"][-1] < ohlc_df[\"open\"][-1]:\r\n pattern = \"doji_bearish\" \r\n \r\n if candle_type(ohlc_df) == \"maru_bozu_green\":\r\n pattern = \"maru_bozu_bullish\"\r\n \r\n if candle_type(ohlc_df) == \"maru_bozu_red\":\r\n pattern = \"maru_bozu_bearish\"\r\n \r\n if trend(ohlc_df.iloc[:-1,:],7) == \"uptrend\" and candle_type(ohlc_df) == \"hammer\":\r\n pattern = \"hanging_man_bearish\"\r\n \r\n if trend(ohlc_df.iloc[:-1,:],7) == \"downtrend\" and candle_type(ohlc_df) == \"hammer\":\r\n pattern = \"hammer_bullish\"\r\n \r\n if trend(ohlc_df.iloc[:-1,:],7) == \"uptrend\" and candle_type(ohlc_df) == \"shooting_star\":\r\n pattern = \"shooting_star_bearish\"\r\n \r\n if trend(ohlc_df.iloc[:-1,:],7) == \"uptrend\" \\\r\n and candle_type(ohlc_df) == \"doji\" \\\r\n and ohlc_df[\"high\"][-1] < ohlc_df[\"close\"][-2] \\\r\n and ohlc_df[\"low\"][-1] > ohlc_df[\"open\"][-2]:\r\n pattern = \"harami_cross_bearish\"\r\n \r\n if trend(ohlc_df.iloc[:-1,:],7) == \"downtrend\" \\\r\n and candle_type(ohlc_df) == \"doji\" \\\r\n and ohlc_df[\"high\"][-1] < ohlc_df[\"open\"][-2] \\\r\n and ohlc_df[\"low\"][-1] > ohlc_df[\"close\"][-2]:\r\n pattern = \"harami_cross_bullish\"\r\n \r\n if trend(ohlc_df.iloc[:-1,:],7) == \"uptrend\" \\\r\n and candle_type(ohlc_df) != \"doji\" \\\r\n and ohlc_df[\"open\"][-1] > ohlc_df[\"high\"][-2] \\\r\n and ohlc_df[\"close\"][-1] < ohlc_df[\"low\"][-2]:\r\n pattern = \"engulfing_bearish\"\r\n \r\n if trend(ohlc_df.iloc[:-1,:],7) == \"downtrend\" \\\r\n and candle_type(ohlc_df) != \"doji\" \\\r\n and ohlc_df[\"close\"][-1] > ohlc_df[\"high\"][-2] \\\r\n and ohlc_df[\"open\"][-1] < ohlc_df[\"low\"][-2]:\r\n pattern = \"engulfing_bullish\"\r\n \r\n return \"Significance - {}, Pattern - 
{}\".format(signi,pattern)", "def filter_mentor_advise(image):\n HSV = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n\n # For yellow\n yellow = cv2.inRange(HSV, (20, 100, 100), (50, 255, 255))\n\n # For white\n sensitivity_1 = 68\n white = cv2.inRange(HSV, (0,0,255-sensitivity_1), (255,20,255))\n\n sensitivity_2 = 60\n HSL = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n white_2 = cv2.inRange(HSL, (0,255-sensitivity_2,0), (255,255,sensitivity_2))\n white_3 = cv2.inRange(image, (200,200,200), (255,255,255))\n\n bit_layer = yellow | white | white_2 | white_3\n\n return bit_layer", "def seasonal_pattern(season_time):\r\n return np.where(season_time < 0.4,\r\n np.cos(season_time * 2 * np.pi),\r\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\r\n return np.where(season_time < 0.4,\r\n np.cos(season_time * 2 * np.pi),\r\n 1 / np.exp(3 * season_time))", "def get_allsky(self):\n band = self.get_band()\n septon = self.is_septon()\n if band == '10_90' or band == '30_90' or septon:\n allsky = True\n else:\n allsky = False\n return allsky", "def seasonal_pattern(season_time):\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))", "def seasonal_pattern(season_time):\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))", "def candle_pattern(ohlc_df,ohlc_day):\n pattern = None\n signi = \"low\"\n avg_candle_size = abs(ohlc_df[\"close\"] - ohlc_df[\"open\"]).median()\n sup, res = res_sup(ohlc_df,ohlc_day)\n \n if (sup - 1.5*avg_candle_size) < ohlc_df[\"close\"][-1] < (sup + 1.5*avg_candle_size):\n signi = \"HIGH\"\n \n if (res - 1.5*avg_candle_size) < ohlc_df[\"close\"][-1] < (res + 1.5*avg_candle_size):\n signi = \"HIGH\"\n \n if candle_type(ohlc_df) == 'doji' \\\n and ohlc_df[\"close\"][-1] > ohlc_df[\"close\"][-2] \\\n and ohlc_df[\"close\"][-1] > ohlc_df[\"open\"][-1]:\n pattern = \"doji_bullish\"\n \n if candle_type(ohlc_df) == 'doji' \\\n and ohlc_df[\"close\"][-1] < ohlc_df[\"close\"][-2] \\\n and ohlc_df[\"close\"][-1] < ohlc_df[\"open\"][-1]:\n pattern = \"doji_bearish\" \n \n if candle_type(ohlc_df) == \"maru_bozu_green\":\n pattern = \"maru_bozu_bullish\"\n \n if candle_type(ohlc_df) == \"maru_bozu_red\":\n pattern = \"maru_bozu_bearish\"\n \n if trend(ohlc_df.iloc[:-1,:],7) == \"uptrend\" and candle_type(ohlc_df) == \"hammer\":\n pattern = \"hanging_man_bearish\"\n \n if trend(ohlc_df.iloc[:-1,:],7) == \"downtrend\" and candle_type(ohlc_df) == \"hammer\":\n pattern = \"hammer_bullish\"\n \n if trend(ohlc_df.iloc[:-1,:],7) == \"uptrend\" and candle_type(ohlc_df) == \"shooting_star\":\n pattern = \"shooting_star_bearish\"\n \n if trend(ohlc_df.iloc[:-1,:],7) == \"uptrend\" \\\n and candle_type(ohlc_df) == \"doji\" \\\n and ohlc_df[\"high\"][-1] < ohlc_df[\"close\"][-2] \\\n and ohlc_df[\"low\"][-1] > ohlc_df[\"open\"][-2]:\n pattern = \"harami_cross_bearish\"\n \n if trend(ohlc_df.iloc[:-1,:],7) == \"downtrend\" \\\n and candle_type(ohlc_df) == \"doji\" \\\n and ohlc_df[\"high\"][-1] < ohlc_df[\"open\"][-2] \\\n and ohlc_df[\"low\"][-1] > ohlc_df[\"close\"][-2]:\n pattern = \"harami_cross_bullish\"\n \n if trend(ohlc_df.iloc[:-1,:],7) == \"uptrend\" \\\n and candle_type(ohlc_df) != \"doji\" \\\n and ohlc_df[\"open\"][-1] > ohlc_df[\"high\"][-2] \\\n and ohlc_df[\"close\"][-1] < ohlc_df[\"low\"][-2]:\n pattern = \"engulfing_bearish\"\n \n if 
trend(ohlc_df.iloc[:-1,:],7) == \"downtrend\" \\\n and candle_type(ohlc_df) != \"doji\" \\\n and ohlc_df[\"close\"][-1] > ohlc_df[\"high\"][-2] \\\n and ohlc_df[\"open\"][-1] < ohlc_df[\"low\"][-2]:\n pattern = \"engulfing_bullish\"\n \n return \"Significance - {}, Pattern - {}\".format(signi,pattern)", "def make_pattern(self):\n probability = random.SystemRandom().random()\n if probability < 0.1:\n _pattern = [0 for x in range(32)]\n elif probability > 0.5:\n pattern_num = SECURE_RANDOM.choice(CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.80:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n elif _probability < 0.40:\n _offset = random.SystemRandom().randint(2, 16)\n _pattern = [1 if (x == _offset) or (x % pattern_num == _offset) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n pattern_num = SECURE_RANDOM.choice(INNER_CLASSIC)\n _probability = random.SystemRandom().random()\n if _probability > 0.50:\n _pattern = [1 if (x == 1) or (x % pattern_num == 1) else 0 for x in range(1,33)]\n else:\n _pattern = [1 if random.SystemRandom().random() < pattern_num/32 else 0 for x in range(1,33)]\n\n if not self.global_swing:\n _probability = random.SystemRandom().random()\n if _probability > 0.3:\n _pattern.extend([random.SystemRandom().uniform(0.01, 0.5), random.SystemRandom().randint(1, 14), 0])\n else:\n _pattern.extend([0,1,0])\n else: \n _pattern.extend([0,1,1]) \n\n return _pattern", "def get_dark_channel(self,img, *, size):\n #Extract the dark/hazy part from the image\n minch = np.amin(img, axis=2)\n box = cv2.getStructuringElement(cv2.MORPH_RECT, (size // 2, size // 2))\n return cv2.erode(minch, box)", "def is_het(self): \n return self.geno_hap1 != self.geno_hap2", "def chk_hamming(data):\n pass", "def whatsgreen2(image):\n green = image.hueDistance(color= Color('green'), minvalue=40).binarize()\n return green", "def heralded_fock_basis(self, detector_pattern):\n undetected_photons = self.photon_number - sum(detector_pattern)\n undetected_modes = set(range(self.N)) - self.circuit.detected_modes\n\n #write down the detector outcome in terms of which modes the photons arrived \n detector_outcome = []\n for mode, occupancy in zip(self.circuit.detected_modes, detector_pattern):\n detector_outcome.extend([mode] * occupancy)\n\n if undetected_photons > 0:\n #look at all options for where undetected photons could be\n undetected_outcomes = combinations_with_replacement(undetected_modes, undetected_photons)\n\n #combine detected and undetected outcomes\n return (tuple(sorted(detector_outcome + list(u))) for u in undetected_outcomes)\n else:\n return (tuple(detector_outcome),)", "def sun_isoperimetric_ratio(image, mode_viz=False): # On distorted image\r\n try :\r\n _, _, sun_mask = sun_center(image)\r\n except TypeError :\r\n return np.nan\r\n # We blurr the image and re-binarize it\r\n blurred_mask = mahotas.gaussian_filter(sun_mask, 0.7)\r\n blurred_mask = (blurred_mask > blurred_mask.mean())\r\n # Obtain a binary image with the sun border in white pixels\r\n sun_perim_mask = mahotas.labeled.bwperim(blurred_mask, 8)\r\n # Compute the perimeter in pixels\r\n sun_perim = int(perimeter(sun_perim_mask))\r\n # Compute the surface in pixels\r\n sun_surface = np.sum(blurred_mask)\r\n # ratio = 4*pi*S/(P^2). 
Is in [0,1], equals 1 for a circle\r\n ratio = 4*np.pi*sun_surface/(sun_perim**2)\r\n if mode_viz:\r\n # Plot\r\n # print(f\"perimeter = {sun_perim} | surface = {sun_surface}\")\r\n fig = plt.figure(figsize=(12, 6))\r\n ax1 = fig.add_subplot(121)\r\n ax1.imshow(blurred_mask, cmap=\"gray\")\r\n ax2 = fig.add_subplot(122)\r\n ax2.imshow(sun_perim_mask, cmap=\"gray\")\r\n plt.show()\r\n return np.round(ratio, 3)", "def watershed(mask, img, plotImage = False, kernelSize = None):\n imgCopy = img.copy()\n maskCopy = np.array(mask.copy(), dtype=np.uint8)\n \n if kernelSize is None:\n kernelSize = 2\n\n # Finding sure foreground area\n #dist_transform = cv2.distanceTransform(mask, cv2.DIST_L2, 5)\n #ret, sure_fg = cv2.threshold(dist_transform,0.3*dist_transform.max(),255,0) #change the second argument to change the sensitivity \n maskClosed = skimage.morphology.closing(np.array(maskCopy, dtype=np.uint8))\n maskClosed = skimage.morphology.closing(np.array(maskClosed, dtype=np.uint8))\n kernel = np.ones((kernelSize,kernelSize), np.uint8)\n # maskCopy = img_as_bool(maskCopy)\n sure_fg = cv2.erode(maskClosed, kernel, iterations = 2) ###\n sure_fg = skimage.morphology.closing(np.array(sure_fg, dtype=np.uint8))\n # kernel = np.ones((2,2), np.uint8)\n # sure_fg = binary_closing(sure_fg, kernel)\n \n # sure background area\n #kernel = np.ones((5, 5), np.uint8)\n #sure_bg = cv2.dilate(mask, kernel, iterations = 1)\n sure_fg_bool = 1 - img_as_bool(sure_fg)\n # sure_bg = np.uint8(1 - morphology.medial_axis(sure_fg_bool)) ### \n sure_bg = np.uint8(1 - morphology.skeletonize(sure_fg_bool))\n sure_bg[0, :] = 1\n sure_bg[-1, :] = 1\n sure_bg[:, 0] = 1\n sure_bg[:, -1] = 1\n \n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n \n if plotImage:\n plt.figure()\n plt.imshow(sure_fg)\n plt.title(\"Inner Marker\")\n plt.figure()\n plt.imshow(sure_bg)\n plt.title(\"Outer Marker\")\n plt.figure()\n plt.imshow(unknown)\n plt.title(\"Unknown\")\n \n # Marker labelling\n ret, markers = cv2.connectedComponents(sure_fg)\n\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers+1\n\n # Now, mark the region of unknown with zero\n markers[unknown==1] = 0\n \n if plotImage:\n plt.figure()\n plt.imshow(markers, cmap='jet')\n plt.title(\"Markers\")\n \n # Do watershed\n markers = cv2.watershed(imgCopy, markers)\n \n imgCopy[markers == -1] = [0, 255 ,0]\n\n if plotImage:\n plt.figure()\n plt.imshow(markers,cmap='jet')\n plt.title(\"Mask\")\n plt.figure()\n plt.imshow(img)\n plt.title(\"Original Image\")\n plt.figure()\n plt.imshow(imgCopy)\n plt.title(\"Marked Image\")\n plt.show()\n\n return markers", "def test_1_2_dimethylcyclohexane(self):\n def draw(image: ShapeImage):\n image.add_regular_hexagon(\n 100, start_coord=(400, 400)\n )\n image.add_line((487, 350), (487, 250))\n image.add_line((574, 400), (661, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[487, 350]],\n [[574, 400]],\n [[574, 500]],\n [[487, 550]],\n [[400, 500]],\n # Methyl groups\n [[487, 250]],\n [[661, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 487, 350]],\n [[487, 350, 574, 400]],\n [[574, 400, 574, 500]],\n [[574, 500, 487, 550]],\n [[487, 550, 400, 500]],\n [[400, 500, 400, 400]],\n # To methyl groups\n [[487, 350, 487, 250]],\n [[574, 400, 661, 350]]\n ])\n )", "def opening(self, img):\n return cv.morphologyEx(img, cv.MORPH_OPEN, self.kernel)", "def is_homo_alt(self):\n return self.geno_hap1 == 1 
and self.geno_hap2 == 1", "def testStereo(self):\r\n smi_and_cansmi = [\r\n ('OC(=O)[C@@H](CCC(N)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](CCC(N)=O)N', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@H](C(O)=O)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('OC(=O)[C@H](N)CCC(N)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('OC(=O)[C@@H](N)CCC(N)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('N[C@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('N[C@@H](CCC(N)=O)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](N)C(O)=O', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](N)C(O)=O', 'NC(=O)CC[C@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@H](C(O)=O)N', 'NC(=O)CC[C@@H](N)C(=O)O'),\r\n ('NC(=O)CC[C@@H](C(O)=O)N', 'NC(=O)CC[C@H](N)C(=O)O')]\r\n for smi, cansmi in smi_and_cansmi:\r\n mol = pybel.readstring(\"smi\", smi)\r\n self.assertEqual(mol.write(\"can\").split()[0],\r\n cansmi)", "def mode(v_o, Vcc):\n if v_o == Vcc:\n return \"positive saturation\"\n if v_o >= -Vcc and v_o <= Vcc:\n return \"linear region\"\n if v_o == -Vcc:\n return \"negative saturation\"", "def splitCh(img,debugOption = 'off'):\n\n oriImg = img\n tempRed = oriImg[:,:,0]; tempGreen = oriImg[:,:,1] ; tempBlue = oriImg[:,:,2]\n tempNorm = cv2.normalize(img_as_float(tempGreen), None, 0.0, 1.0, cv2.NORM_MINMAX) # Convert to normalized floating point\n\n greenImg = tempNorm\n if debugOption == 'on':\n \n plt.axis(\"off\")\n plt.title('original')\n plt.imshow(cv2.cvtColor(oriImg, cv2.COLOR_BGR2RGB))\n plt.show()\n \n plt.axis(\"off\")\n plt.title('green ch')\n plt.imshow(temp,cmap='gray')\n plt.show()\n\n print(\"oriImg : {} tempNorm : {} greenImg : {} \".format(oriImg.dtype,tempNorm.dtype,greenImg.dtype))", "def modis_(img):\n cmask = img.select(\"state_1km\")\n cloud = tools.compute_bits_client(cmask, 0, 0, \"cloud\")\n mix = tools.compute_bits_client(cmask, 1, 1, \"mix\")\n shadow = tools.compute_bits_client(cmask, 2, 2, \"shadow\")\n cloud2 = tools.compute_bits_client(cmask, 10, 10, \"cloud2\")\n snow = tools.compute_bits_client(cmask, 12, 12, \"snow\")\n\n mask = (cloud\n .Or(mix)\n # .Or(shadow) # Cloud shadow seems to be miscomputed (MODIS/MYD09GA/MYD09GA_005_2015_09_18)\n .Or(cloud2)\n .Or(snow)\n )\n\n return img.updateMask(mask.Not())" ]
[ "0.54219186", "0.52342373", "0.51545143", "0.5147554", "0.512688", "0.493742", "0.49075535", "0.48455277", "0.48435876", "0.48435876", "0.48414382", "0.48366624", "0.48366624", "0.48366624", "0.48335874", "0.4826204", "0.4740178", "0.47373027", "0.47355133", "0.4724675", "0.4723563", "0.47187185", "0.47045416", "0.46668172", "0.46537086", "0.465088", "0.4641816", "0.4623727", "0.46152225", "0.4612073" ]
0.5926785
0
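
The isophase cell above relies on math.floor and math.ceil so that an odd period still sums exactly to the requested cycle length. It is re-indented below with an illustrative check of that rounding (the check is an addition, not part of the row):

import math


def isophase(_groups, colour, period):
    # Whole numbers are required, so odd numbers are dealt with by loading
    # the spare into the off period. As this is in milliseconds, this will
    # be imperceptible. It is also unlikely, as the top-level input is in
    # seconds and has been multiplied up to milliseconds before reaching
    # this function.
    return [
        (colour, math.floor(period / 2)),
        ('Off', math.ceil(period / 2))
    ]


# The two states always sum to the requested period, even when it is an
# odd number of milliseconds (the spare millisecond goes to 'Off').
assert sum(duration for _, duration in isophase(None, 'W', 4001)) == 4001
assert sum(duration for _, duration in isophase(None, 'W', 5000)) == 5000
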
A Quick flash is more than 50 flashes per minute.
def quick(groups, colour, period): # The cycle period cannot be longer than 1.2s (60/50) # or shorter than 0.5s if groups == [1]: if period is not None: raise ValueError( "Quick Flash cycle periods must be longer than 0.5 seconds" ) return [ (colour, 250), ('Off', 750) ] return light_sequence(groups, 'Off', colour, period, 250, 500)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_flash_timer(self):\r\n self.flashMillisecs = 1000\r\n self.flashTimer.start(50)", "def giveFlash(self, amount):\n self.fb = amount\n if amount > 0:\n es.give(self.userid, \"weapon_flashbang\")", "def brief_led_flash(self):\n self._ubx.send('CFG-TP5', pulseLenRatioLock=990000)", "def flash(self, filename, secs):\n raise NotImplementedError()", "def flash_all_leds(self, k):\n t_end = time.time() + k\n while time.time() < t_end:\n for i in range(300):\n self.light_led(0)\n self.light_led(1)\n self.light_led(2)\n self.light_led(3)\n self.light_led(4)\n self.light_led(5)\n self.light_led(6)\n time.sleep(0.5)\n self.light_led(6)", "def flash_red(self, duration=0.2):\n self.pen_color = wx.RED\n self.Refresh(True)\n t = time.time()\n while time.time() - t < duration:\n time.sleep(0.001)\n self.pen_color = wx.WHITE\n self.Refresh(True)", "def FlashBang(self):\t\t\n\t\tprint(self.name.Title() + \"FlashBang!\")", "def _flash(self,id,msg,duration=30.0):\n if duration>0:\n pass #gtk.timeout_add(duration,'')\n return self.statusbar.push(id,msg)", "def flash_display_text(self):\r\n if self.flashMillisecs in self.flashTimes:\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : white; color : black; }\")\r\n self.talkInfoString.setStyleSheet(\"QLabel { background-color : white; color : black; }\")\r\n else:\r\n self.countdownString.setStyleSheet(\"QLabel { background-color : black; color : white; }\")\r\n self.talkInfoString.setStyleSheet(\"QLabel { background-color : black; color : white; }\")\r\n self.flashMillisecs -= 50\r\n if self.flashMillisecs < 0:\r\n self.flashTimer.stop()", "def lightleep(time_ms: int = None) -> None:", "def flash(groups, colour, period):\n\n if groups == [1]:\n if period <= 2000:\n raise ValueError(\n \"The cycle period for a flash must be longer than 2 seconds\"\n )\n\n return [\n (colour, 1000),\n ('Off', period-1000)\n ]\n\n return light_sequence(groups, colour, 'Off', period, 500, 1000)", "def shutter_pulse(self, width):\n step_name = 'Shutter Pulse'\n self.shutter.settings['shutter_open'] = True\n self.db_poll(step_name)\n print('Shutter open')\n t0 = time.time()\n t_lastlog = t0\n while True:\n if self.interrupt_measurement_called:\n self.shutter.settings['shutter_open'] = False\n break\n if time.time()-t0 > width:\n break\n time.sleep(0.001)\n if time.time() - t_lastlog > 0.2:\n # do some logging\n self.db_poll(step_name)\n t_lastlog = time.time()\n \n self.shutter.settings['shutter_open'] = False\n self.settings['steps_taken'] += 1\n print('Shutter closed')", "def test_flash_status_autosample_mode(self):\n self.assert_initialize_driver(DriverProtocolState.AUTOSAMPLE)\n reply = self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.FLASH_STATUS)\n regex = re.compile('Compact Flash Card present - Compact Flash OK!\\r\\n\\r\\r\\nVolume in drive is .+ bytes free\\r\\r\\n', re.DOTALL)\n match = regex.match(reply[1])\n\n self.assertNotEqual(match, None, \"TestINT.test_flash_status: status response not correct\")", "def __stream_triggered(self):\n # Call this every time period\n thread = Timer(self.stream_time, self.__stream_triggered)\n thread.start()\n self.__threads.append(thread)\n\n if len(self.__spike_buffer) > 2:\n speed = self.__get_speed()\n print(speed)\n self.__stream_send(speed)", "def test_flashdevice_1_read_bandwidth(self):\n self.flash = SerialEepromManager.get_flash_device(self.ftdi_url,\n '24AA32A', 0x50,\n highspeed=True)\n delta = now()\n data = self.flash.read(0, len(self.flash))\n delta = now()-delta\n length = len(data)\n 
self._report_bw('Read', length, delta)", "def sleep(self):\n time.sleep(0.2)", "def power_up(self):\n t_end = time.time() + 3\n while time.time() < t_end:\n self.light_led(5)\n self.light_led(6)", "def limit_fps(fps):\n global _last_update\n elapsed = time.time() - _last_update\n if elapsed < 1 / fps:\n time.sleep(1 / fps - elapsed)\n _last_update = time.time()", "def flash(self):\n\t\traise NotImplementedError", "def long_flash(groups, colour, period):\n if groups == [1]:\n return [\n (colour, 2000),\n ('Off', period - 2000)\n ]\n return light_sequence(groups, colour, 'Off', period, 2000, 3000)", "def do_light(self,count):\n if (count == ''):\n count=\"1\"\n for i in range(0,int(count)):\n light=RCtime(12)\n print \"*\"*(light/4000)+\": %d\" % light", "def flash(self, n=1, f=2.0):\n self.turn_off()\n proc = Thread(target=self._flash_thread_worker, args=[n, f])\n proc.start()", "async def flash(self, light: Light, num_times: int, delay=0.15) -> None:\n for _ in range(num_times):\n self.set_lights_off()\n await sleep(delay)\n self.set_lights(light)\n await sleep(delay)", "def _full_speed_rumble(self, images, duration):\n while duration > 0:\n self.microbit.display.show(images[0]) # pylint: disable=no-member\n time.sleep(0.04)\n self.microbit.display.show(images[1]) # pylint: disable=no-member\n time.sleep(0.04)\n duration -= 0.08", "def micro_sleep(micro_sec):\n start_time = time.time()\n\n while (time.time() - start_time) < (micro_sec * 1e-6):\n pass\n\n return 0", "def time_waste(n=3):\n time.sleep(n)\n message = f'I just wasted {n} seconds of your life.'\n print(message)", "def remaining_ms():", "def vibrate(self, duration):\n self.wm.rumble = 1\n sleep(duration)\n self.wm.rumble = 0", "def delay():\r\n time.sleep(2)", "def test(self):\n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n \n pulses=1000*3\n winsound.Beep(200, 1000) # .Beep(1650Hz, (XXXXms)) #e.g 1000ms=1second\n self.run(pulses); self.run(pulses, ANTI_CLK_W)\n sleep(1)\n\n winsound.Beep(400, 1000)\n self.swing(128, count=30); self.stop() #0.9 degrees\n sleep(1)\n\n winsound.Beep(800, 1000)\n print('Testing I.....')\n self.swing(32, count=120); self.stop() #0.225 degrees \n sleep(1)\n\n winsound.Beep(1600, 1000)\n print('Testing II.....')\n self.swing(2, count=1800); self.stop() #0.05625 degrees\n \n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n print(' Testings Done! ')\n return self.stop() #set low before exist " ]
[ "0.7274421", "0.6186878", "0.6086867", "0.59733325", "0.58153445", "0.58117354", "0.5774111", "0.57247484", "0.5715428", "0.569542", "0.5694873", "0.5687403", "0.56838447", "0.5658738", "0.56574625", "0.5635628", "0.5612565", "0.5605468", "0.5590237", "0.55884016", "0.5575568", "0.55580443", "0.55513793", "0.5538716", "0.5502652", "0.5495478", "0.5493273", "0.5481922", "0.54761046", "0.5475414" ]
0.61882275
1
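
The quick row above defines the pattern by rate rather than by period: the default single-group sequence in its document cell is 250 ms on plus 750 ms off, a 1 s cycle, i.e. 60 flashes per minute, and 50 per minute corresponds to the 1.2 s upper bound mentioned in the cell's comment. A small illustrative check of that arithmetic (the constants are taken from the cell; the check itself is an addition):

ON_MS, OFF_MS = 250, 750          # single-group quick flash states
CYCLE_MS = ON_MS + OFF_MS         # 1000 ms per cycle
assert 60000 / CYCLE_MS == 60     # 60 flashes per minute...
assert 60000 / CYCLE_MS > 50      # ...which is "more than 50 per minute"
assert 60000 / 50 == 1200         # 50 per minute is the 1.2 s maximum cycle
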
Write out binary VTK file with a single vector field. Can specify time index or output time.
def writeVTK(self,fname,itime=None,output_time=None): if output_time: itime = int(output_time / self.dt) if not itime: print 'Need to specify itime or output_time' return print 'Writing out time step',itime,': t=',self.t[itime] u = np.zeros((self.NY,1,self.NZ)); u[:,0,:] = np.flipud(self.field['u'][itime,:,:]).T v = np.zeros((self.NY,1,self.NZ)); v[:,0,:] = np.flipud(self.field['v'][itime,:,:]).T w = np.zeros((self.NY,1,self.NZ)); w[:,0,:] = np.flipud(self.field['w'][itime,:,:]).T VTKwriter.vtk_write_structured_points( open(fname,'wb'), #binary mode 1,self.NY,self.NZ, [u,v,w], datatype=['vector'], dx=1.0,dy=self.dy,dz=self.dz, dataname=['TurbSim_velocity'], origin=[0.,self.y[0],self.z[0]] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveVelocityAndPressureVTK_binary(pressure,u,v,w,x,y,z,filename,dims):\n numEl_size = u.size; numEl = np.prod(numEl_size);\n # open the file and write the ASCII header:\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('Binary\\n\\n')\n file.write('DATASET STRUCTURED_GRID\\n')\n file.write('DIMENSIONS %d %d %d \\n'%(dims[0],dims[1],dims[2]))\n file.write('POINTS %d float\\n'%(numEl))\n file.close()\n \n # append binary x,y,z data\n file = open(filename,'ab')\n for i in range(len(x)): # there really needs to be a better way.\n pt = [x[i],y[i],z[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n \n file.close()\n \n # append an ASCII sub header\n file = open(filename,'a')\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('VECTORS velocity_vectors float\\n')\n file.close()\n \n # append binary u,v,w data\n file = open(filename,'ab')\n for i in range(len(u)):\n pt = [u[i],v[i],w[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n file.close()\n \n # append ASCII sub header for scalar velocity magnitude data\n file = open(filename,'a')\n file.write('SCALARS VelocityMagnitude float\\n')\n file.write('LOOKUP_TABLE default\\n')\n \n file.close()\n \n file = open(filename,'ab')\n v_mag = np.sqrt(u**2+v**2+w**2)\n file = open(filename,'ab')\n p_buf = array('f',v_mag); p_buf.byteswap()\n file.write(p_buf)\n file.close()\n \n \n # append another ASCII sub header for the scalar pressure data\n file = open(filename,'a')\n file.write('SCALARS Pressure float\\n')\n file.write('LOOKUP_TABLE default\\n')\n file.close()\n \n # append binary pressure data\n file = open(filename,'ab')\n p_buf = array('f',pressure); p_buf.byteswap()\n file.write(p_buf)\n file.close()", "def writeVTK(self, fname,\n itime=None,\n output_time=None,\n scaled=True,\n stdout='overwrite'):\n if output_time:\n itime = int(output_time / self.dt)\n if itime is None:\n print('Need to specify itime or output_time')\n return\n if stdout=='overwrite':\n sys.stdout.write('\\rWriting time step {:d} : t= {:f}'.format(\n itime,self.t[itime]))\n else: #if stdout=='verbose':\n print('Writing out VTK for time step',itime,': t=',self.t[itime])\n\n # scale fluctuations\n up = np.zeros((1,self.NY,self.NZ)) # constant x plane (3D array for VTK output)\n wp = np.zeros((1,self.NY,self.NZ))\n vp = np.zeros((1,self.NY,self.NZ))\n up[0,:,:] = self.U[0,itime,:,:]\n vp[0,:,:] = self.U[1,itime,:,:]\n wp[0,:,:] = self.U[2,itime,:,:]\n if scaled:\n for iz in range(self.NZ):\n up[0,:,iz] *= self.scaling[0,iz]\n vp[0,:,iz] *= self.scaling[1,iz]\n wp[0,:,iz] *= self.scaling[2,iz]\n\n # calculate instantaneous velocity\n U = up.copy()\n V = vp.copy()\n W = wp.copy()\n if self.mean_flow_read:\n for iz in range(self.NZ):\n U[0,:,iz] += self.U_inlet[:,iz]\n V[0,:,iz] += self.V_inlet[:,iz]\n W[0,:,iz] += self.W_inlet[:,iz]\n\n # write out VTK\n vtk_write_structured_points(\n open(fname,'wb'), #binary mode\n {\n \"U\": np.stack((U,V,W)),\n \"u'\": np.stack((up,vp,wp)),\n },\n dx=1.0, dy=self.dy, dz=self.dz,\n origin=[0.,self.y[0],self.z[0]],\n indexorder='ijk',\n )", "def _call_writeVec(vecObj, filename, mode):\n res = vecObj.writeVec(filename, mode)\n return res", "def _writeVTKOutput(self):\n\n sigma = numpy.ones((self.numStations, 3), dtype=numpy.float64)\n sigma[:, 0] *= self.sigmaEast\n sigma[:, 1] *= self.sigmaNorth\n sigma[:, 2] *= self.sigmaUp\n\n vtkHead = \"# vtk DataFile Version 2.0\\n\" + \\\n 
\"Synthetic GPS stations\\n\" + \\\n \"ASCII\\n\" + \\\n \"DATASET POLYDATA\\n\" + \\\n \"POINTS \" + repr(self.numStations) + \" double\\n\"\n\n v = open(self.vtkOutputFile, 'w')\n v.write(vtkHead)\n numpy.savetxt(v, self.coords)\n\n numConnect = 2 * self.numStations\n connectHead = \"VERTICES %d %d\\n\" % (self.numStations, numConnect)\n v.write(connectHead)\n verts = numpy.arange(self.numStations, dtype=numpy.int64)\n sizes = numpy.ones_like(verts)\n outConnect = numpy.column_stack((sizes, verts))\n numpy.savetxt(v, outConnect, fmt=\"%d\")\n \n dispHead = \"POINT_DATA \" + repr(self.numStations) + \"\\n\" + \\\n \"VECTORS displacement double\\n\"\n v.write(dispHead)\n numpy.savetxt(v, self.dispNoise)\n\n sigHead = \"VECTORS uncertainty double\\n\"\n v.write(sigHead)\n numpy.savetxt(v, sigma)\n v.close()\n \n return", "def writeMeshVTP(self, outFile):\n # setup colors\n Colors = vtk.vtkFloatArray()\n #Colors.SetNumberOfComponents(3)\n Colors.SetNumberOfTuples(self.Npts)\n Colors.SetName(self.label) #can change to any string\n\n #points\n vtkPts = vtk.vtkPoints()\n\n #build points and colors\n for i,facet in enumerate(self.mesh.Facets):\n for j in range(3):\n x = facet.Points[j][0]\n y = facet.Points[j][1]\n z = facet.Points[j][2]\n vtkPts.InsertNextPoint(x,y,z)\n # Colors.InsertTuple( i*3+j, (arr[i],arr[i],arr[i]) )\n Colors.InsertTuple( i*3+j, [self.scalar[i]] )\n\n #build vtp triangular mesh\n Triangles = vtk.vtkCellArray()\n for i in range(self.Npts):\n Triangle = vtk.vtkTriangle()\n Triangle.GetPointIds().SetId(0, i*3+0)\n Triangle.GetPointIds().SetId(1, i*3+1)\n Triangle.GetPointIds().SetId(2, i*3+2)\n Triangles.InsertNextCell(Triangle)\n\n #build final vtp object for writing\n polydata = vtk.vtkPolyData()\n polydata.SetPoints(vtkPts)\n polydata.SetPolys(Triangles)\n polydata.GetPointData().SetScalars(Colors)\n polydata.Modified()\n writer = vtk.vtkXMLPolyDataWriter()\n writer.SetFileName(outFile)\n writer.SetInputData(polydata)\n #writer.SetDataModeToBinary()\n writer.Write()\n\n return", "def write_vectors(self, filename):\n svu.write_realvectors(self,filename)", "def WriteVTK(self, filename=None, result=None, fmt=\"binary\", interpolation_degree=10, ProjectionFlags=None):\n\n self.__do_essential_memebers_exist__()\n\n if fmt == \"xml\":\n pass\n elif fmt == \"binary\":\n try:\n from pyevtk.hl import pointsToVTK, linesToVTK, gridToVTK, unstructuredGridToVTK\n from pyevtk.vtk import VtkVertex, VtkLine, VtkTriangle, VtkQuad, VtkTetra, VtkPyramid, VtkHexahedron\n except ImportError:\n raise ImportError(\"Could not import evtk. Install it using 'pip install pyevtk'\")\n else:\n raise ValueError(\"Writer format not understood\")\n\n elements = np.copy(self.elements)\n\n cellflag = None\n if self.element_type =='tri':\n cellflag = 5\n offset = 3\n if self.elements.shape[1]==6:\n cellflag = 22\n offset = 6\n elif self.element_type =='quad':\n cellflag = 9\n offset = 4\n if self.elements.shape[1]==8:\n cellflag = 23\n offset = 8\n if self.element_type =='tet':\n cellflag = 10\n offset = 4\n if self.elements.shape[1]==10:\n cellflag = 24\n offset = 10\n # CHANGE NUMBERING ORDER FOR PARAVIEW\n para_arange = [0,4,1,6,2,5,7,8,9,3]\n elements = elements[:,para_arange]\n elif self.element_type == 'hex':\n cellflag = 12\n offset = 8\n if self.elements.shape[1] == 20:\n cellflag = 25\n offset = 20\n elif self.element_type == 'line':\n cellflag = 3\n offset = 2\n\n if filename is None:\n warn('File name not specified. 
I am going to write one in the current directory')\n filename = os.path.join(PWD(__file__), \"output.vtu\")\n if \".vtu\" in filename and fmt == \"binary\":\n filename = filename.split('.')[0]\n if \".vtu\" not in filename and fmt == \"xml\":\n filename = filename + \".vtu\"\n\n\n if self.InferPolynomialDegree() > 1:\n try:\n from Florence.PostProcessing import PostProcess\n from Florence.VariationalPrinciple import DisplacementFormulation\n except ImportError:\n raise RuntimeError(\"Writing high order elements to VTK is not supported yet\")\n if result is not None and result.ndim > 1:\n raise NotImplementedError(\"Writing vector/tensor valued results to binary vtk not supported yet\")\n return\n else:\n if result is None:\n result = np.zeros_like(self.points)[:,:,None]\n if result.ndim == 1:\n result = result.reshape(result.shape[0],1,1)\n pp = PostProcess(3,3)\n pp.SetMesh(self)\n pp.SetSolution(result)\n pp.SetFormulation(DisplacementFormulation(self,compute_post_quadrature=False))\n pp.WriteVTK(filename,quantity=0,interpolation_degree=interpolation_degree, ProjectionFlags=ProjectionFlags)\n return\n\n\n if self.InferSpatialDimension() == 2:\n points = np.zeros((self.points.shape[0],3))\n points[:,:2] = self.points\n else:\n points = self.points\n\n if result is None:\n if fmt == \"xml\":\n write_vtu(Verts=self.points, Cells={cellflag:elements},fname=filename)\n elif fmt == \"binary\":\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag)\n else:\n if isinstance(result, np.ndarray):\n if result.ndim > 1:\n if result.size == result.shape[0]:\n result = result.flatten()\n\n if fmt == \"xml\":\n if result.ndim > 1:\n if result.shape[0] == self.nelem:\n write_vtu(Verts=self.points, Cells={cellflag:elements},\n cvdata={cellflag:result.ravel()},fname=filename)\n elif result.shape[0] == self.points.shape[0]:\n write_vtu(Verts=self.points, Cells={cellflag:elements},\n pvdata=result.ravel(),fname=filename)\n else:\n if result.shape[0] == self.nelem:\n write_vtu(Verts=self.points, Cells={cellflag:elements},cdata=result,fname=filename)\n elif result.shape[0] == self.points.shape[0]:\n write_vtu(Verts=self.points, Cells={cellflag:elements},pdata=result,fname=filename)\n elif fmt == \"binary\":\n if result.ndim <= 1:\n if result.shape[0] == self.nelem:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n cellData={'result':np.ascontiguousarray(result.ravel())})\n elif result.shape[0] == self.points.shape[0]:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n pointData={'result':np.ascontiguousarray(result.ravel())})\n else:\n if result.shape[1] == 3:\n result_data = {'result':tuple(( np.ascontiguousarray(result[:,0]), np.ascontiguousarray(result[:,1]), np.ascontiguousarray(result[:,2]) ))}\n elif result.shape[1] == 2:\n result_data = {'result':tuple(( np.ascontiguousarray(result[:,0]), np.ascontiguousarray(result[:,1]) ))}\n else:\n raise NotImplementedError(\"Writing vector/tensor 
valued results > 3 to binary vtk not supported yet\")\n\n if result.shape[0] == self.nelem:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n cellData=result_data)\n elif result.shape[0] == self.points.shape[0]:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n pointData=result_data)", "def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) \n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)", "def writevto(\n self, \n addr: int, \n vector: Sequence[bytes], \n stop: bool = True, \n /\n ) -> int:", "def writevto(\n self, \n addr: int, \n vector: Sequence[bytes], \n stop: bool = True, \n /\n ) -> int:", "def save_vtu_file(arr, name, filename, sample_fp=None):\n if sample_fp == None:\n sample_fp = vda.get_sorted_fps_U(self.settings.DATA_FP)[0]\n\n ug = vtktools.vtu(sample_fp) #use sample fp to initialize positions on grid\n\n ug.AddScalarField('name', arr)\n ug.Write(filename)", "def write_vec(f, vec, name, vec_type):\n f.write('%s %s[%d] = {\\n' % (vec_type, name, len(vec)))\n\n # Write vector elements\n for i in range(len(vec)):\n if vec_type == 'c_float':\n f.write('(c_float)%.20f,\\n' % vec[i])\n else:\n f.write('%i,\\n' % vec[i])\n\n f.write('};\\n')", "def saveScalarStructuredGridVTK_binary(scalar,scalar_name,x,y,z,filename,dims):\n \n numEl_size = x.size; numEl = np.prod(numEl_size);\n # open the file and write the ASCII header:\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('Binary\\n\\n')\n file.write('DATASET STRUCTURED_GRID\\n')\n file.write('DIMENSIONS %d %d %d \\n'%(dims[0],dims[1],dims[2]))\n file.write('POINTS %d float\\n'%(numEl))\n file.close()\n \n # append binary x,y,z data\n file = open(filename,'ab')\n for i in range(len(x)): # there really needs to be a better way.\n pt = [x[i],y[i],z[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n \n file.close()\n \n # append another ASCII sub header for the scalar data\n file = open(filename,'a')\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('SCALARS %s int\\n'%scalar_name)\n file.write('LOOKUP_TABLE default\\n')\n file.close()\n \n # append binary scalar data\n file = 
open(filename,'ab')\n p_buf = array('f',scalar); p_buf.byteswap()\n file.write(p_buf)\n file.close()", "def save_vtk(self, filename=None):\n if filename != None:\n # Explicitly provided filename overwrites the previously used one.\n self.vtk_export_filename = filename\n\n # Check whether we're still writing to the same file.\n if self.vtk_saver.filename != self.vtk_export_filename:\n self.vtk_saver.open(\n self.vtk_export_filename, self.overwrite_pvd_files)\n\n self.vtk_saver.save_field(self._m, self.t)", "def vF3d_VTK(field,name,VTKformat): \n if VTKformat == 'vtu':\n vf3d_vtu(field,name)\n elif VTKformat == None:\n print 'Please select a VTK format'\n else:\n print 'The selected format has not been developed yet'\n return #nothing, since functions output the written VTK file", "def writeVelocityPlot(self):\n name = \"velocity.vtk\"\n chargeFile = open(name,'w')\n chargeFile.write(\"%s\\n\"%(\"# vtk DataFile Version 2.0\"))\n chargeFile.write(\"%s\\n\"%(\"obtained via hydraulicmodule\"))\n chargeFile.write(\"%s\\n\"%(\"ASCII\"))\n chargeFile.write(\"%s\\n\"%(\"DATASET UNSTRUCTURED_GRID\"))\n chargeFile.write(\"%s %i %s\\n\"%(\"POINTS\",len(self.points),\"double\"))\n dim = self.mesh.getSpaceDimensions()\n if (dim==2): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n 0.))\n pass\n pass\n elif (dim==3): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n self.points[ind][2]))\n pass\n pass\n else:\n raise Exception(\" error in mesh dimension \") \n numberOfCells = self.mesh.getNumberOfCells()\n connectivity = self.mesh.getConnectivity()\n\n cellListSize = 0\n for i in range(0,numberOfCells): # gmsh meshes: type of elements\n gmshType = connectivity[i][1]\n if gmshType == 1: # 2-node line\n cellListSize += 3\n pass\n elif gmshType == 2: # 3-node triangles\n cellListSize += 4\n pass\n elif gmshType == 3: # 4-node quadrangles\n cellListSize += 5\n pass\n elif gmshType == 4: # 4-node tetrahedron\n cellListSize += 5\n pass\n elif gmshType == 5: # 8-node hexahedrons\n cellListSize += 9\n pass\n pass\n chargeFile.write(\"CELLS %i %i\\n\"%(numberOfCells,cellListSize))\n ind = 0\n for cell in connectivity:\n ind = cell[2]+3\n# print \" ctm dbg cell \",vtkTyp,ind,cell,\" perm \",permutation[ind],permutation[ind+1],permutation[ind+2],permutation[ind+3]\n # \n vtkTyp = _vtkGmsh(cell[1])\n if (vtkTyp==3): # 2-node line\n ind = cell[2]+3\n chargeFile.write(\"%i %i %i\\n\"%(\n 2,\\\n cell[ind]-1,\\\n cell[ind+1]-1)\n )\n pass\n \n elif (vtkTyp==5): # triangles\n chargeFile.write(\"%i %i %i %i\\n\"%(\n 3, \n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1)\n )\n pass\n elif (vtkTyp==9): # quadr\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==10): # tetra\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==12): # hexahedron\n chargeFile.write(\"%i %i %i %i %i %i %i %i %i\\n\"%(\n 8,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1,\\\n cell[ind+4]-1,\\\n cell[ind+5]-1,\\\n cell[ind+6]-1,\\\n cell[ind+7]-1)\n )\n pass\n pass\n chargeFile.write(\"%s %i\\n\"%(\"CELL_TYPES\",numberOfCells))\n#\n for i in range(0,numberOfCells):\n gmshType = connectivity[i][1]\n\n if (gmshType)==1:\n cellTyp = 3\n pass\n elif 
(gmshType)==2:\n cellTyp = 5\n pass\n elif (gmshType)==3:\n cellTyp = 9\n pass\n elif (gmshType)==4:\n cellTyp = 10\n pass\n elif (gmshType)==5:\n cellTyp = 12\n pass\n elif (gmshType)==6:\n cellTyp = 13\n pass\n elif gmshType == 7:\n cellTyp = 14\n pass\n else:\n raise Exception(\" check gmshtype \")\n chargeFile.write(\"%i\\n\"%(cellTyp))\n chargeFile.write(\"%s %d\\n\"%(\"POINT_DATA\",len(self.points)))\n chargeFile.write(\"%s\\n\"%(\"VECTORS vectors float\"))\n for velocityComponent in self.velocity:\n chargeFile.write(\" %e %e %e\\n \"%(velocityComponent[0], velocityComponent[1], velocityComponent[2]))\n chargeFile.write(\"%s\\n\"%(\"SCALARS charge double\"))\n chargeFile.write(\"%s\\n\"%(\"LOOKUP_TABLE default\"))\n#\n \n chargeDataFile=open(\"./\" + self.flowComponent.meshDirectoryName + \"/\" + \"HeVel.dat\",'r')\n line = chargeDataFile.readline()\n while \"Number Of Nodes\" not in line:\n line = chargeDataFile.readline()\n#line.split()\n nodesNumber = line.split()[-1]\n while \"Perm\" not in line:\n line = chargeDataFile.readline()\n#\n# We read the permutation\n#\n for i in range(int(nodesNumber)): chargeDataFile.readline()\n#\n# We read the charge\n#\n for i in range(int(nodesNumber)): chargeFile.write(\" %15.10e\\n \"%(float(chargeDataFile.readline())))", "def writeBOV(g):\n global counter\n bovNm = 'file_%03d.bov' % counter\n dataNm = 'file_%03d.data' % counter\n counter += 1\n with open(bovNm, 'w') as f:\n f.write('TIME: %g\\n' % float(counter))\n f.write('DATA_FILE: %s\\n' % dataNm)\n if len(g.shape) == 2:\n f.write('DATA_SIZE: %d %d 1\\n' % g.shape)\n elif len(g.shape) == 3:\n f.write('DATA_SIZE: %d %d %d\\n' % g.shape)\n else:\n raise RuntimeError(f'unexpected shape {g.shape}')\n if g.dtype == np.float64:\n f.write('DATA_FORMAT: DOUBLE\\n')\n elif g.dtype == np.int32:\n f.write('DATA_FORMAT: INT\\n')\n else:\n raise RuntimeError(f'unexpected data type {g.dtype}')\n f.write('VARIABLE: U\\n')\n f.write('DATA_ENDIAN: LITTLE\\n')\n f.write('CENTERING: ZONAL\\n')\n f.write('BRICK_ORIGIN: 0. 0. 0.\\n')\n f.write('BRICK_SIZE: 1.0 1.0 1.0\\n')\n with open(dataNm, 'w') as f:\n g.T.tofile(f) # BOV format expects Fortran order", "def write_vector(vector, outfile):\r\n out_dir = os.path.dirname(outfile)\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n vector = vector.copy()\r\n for k in vector:\r\n if isinstance(vector[k], np.ndarray):\r\n vector[k] = vector[k].round(4).tolist()\r\n with open(outfile, 'w') as f:\r\n json.dump(vector, f)\r\n f.write('\\n')\r\n\r\n print(\" ... wrote {}\".format(outfile))", "def write_vector(vector, outfile):\n out_dir = os.path.dirname(outfile)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n vector = vector.copy()\n for k in vector:\n if isinstance(vector[k], np.ndarray):\n vector[k] = vector[k].round(4).tolist()\n with open(outfile, 'w') as f:\n json.dump(vector, f, separators=(',', ': '), indent=4)\n f.write('\\n')\n\n print(\" ... 
wrote {}\".format(outfile))", "def writePointCloudVTP(self, outFile):\n #points\n vtkPts = vtk.vtkPoints()\n cells = vtk.vtkCellArray()\n\n # setup colors\n Colors = vtk.vtkFloatArray()\n #Colors.SetNumberOfComponents(3)\n Colors.SetNumberOfTuples(self.Npts)\n Colors.SetName(self.label) #can change to any string\n\n for i in range(self.Npts):\n x = self.ctrs[i,0]\n y = self.ctrs[i,1]\n z = self.ctrs[i,2]\n id = vtkPts.InsertNextPoint(x,y,z)\n cells.InsertNextCell(1)\n cells.InsertCellPoint(id)\n Colors.InsertTuple( i, [self.scalar[i]] )\n\n\n #build final vtp object for writing\n polydata = vtk.vtkPolyData()\n polydata.SetPoints(vtkPts)\n polydata.SetVerts(cells)\n polydata.GetPointData().SetScalars(Colors)\n polydata.Modified()\n\n writer = vtk.vtkXMLPolyDataWriter()\n writer.DebugOn()\n writer.SetFileName(outFile)\n writer.SetInputData(polydata)\n #writer.SetDataModeToBinary()\n writer.Write()\n\n return", "def write_to_vtk(mesh, displacement=None, file_name=\"gridfile\"):\n cents = get_cell_centroids(mesh)\n dim = len(cents[0])\n \n if displacement is not None:\n cents+= displacement\n \n file_name = \"./\"+file_name\n \n write_function=None\n if dim==3:\n write_function = write_to_vtk3D\n if dim==2:\n write_function = write_to_vtk2D\n \n write_function(cents, displacement, file_name)\n\n\n pass", "def vf3d_vtu(field,name):\n [X,Y,Z,U,V,W] = field #3d velocity field\n \n #achieve the correct format\n Pnts = F3d_2_vtkFromat(N.array([X,Y,Z])) \n velF = F3d_2_vtkFromat(N.array([U,V,W])) \n #name the vtu file\n if name == None:\n vtu = 'vf3VTU.vtu'\n else:\n vtu = name + '.vtu'\n \n #Generate and write the .vtu file \n Ugrid = tvtk.UnstructuredGrid()\n Ugrid.points = Pnts\n Ugrid.point_data.vectors = velF\n Ugrid.point_data.vectors.name = 'velocity'\n \n write_data(Ugrid, vtu)\n \n return vtu", "def saveStructuredPointsVTK_ascii(scalar,scalar_name,filename,dims,origin,spacing):\n numEl = dims[0]*dims[1]*dims[2]\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('ASCII\\n\\n')\n file.write('DATASET STRUCTURED_POINTS\\n')\n file.write('DIMENSIONS %d %d %d \\n'%(dims[0],dims[1],dims[2]))\n file.write('ORIGIN %g %g %g \\n '%(origin[0],origin[1],origin[2]))\n file.write('SPACING %g %g %g \\n'%(spacing[0],spacing[1],spacing[2]))\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('SCALARS %s float 1 \\n'%scalar_name)\n file.write('LOOKUP_TABLE default \\n')\n for i in range(numEl):\n file.write('%g \\n'%scalar[i])\n file.close()", "def serialize_index(index):\n writer = faiss.VectorIOWriter()\n faiss.write_index(index, writer)\n return faiss.vector_to_array(writer.data)", "def _write_vtk_box(box_points, filename, dimensions):\n # setup points and vertices\n points = vtk.vtkPoints()\n\n for index in range(0, box_points.shape[1]):\n points.InsertNextPoint(box_points[0, index], box_points[1, index],\n box_points[2, index])\n\n grid = vtk.vtkStructuredGrid()\n\n grid.SetPoints(points)\n grid.SetDimensions(dimensions)\n grid.Modified()\n\n writer = vtk.vtkStructuredGridWriter()\n writer.SetFileName(filename)\n\n if vtk.VTK_MAJOR_VERSION <= 5:\n grid.Update()\n writer.SetInput(grid)\n else:\n writer.SetInputData(grid)\n\n writer.Write()", "def vtp(self, f_vtu, f_vtp):\r\n reader = vtk.vtkXMLUnstructuredGridReader()\r\n reader.SetFileName(f_vtu)\r\n reader.Update()\r\n ugrid = reader.GetOutput()\r\n geometryFilter = vtk.vtkGeometryFilter()\r\n geometryFilter.SetInputData(ugrid)\r\n 
geometryFilter.Update()\r\n polydata = geometryFilter.GetOutput()\r\n writer =vtk.vtkXMLPolyDataWriter()\r\n writer.SetFileName(f_vtp)\r\n writer.SetInputData(polydata)\r\n writer.Write()\r\n print(\"vtp file created.\")", "def write_gpt(self, filePath, asci2gdf_bin=None, verbose=True):\n \n return write_gpt_fieldmesh(self, filePath, asci2gdf_bin=asci2gdf_bin, verbose=verbose)", "def save_vector(dataset, outpath, driver=None):\n try:\n if not driver:\n driver = dataset.GetDriver()\n if os.path.exists(outpath):\n driver.DeleteDataSource(outpath)\n dst_ds = driver.CopyDataSource(dataset, outpath)\n else:\n driver = ogr.GetDriverByName(driver)\n if os.path.exists(outpath):\n driver.DeleteDataSource(outpath)\n dst_ds = driver.CopyDataSource(dataset, outpath)\n\n\n except RuntimeError as err:\n raise err\n except Exception as e:\n raise e\n\n finally:\n dst_ds = None # Flush the dataset to disk", "def WriteFile( self ):\n with open( \"BasisVector.in\" , \"w\" ) as outfile:\n firstLine = \" \" + str( self.NQ ) + \\\n \" \" + str( self.Nbranches ) + \\\n \" \" + str( self.NatomsUC ) + \\\n \" \" + str( self.dim ) + \"\\n\"\n outfile.write( firstLine )\n for qq in range( self.NQ ): ## loop over Q vectors\n lineQ = [ \"{:15.8f}\".format( x ) for x in \n self.QVectors[ qq , : ] ]\n lineQ = \"\".join( lineQ )\n outfile.write( lineQ + \"\\n\" )\n for branch in range( self.Nbranches ): ## loop over branches\n for atom in range( self.NatomsUC ): ## loop over atoms in unit cell\n line = [ \"{:15.8f}\".format( x ) for x in \n self.EigenVectors[ qq , branch , atom , : ] ]\n line = \"\".join( line )\n outfile.write( line + \"\\n\" )\n outfile.write( \"\\n\" )\n outfile.write( \"\\n\" )", "def save(self, filename):\n\t\tnp.savetxt(filename, self.V)\n\t\treturn" ]
[ "0.6757693", "0.66369015", "0.6426294", "0.63140506", "0.626889", "0.6226187", "0.607385", "0.604287", "0.6018301", "0.6018301", "0.5969815", "0.5968377", "0.594419", "0.5942577", "0.58605295", "0.5848289", "0.5788712", "0.5758208", "0.5744678", "0.5741473", "0.5739878", "0.5521824", "0.55153966", "0.551026", "0.55054873", "0.5501141", "0.54371595", "0.54269534", "0.54199785", "0.5400446" ]
0.7530258
0
Call writeVTK for a range of times
def writeVTKSeries(self,prefix=None,step=1):
    if not prefix: prefix = self.prefix
    for i in range(0,self.N,step):
        fname = prefix + '_' + str(i) + '.vtk'
        self.writeVTK(fname,itime=i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeVTK(self,fname,itime=None,output_time=None):\n if output_time:\n itime = int(output_time / self.dt)\n if not itime:\n print 'Need to specify itime or output_time'\n return\n print 'Writing out time step',itime,': t=',self.t[itime]\n u = np.zeros((self.NY,1,self.NZ)); u[:,0,:] = np.flipud(self.field['u'][itime,:,:]).T\n v = np.zeros((self.NY,1,self.NZ)); v[:,0,:] = np.flipud(self.field['v'][itime,:,:]).T\n w = np.zeros((self.NY,1,self.NZ)); w[:,0,:] = np.flipud(self.field['w'][itime,:,:]).T\n VTKwriter.vtk_write_structured_points( open(fname,'wb'), #binary mode\n 1,self.NY,self.NZ,\n [u,v,w],\n datatype=['vector'],\n dx=1.0,dy=self.dy,dz=self.dz,\n dataname=['TurbSim_velocity'],\n origin=[0.,self.y[0],self.z[0]] )", "def writeVTK(self, fname,\n itime=None,\n output_time=None,\n scaled=True,\n stdout='overwrite'):\n if output_time:\n itime = int(output_time / self.dt)\n if itime is None:\n print('Need to specify itime or output_time')\n return\n if stdout=='overwrite':\n sys.stdout.write('\\rWriting time step {:d} : t= {:f}'.format(\n itime,self.t[itime]))\n else: #if stdout=='verbose':\n print('Writing out VTK for time step',itime,': t=',self.t[itime])\n\n # scale fluctuations\n up = np.zeros((1,self.NY,self.NZ)) # constant x plane (3D array for VTK output)\n wp = np.zeros((1,self.NY,self.NZ))\n vp = np.zeros((1,self.NY,self.NZ))\n up[0,:,:] = self.U[0,itime,:,:]\n vp[0,:,:] = self.U[1,itime,:,:]\n wp[0,:,:] = self.U[2,itime,:,:]\n if scaled:\n for iz in range(self.NZ):\n up[0,:,iz] *= self.scaling[0,iz]\n vp[0,:,iz] *= self.scaling[1,iz]\n wp[0,:,iz] *= self.scaling[2,iz]\n\n # calculate instantaneous velocity\n U = up.copy()\n V = vp.copy()\n W = wp.copy()\n if self.mean_flow_read:\n for iz in range(self.NZ):\n U[0,:,iz] += self.U_inlet[:,iz]\n V[0,:,iz] += self.V_inlet[:,iz]\n W[0,:,iz] += self.W_inlet[:,iz]\n\n # write out VTK\n vtk_write_structured_points(\n open(fname,'wb'), #binary mode\n {\n \"U\": np.stack((U,V,W)),\n \"u'\": np.stack((up,vp,wp)),\n },\n dx=1.0, dy=self.dy, dz=self.dz,\n origin=[0.,self.y[0],self.z[0]],\n indexorder='ijk',\n )", "def _writeVTKOutput(self):\n\n sigma = numpy.ones((self.numStations, 3), dtype=numpy.float64)\n sigma[:, 0] *= self.sigmaEast\n sigma[:, 1] *= self.sigmaNorth\n sigma[:, 2] *= self.sigmaUp\n\n vtkHead = \"# vtk DataFile Version 2.0\\n\" + \\\n \"Synthetic GPS stations\\n\" + \\\n \"ASCII\\n\" + \\\n \"DATASET POLYDATA\\n\" + \\\n \"POINTS \" + repr(self.numStations) + \" double\\n\"\n\n v = open(self.vtkOutputFile, 'w')\n v.write(vtkHead)\n numpy.savetxt(v, self.coords)\n\n numConnect = 2 * self.numStations\n connectHead = \"VERTICES %d %d\\n\" % (self.numStations, numConnect)\n v.write(connectHead)\n verts = numpy.arange(self.numStations, dtype=numpy.int64)\n sizes = numpy.ones_like(verts)\n outConnect = numpy.column_stack((sizes, verts))\n numpy.savetxt(v, outConnect, fmt=\"%d\")\n \n dispHead = \"POINT_DATA \" + repr(self.numStations) + \"\\n\" + \\\n \"VECTORS displacement double\\n\"\n v.write(dispHead)\n numpy.savetxt(v, self.dispNoise)\n\n sigHead = \"VECTORS uncertainty double\\n\"\n v.write(sigHead)\n numpy.savetxt(v, sigma)\n v.close()\n \n return", "def writeVTKSeries(self,\n outputdir='.',\n prefix='inflow',\n step=1,\n scaled=True,\n stdout='overwrite'):\n if not os.path.isdir(outputdir):\n print('Creating output dir :',outputdir)\n os.makedirs(outputdir)\n\n for i in range(0,self.N,step):\n fname = os.path.join(outputdir, f'{prefix:s}_{i:06d}.vtk')\n self.writeVTK(fname,itime=i,scaled=scaled,stdout=stdout)\n if 
stdout=='overwrite': sys.stdout.write('\\n')", "def writeVelocityPlot(self):\n name = \"velocity.vtk\"\n chargeFile = open(name,'w')\n chargeFile.write(\"%s\\n\"%(\"# vtk DataFile Version 2.0\"))\n chargeFile.write(\"%s\\n\"%(\"obtained via hydraulicmodule\"))\n chargeFile.write(\"%s\\n\"%(\"ASCII\"))\n chargeFile.write(\"%s\\n\"%(\"DATASET UNSTRUCTURED_GRID\"))\n chargeFile.write(\"%s %i %s\\n\"%(\"POINTS\",len(self.points),\"double\"))\n dim = self.mesh.getSpaceDimensions()\n if (dim==2): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n 0.))\n pass\n pass\n elif (dim==3): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n self.points[ind][2]))\n pass\n pass\n else:\n raise Exception(\" error in mesh dimension \") \n numberOfCells = self.mesh.getNumberOfCells()\n connectivity = self.mesh.getConnectivity()\n\n cellListSize = 0\n for i in range(0,numberOfCells): # gmsh meshes: type of elements\n gmshType = connectivity[i][1]\n if gmshType == 1: # 2-node line\n cellListSize += 3\n pass\n elif gmshType == 2: # 3-node triangles\n cellListSize += 4\n pass\n elif gmshType == 3: # 4-node quadrangles\n cellListSize += 5\n pass\n elif gmshType == 4: # 4-node tetrahedron\n cellListSize += 5\n pass\n elif gmshType == 5: # 8-node hexahedrons\n cellListSize += 9\n pass\n pass\n chargeFile.write(\"CELLS %i %i\\n\"%(numberOfCells,cellListSize))\n ind = 0\n for cell in connectivity:\n ind = cell[2]+3\n# print \" ctm dbg cell \",vtkTyp,ind,cell,\" perm \",permutation[ind],permutation[ind+1],permutation[ind+2],permutation[ind+3]\n # \n vtkTyp = _vtkGmsh(cell[1])\n if (vtkTyp==3): # 2-node line\n ind = cell[2]+3\n chargeFile.write(\"%i %i %i\\n\"%(\n 2,\\\n cell[ind]-1,\\\n cell[ind+1]-1)\n )\n pass\n \n elif (vtkTyp==5): # triangles\n chargeFile.write(\"%i %i %i %i\\n\"%(\n 3, \n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1)\n )\n pass\n elif (vtkTyp==9): # quadr\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==10): # tetra\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==12): # hexahedron\n chargeFile.write(\"%i %i %i %i %i %i %i %i %i\\n\"%(\n 8,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1,\\\n cell[ind+4]-1,\\\n cell[ind+5]-1,\\\n cell[ind+6]-1,\\\n cell[ind+7]-1)\n )\n pass\n pass\n chargeFile.write(\"%s %i\\n\"%(\"CELL_TYPES\",numberOfCells))\n#\n for i in range(0,numberOfCells):\n gmshType = connectivity[i][1]\n\n if (gmshType)==1:\n cellTyp = 3\n pass\n elif (gmshType)==2:\n cellTyp = 5\n pass\n elif (gmshType)==3:\n cellTyp = 9\n pass\n elif (gmshType)==4:\n cellTyp = 10\n pass\n elif (gmshType)==5:\n cellTyp = 12\n pass\n elif (gmshType)==6:\n cellTyp = 13\n pass\n elif gmshType == 7:\n cellTyp = 14\n pass\n else:\n raise Exception(\" check gmshtype \")\n chargeFile.write(\"%i\\n\"%(cellTyp))\n chargeFile.write(\"%s %d\\n\"%(\"POINT_DATA\",len(self.points)))\n chargeFile.write(\"%s\\n\"%(\"VECTORS vectors float\"))\n for velocityComponent in self.velocity:\n chargeFile.write(\" %e %e %e\\n \"%(velocityComponent[0], velocityComponent[1], velocityComponent[2]))\n chargeFile.write(\"%s\\n\"%(\"SCALARS charge double\"))\n chargeFile.write(\"%s\\n\"%(\"LOOKUP_TABLE default\"))\n#\n \n 
chargeDataFile=open(\"./\" + self.flowComponent.meshDirectoryName + \"/\" + \"HeVel.dat\",'r')\n line = chargeDataFile.readline()\n while \"Number Of Nodes\" not in line:\n line = chargeDataFile.readline()\n#line.split()\n nodesNumber = line.split()[-1]\n while \"Perm\" not in line:\n line = chargeDataFile.readline()\n#\n# We read the permutation\n#\n for i in range(int(nodesNumber)): chargeDataFile.readline()\n#\n# We read the charge\n#\n for i in range(int(nodesNumber)): chargeFile.write(\" %15.10e\\n \"%(float(chargeDataFile.readline())))", "def onevtkfile():\n basedir = '/home/amit/WorkSpace/UCLA/simulations/PhaseDiagram/RawData'\n with hp.File('VTKFile.h5', 'w') as onefile:\n allvtk = np.empty((600, 500, 3, 216), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n vtkfilepath = '{}/Run{}/VTKFile-{}.h5'.format(basedir, i, j+1)\n with hp.File(vtkfilepath, 'r') as vtkfile:\n for t in range(500):\n allvtk[j, t, i, :] = vtkfile['T{}/Points'.format(2*t)][:].ravel()\n onefile.create_dataset('Points', data=allvtk, chunks=(1, 50, 3, 216), \n compression='gzip', compression_opts=9)", "def office_generate_kernel_vtk(parser, args, params):\n parser.add_argument('--num_slices', type=int,\n help='Number of slices (processors)',\n metavar='', required=True)\n local_args = parser.parse_known_args(args)\n num_slices = local_args[0].num_slices\n\n control.generate_kernel_vtk(params, num_slices)", "def write_to_vtk(mesh, displacement=None, file_name=\"gridfile\"):\n cents = get_cell_centroids(mesh)\n dim = len(cents[0])\n \n if displacement is not None:\n cents+= displacement\n \n file_name = \"./\"+file_name\n \n write_function=None\n if dim==3:\n write_function = write_to_vtk3D\n if dim==2:\n write_function = write_to_vtk2D\n \n write_function(cents, displacement, file_name)\n\n\n pass", "def office_generate_model_vtk(parser, args, params):\n parser.add_argument('--num_slices', type=int,\n help='Number of slices (processors)',\n metavar='', required=True)\n local_args = parser.parse_known_args(args)\n num_slices = local_args[0].num_slices\n\n control.generate_model_vtk(params, num_slices)", "def plot_time_slices(self):\n U = self.r.u[:, 15:-15, :]\n T = range(U.shape[2])\n kwarglist = [dict(t=t,\n index=self.index,\n U=U,\n levels=self.levels,\n fname=self.time_slice_path(t))\n for t in T]\n util.parallel_process(plot_time_slice, kwarglist=kwarglist)", "def _write_sample_with_date(self, cube, i, key_list):\n for sample_slice in cube.slices_over('sample'):\n sample_id = int(sample_slice.coord('sample').points[0])\n\n var = self.input_data.get_value_label(\n InputType.VARIABLE)[i].encode('utf-8')\n self.header.append('{var}(sample {sample_id})'.format(\n sample_id=sample_id, var=var))\n self._write_time_cube(sample_slice, key_list)", "def viz_samples(data, trace, num_sweeps, K, viz_interval=3, figure_size=3, title_fontsize=20, marker_size=1.0, opacity=0.3, bound=20, colors=['#AA3377','#0077BB', '#EE7733', '#009988', '#BBBBBB', '#EE3377', '#DDCC77'], save_name=None):\n E_tau, E_mu, E_z = trace['E_tau'].cpu(), trace['E_mu'].cpu(), trace['E_z'].cpu()\n num_rows = len(data)\n num_cols = 2 + int((num_sweeps-1) / viz_interval)\n gs = gridspec.GridSpec(num_rows, num_cols)\n gs.update(left=0.0 , bottom=0.0, right=1.0, top=1.0, wspace=0, hspace=0)\n fig = plt.figure(figsize=(figure_size * num_cols, figure_size * num_rows))\n for row_ind in range(num_rows):\n ax = fig.add_subplot(gs[row_ind, 0])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=None) ## visualize raw dataset 
in the 1st column\n if row_ind == 0:\n ax.set_title('Data', fontsize=title_fontsize)\n# col_ind = 1\n for col_ind in range(num_cols-1):\n sweep = col_ind * viz_interval\n ax = fig.add_subplot(gs[row_ind, col_ind+1])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=(E_tau[sweep, row_ind], E_mu[sweep, row_ind], E_z[sweep, row_ind]))\n if row_ind == 0:\n if sweep == 0:\n ax.set_title('RWS', fontsize=title_fontsize)\n else:\n ax.set_title('sweep %d' % sweep, fontsize=title_fontsize)\n if save_name is not None:\n plt.savefig(save_name + '.svg', dpi=300)", "def vF3d_VTK(field,name,VTKformat): \n if VTKformat == 'vtu':\n vf3d_vtu(field,name)\n elif VTKformat == None:\n print 'Please select a VTK format'\n else:\n print 'The selected format has not been developed yet'\n return #nothing, since functions output the written VTK file", "def write_bc_vtk(self):\n print \"Creating boundary condition arrays\"\n obst_array = np.zeros(self.nnodes)\n obst_array[list(self.obst_list)] = 100.\n\n #print type(self.inlet_list)\n inlet_array = np.zeros(self.nnodes)\n inlet_array[list(self.inlet_list)] = 200.\n\n outlet_array = np.zeros(self.nnodes)\n outlet_array[list(self.outlet_list)] = 300.\n\n solid_array = np.zeros(self.nnodes)\n solid_array[list(self.solid_list)] = 500.\n \n dims = [int(self.Nx), int(self.Ny), int(self.Nz)]\n origin = [0., 0., 0.]\n dx = self.x[1] - self.x[0]\n spacing = [dx, dx, dx] #uniform lattice\n \n print \"Writing boundary conditions to VTK files\"\n writeVTK(inlet_array,'inlet','inlet.vtk',dims,origin,spacing)\n writeVTK(outlet_array,'outlet','outlet.vtk',dims,origin,spacing)\n writeVTK(obst_array,'obst','obst.vtk',dims,origin,spacing)\n writeVTK(solid_array,'solid','solid.vtk',dims,origin,spacing)", "def write_bc_vtk(self):\n print \"Creating boundary condition arrays\"\n obst_array = np.zeros(self.nnodes)\n obst_array[list(self.obst_list)] = 100.\n\n #print type(self.inlet_list)\n inlet_array = np.zeros(self.nnodes)\n inlet_array[list(self.inlet_list)] = 200.\n\n outlet_array = np.zeros(self.nnodes)\n outlet_array[list(self.outlet_list)] = 300.\n\n solid_array = np.zeros(self.nnodes)\n solid_array[list(self.solid_list)] = 500.\n \n dims = [int(self.Nx), int(self.Ny), int(self.Nz)]\n origin = [0., 0., 0.]\n dx = self.x[1] - self.x[0]\n spacing = [dx, dx, dx] #uniform lattice\n \n print \"Writing boundary conditions to VTK files\"\n writeVTK(inlet_array,'inlet','inlet.vtk',dims,origin,spacing)\n writeVTK(outlet_array,'outlet','outlet.vtk',dims,origin,spacing)\n writeVTK(obst_array,'obst','obst.vtk',dims,origin,spacing)\n writeVTK(solid_array,'solid','solid.vtk',dims,origin,spacing)", "def WriteVTK(self, filename=None, result=None, fmt=\"binary\", interpolation_degree=10, ProjectionFlags=None):\n\n self.__do_essential_memebers_exist__()\n\n if fmt == \"xml\":\n pass\n elif fmt == \"binary\":\n try:\n from pyevtk.hl import pointsToVTK, linesToVTK, gridToVTK, unstructuredGridToVTK\n from pyevtk.vtk import VtkVertex, VtkLine, VtkTriangle, VtkQuad, VtkTetra, VtkPyramid, VtkHexahedron\n except ImportError:\n raise ImportError(\"Could not import evtk. 
Install it using 'pip install pyevtk'\")\n else:\n raise ValueError(\"Writer format not understood\")\n\n elements = np.copy(self.elements)\n\n cellflag = None\n if self.element_type =='tri':\n cellflag = 5\n offset = 3\n if self.elements.shape[1]==6:\n cellflag = 22\n offset = 6\n elif self.element_type =='quad':\n cellflag = 9\n offset = 4\n if self.elements.shape[1]==8:\n cellflag = 23\n offset = 8\n if self.element_type =='tet':\n cellflag = 10\n offset = 4\n if self.elements.shape[1]==10:\n cellflag = 24\n offset = 10\n # CHANGE NUMBERING ORDER FOR PARAVIEW\n para_arange = [0,4,1,6,2,5,7,8,9,3]\n elements = elements[:,para_arange]\n elif self.element_type == 'hex':\n cellflag = 12\n offset = 8\n if self.elements.shape[1] == 20:\n cellflag = 25\n offset = 20\n elif self.element_type == 'line':\n cellflag = 3\n offset = 2\n\n if filename is None:\n warn('File name not specified. I am going to write one in the current directory')\n filename = os.path.join(PWD(__file__), \"output.vtu\")\n if \".vtu\" in filename and fmt == \"binary\":\n filename = filename.split('.')[0]\n if \".vtu\" not in filename and fmt == \"xml\":\n filename = filename + \".vtu\"\n\n\n if self.InferPolynomialDegree() > 1:\n try:\n from Florence.PostProcessing import PostProcess\n from Florence.VariationalPrinciple import DisplacementFormulation\n except ImportError:\n raise RuntimeError(\"Writing high order elements to VTK is not supported yet\")\n if result is not None and result.ndim > 1:\n raise NotImplementedError(\"Writing vector/tensor valued results to binary vtk not supported yet\")\n return\n else:\n if result is None:\n result = np.zeros_like(self.points)[:,:,None]\n if result.ndim == 1:\n result = result.reshape(result.shape[0],1,1)\n pp = PostProcess(3,3)\n pp.SetMesh(self)\n pp.SetSolution(result)\n pp.SetFormulation(DisplacementFormulation(self,compute_post_quadrature=False))\n pp.WriteVTK(filename,quantity=0,interpolation_degree=interpolation_degree, ProjectionFlags=ProjectionFlags)\n return\n\n\n if self.InferSpatialDimension() == 2:\n points = np.zeros((self.points.shape[0],3))\n points[:,:2] = self.points\n else:\n points = self.points\n\n if result is None:\n if fmt == \"xml\":\n write_vtu(Verts=self.points, Cells={cellflag:elements},fname=filename)\n elif fmt == \"binary\":\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag)\n else:\n if isinstance(result, np.ndarray):\n if result.ndim > 1:\n if result.size == result.shape[0]:\n result = result.flatten()\n\n if fmt == \"xml\":\n if result.ndim > 1:\n if result.shape[0] == self.nelem:\n write_vtu(Verts=self.points, Cells={cellflag:elements},\n cvdata={cellflag:result.ravel()},fname=filename)\n elif result.shape[0] == self.points.shape[0]:\n write_vtu(Verts=self.points, Cells={cellflag:elements},\n pvdata=result.ravel(),fname=filename)\n else:\n if result.shape[0] == self.nelem:\n write_vtu(Verts=self.points, Cells={cellflag:elements},cdata=result,fname=filename)\n elif result.shape[0] == self.points.shape[0]:\n write_vtu(Verts=self.points, Cells={cellflag:elements},pdata=result,fname=filename)\n elif fmt == \"binary\":\n if result.ndim <= 1:\n if result.shape[0] == self.nelem:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), 
np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n cellData={'result':np.ascontiguousarray(result.ravel())})\n elif result.shape[0] == self.points.shape[0]:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n pointData={'result':np.ascontiguousarray(result.ravel())})\n else:\n if result.shape[1] == 3:\n result_data = {'result':tuple(( np.ascontiguousarray(result[:,0]), np.ascontiguousarray(result[:,1]), np.ascontiguousarray(result[:,2]) ))}\n elif result.shape[1] == 2:\n result_data = {'result':tuple(( np.ascontiguousarray(result[:,0]), np.ascontiguousarray(result[:,1]) ))}\n else:\n raise NotImplementedError(\"Writing vector/tensor valued results > 3 to binary vtk not supported yet\")\n\n if result.shape[0] == self.nelem:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n cellData=result_data)\n elif result.shape[0] == self.points.shape[0]:\n unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]), np.ascontiguousarray(elements.ravel()),\n np.arange(0,offset*self.nelem,offset)+offset, np.ones(self.nelem)*cellflag,\n pointData=result_data)", "def setSlicesPerTimepoint(self, n):\n\t\tassert n > 0, \"Slices per timepoint needs to be greater than 0\"\n\t\tprint \"Setting slices per timepoint to \", n\n\t\tself.slicesPerTimepoint = n\n\t\tself.z = n\n\t\tself.readers = []", "def _write_times(parameters):\n data = parameters[\"times\"]\n data = data if numpy.ndim(data) else [data]\n\n # Formats\n fmt = block_to_format[\"TIMES\"]\n fmt1 = str2format(fmt[1])\n fmt2 = str2format(fmt[2])\n\n # Record 1\n out = write_record([len(data)], fmt1)\n\n # Record 2\n out += write_record(data, fmt2, multi=True)\n\n return out", "def saveVelocityAndPressureVTK_binary(pressure,u,v,w,x,y,z,filename,dims):\n numEl_size = u.size; numEl = np.prod(numEl_size);\n # open the file and write the ASCII header:\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('Binary\\n\\n')\n file.write('DATASET STRUCTURED_GRID\\n')\n file.write('DIMENSIONS %d %d %d \\n'%(dims[0],dims[1],dims[2]))\n file.write('POINTS %d float\\n'%(numEl))\n file.close()\n \n # append binary x,y,z data\n file = open(filename,'ab')\n for i in range(len(x)): # there really needs to be a better way.\n pt = [x[i],y[i],z[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n \n file.close()\n \n # append an ASCII sub header\n file = open(filename,'a')\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('VECTORS velocity_vectors float\\n')\n file.close()\n \n # append binary u,v,w data\n file = open(filename,'ab')\n for i in range(len(u)):\n pt = [u[i],v[i],w[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n file.close()\n \n # append ASCII sub header for scalar velocity magnitude data\n file = open(filename,'a')\n file.write('SCALARS VelocityMagnitude float\\n')\n file.write('LOOKUP_TABLE default\\n')\n \n file.close()\n \n file = 
open(filename,'ab')\n v_mag = np.sqrt(u**2+v**2+w**2)\n file = open(filename,'ab')\n p_buf = array('f',v_mag); p_buf.byteswap()\n file.write(p_buf)\n file.close()\n \n \n # append another ASCII sub header for the scalar pressure data\n file = open(filename,'a')\n file.write('SCALARS Pressure float\\n')\n file.write('LOOKUP_TABLE default\\n')\n file.close()\n \n # append binary pressure data\n file = open(filename,'ab')\n p_buf = array('f',pressure); p_buf.byteswap()\n file.write(p_buf)\n file.close()", "def loadVTK(self, filename, folder):\n import vtk\n print('Extracting Dataset')\n start = time.time()\n reader = vtk.vtkPolyDataReader()\n reader.SetFileName(folder + filename)\n reader.Update()\n polydata = reader.GetOutput()\n n = polydata.GetNumberOfPoints()\n self.data = np.array([0, 0, 0])\n\n for i in range(0, n, 1):\n vraw = list(polydata.GetPoint(i))\n inRange = np.all([vraw[0] > self.ranges[0,0], vraw[0] < self.ranges[0,1], vraw[1] > self.ranges[1,0], vraw[1] < self.ranges[1,1], vraw[2] > self.ranges[2,0], vraw[2] < self.ranges[2,1]])\n if inRange:\n self.data = np.vstack((self.data, np.array(vraw)))\n if i % 50000 == 0:\n print(' Out of the ' + str(n) + ' particles in the dataset, ' + str(i) + ' (' + str(round(i*100/n, 3)) + ' %) have been processed, and ' + str(len(self.data) - 1) + ' have been stored.')\n\n self.data = self.data[1:, :]\n rangeStr = '_x[' + str(self.ranges[0,0]) + ',' + str(self.ranges[0,1]) + ']_y[' + str(self.ranges[1,0]) + ',' + str(self.ranges[1,1]) + ']_z[' + str(self.ranges[1,0]) + ',' + str(self.ranges[1,1]) + '].npy'\n np.save(folder + 'VoronoiData' + rangeStr, self.data)\n print('Elapsed Time: ' + str(round(time.time() - start, 3)))", "def stamp_collection(d_data='',**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n # Because some of the 25Mpc galaxies are HUGE\n if p.gal_index == 'all':\n gal_indices = np.arange(GR.N_gal)\n gal_indices = gal_indices[GR.R_max < 200.]\n print(len(gal_indices))\n else: \n gal_indices = p.gal_index\n\n\n print('TEST!!')\n gal_indices = [91,93,124,117,121,130,135,136,139,143,146,147,152,154,164,166,167,168,171,173,174,175,186,189,192,203,211,213,214,226,222,223,226,228,233,236]\n\n N_stamps_1 = 8\n N_stamps_2 = 6\n\n #zoom = 1.5\n\n counter = N_stamps_1 * N_stamps_2\n fignum = 0\n plotnum = 0\n\n for gal_index in gal_indices:\n\n if counter == N_stamps_1 * N_stamps_2:\n print('Creating new figure')\n fig, axes = plt.subplots(figsize=(20,20))\n # fig,(axs,cax) = plt.subplots(ncols=2,figsize = (20,30),\\\n # gridspec_kw={\"width_ratios\":[1, 0.05]})\n gs1 = mpl.gridspec.GridSpec(N_stamps_1, N_stamps_2,left=0.05,top=0.95,bottom=0.05,right=0.82)\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n #simgas = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n simgas = gal_ob.particle_data.get_dataframe('simgas',d_data=d_data)\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop,pix_size_kpc=p.pix_size_kpc,scale=1.5)\n\n # Plot\n ax1 = plt.subplot(gs1[N_stamps_1*N_stamps_2 - counter])\n ax1.set_facecolor(\"black\")\n Rmax = max_scale/2\n # ax1 = axs[5*8 - counter]\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n Rmax = p.R_max\n ax1.set_xlim([-Rmax,Rmax])\n ax1.set_ylim([-Rmax,Rmax])\n 
ax1.text(0.05,0.05,'G%i' % gal_index,\\\n fontsize=14,transform=ax1.transAxes,color='white')\n if p.prop == 'm':\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.75,'SFR=%.2eM$_{\\odot}$/yr' % GR.SFR[gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.65,'# gas particles: %i' % (len(simgas)),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n ax1.set_xticklabels([])\n ax1.set_yticklabels([])\n ax1.set_aspect('equal')\n\n counter -= 1\n plotnum += 1\n\n print(gal_index, counter)\n if counter == 0 or gal_index == gal_indices[-1]:\n gs1.update(wspace=0.0, hspace=0.0)\n axes.set_xlabel('x [kpc]'); axes.set_ylabel('y [kpc]')\n cbar_ax = fig.add_axes([0.85, 0.06, 0.02, 0.85])\n cbar = fig.colorbar(im, cax=cbar_ax)\n cbar.set_label(label=lab,size=20)\n cbar.ax.tick_params(labelsize=14)\n print('Saving in ' + p.d_plot + 'sim_data/%s%s_map_%s_%s_gals_%i.png' % (p.sim_name,p.sim_run,p.prop,p.z1,fignum))\n # plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig(p.d_plot + 'sim_data/%s%s_map_%s_%s_gals_%i.png' % (p.sim_name,p.sim_run,p.prop,p.z1,fignum), format='png', dpi=250, facecolor='w')\n counter = N_stamps_1 * N_stamps_2\n fignum += 1\n plt.close('all')", "def plot_violins(times, func_names, output_file):\n fig, axes = plt.subplots(1, len(func_names))\n for name, func_time, ax in zip(func_names, times, axes):\n sns.violinplot(y=func_time, ax=ax)\n ax.set_ylabel('Time (ms)')\n ax.set_title(name)\n fig.tight_layout()\n fig.savefig(output_file)", "def slice_trajectory(**kwargs):\n\tglobal gmxpaths\n\tif gmxpaths==None: gmxpaths = get_gmx_paths()\n\tcall = bash\n\t#---process kwargs\n\tstart,end,skip,sequence = [kwargs[k] for k in 'start end skip sequence'.split()]\n\ttpr_keyfinder,traj_keyfinder = kwargs['tpr_keyfinder'],kwargs['traj_keyfinder']\n\toutkey = kwargs['outkey']\n\tpostdir = kwargs['postdir']\n\toutput_format = kwargs.get('output_format','xtc')\n\tpbc = kwargs.get('pbc',None)\n\tgroup_fn = kwargs.get('group_fn',None)\n\n\t#---commands to create sub-slices\n\tsources = infer_parts_to_slice(start,end,skip,sequence)\n\tsn = sources[0][0][0]\n\tif sn=='membrane-v563':\n\t\tsources = infer_parts_to_slice_legacy(start,end,skip,sequence)\n\tgroup_flag = '' if not group_fn else ' -n '+group_fn\n\tpbc_flag = '' if not pbc else ' -pbc %s'%pbc\n\tcmdlist = []\n\tfor num,source in enumerate(sources):\n\t\tkeys,t0 = source\n\t\tsn = keys[0]\n\t\t#---get tpr exist use the previous one (or fail on first source)\n\t\ttry: \n\t\t\ttpr = tpr_keyfinder(*keys,strict=False)\n\t\texcept: \n\t\t\timport ipdb;ipdb.set_trace()\n\t\t\traise Exception('development error. could not locate a TPR: %s'%kwargs)\n\t\t#---assume cursor points to the trajectory we want\n\t\ttry: \n\t\t\ttraj = traj_keyfinder(*keys)\n\t\texcept Exception as e: \n\t\t\traise Exception('could not locate trajectory for %s,%s,%s'%keys+': %s'%e)\n\t\toutfile = 'trjconv%d.%s'%(num,output_format)\n\t\t\"\"\"\n\t\tnote on timestamps: if you ask for time beyond the end of a simulation, the slicer will fail with\n\t\tblank outputs from `gmx trjconv`. in one misadventure, the author misattributed this to problems\n\t\twith the interval of the samples, since the dt flag causes trjconv to only save frames with times\n\t\twhich are zero modulo dt, and copied the begin flag to t0 to fail through the problem silently. 
\n\t\ta better alternative is to treat trjconv failures more seriously and check the time stamps with\n\t\t`make look times`. the slicer is designed to ignore problems of jitter. if a new XTC starts on\n\t\ta non-even or non-integer time, the slicer should continue as normal and rely on dt to find the next\n\t\tvalid time. ... ???\n\t\t\"\"\"\n\t\ttail = ' -b %d -e %d -dt %d -s %s -f %s -o %s%s%s'%(\n\t\t\tt0 if t0>start else start,end,skip,tpr,traj,\n\t\t\toutfile,group_flag,pbc_flag)\n\t\tcmdlist.append((outfile,gmxpaths['trjconv']+tail))\n\n\t#---make a GRO file of the first frame for reference\n\tkeys,t0 = sources[0]\n\tsn,sub,fn = keys\n\ttraj = traj_keyfinder(*keys)\n\ttail = ' -dump %d -s %s -f %s -o %s.gro%s'%(start,tpr,traj,outkey,group_flag)\n\tif pbc != None: tail = tail + ' -pbc %s'%pbc\n\tbash(gmxpaths['trjconv']+tail,cwd=postdir,inpipe='0\\n'.encode())\n\n\t#---convert relevant trajectories\n\tstart = time.time()\n\tfor ii,(outfile,cmd) in enumerate(cmdlist):\n\t\tstatus('slicing trajectory',i=ii,looplen=len(cmdlist),start=start,tag='SLICE')\n\t\tbash(cmd,cwd=postdir,inpipe='0\\n'.encode())\n\t\n\t#---concatenate remaining steps with no errors\n\tvalid_parts = range(len(cmdlist))\n\tbash(gmxpaths['trjcat']+' -o %s.%s -f '%(outkey,output_format)+\n\t\t' '.join(list(zip(*cmdlist))[0]),cwd=postdir)\n\n\t#---delete extraneous files\n\t#---! consider using a temporary directory although it's nice to have things onsite\n\tfor outfile in list(zip(*cmdlist))[0]:\n\t\tos.remove(postdir+'/%s'%outfile)", "def plot_time_slice(index, t, U, fname, levels):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n title = 'Time slice {r} {t:0>4d}'.format(r=index, t=t)\n ax.set_title(title)\n U = U[:, :, t]\n contourf = ax.contourf(U, levels)\n fig.colorbar(contourf)\n util.makedirs_p(os.path.dirname(fname))\n fig.savefig(fname)", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def write_tcv(self):\n suffix = '_'+str(self.shot)+'_'+str(int(self.t*1e3))\n self.write_input(suffix=suffix)", "def writeVTKBlock(self,\n fname='turbulence_box.vtk',\n outputdir=None,\n step=1,\n scaled=True):\n if outputdir is None:\n outputdir = '.'\n elif not os.path.isdir(outputdir):\n print('Creating output dir :',outputdir)\n os.makedirs(outputdir)\n\n fname = os.path.join(outputdir,fname)\n print('Writing VTK block',fname)\n\n if self.Umean is not None:\n Umean = self.Umean\n else:\n Umean = 1.0\n\n # scale fluctuations\n 
Nt = int(self.N / step)\n up = np.zeros((Nt,self.NY,self.NZ))\n vp = np.zeros((Nt,self.NY,self.NZ))\n wp = np.zeros((Nt,self.NY,self.NZ))\n up[:,:,:] = self.U[0,:Nt*step:step,:,:]\n vp[:,:,:] = self.U[1,:Nt*step:step,:,:]\n wp[:,:,:] = self.U[2,:Nt*step:step,:,:]\n if scaled:\n for iz in range(self.NZ):\n up[:,:,iz] *= self.scaling[0,iz]\n vp[:,:,iz] *= self.scaling[1,iz]\n wp[:,:,iz] *= self.scaling[2,iz]\n\n # write out VTK\n vtk_write_structured_points( open(fname,'wb'), #binary mode\n Nt, self.NY, self.NZ,\n [ up,vp,wp ],\n datatype=['vector'],\n dx=step*Umean*self.dt, dy=self.dy, dz=self.dz,\n dataname=['u\\''],\n origin=[0.,self.y[0],self.z[0]],\n indexorder='ijk')", "def export_grid(self, vtk_fname='GRID', toVTK=True, toNumpy=True):\r\n print('Exporting grids')\r\n tID = 0\r\n # Start by exporting input properties (from read_prop() or read_ext_prop())\r\n # In VTK files, these props will only be visible at only the first timestep\r\n dp = []\r\n propIds = []\r\n for prop in self.out_props:\r\n if type(self.out_props[prop]) is not dict:\r\n data = np.array(self.out_props[prop])\r\n # Save to Numpy\r\n if toNumpy:\r\n self.export_prop(data, prop, tID)\r\n # Add property data to vts structured grid\r\n if toVTK:\r\n propIds = self._prep_vtk(data, prop, propIds)\r\n self._check_out('vtk')\r\n else:\r\n dp.append(prop)\r\n\r\n # Export time-series output properties (from read_out_props())\r\n for t in self.times:\r\n for prop in self.out_props:\r\n if prop in dp:\r\n data = np.array(self.out_props[prop][t], order='F')\r\n # Save to Numpy\r\n if toNumpy:\r\n # self.export_prop(data, prop, tID)\r\n self.export_prop(data, prop, t)\r\n # Add property data to vts structured grid\r\n if toVTK:\r\n propIds = self._prep_vtk(data, prop, propIds)\r\n # Save to VTK\r\n if toVTK:\r\n if tID == 0:\r\n self._check_out('vtk')\r\n # self.exportVTK(os.path.join(self.out_dir, 'vtk', vtk_fname + str(tID)))\r\n self.exportVTK(os.path.join(self.out_dir, 'vtk', vtk_fname + str(t)))\r\n for id in propIds:\r\n self.Grid.GetCellData().RemoveArray(id)\r\n tID += 1\r\n propIds = []", "def run(self):\r\n #print 'WriteFITS.run'\r\n\r\n # construct the name of the file\r\n runtime = self.previous_results['runtime']\r\n fitsname = '%s.fits' % runtime\r\n\r\n # get list of instrument observations\r\n observe = self.previous_results['observe']\r\n obs_timeline = observe['observed_timeline']\r\n observed_times = obs_timeline.keys()\r\n observed_times.sort()\r\n\r\n # construct lists of the values to be stored in each Table column\r\n for t in observed_times:\r\n timelist = []\r\n smec_position = []\r\n smec_nominal_position = []\r\n flag = []\r\n data = []\r\n pointing1_x = []\r\n pointing1_y = []\r\n pointing2_x = []\r\n pointing2_y = []\r\n\r\n config = obs_timeline[t]\r\n\r\n timelist.append(config.time)\r\n smec_position.append(config.smec_position)\r\n smec_nominal_position.append(config.smec_nominal_position)\r\n flag.append(config.flag)\r\n data.append(config.data)\r\n pointing1_x.append(config.pointing1_x)\r\n pointing1_y.append(config.pointing1_y)\r\n pointing2_x.append(config.pointing2_x)\r\n pointing2_y.append(config.pointing2_y)\r\n\r\n # create a Header object and primary HDU - this just contains\r\n # some very basic, general information\r\n prihdr = pyfits.Header()\r\n prihdr['COMMENT'] = 'This FITS file was created by pyfiins at %s' % \\\r\n runtime\r\n prihdu = pyfits.PrimaryHDU(header=prihdr)\r\n\r\n # create list of Header Data Unit objects, include the primary HDU\r\n hdulist = 
pyfits.HDUList([prihdu])\r\n\r\n # create an HDU to contain the Table and append it to the list\r\n hdulist.append(pyfits.BinTableHDU.from_columns(\r\n pyfits.ColDefs([\r\n pyfits.Column(name='Time', format='D',\r\n array=np.array(timelist)),\r\n pyfits.Column(name='SMEC Position', format='E',\r\n array=np.array(smec_position)),\r\n pyfits.Column(name='SMEC Nominal Position', format='E',\r\n array=np.array(smec_nominal_position)),\r\n pyfits.Column(name='Flag', format='L',\r\n array=np.array(flag)),\r\n pyfits.Column(name='Data', format='E',\r\n array=np.array(data)),\r\n pyfits.Column(name='Pointing1 X', format='E',\r\n array=np.array(pointing1_x)),\r\n pyfits.Column(name='Pointing1 Y', format='E',\r\n array=np.array(pointing1_y)),\r\n pyfits.Column(name='Pointing2 X', format='E',\r\n array=np.array(pointing2_x)),\r\n pyfits.Column(name='Pointing2 Y', format='E',\r\n array=np.array(pointing2_y))])))\r\n\r\n # write the HDU list to a file\r\n hdulist.writeto(fitsname, clobber=True)\r\n self.result['fitsfile'] = fitsname\r\n\r\n return self.result", "def run(self):\n\n # Setup hdf5 file and datasets\n self.vw_f = h5py.File(self.name,'w')\n self.vw,self.vwts = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n vw = self.vw_f.create_dataset('mov{}'.format(i), (self.hdf_resize, y, x), maxshape=(None, y, x), dtype='uint8', compression='lzf') \n vwts = self.vw_f.create_dataset('ts{}'.format(i), (self.hdf_resize,2), maxshape=(None,2), dtype=np.float64, compression='lzf')\n self.vw.append(vw)\n self.vwts.append(vwts)\n \n # Counters and buffers\n _sav_idx = [0]*self.n_cams # index within hdf5 dataset\n _buf_idx = [0]*self.n_cams # index of in-memory buffer that is periodicially dumped to hdf5 dataset\n _saving_buf,_saving_ts_buf = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n sb = np.empty((self.buffer_size,y,x), dtype=np.uint8)\n stb = np.empty((self.buffer_size,2), dtype=np.float64)\n _saving_buf.append(sb)\n _saving_ts_buf.append(stb)\n\n cams_running = [True for i in range(self.n_cams)]\n # Main loop\n while any(cams_running):\n # For all datasets: if there's not enough room to dump another buffer's worth into dataset, extend it\n # Then read new frames, and save/query as desired\n for di in range(self.n_cams):\n if not cams_running[di]:\n continue\n \n if self.vw[di].shape[0]-_sav_idx[di] <= self.buffer_size:\n assert self.vw[di].shape[0] == self.vwts[di].shape[0], 'Frame and timestamp dataset lengths are mismatched.'\n self.vw[di].resize((self.vw[di].shape[0]+self.hdf_resize, self.vw[di].shape[1], self.vw[di].shape[2]))\n self.vwts[di].resize((self.vwts[di].shape[0]+self.hdf_resize,self.vwts[di].shape[1]))\n \n # Get new frames from buffer, breaking out if empty and kill flag has been raised\n ts=temp=bsave=None\n try:\n ts,temp,bsave = self.frame_buffer[di].get(block=False)\n except Queue.Empty:\n if self.kill_flag.value:\n cams_running[di] = False\n continue\n\n if self.kill_flag.value==True:\n logging.info('Final flush for camera {}: {} frames remain.'.format(di, self.frame_buffer[di].qsize()))\n \n if di==self.query_idx and self.query_flag.value:\n self.query_queue[:] = temp.copy()\n self.query_queue_ts.value = ts[1]\n self.query_flag.value = False\n \n if bsave: # flag that this frame was added to queue during a saving period\n\n # add new data to in-memory buffer\n x,y = self.resolution[di]\n _saving_buf[di][_buf_idx[di]] = temp.reshape([y,x])\n _saving_ts_buf[di][_buf_idx[di]] = ts\n _buf_idx[di] += 1\n # if necessary, flush out buffer to hdf 
dataset\n if (self.flushing.value and _buf_idx[di]>=self.min_flush) or _buf_idx[di] >= self.buffer_size:\n if _buf_idx[di] >= self.buffer_size:\n logging.warning('Dumping camera b/c reached max buffer (buffer={}, current idx={})'.format(self.buffer_size, _buf_idx[di]))\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n _buf_idx[di] = 0\n\n # final flush:\n for di in range(self.n_cams):\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di]] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n # cut off all unused allocated space \n self.vw[di].resize([_sav_idx[di],self.vw[di].shape[1],self.vw[di].shape[2]])\n self.vwts[di].resize([_sav_idx[di],2])\n\n self.vw_f.close()\n self.saving_complete.value = True" ]
[ "0.67234313", "0.65439826", "0.61956275", "0.6171551", "0.584729", "0.58062536", "0.5772773", "0.5763361", "0.5762426", "0.56863654", "0.5424772", "0.5380682", "0.5364221", "0.53386146", "0.53386146", "0.5328883", "0.5259615", "0.5244934", "0.52399683", "0.5105576", "0.50775194", "0.5071998", "0.5070449", "0.5068268", "0.5055085", "0.5053707", "0.50417334", "0.50278413", "0.5023351", "0.49886754" ]
0.6567778
1
Creates a minimal configuration for the user.
def create_default_user_config(server, port, user, api_key, whitelist_tags=[], ignore_proxy=True, verify_ssl=False): config = {} config_path = DEFAULT_CONFIG_PATH config['default'] = {'server': server, 'port': port, 'user': user, 'api_key': api_key, 'whitelist_tags': whitelist_tags, 'ignore_proxy': ignore_proxy, 'verify_ssl': verify_ssl} with open(config_path, 'w') as configfile: #config.write(configfile) yaml.dump(config, configfile, Dumper=Dumper) logging.info("Wrote user configuration to: {}".format(config_path)) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def create_empty_config_file():\n config = {\n \"config\": [\n {\n \"site\": {\n \"username\": \"\",\n \"name\": \"\",\n \"ip_address\": \"\",\n \"password\": \"\",\n \"local\": \"\",\n \"use_https\": \"\"\n }\n }\n ]\n }\n return config", "def create_user_configuration(self):\n\n # Ask before touching things that we do not have to!\n if self.test.user_conf_dir_exists():\n if self.test.user_configuration_seems_complete():\n reply = question(_(\"\"\"User configuration already exists.\nDo you want to rewrite it with a new one?\"\"\"), False)\n if not reply:\n report(_(\"Keeping configuration intact and continuing with settings.\"))\n return\n else:\n self.remove_user_configuration()\n else:\n reply = question(_(\"\"\"User configuration already exists, but it seems to be incomplete.\nDo you want to keep it?\"\"\"), False)\n if not reply:\n self.remove_user_configuration()\n else:\n report(_(\"Keeping configuration intact and aborting.\"))\n return\n # Copy the original intact configuration files\n # creating a conf/ subdirectory\n config_root = self.test.user_conf_dir()\n shutil.copytree(buildconfig.SPD_CONF_ORIG_PATH, config_root)\n # Ensure the files are writeable when copying from immutable directory.\n umask = os.umask(0)\n os.umask(umask)\n os.chmod(self.test.user_conf_dir(), 0o755 & ~umask)\n for root, dirs, files in os.walk(self.test.user_conf_dir()):\n for d in dirs:\n os.chmod(os.path.join(root, d), 0o755 & ~umask)\n for f in files:\n os.chmod(os.path.join(root, f), 0o644 & ~umask)\n\n report(_(\"User configuration created in %s\" % self.test.user_conf_dir()))", "def config_skeleton():\n config = Config()\n config.set_to_default()\n config.save()", "def create_default_config():\n import codecs\n config = ConfigParser.SafeConfigParser()\n config.readfp(StringIO(DEFAULT_CONFIG))\n\n # Load user settings\n filename = get_user_config_filename()\n if not os.path.exists(filename):\n from wizard import setup_wizard\n setup_wizard(config)\n else:\n try:\n fi = codecs.open(filename, 'r', encoding='utf-8')\n config.readfp(fi)\n finally:\n fi.close()\n return config", "def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()", "def createConfig():\n\twith open(configPath, 'w', encoding='utf-8') as file:\n\t\tjson.dump(default_config, file, indent=3)", "def __create_default_config(self):\n if not os.path.exists(self.__configfile):\n path=os.path.dirname(self.__configfile)\n try:\n os.makedirs(path)\n except:\n pass\n if os.path.exists(path):\n self.save(defaults=True)", "def user_config():\n user_config = copy.deepcopy(config)\n user_config.pop(\"metadata\")\n user_config.pop(\"version\")\n user_config.pop(\"refers\")\n user_config.pop(\"pool_size\")\n return user_config", "def config_user(tmp_path_factory):\n path = tmp_path_factory.mktemp('recipe-test')\n filename = write_config_user_file(path)\n # The fixture scope is set to module to avoid very slow\n # test runs, as the following line also reads the CMOR tables\n cfg = esmvalcore._config.read_config_user_file(filename, 'recipe_test')\n cfg['synda_download'] = False\n cfg['auxiliary_data_dir'] = str(path / 'auxiliary_data_dir')\n cfg['check_level'] = esmvalcore.cmor.check.CheckLevels['DEFAULT']\n return cfg", "def init_config():\n global udata\n 
udata = UserConfig()", "def default_user(self):\n self.user = self.create_user(create_token=True)\n return", "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }", "def define_user_config(self) -> None:\n self.add_standard_metadata('infiles')\n\n self.add_custom_metadata(name='key_cols',\n short_name='k',\n required=True,\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='compare_cols',\n short_name='c',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='ignore_cols',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='col_names',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='variables',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='already_sorted',\n action='store_const',\n const=True,\n default=False,\n type=bool)\n self.add_custom_metadata(name='already_uniq',\n action='store_const',\n const=True,\n default=False,\n type=bool)\n self.add_custom_metadata(name='temp_dir',\n default=None,\n type=str)\n self.add_custom_metadata(name='out_dir',\n default=None,\n type=str)\n self.add_custom_metadata(name='assignments',\n default=[],\n type=list)\n\n self.add_standard_metadata('verbosity')\n self.add_all_config_configs()\n self.add_all_csv_configs()\n self.add_all_help_configs()", "def prepare_config(config: dict) -> dict:\n config.setdefault('password', None)\n config.setdefault('private_key', None)\n config.setdefault('private_key_pass', None)\n config.setdefault('host_key', None)\n config.setdefault('dirs', ['.'])\n\n return config", "def test_create_experiment_new_full_config(self, user_config):\n with OrionState() as cfg:\n experiment = create_experiment(**user_config, storage=cfg.storage_config)\n\n exp_config = experiment.configuration\n\n assert exp_config[\"space\"] == config[\"space\"]\n assert exp_config[\"max_trials\"] == config[\"max_trials\"]\n assert exp_config[\"max_broken\"] == config[\"max_broken\"]\n assert exp_config[\"working_dir\"] == config[\"working_dir\"]\n assert exp_config[\"algorithm\"] == config[\"algorithm\"]", "def create_default_settings():\n from flaskbb.fixtures.settings import fixture\n create_settings_from_fixture(fixture)", "def get_user_config():\n\n user_config_file = os.path.join(\n os.path.expanduser('~'),\n 'princeton-iot-inspector',\n 'iot_inspector_config.json'\n )\n\n try:\n with open(user_config_file) as fp:\n return json.load(fp)\n\n except Exception:\n pass\n\n while True:\n user_key = requests.get(server_config.NEW_USER_URL).text.strip()\n\n # Make sure we're not getting server's error messages\n if len(user_key) == 32:\n break\n\n time.sleep(1)\n\n user_key = user_key.replace('-', '')\n secret_salt = str(uuid.uuid4())\n\n with open(user_config_file, 'w') as fp:\n config_dict = {\n 'user_key': user_key,\n 'secret_salt': secret_salt\n }\n json.dump(config_dict, fp)\n\n return config_dict", "def post(self):\n data = entity_parser.parse_args()\n configuration = g.user.get_api().create_configuration(data['name'])\n result = configuration.to_json()\n return result, 201", "def test_construct_3_default_bootsraps(self):\n configerus.new_config()", "def prepare_config(config: dict) -> dict:\n config.setdefault('password', None)\n config.setdefault('private_key', None)\n 
config.setdefault('private_key_pass', None)\n config.setdefault('to', None)\n\n return config", "def initCreate(self , initialconfig):\n return", "def configuration():", "def create_user(self):\n # TODO-ROB: This is used ONLY when the user registers in flask\n # TODO-ROB: Create the cookiecutter.json file\n # extra_context overrides user and default configs\n cookiecutter(self.user_cookie, no_input=True, extra_context={\"user_name\": self.user}, output_dir=self.users)", "def init_default_users():\n from flask import current_app as app\n with app.app_context():\n notion_uname = app.config.get(\"NOTION_CRONJOB_USERNAME\")\n notion_passwd = app.config.get(\"NOTION_CRONJOB_PASSWORD\")\n\n if notion_uname and notion_passwd:\n try:\n User.createOne(\n username=notion_uname,\n password=notion_passwd\n )\n except NotUniqueError:\n app.logger.info(\"Notion Job User already exists!\")\n except Exception as err:\n app.logger.error(\"Notion Job User was not created!\", err)\n else:\n app.logger.info(\"Created Notion Job User Successfully!\")", "def create_default_config(self, parser):\n parser.add_section('irc')\n parser.set('irc', 'channels', '')\n \n # create the full path, and the file\n try:\n os.makedirs(self.config_dir_path, mode=0700)\n except OSError:\n pass\n file_resource = open(self.config_file_path, 'w')\n parser.write(file_resource)", "def generateDefaultConfig(self):\n\n\t\t# Open config.ini in write mode\n\t\tf = open(self.fileName, \"w\")\n\n\t\t# Set keys to config object\n\t\tself.config.add_section(\"db\")\n\t\tself.config.set(\"db\", \"host\", \"localhost\")\n\t\tself.config.set(\"db\", \"username\", \"root\")\n\t\tself.config.set(\"db\", \"password\", \"\")\n\t\tself.config.set(\"db\", \"database\", \"ripple\")\n\t\tself.config.set(\"db\", \"pingtime\", \"600\")\n\n\t\tself.config.add_section(\"server\")\n\t\tself.config.set(\"server\", \"server\", \"tornado\")\n\t\tself.config.set(\"server\", \"host\", \"0.0.0.0\")\n\t\tself.config.set(\"server\", \"port\", \"5001\")\n\t\tself.config.set(\"server\", \"localizeusers\", \"1\")\n\t\tself.config.set(\"server\", \"outputpackets\", \"0\")\n\t\tself.config.set(\"server\", \"outputrequesttime\", \"0\")\n\t\tself.config.set(\"server\", \"timeoutlooptime\", \"100\")\n\t\tself.config.set(\"server\", \"timeouttime\", \"100\")\n\n\t\tself.config.add_section(\"flask\")\n\t\tself.config.set(\"flask\", \"threaded\", \"1\")\n\t\tself.config.set(\"flask\", \"debug\", \"0\")\n\t\tself.config.set(\"flask\", \"logger\", \"0\")\n\n\t\tself.config.add_section(\"ci\")\n\t\tself.config.set(\"ci\", \"key\", \"changeme\")\n\n\t\t# Write ini to file and close\n\t\tself.config.write(f)\n\t\tf.close()", "def bootstrap_default():\n\treturn default_configuration", "def enable(self):\n logging.debug(\"Enabling autologin for this user...\")\n if os.path.exists(self.CONFIG_FILENAME):\n for backup_filename in self.generate_backup_filename():\n if not os.path.exists(backup_filename):\n shutil.copyfile(self.CONFIG_FILENAME, backup_filename)\n shutil.copystat(self.CONFIG_FILENAME, backup_filename)\n break\n\n with open(self.CONFIG_FILENAME, \"w\") as f:\n f.write(self.TEMPLATE.format(username=os.getenv(\"SUDO_USER\")))" ]
[ "0.6830326", "0.6830326", "0.68101245", "0.67205757", "0.66883713", "0.6676231", "0.6484155", "0.6468319", "0.64551955", "0.63014174", "0.626041", "0.6171588", "0.61103344", "0.60650027", "0.6061568", "0.6031424", "0.5996078", "0.592579", "0.59251434", "0.59190315", "0.59057903", "0.589578", "0.5868491", "0.5861472", "0.5860773", "0.5859229", "0.5858146", "0.58567643", "0.58507055", "0.5819588" ]
0.68759596
0
rebuilds elements of flat_li to match list structure of original_li (or tuple if given as args)
def deflatten(flat_li, *original_li): if len(original_li) == 1: original_li = original_li[0] deflatten_li = [] i = 0 for el in original_li: if isinstance(el, Sequence): deflatten_li.append(flat_li[i:i+len(el)]) i += len(el) else: deflatten_li.append(flat_li[i]) i += 1 return deflatten_li
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flatten(self, l, ltypes=(list, tuple)):\n i = 0\n while i < len(l):\n while isinstance(l[i], ltypes):\n if not l[i]:\n l.pop(i)\n if not len(l):\n break\n else:\n l[i:i + 1] = list(l[i])\n i += 1\n return l", "def flat_list(old_list):\n new_list = []\n for element in old_list:\n if \"list\" in str(type(element)):\n recursive_list = flat_list(element)\n for sub_element in recursive_list:\n new_list.append(sub_element) \n else:\n new_list.append(element)\n return new_list", "def flatten_list(alist):\n return list(flatten_list_gen(alist))", "def lflatten(*lst):\n return flatten(list(lst))", "def flatten(lol ):\n return [item for sublist in lol for item in sublist]", "def flatten(l,ltypes=(list,tuple)):\n ltype = type(l)\n l = list(l)\n i = 0\n while i < len(l):\n while isinstance(l[i], ltypes):\n if not l[i]:\n l.pop(i)\n i -= 1\n break\n else:\n l[i:i + 1] = l[i]\n i += 1\n return ltype(l)", "def unflatten(self, flat, unused_shaped_like):\n return next(flat)", "def transformation_flatten(twoDlistinput):\r\n oneDlistoutput = []\r\n for i in range(len(twoDlistinput)):\r\n for j in range(len(twoDlistinput[i])):\r\n oneDlistoutput.append(twoDlistinput[i][j])\r\n return(oneDlistoutput)", "def replace(old, new, l):\n if type(l) == list:#Here both elements need to be checked\n if l[0] == old:#Replacement\n r1 = new\n elif type(l[0]) != str:#if it's either list or tuple\n r1 = replace(old, new, l[0])#recursive checking is needed\n else:\n r1 = l[0]#nothing changes\n if l[1] == old:#same as above\n r2 = new\n elif type(l[1]) != str:\n r2 = replace(old, new, l[1])\n else:\n r2 = l[1]\n return [r1, r2]#assemble the new list with replacements done and return\n elif type(l) == tuple:#Here only the second element needs to be checked. Also, if the Abstraction here defines the variable that's been looked for, stop looking in there (namespacing)\n if l[0] == \"*\"+old:#if the abstraction also defines the variable to replace (see above)\n r = l\n else:\n if l[1] == old:#the same stuff as above, just for one element\n r = (l[0], new)\n elif type(l[1]) != str:\n r = (l[0], replace(old, new, l[1]))\n else:\n r = l\n return r", "def flatten_list(lol):\n return list(itertools.chain.from_iterable(lol))", "def flatten(list_to_flatten): \n flattened_list = []\n for item in list_to_flatten:\n if isinstance(item, list) or isinstance(item, tuple):\n flattened_list += flatten(item)\n else:\n flattened_list.append(item)\n return flattened_list", "def unflatten(tmpl, flat):\n def unflatten_recursive(tmpl, flat):\n if isinstance(tmpl, (tuple, list)):\n nested = []\n for sub_tmpl in tmpl:\n sub_nested, flat = unflatten_recursive(sub_tmpl, flat)\n nested.append(sub_nested)\n if isinstance(tmpl, tuple):\n nested = tuple(nested)\n return nested, flat\n else:\n return flat[0], flat[1:]\n\n nested, _ = unflatten_recursive(tmpl, flat)\n return nested", "def flatten(lst):\n \"*** YOUR CODE HERE ***\"", "def flat_list(list_: list) -> list:\n return [item for sublist in list_ for item in sublist]", "def make_flat(list_of_lists: list) -> list:\n return sum([list(item) for item in list_of_lists], [])", "def flatten(ls):\r\n return [item for sublist in ls for item in sublist]", "def _unflatten(updates, flat):\n updates_flat, treedef = tree_flatten(updates)\n offsets = []\n for update in updates_flat:\n size = np.prod(update.shape)\n if offsets:\n offsets.append(size + offsets[-1])\n else:\n offsets.append(size)\n del offsets[-1]\n flat_split = jnp.split(flat, offsets)\n reshaped = [\n jnp.reshape(flat_update, update.shape)\n for 
flat_update, update in zip(flat_split, updates_flat)\n ]\n return tree_unflatten(treedef, reshaped)", "def flattenPythonList(pythonList):\n if isEmptyList(pythonList):\n return []\n\n def flattenHelper(lista):\n if type(lista) is not tuple:\n raise Exception(\"Error in parameter list representation\")\n\n izq = lista[0]\n der = lista[1]\n \n if type(izq) is float: #no need to check right because constants always have None right\n return izq \n \n if type(izq[0]) is float: #check left depth\n izqResult = izq[0]\n else:\n izqResult = flattenHelper(izq)\n\n # if type(izq[0][0]) is float and der is None and izq[1] is None:\n # return (izqResult, )\n if der is None and izq[1] is None:\n return (izqResult, )\n\n if der is None:\n return izqResult\n\n temp = []\n if der[1] is not None:\n temp.append(izqResult)\n derResult = flattenHelper(der)\n temp = temp + list(derResult)\n else:\n temp.append(izqResult)\n derResult = flattenHelper(der)\n temp.append(derResult)\n return tuple(temp)\n \n elems = []\n for tupleList in pythonList:\n if type(tupleList) == float:\n return elems.append(tupleList)\n # elif type(tupleList[0]) is tuple and type(tupleList[0][0]) is tuple and tupleList[1] is None:\n # temp = (flattenHelper(tupleList), )\n else:\n temp = flattenHelper(tupleList)\n\n if type(temp) is float:\n temp = (temp,)\n elif len(temp) == 1:\n temp = (temp, )\n # elif type(temp[0]) is tuple and type(temp[0][0]) is tuple and temp[1] is None:\n # temp (temp, )\n elems.append(temp)\n return elems", "def flattenList(input_list):\r\n return [item for sublist in input_list for item in sublist]", "def flatten_me(lst, new_lst=None):\n if new_lst is None:\n new_lst = []\n\n for item in lst:\n if isinstance(item, list):\n flatten_me(item, new_lst)\n else:\n new_lst.append(item)\n\n return new_lst", "def flatten_list_gen(alist):\n for item in alist:\n if isinstance(item, list) or isinstance(item, np.ndarray):\n for i in flatten_list_gen(item):\n yield i\n else:\n yield item", "def flatten(seq):\n \n ret = []\n def _flatten(seq):\n for i in seq:\n if isinstance(i, (list, tuple)):\n _flatten(i)\n else:\n ret.append(i)\n return ret\n \n if isinstance(seq, tuple):\n return tuple(_flatten(seq))\n \n return _flatten(seq)", "def flatten(nested_list):\r\n return list(chain.from_iterable(nested_list))", "def unpack_list(to_unpack: list):\n # Must iterate through the given list\n for entry in to_unpack:\n # If the current element is not a list then all is chill, this element is ready for appending\n # to the final list\n if not isinstance(entry, list):\n out.append(entry)\n else:\n # If the current element IS a list, then obviously we still have more unpacking to do,\n # so we call this function recursively.\n unpack_list(entry)", "def flattenList(l=None):\r\n flat_list = [item for sublist in l for item in sublist]\r\n return flat_list", "def flatten(l):\n if isinstance(l, list):\n for el in l:\n if isinstance(el, list):\n yield from flatten(el)\n else:\n yield el\n else:\n return l", "def flatten(lis):\n new_lis = []\n for item in lis:\n if type(item) == type([]):\n new_lis.extend(flatten(item))\n else:\n new_lis.append(item)\n return new_lis", "def ijoin_lists(l):\n if l:\n try:\n if not all(ymap(isinstance, l, list)):\n from tek.errors import MooException\n raise MooException('Some elements aren\\'t lists!')\n for i in cumsum([0] + list(map(len, l[:-1]))):\n l[i:i+1] = l[i]\n except Exception as e:\n logger.debug('ijoin_lists failed with: ' + str(e))\n return l", "def make_list( elements ):\n if isinstance(elements, 
(list, tuple)):\n return elements\n else:\n return [elements]", "def flat_list_of_lists(l):\n return [item for sublist in l for item in sublist]" ]
[ "0.6270293", "0.6178351", "0.61386776", "0.60761553", "0.58953786", "0.5860441", "0.5845089", "0.57745105", "0.5755302", "0.5732161", "0.56978935", "0.566958", "0.566346", "0.56503826", "0.5627321", "0.5624456", "0.5575356", "0.5567329", "0.5556008", "0.5543179", "0.55420625", "0.5540202", "0.55145687", "0.5507821", "0.5480979", "0.5458771", "0.5457782", "0.54572004", "0.54418904", "0.5421442" ]
0.7833961
0
find all indices from list ``l`` where entries match specific object ``o``
def findall(l, o): return [i for i, u in enumerate(l) if u==o]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAllIndex(ldata, fldata):\n\treturn list(map(lambda e : fldata.index(e), ldata))", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def get_indexes(from_list, find_list):\n\n df_find = pd.DataFrame(find_list, columns=['value'])\n df_from = pd.DataFrame(list(zip(from_list, np.arange(len(from_list)))), columns=['value', 'index'])\n indexes = pd.merge(df_from, df_find, on='value', how='inner')['index'].values\n return indexes", "def find_index(self, obj):\n return self.model.indexlist[obj]", "def list_item_indexes(list_arg: list, item: Any) -> Tuple[int, ...]:\n indexes = [index for index, value in enumerate(list_arg) if value == item]\n return indexes", "def indexer(list1, list2):\r\n\tindex_list = []\r\n\tfor x in list2:\r\n\t\tfor y in list1:\r\n\t\t\tif x == y:\r\n\t\t\t\tindex = list1.index(x)\r\n\t\t\t\tindex_list.append(index)\r\n\treturn index_list", "def index(queryset, obj):\n for index, item in enumerate(queryset):\n if item == obj:\n return index\n\n return -1", "def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)", "def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos", "def objects_to_indexes(self, object_seq: Sequence[Any]) -> np.ndarray:\n res = np.zeros(len(object_seq))\n for i, obj in enumerate(object_seq):\n if obj in self.obj_to_idx:\n res[i] = self.obj_to_idx[obj]\n else:\n res[i] = self.start-1\n return res", "def _index_q_list_in_k_list(q_list, k_list):\r\n q_list_length = len(q_list)\r\n k_list_length = len(k_list)\r\n for idx in range(k_list_length - q_list_length + 1):\r\n t = [q == k for q, k in zip(q_list, k_list[idx: idx + q_list_length])]\r\n # print(idx, t)\r\n if all(t):\r\n # print(idx)\r\n idx_start = idx\r\n return idx_start", "def linear_search(list, target):\n for i in range (0, len(list)):\n if list[i] == target:\n return i\n\n\n return None", "def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]:\n return tuple([l_[x] for x in i])", "def get_coincidence_indices(self, lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset+1)\n except ValueError:\n return result\n result.append(offset)", "def linear_search(element, list_of_elements):\n for i, elem in enumerate(list_of_elements):\n if elem == element:\n return i\n return None", "def getall(l, idx):\n return [l[i] for i in idx]", "def indexAll(inputList=None, value=None):\r\n if not isinstance(inputList, list):\r\n raise TypeError('Input list must be a list object.')\r\n return [i for i, x in enumerate(inputList) if x == value]", "def binary_search(alist, target):\n index = binary_search_iterative(alist, target)\n return index", "def linear_search_iterative(alist, target):\n index_target = None\n found = False\n index_current = 0\n while index_current < len(alist) and found is False:\n if alist[index_current] == target:\n index_target = index_current\n found = True\n index_current += 1\n return index_target", "def index_object(idxs=None):", "def linear_search(alist, key):\n for i in range(len(alist)):\n if alist[i] == key:\n return i\n return -1", "def search_linear(xs, target):\n for (i, v) in enumerate(xs):\n if v == target: # Is referred to as a probe.\n return i\n return -1", "def get_indexes_of(number, int_list):\n\n index = 0\n result = []\n while True:\n if is_end_of_list(int_list, index):\n break\n 
if number in int_list[index:]: # if number is found in (the rest of) the int_list\n result.append(index + int_list[index:].index(number)) # result = [3]\n index = result[-1] + 1 # index = 4\n continue\n else: # cannot find the number in (the rest of) the int_list\n break\n return result # [3,7]", "def get_numa_index_list(obj):\n obj_lists = collections.defaultdict(list)\n for index, o in enumerate(obj):\n o[\"_index\"] = index\n obj_lists[o.numa_node].append(o)\n return obj_lists", "def linear_search(arr: IntList, query: int) -> int:\n arr_len: int = len(arr)\n for idx in range(arr_len):\n if arr[idx] == query:\n return idx\n return -1", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def oprime_inds(self, obj_inds):\n if type(obj_inds) == set:\n obj_inds = list(obj_inds)\n try:\n common_intent = self.np_table[obj_inds[0], :].copy()\n except IndexError:\n return set(range(len(self.attributes)))\n else:\n for obj_ind in obj_inds[1:]:\n common_intent &= self.np_table[obj_ind, :]\n return common_intent.nonzero()[0]", "def findall(lo,val):\n\tu = []\n\ti = -1\n\twhile( i < len(lo)-1):\n\t\ttry:\n\t\t\ti = lo.index(val,i+1)\n\t\t\tu.append(i)\n\t\texcept:\n\t\t\ti += 1\n\treturn u", "def check_object_repeated(lists, obj):\n for any_obj in lists:\n if check_tuples(any_obj['indexes'], obj['indexes']):\n return None\n return obj", "def get_indexes(self, x):\n indexes = []\n for index_hashes in self.hash_functions:\n combined_index = []\n for idx_spec, hash_func in zip(self.config.index_specs, index_hashes):\n combined_index.append(idx_spec.distribution.get_index(hash_func(x)))\n indexes.append(tuple(combined_index))\n return indexes" ]
[ "0.63037544", "0.6193546", "0.6087121", "0.6016721", "0.59283173", "0.5781884", "0.57606155", "0.5718999", "0.5709015", "0.56931007", "0.5680674", "0.5623687", "0.5537491", "0.551839", "0.5506623", "0.5459669", "0.5450619", "0.54497373", "0.53892154", "0.53692645", "0.5348893", "0.53455377", "0.53089446", "0.53083384", "0.5277277", "0.5276007", "0.52668405", "0.5260988", "0.5254946", "0.5253118" ]
0.7872293
0
get all entries of list ``l`` at positions ``idx``
def getall(l, idx): return [l[i] for i in idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multiListSlice(lol, index):\n divisor = 1\n values = []\n for i in range(0, len(lol)):\n index = (index / divisor) % len(lol[i])\n values[i] = lol[i][index]\n divisor *= len(lol[i])\n return values", "def extract_sub_list(mylist, indices):\n return [mylist[ii] for ii in indices]", "def getAllIndex(ldata, fldata):\n\treturn list(map(lambda e : fldata.index(e), ldata))", "def slice_by_index(lst, indexes):\r\n if not lst or not indexes:\r\n return []\r\n slice_ = itemgetter(*indexes)(lst)\r\n if len(indexes) == 1:\r\n return [slice_]\r\n return list(slice_)", "def slice_by_index(lst, indices):\r\n slicer = itemgetter(*indices)(lst)\r\n if len(indices) == 1:\r\n return [slicer]\r\n return list(slicer)", "def getitem(lst, indices):\n if not indices:\n return lst\n\n i, indices = indices[0], indices[1:]\n item = list.__getitem__(lst, i)\n\n if isinstance(i, int):\n return getitem(item, indices)\n\n # Empty slice: check if all subsequent indices are in range for the\n # full slice, raise IndexError otherwise. This is NumPy's behavior.\n if not item:\n if lst:\n _ = getitem(lst, (slice(None),) + indices)\n elif any(isinstance(k, int) for k in indices):\n raise IndexError\n return []\n\n return [getitem(x, indices) for x in item]", "def __getitem__(self, idx) :\n\n return self.getitem_all(idx * settings.WORLD_SIZE + settings.RANK)", "def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]:\n return tuple([l_[x] for x in i])", "def all_lines_at_idx(mm, idx_list):\n lines = []\n for idx in idx_list:\n mm.seek(idx)\n # row back to beginning of line\n ibegin = mm.rfind(b'\\n', 0, idx)\n if ibegin == -1:\n ibegin = 0\n mm.seek(ibegin)\n mm.readline()\n # read desired line\n line = mm.readline()\n lines.append(line.decode())\n return lines", "def reader(list, index_list):\r\n\tnewlist = []\r\n\tfor i in index_list:\r\n\t\tnewlist.append(list[i])\r\n\treturn newlist", "def __getitem__(self, ndx):\n if type(ndx) is slice:\n return list(islice(self._all(), ndx.start, ndx.stop, ndx.step or 1))\n else:\n return islice(self._all(), ndx, ndx+1).next()", "def iter_lists(self, with_index=True):\n for idx, row in zip(self.index, self._data):\n if with_index:\n yield idx, list(row)\n else:\n yield list(row)", "def appendList(indexlist, l):\n\tappendedList = []\n\tfor i in indexlist:\n\t\tappendedList.append(l[i])\n\treturn appendedList", "def getidx(self, ind: Union[str, int]) -> List[Any]:\n output = []\n for data in self.data:\n output.append(data[ind])\n return output", "def GetSpectraFromIndexList(all_wl,all_spectra,idx_list):\n NBSPEC=len(all_spectra)\n \n \n all_wl_sel=[]\n all_spectra_sel=[]\n \n for idx in np.arange(0,NBSPEC):\n if idx in idx_list:\n all_wl_sel.append(all_wl[idx])\n all_spectra_sel.append(all_spectra[idx])\n return all_wl_sel,all_spectra_sel", "def _get_item_by_idx(self, iterator, idx):\r\n size = len(self)\r\n idx = operator.index(idx)\r\n if not -size <= idx < size:\r\n raise IndexError('index {} is out of range'.format(idx))\r\n idx %= size\r\n return next(itertools.islice(iterator, idx, None))", "def getidx(self, ind: Union[str, int]) -> List[Any]:\n output = []\n for data in self.data:\n output.append(data[ind])\n\n return output", "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def get_from_list(self,list_,index):\r\n\r\n\r\n try:\r\n return list_[self._index_to_int(index)]\r\n except IndexError:\r\n self._index_error(list_,index)", "def get_items(self, indexes: Iterable[int]) -> List[_T]:\n return [self.get_item(index) for index in indexes]", "def 
ins_all_positions(x, l):\n res = []\n for i in range(0, len(l) + 1):\n res.append(l[:i] + [x] + l[i:])\n return res", "def matchloc(alist,val): \n return [ilc for ilc,jlc in enumerate(alist) if jlc==val]", "def get_elements_from_list(target_list, indexes):\n elements = [target_list[i] for i in indexes]\n return elements", "def getIntersectorList(self, l):\n return [self.getIntersector(v) for v in l]", "def fix_indexes(res, idx_local, idx, buffer_size):\n\n # get limits for the data (exlude indexes that have buffer data)\n data_start = idx_local[0].start\n data_end = idx_local[0].stop\n\n return res[data_start:data_end]", "def nth(_list, n):\n n = lloc(_list, n)\n return [a[n] for a in _list]", "def slice_list(in_list, lens):\n if isinstance(lens, int):\n assert len(in_list) % lens == 0\n lens = [lens] * int(len(in_list) / lens)\n if not isinstance(lens, list):\n raise TypeError('\"indices\" must be an integer or a list of integers')\n elif sum(lens) != len(in_list):\n raise ValueError(\n \"sum of lens and list length does not \"\n f\"match: {sum(lens)} != {len(in_list)}\"\n )\n out_list = []\n idx = 0\n for i in range(len(lens)):\n out_list.append(in_list[idx : idx + lens[i]])\n idx += lens[i]\n return out_list", "def loc_data_idx(loc_idx):\n retval = []\n for i in as_tuple(loc_idx):\n if isinstance(i, slice) and i.step is not None and i.step == -1:\n if i.stop is None:\n retval.append(slice(0, i.start+1, -i.step))\n else:\n retval.append(slice(i.stop+1, i.start+1, -i.step))\n elif isinstance(i, slice) and i.step is not None and i.step < -1:\n if i.stop is None:\n lmin = i.start\n while lmin >= 0:\n lmin += i.step\n retval.append(slice(lmin-i.step, i.start+1, -i.step))\n else:\n retval.append(slice(i.stop+1, i.start+1, -i.step))\n elif is_integer(i):\n retval.append(slice(i, i+1, 1))\n else:\n retval.append(i)\n return as_tuple(retval)", "def __getitem__(self, index):\n try:\n return next(islice(self.__iter__(), index, index+1))\n except TypeError:\n return list(islice(self.__iter__(), index.start, index.stop,\n index.step))", "def partition(alist, indices):\n return [alist[i:j] for i, j in zip([0]+indices, indices+[None])]" ]
[ "0.6098279", "0.59349114", "0.58925664", "0.58031976", "0.5779936", "0.5718426", "0.56653845", "0.56569105", "0.5654663", "0.5648731", "0.5606806", "0.55991054", "0.55896133", "0.550258", "0.5500482", "0.54870623", "0.54548275", "0.54517424", "0.5408129", "0.54024506", "0.52995014", "0.529436", "0.5291839", "0.52790165", "0.5221834", "0.5207061", "0.5197536", "0.5182784", "0.51579535", "0.515706" ]
0.82942766
0
Model function for CIFAR10.
def cifar10_model_fn(features, labels, mode, params): features = tf.reshape(features, [-1, _IMAGE_SIZE, _IMAGE_SIZE, _NUM_CHANNELS]) learning_rate_fn = resnet_run_loop.learning_rate_with_decay( batch_size=params['batch_size'], batch_denom=128, num_images=_NUM_IMAGES['train'], boundary_epochs=[10, 20, 30], decay_rates=[1, 0.1, 0.01, 0.001]) # We use a weight decay of 0.0002, which performs better # than the 0.0001 that was originally suggested. weight_decay = 2e-4 # Empirical testing showed that including batch_normalization variables # in the calculation of regularized loss helped validation accuracy # for the CIFAR-10 dataset, perhaps because the regularization prevents # overfitting on the small data set. We therefore include all vars when # regularizing and computing loss during training. def loss_filter_fn(_): return True return resnet_run_loop.resnet_model_fn( features=features, labels=labels, mode=mode, model_class=Model, resnet_size=params['resnet_size'], weight_decay=weight_decay, learning_rate_fn=learning_rate_fn, momentum=0.9, data_format=params['data_format'], resnet_version=params['resnet_version'], loss_scale=params['loss_scale'], loss_filter_fn=loss_filter_fn, dtype=params['dtype'], fine_tune=params['fine_tune'] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cifar10(self):\n\t\t# Get the data.\n\t\tself.x_train = self.x_train.reshape(self.nb_train, self.input_dim)\n\t\tself.x_test = self.x_test.reshape(self.nb_test, self.input_dim)\n\t\tself.x_train = self.x_train.astype('float32')\n\t\tself.x_test = self.x_test.astype('float32')\n\t\tself.x_train /= 255\n\t\tself.x_test /= 255\n\n\t\t# convert class vectors to binary class matrices\n\t\tself.y_train = np_utils.to_categorical(self.y_train, self.nb_classes)\n\t\tself.y_test = np_utils.to_categorical(self.y_test, self.nb_classes)", "def get_cifar10():\n\n from keras.datasets import cifar10\n\n # input image dimensions\n img_rows, img_cols = 32, 32\n n_channels = 3\n num_classes = 10\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n if K.image_data_format() == 'channels_first':\n print (\"Using Channels first\")\n x_train = x_train.reshape(x_train.shape[0], n_channels, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], n_channels, img_rows, img_cols)\n input_shape = (n_channels, img_rows, img_cols)\n else:\n print(\"Channels last\")\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, n_channels)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, n_channels)\n input_shape = (img_rows, img_cols, n_channels)\n\n return (x_train, y_train), (x_test, y_test)", "def get_cifar10_mlp():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 64\n epochs = 4\n input_shape = (3072,) #because it's RGB\n\n # Get the data.\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n x_train = x_train.reshape(50000, 3072)\n x_test = x_test.reshape(10000, 3072)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)", "def _model(self):\n common_scale = self.edp_par['common_scale'].value\n model = self.F_trans() * self.F_cont()\n # get F(h=1,k=0), which is used for normalization \n # common_scale is a common scaling factor => F(h=1,k=0) = 100*common_scale\n F_10 = model[(self.h==1)&(self.k==0)]\n model = model / np.absolute(F_10) * 100 * common_scale\n return model", "def cifar10(transform=transforms.ToTensor()):\n\n\t# Directories\n\tscript_dir = os.path.dirname(os.path.realpath(__file__))\n\tdata_dir = os.path.join(script_dir, 'data', 'cifar10')\n\n\t# Load training set, downloading if necessary\n\tdataset = datasets.CIFAR10(data_dir, train=True, transform=transform, download=True)\n\n\t# Split into train, val, and test sets\n\n\t# Add 'classes' list and 'class_to_idx' lookup dictionary to both sets\n\tclasses = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', \n\t\t'horse', 'ship', 'truck']\n\tclass_to_idx = {classes[i]: i for i in range(len(classes))}\n\n\tdataset.classes = classes\n\tdataset.class_to_idx = class_to_idx\n\n\treturn dataset", "def get_cifar10_cnn():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 128\n epochs = 4\n \n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n \n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n #x._train shape: (50000, 32, 32, 3)\n #input 
shape (32, 32, 3)\n input_shape = x_train.shape[1:]\n\n #print('x_train shape:', x_train.shape)\n #print(x_train.shape[0], 'train samples')\n #print(x_test.shape[0], 'test samples')\n #print('input shape', input_shape)\n \n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)", "def _compile_model(self):\n\n\t\t\t### GC90 atmospheric model implementation\n\t\t\n\t\t\ttheta_sun, beta, alpha, am, rh, pressure = T.scalars('theta_sun', 'beta', 'alpha', 'am', 'rh', 'pressure')\n\n\t\t\twl = T.vector('wl')\n\t\t\n\t\t\twl_a = 550\n\n\t\t\ttheta_sun_ = theta_sun * np.pi / 180.\n\n\t\t\tz3 = -0.1417 * alpha + 0.82\n\t\t\tz2 = ifelse(T.gt(alpha, 1.2), 0.65, z3)\n\t\t\tz1 = ifelse(T.lt(alpha, 0), 0.82, z2)\n\t\t\n\t\t\ttheta_sun_mean = z1\n\n\t\t\tB3 = T.log(1 - theta_sun_mean)\n\t\t\tB2 = B3 * (0.0783 + B3 * (-0.3824 - 0.5874 * B3))\n\t\t\tB1 = B3 * (1.459 + B3 * (0.1595 + 0.4129 * B3))\n\t\t\tFa = 1 - 0.5 * T.exp((B1 + B2 * T.cos(theta_sun_)) * T.cos(theta_sun_))\n\n\t\t\tomega_a = (-0.0032 * am + 0.972) * T.exp(3.06 * 1e-4 * rh)\n\t\t\ttau_a = beta*(wl/wl_a)**(-alpha)\n\n\t\t\t# fixed a bug in M, thanks Jaime! [brackets added]\n\t\t\tM = 1 / (T.cos(theta_sun_) + 0.50572 * (90 + 6.07995 - theta_sun)**(-1.6364)) \n\t\t\tM_ = M * pressure / 1013.25\n\n\t\t\tTr = T.exp(- M_ / (115.6406 * (wl / 1000)**4 - 1.335 * (wl / 1000)**2)) \n\n\t\t\tTas = T.exp(- omega_a * tau_a * M)\n\n\t\t\tEdd = Tr * Tas \n\t\t\tEdsr = 0.5 * (1 - Tr**0.95)\n\t\t\tEdsa = Tr**1.5 * (1 - Tas) * Fa\n\t\t\n\t\t\tEd = Edd + Edsr + Edsa\n\t\t\tEdd_Ed = Edd / Ed\n\t\t\tEdsr_Ed = Edsr / Ed\n\t\t\tEdsa_Ed = Edsa / Ed\n\t\t\tEds_Ed = Edsr_Ed + Edsa_Ed\n\n\t\t\t### Albert and Mobley bio-optical model implementation\n\n\t\t\ta_w, daw_dT, astar_ph, astar_y, Ls_Ed = T.vectors('a_w', 'daw_dT', 'astar_ph', 'astar_y', 'Ls_Ed') \n\t\t\n\t\t\tC_chl, C_sm, C_mie, n_mie, C_y, S_y, T_w, theta_view, n_w, rho_s, rho_dd, rho_ds, delta= T.scalars('C_chl', 'C_sm', 'C_mie', 'n_mie', 'C_y', 'S_y', 'T_w', 'theta_view', 'n_w', 'rho_s', 'rho_dd', 'rho_ds', 'delta')\n\n\t\t\t# calc_a_ph\n\t\t\ta_ph = C_chl * astar_ph\n\n\t\t\t# calc_a_y\n\t\t\twl_ref_y = 440\n\t\t\ta_y = ifelse(T.eq(S_y, -1), C_y * astar_y, C_y * T.exp(- S_y * (wl - wl_ref_y)))\n\n\t\t\t# calc_a\n\t\t\tT_w_ref = 20.\n\t\t\ta_w_corr = a_w + (T_w - T_w_ref) * daw_dT\n\t\t\n\t\t\ta = a_w_corr + a_ph + a_y\n\n\t\t\t# calc_bb_sm\n\t\t\tbbstar_sm = 0.0086\n\t\t\tbbstar_mie = 0.0042\n\t\t\twl_ref_mie = 500\n\t\t\n\t\t\tbb_sm = C_sm * bbstar_sm + C_mie * bbstar_mie * (wl / wl_ref_mie)**n_mie\n\n\t\t\t# calc_bb\n\t\t\tb1 = ifelse(T.eq(n_w, 1.34), 0.00144, 0.00111)\n\t\t\n\t\t\twl_ref_water = 500\n\t\t\tS_water = -4.32\n\n\t\t\tbb_water = b1 * (wl / wl_ref_water)**S_water\n\t\t\tbb = bb_water + bb_sm\n\n\t\t\t# calc omega_b\n\t\t\tomega_b = bb / (bb + a)\n\n\t\t\t# calc sun and viewing zenith angles under water\n\t\t\ttheta_sun_ = theta_sun * np.pi / 180.\n\t\t\ttheta_sun_ss = T.arcsin(T.sin(theta_sun_) / n_w)\n\t\t\ttheta_view_ = theta_view * np.pi / 180.\n\t\t\ttheta_view_ss = T.arcsin(T.sin(theta_view_) / n_w)\n\n\t\t\tp_f = [0.1034, 1, 3.3586, -6.5358, 4.6638, 2.4121]\n\t\t\tp_frs = [0.0512, 1, 4.6659, -7.8387, 5.4571, 0.1098, 0.4021]\n\n\t\t\t# calc subsurface reflectance \n\t\t\tf = p_f[0] * (p_f[1] + p_f[2] * omega_b + p_f[3] * omega_b**2 + p_f[4] * omega_b**3) * (1 + p_f[5] / T.cos(theta_sun_ss)) \n\n\t\t\tR0minus = f * omega_b\n\n\t\t\t# calc 
subsurface remote sensing reflectance \n\t\t\tfrs = p_frs[0] * (p_frs[1] + p_frs[2] * omega_b + p_frs[3] * omega_b**2 + p_frs[4] * omega_b**3) * (1 + p_frs[5] / T.cos(theta_sun_ss)) * (1 + p_frs[6] / T.cos(theta_view_ss))\n\n\t\t\tRrs0minus = frs * omega_b\n\n\t\t\t# calc water surface reflected reflectance \n\t\t\tRrs_refl = rho_s * Ls_Ed + rho_dd * Edd_Ed / np.pi + rho_ds * Eds_Ed / np.pi + delta\n\n\t\t\t# calc_Rrs0plus (Lee1998, eq22), R=Q*Rrs\n\t\t\tgamma = 0.48\n\t\t\tzeta = 0.518\n\n\t\t\tRrs = zeta * Rrs0minus / ( 1 - gamma * R0minus )\n\t\t\n\t\t\tLu_Ed = Rrs + Rrs_refl\n\t\t\n\t\t\tf = th.function([beta, alpha, am, rh, pressure, C_chl, C_sm, C_mie, n_mie, C_y, S_y, T_w, theta_sun, theta_view, n_w, rho_s, rho_dd, rho_ds, delta, wl, a_w, daw_dT, astar_ph, astar_y, Ls_Ed], [Rrs, Rrs_refl, Lu_Ed], on_unused_input='warn')\n\n\t\t\treturn f", "def train_cifar10():\r\n # type: () -> None\r\n\r\n # Build dataset and model\r\n dataset = CIFAR10_TRAIN(path=Config.video_folder)\r\n model = LSACIFAR10(input_shape=dataset.shape, code_length=64,\r\n cpd_channels=100).to(device).train()\r\n\r\n # Set up result helper and perform test\r\n helper = OneClassResultHelper(dataset, model,\r\n checkpoints_dir=Config.model_ckpt,\r\n output_file='cifar10.txt')\r\n helper.train_one_class_classification()", "def fit_covid_function(self):\r\n return", "def __init__(self, **kwargs):\n super(CIFAR10Classifier, self).__init__() #pylint: disable=super-with-arguments\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)\n\n self.scheduler = None\n self.optimizer = None\n self.args = kwargs\n\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n\n self.preds = []\n self.target = []\n self.example_input_array = torch.rand((1, 3, 64, 64))", "def presenetCar():", "def build_model_fn(self):", "def cifar10_bin_model_fn(features, labels, mode, params):\n\n network = resnet_model.cifar10_resnet_v2_generator(\n params['resnet_size'], _NUM_CLASSES, params['data_format']\n )\n\n inputs = tf.reshape(features, [-1, _HEIGHT, _WIDTH, _DEPTH])\n clabels = labels[:, :_NUM_CLASSES]\n \n # logits = network(inputs, mode == tf.estimator.ModeKeys.TRAIN)\n logits = network(inputs, mode == tf.estimator.ModeKeys.TRAIN, name=\"main\")\n probs = tf.sigmoid(logits)\n\n # slabels, smask = smooth_neg_labels(clabels, params[\"cutoff_weight\"], params[\"pen_prob\"])\n # slabels, smask = smooth_neg_labels(clabels, 1*1/_NUM_CLASSES, 0.45)\n bt_loss = tf.reduce_mean(per_class_bin_loss(probs, clabels, params[\"milden\"]), axis=1)\n\n loss = tf.reduce_mean(bt_loss, axis=0)\n loss = tf.identity(loss, name=\"loss_vec\")\n loss_sum = tf.summary.scalar(\"loss\", loss)\n\n probs_cal = tf.sigmoid(logits/params[\"temp\"])\n rate = tf.reduce_max(probs_cal, axis=1)\n\n # loss = tf.Print(loss, [smask], summarize=100, message=\"smask: \")\n # loss = tf.Print(loss, [tf.reduce_mean(probs)], summarize=100, message=\"mean: \")\n # loss = tf.Print(loss, [rate], summarize=100, message=\"rate: \")\n # loss = tf.Print(loss, [clabels, slabels], summarize=100, message=\"slabels: \")\n\n classes = tf.argmax(logits, axis=1)\n accuracy_m = tf.metrics.accuracy( tf.argmax(clabels, axis=1), classes, name=\"accuracy_metric\")\n accuracy = tf.identity(accuracy_m[1], name=\"accuracy_vec\")\n accuracy_sum = tf.summary.scalar(\"accuracy\", accuracy)\n\n 
if mode == tf.estimator.ModeKeys.EVAL or params[\"predict\"]:\n\n # print # note this is labels not clabels\n print_labels = tf.argmax(labels, axis=1)\n print_rate = rate\n print_probs = probs\n print_logits = logits\n\n hooks = []\n eval_metric_ops = { \"accuracy\": accuracy_m }\n\n # # printing stuff if predict\n if params[\"predict\"]:\n loss = tf.Print(loss, [print_labels], summarize=1000000, message='Targets')\n loss = tf.Print(loss, [print_rate], summarize=1000000, message='Rate')\n loss = tf.Print(loss, [print_probs], summarize=1000000, message='Probs')\n loss = tf.Print(loss, [print_logits], summarize=1000000, message='Logits')\n hooks = []\n eval_metric_ops = {}\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops = eval_metric_ops,\n # evaluation_hooks=hooks,\n )\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n # Scale the learning rate linearly with the batch size. When the batch size\n # is 128, the learning rate should be 0.1.\n initial_learning_rate = 0.1 * params['batch_size'] / 128\n batches_per_epoch = _NUM_IMAGES['train'] / params['batch_size']\n global_step = tf.train.get_or_create_global_step()\n\n # Multiply the learning rate by 0.1 at 100, 150, and 200 epochs.\n boundaries = [int(batches_per_epoch * epoch) for epoch in [100, 150, 200]]\n values = [initial_learning_rate * decay for decay in [1, 0.1, 0.01, 0.001]]\n learning_rate = tf.train.piecewise_constant(\n tf.cast(global_step, tf.int32), boundaries, values, name=\"learning_rate_vec\")\n\n learning_rate_sum = tf.summary.scalar(\"learning_rate\", learning_rate)\n\n optimizer = tf.train.MomentumOptimizer(\n learning_rate=learning_rate,\n momentum=_MOMENTUM\n )\n\n # Batch norm requires update ops to be added as a dependency to the train_op\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss, global_step)\n\n hook = tf.train.SummarySaverHook(\n summary_op=tf.summary.merge([accuracy_sum, learning_rate_sum]),\n save_steps=1,\n )\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op,\n training_hooks=[hook],\n )", "def get_cifar_data(num_classes=10):\n\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n x_train = x_train.astype(np.float32) / 255.\n x_test = x_test.astype(np.float32) / 255.\n\n y_train_cat = to_categorical(y_train, num_classes=num_classes).astype(np.float32)\n y_test_cat = to_categorical(y_test, num_classes=num_classes).astype(np.float32)\n\n return x_train, y_train, x_test, y_test, y_train_cat, y_test_cat", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.BatchNorm2d(768), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def get_model_code():\n\n return \"\"\"\n functions {\n matrix cov_matrix_ard(int N, int D, vector[] x, vector ls, real alpha_sq, int cov_id) {\n matrix[N,N] S;\n real dist_sum;\n real sqrt3;\n real sqrt5;\n sqrt3=sqrt(3.0);\n sqrt5=sqrt(5.0);\n\n // For RBF ARD kernel\n if (cov_id == 1) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_sum = 0;\n for(d in 1:D) {\n dist_sum = dist_sum + square(x[i][d] - x[j][d]) / square(ls[d]);\n }\n S[i,j] = alpha_sq * exp( -0.5 * dist_sum);\n }\n }\n }\n\n // Fill upper triangle\n for(i in 
1:(N-1)) {\n for(j in (i+1):N) {\n S[j,i] = S[i,j];\n }\n }\n\n // Create diagonal\n for(i in 1:N) {\n S[i,i] = alpha_sq;\n }\n\n return S;\n }\n\n matrix distance_matrix_on_vectors(int N, vector[] x) {\n matrix[N, N] distmat;\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n distmat[i, j] = square(distance(x[i], x[j]));\n }\n }\n return distmat;\n }\n\n matrix cov_matrix_matern(int N, matrix dist, real ls, real alpha_sq, int cov_id) {\n matrix[N,N] S;\n real dist_ls;\n real sqrt3;\n real sqrt5;\n sqrt3=sqrt(3.0);\n sqrt5=sqrt(5.0);\n\n // For Matern kernel with parameter nu=1/2 (i.e. absolute exponential kernel)\n if (cov_id == 2) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/square(ls);\n S[i,j] = alpha_sq * exp(-1 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu=3/2\n else if (cov_id == 3) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * (1 + sqrt3 * dist_ls) * exp(-sqrt3 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu=5/2\n else if (cov_id == 4) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * (1 + sqrt5 * dist_ls + 5 * pow(dist_ls,2)/3) * exp(-sqrt5 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu tending to infinity (i.e. RBF kernel)\n else if (cov_id == 1) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * exp( -0.5 * pow(dist_ls, 2) );\n }\n }\n }\n\n // Fill upper triangle\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n S[j,i] = S[i,j];\n }\n }\n\n // Create diagonal\n for(i in 1:N) {\n S[i,i] = alpha_sq;\n }\n\n return S;\n }\n\n }\n\n data {\n int<lower=1> D;\n int<lower=1> N;\n vector[D] x[N];\n vector[N] y;\n real<lower=0> ig1;\n real<lower=0> ig2;\n real<lower=0> n1;\n real<lower=0> n2;\n real<lower=0> sigma;\n int kernel_id;\n }\n\n parameters {\n real<lower=0> rho;\n vector<lower=0>[D] rhovec;\n real<lower=0> alpha;\n }\n\n model {\n int cov_id;\n matrix[N, N] cov;\n matrix[N, N] L_cov;\n matrix[N, N] distmat;\n\n // RBF kernel single lengthscale\n if (kernel_id == 1) {\n cov = cov_exp_quad(x, alpha, rho) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n rho ~ inv_gamma(ig1, ig2);\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n // Matern kernel single lengthscale\n else if (kernel_id >= 2 && kernel_id <= 4) {\n if (kernel_id == 2) { cov_id = 2; }\n if (kernel_id == 3) { cov_id = 3; }\n if (kernel_id == 4) { cov_id = 4; }\n\n distmat = distance_matrix_on_vectors(N, x);\n cov = cov_matrix_matern(N, distmat, rho, square(alpha), cov_id) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n rho ~ inv_gamma(ig1, ig2);\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n // RBF kernel with ARD (D-dimensional) lengthscale\n else if (kernel_id == 5) {\n cov_id = 1;\n cov = cov_matrix_ard(N, D, x, rhovec, square(alpha), cov_id) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n for(d in 1:D) {\n rhovec[d] ~ inv_gamma(ig1, ig2);\n }\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n }\n \"\"\"", "def __init__(self, S, f, num_t_sens, num_t_insens):\n # Number of states \n self.S = S \n self.f = f\n self.t_sens = num_t_sens\n self.t_insens = num_t_insens\n \n self.name = 'CRF'", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadCIFAR, self).__init__()\n 
self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, model):\n self.model = model\n self.n = 0\n\n self.ctetra4 = model.ctetra4\n self.cpyram5 = model.cpyram5\n self.cpenta6 = model.cpenta6\n self.chexa8 = model.chexa8\n\n self.ctetra10 = model.ctetra10\n self.cpyram13 = model.cpyram13\n self.cpenta15 = model.cpenta15\n self.chexa20 = model.chexa20\n self.element_id = array([], dtype='int32')", "def recognition_model(self, x, c):\n x = self.encoder(x)\n c = self.conditional(c)\n temp = self.recog_head(x + c)\n mu = self.mu(temp)\n logvar = self.logvar(temp)\n return mu, logvar", "def recognition_model(self, x, c):\n x = self.encoder(x)\n c = self.conditional(c)\n temp = self.recog_head(x + c)\n mu = self.mu(temp)\n logvar = self.logvar(temp)\n return mu, logvar", "def build_model():", "def get_CIFAR10_data():\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, _, X_test, _ = load_CIFAR10(cifar10_dir)\n X = np.concatenate((X_train, X_test), axis =0)\n return X", "def __init__(self, model):\n if model == \"biblis\" :\n data = [1.3200e+00, 2.7720e-01, 2.6562e-03, \\\n 7.1596e-02, 0.00000000, 0.00000000, \\\n 2.3106e-02] \n else :\n raise Exception(\"Reflector model not available\")\n self.model = model\n # Default group data.\n self.D1 = data[0]\n self.D2 = data[1]\n self.A1 = data[2]\n self.A2 = data[3] \n self.F1 = data[4]\n self.F2 = data[5] \n self.S12 = data[6] \n self.R1 = self.A1 + self.S12", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = 
lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def model_4_parameters(num_features, num_classes, image_info):\n parameters = {}\n parameters['num_features'] = num_features\n parameters['num_classes'] = num_classes\n if image_info['key'][:5] == \"pavia\":\n parameters['C'] = 1.0\n else:\n parameters['C'] = 40.0\n \n return parameters", "def modelAVOAkiRichards3(interface):\n interface[:,6]=0.5*(interface[:,0]/interface[:,3]+ \\\n interface[:,2]/interface[:,5])\n interface[:,7]=(interface[:,0]/(2*interface[:,3]))- \\\n 4*((interface[:,4]**2/interface[:,3]**2)* \\\n (interface[:,1]/interface[:,4]))- \\\n 2*(interface[:,4]**2/interface[:,3]**2)* \\\n (interface[:,2]/interface[:,5])", "def dynamic_model(self, input_val: float) -> float:\n pass", "def load_cifar10_data(img_rows, img_cols):\n\n # Load cifar10 training and validation sets\n (X_train, Y_train), (X_valid, Y_valid) = cifar10.load_data()\n\n # Resize training images\n X_train = np.array([cv2.resize(img, (img_rows, \\\n img_cols)) for img in X_train[:, :, :, :]])\n\n X_valid = np.array([cv2.resize(img, (img_rows, \\\n img_cols)) for img in X_valid[:, :, :, :]])\n\n # Check the data type of X_train or X_valid\n for each in X_train:\n print(type(each))\n\n # Transform targets to keras compatible format\n Y_train = np_utils.to_categorical(Y_train, num_classes)\n Y_valid = np_utils.to_categorical(Y_valid, num_classes)\n\n X_train = X_train.astype('float32')\n X_valid = X_valid.astype('float32')\n\n # Data normalization\n X_train = X_train / 255.0\n X_valid = X_valid / 255.0\n\n return X_train, Y_train, X_valid, Y_valid", "def model(self):" ]
[ "0.6843386", "0.650111", "0.609306", "0.60163224", "0.5996454", "0.59331244", "0.588775", "0.58561015", "0.5830527", "0.57808924", "0.57354146", "0.5720595", "0.5690742", "0.56661737", "0.5664544", "0.5653322", "0.56082076", "0.55979353", "0.5551245", "0.5541251", "0.5541251", "0.5531916", "0.55178267", "0.54801166", "0.546483", "0.54569495", "0.5417938", "0.5394017", "0.53901184", "0.53713876" ]
0.6535784
1
This is the main prediction adding function. It starts by grabbing a file to open from standard in, which contains one message board page. It processes each message contained in the page.
def main():
  # db_user = raw_input('DB username: ')
  db_user = 'oraclech'
  pw = getpass.getpass()
  odb = oracle_db.OracleDb(db_user, pw, database='oraclech_new')
  contest_id = raw_input('Current Contest ID? ')
  round_num = raw_input('Current Round Number? ')
  round_nums = round_num.split(',')
  topic_num = raw_input('Current Topic Number? ')
  page_num = raw_input('Current Page Number? ')
  contest = odb.GetContest(contest_id)
  try:
    file_path = '%s/r%dt%dp%02d.html' % (contest['Name'].lower(), int(round_nums[-1]), int(topic_num), int(page_num))
    file_path = '/home/oraclech/topics/' + file_path
    print file_path
    file = open(file_path, 'r')
  except IOError:
    file_path = raw_input('File to open (in /home/oraclech/topics/): ')
    file_path = '/home/oraclech/topics/' + file_path
    file = open(file_path, 'r')
  page = file.read()
  parser = message_board.Parser()
  messages = parser.Parse(page)
  for message in messages:
    message['Timestamp'] -= datetime.timedelta(minutes=TIME_OFFSET)
    ParsePredictions(odb, message, contest, round_nums)
  odb.Commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def process_input_file(sess, char_dict, model_settings, model_vars, input_file):\n \n with open(input_file, 'r') as f:\n for s in f: # s is the line string\n if s and (len(s) > 0):\n chars = list(s.strip())\n cids = char_dict.chars2cids(chars)\n \n in_embedding = np.eye(model_settings['input_classes'])\n one_hot = in_embedding[cids]\n one_hot_by_t = np.expand_dims(one_hot, 1)\n\n # run session to retriev prob\n probs = process_sentence(sess, model_settings, model_vars, one_hot_by_t) \n labels = viterbi(probs)\n words = char_dict.chars2words(chars, labels)\n print('|'.join(words))", "def start():\n\n global input, frameList, processPageList\n\n print \"\\n******************************************************************\"\n print \"\\tThis is a simulation for LRU replacement.\"\n print \"\\tAt any time you may choose to quit('q') this program.\"\n print \"******************************************************************\"\n\n # Open File\n with open(\"input.txt\") as f:\n original = f.readlines()\n\n # Remove \":\", \"\\n\"\n for line in original:\n input = line.replace(\":\", \"\")\n input = input.replace('\\n', \"\")\n\n # Split by tab, add to processList\n output = input.partition('\\t')\n\n # convert process to number\n pid = output[0]\n proc = int(pid[1])\n\n # convert binary page # to decimal\n page = int(str(output[2]), 2)\n\n # add process and page # to list\n processPageList.append([proc, page])", "def main() -> None:\n\n # Retrieving the wiki URL\n url = get_api_url()\n print(url)\n\n # Creates file if it does not exist\n open(TEXT_FILE, \"a\")\n\n with open(TEXT_FILE, \"r\") as f:\n last_title = f.readline().strip()\n print(\"Starting from:\", last_title)\n\n # Retrieving the pages JSON and extracting page titles\n pages_json = get_pages_json(url, last_title)\n pages = pages_json[\"query\"][\"allpages\"]\n print(\"Pages to be scanned:\", pages)\n\n # Tagging operations\n for page in pages:\n curr_title = page[\"title\"]\n cats_to_add = get_categories(curr_title)\n if cats_to_add:\n print(\"Adding categories\", cats_to_add, \"to '%s'\" % curr_title)\n for cat in cats_to_add:\n add_category(curr_title, \"[[Category:\" + cat + \"]]\")\n\n # Extracting title to continue iterating from next run\n if \"continue\" in pages_json:\n continue_from_title = pages_json[\"continue\"][\"apcontinue\"]\n print(\"Continuing from:\", continue_from_title, \"next run.\")\n else:\n continue_from_title = \"\"\n\n with open(TEXT_FILE, \"w+\") as f:\n f.write(continue_from_title)\n print(\"Wrote\", continue_from_title, \"in\", TEXT_FILE)\n\n print(\"No pages left to be tagged\")", "def main():\n token_dict_dict = {}\n all_dict = {}\n pronoun_proportion_list = []\n tag = 'PRP' # base tag for all pronouns, see 'https://www.clips.uantwerpen.be/pages/MBSP-tags' for more info\n\n for text in glob.glob(file_loc):\n file_title = os.path.basename(text).split('.')[0]\n\n with open(text, 'r') as f:\n speech = f.read()\n text_dict = {}\n\n try:\n #TextBlob goodness that tags all the words for me\n speech_blob = TextBlob(clean(speech))\n speech_blob.tags\n except:\n #for some reason Trump's address contained a unicode 128 character that I couldn't find\n #instead of getting rid of it in a single file, i decided to have an except that could catch that case in\n #all sitations and handle them accordingly\n\n #lets the user know that there was an issue, and that it's been 
handled\n print file_title,\n print \"contains unexpected unicode characters. they have been removed and the document has been processed\"\n\n #gets rid of all unicode characters. i could do this by default, but all the other files ran fine\n #so i didn't think it was worth it\n speech_blob = TextBlob(clean(speech.decode('unicode_escape').encode('ascii','ignore')))\n\n for token in speech_blob.tags:\n # builds the inital dictionary of data, only looks at words with a specified tag\n if tag in token[1]:\n try:\n text_dict[token[0]] += 1\n except:\n text_dict[token[0]] = 1\n try:\n all_dict[token[0]] += 1\n except:\n all_dict[token[0]] = 1\n #breaks the title into 3 pieces: number, president, date\n token_dict_dict[file_title] = text_dict\n partial_split, date = string.rsplit(file_title, '_', 1)\n num_pres, pres = string.split(partial_split, '_', 1)\n\n pronoun_proportion_list.append(\n (pres, date, total_to_proportion(pronoun_breakdown(token_dict_dict[file_title])))\n )\n create_pronoun_graph(sort_list_by_president_order(pronoun_proportion_list))", "def main() -> None:\n\n\t# Retrieving the wiki URL\n\turl = get_api_url()\n\tprint(url)\n\n\t# Creates file if it does not exist\n\topen(TEXT_FILE, \"a\")\n\n\twith open(TEXT_FILE, \"r\") as f:\n\t\tlast_title = f.readline().strip()\n\t\tprint(\"Starting from:\", last_title)\n\n\tmodify_pages(url, last_title)\n\n\tprint(\"\\nNo pages left to be tagged\")", "def main(fpath, html_dir):\n with open(fpath, \"r\") as fptr:\n pageid_emojis = {os.path.split(k)[-1]: v for k, v in json.load(fptr).items()}\n\n print(\"Loading emoji corpus. This may take a while...\")\n index, corpus = [], []\n for html_fpath in glob.glob(os.path.join(html_dir, \"*\")):\n with open(html_fpath, \"r\") as fptr:\n html = fptr.read()\n\n tree = etree.fromstring(html)\n text = \"\".join(tree.xpath(\".//text()\"))\n corpus.append(text)\n\n pageid = os.path.splitext(os.path.split(html_fpath)[-1])[0]\n index.append(pageid_emojis[pageid])\n\n vectorizer = sklearn.feature_extraction.text.TfidfVectorizer(\n max_features=10000, min_df=1, preprocessor=preprocess\n )\n\n print(\"Fitting Model. 
This may take a while...\")\n weights = vectorizer.fit_transform(corpus)\n\n model = {\n \"weights\": weights,\n \"columns\": vectorizer.get_feature_names(),\n \"index\": index,\n }\n\n with open(\"build/model.pckl\", \"wb\") as fptr:\n pickle.dump(model, fptr)", "def Run():\n file_name = AskForFileName()\n file_content = ReadFileContents(file_name)\n head_list = BuildHeadList(file_content)\n atom_list = BuildAtomList(file_content)\n tail_list = BuildTailList(file_content)\n WriteNewFile(head_list, atom_list, tail_list)", "def main(inputfname, outfname):\n with open(inputfname, 'rt', encoding='utf8') as fh:\n # first block\n reviews = []\n while True:\n comment = next(fh).strip()\n if not comment:\n # blank line, block separator\n break\n url_moviedb = next(fh).strip()\n url_moviedb, movie_id = fix_moviedb(url_moviedb)\n reviews.append((comment, url_moviedb, movie_id))\n\n # second block\n futures = []\n while True:\n try:\n title = next(fh).strip()\n except StopIteration:\n break\n if not title:\n continue\n url_moviedb = next(fh).strip()\n url_moviedb, movie_id = fix_moviedb(url_moviedb)\n futures.append((title, url_moviedb, movie_id))\n\n lines, viewed = process_reviews(reviews)\n lines.append(\"\")\n lines.extend(process_futures(futures))\n lines.append(\"\")\n\n pelis_lines, raw_pending = proc_pelshtml(futures, viewed)\n\n lines.extend(line.format(enter='', space=' ') for line in raw_pending)\n lines.append(\"\")\n lines.extend(pelis_lines)\n lines.extend(line.format(enter='<br/>', space='&nbsp;') for line in raw_pending)\n\n with open(outfname, 'wt', encoding='utf8') as fh:\n fh.write(\"\\n\".join(lines))", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def main():\n feed_db, entry_db = openDBs(FEED_DB_FN, ENTRY_DB_FN)\n\n feeds = [ x.strip() for x in open(FEEDS_FN, \"r\").readlines() ]\n \n entries = getNewFeedEntries(feeds, feed_db, entry_db)\n \n if len(entries) > 0:\n out_fn = HTML_FN % time.strftime(\"%Y%m%d-%H%M%S\")\n writeAggregatorPage(entries, out_fn, DATE_HDR_TMPL, FEED_HDR_TMPL, \n ENTRY_TMPL, PAGE_TMPL)\n emailAggregatorPage(FROM_ADDR, TO_ADDR, SUBJECT, SMTP_HOST, out_fn)\n \n closeDBs(feed_db, entry_db)", "def main(nlp, file_path, final_file_path, from_line=0, to_line=None):\n with open(final_file_path, \"w\") as parsed_file:\n with open(file_path) as cnn_dm:\n line = cnn_dm.readline().strip()\n article_idx = 0\n while article_idx < from_line:\n line = cnn_dm.readline().strip()\n article_idx += 1\n if to_line is None:\n while line is not None and line != '':\n process_line(nlp, line, parsed_file)\n article_idx += 1\n print(\"{} articles processed from file {}\".format(article_idx, file_path))\n line = cnn_dm.readline().strip()\n else:\n while article_idx < to_line and line is not None and line != '':\n process_line(nlp, line, parsed_file)\n article_idx += 1\n print(\"{}th article processed from file {}\".format(article_idx, file_path))\n line = cnn_dm.readline().strip()", "def train(self, train_data):\n with open(train_data, 'r') as train_data:\n while True:\n tokens = train_data.readline().split()\n pos = train_data.readline().split()\n labels = train_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Generate transition probabilities\n for i in range(0, len(labels) - self.N_VALUE + 1):\n self.add_label_sequence(labels[i:i + 
self.N_VALUE])\n # Generate lexical generation probabilities\n for i in range(0, len(tokens)):\n token = tokens[i].lower()\n label = labels[i]\n self.add_word_tag(token, label)\n self.handle_unknowns()", "def process_training_data(train_page_names):\n\n print('Reading data')\n images_train = []\n labels_train = []\n for page_name in train_page_names:\n images_train = utils.load_char_images(page_name, images_train)\n labels_train = utils.load_labels(page_name, labels_train)\n labels_train = np.array(labels_train)\n\n print('Extracting features from training data')\n bbox_size = get_bounding_box_size(images_train)\n fvectors_train_full = images_to_feature_vectors(images_train, bbox_size)\n\n model_data = dict()\n model_data['labels_train'] = labels_train.tolist()\n model_data['bbox_size'] = bbox_size\n\n print('Reducing to 10 dimensions')\n fvectors_train = reduce_dimensions(fvectors_train_full, model_data)\n\n model_data['fvectors_train'] = fvectors_train.tolist()\n\n with open('ListOfOneHundredThousandWords.txt') as word_file:\n words_list = [words.replace(\" \", \"\").strip('\\n').upper() for words in word_file]\n model_data['words'] = words_list\n\n return model_data", "def __init__(self, messages):\n self.pq = PriorityQueue()\n self.cover = []\n self.words_in_cover = set()\n\n # add message dictionary and process all messages (add to priority queue)\n self.message_corpus = messages\n # TODO: process messages prior to ingestion\n for msg_id in self.message_corpus.iterkeys():\n self.add_entry(msg_id)", "def main():\n with open(\"page_data.yaml\", 'r') as inputstr:\n config_data = yaml.safe_load(inputstr)\n ointf = OutputInterface('template.txt')\n table_data = get_song_artist_matches()\n ofilen = config_data['directory'] + os.sep + 'common_songs.html'\n title = 'Song Titles and Band Name Overlap'\n header = ['No.', 'Artist', 'Peak', 'Date', 'Song/Artist', 'Peak',\n 'Date', 'Song']\n ointf.build_page(ofilen, title, header, fmt_table(table_data))\n ointf.inject(XTRAEDIT)\n ointf.output()", "def preprocess(self):\n\n self._build_labels_dict(['one', 'two', 'three', 'four', 'five'])\n\n with open(self.data_path + self.file_name, 'rb') as csvfile:\n\n reader = csv.reader(csvfile, delimiter=\",\")\n for row in reader:\n self.texts.append(row[1])\n self.labels.append(self.labels_index[row[0]])\n\n print('Found %s texts.' 
% len(self.texts))", "def process(self):\n\n linelang = defaultdict(int)\n wordlang = defaultdict(int)\n\n linefont = defaultdict(int)\n wordfont = defaultdict(int)\n\n inputfiles = self.input_files\n for input_file in inputfiles:\n\n alignurl = input_file.url\n pcgts = parse(alignurl, True)\n page = pcgts.get_Page()\n regions = page.get_TextRegion()\n\n for region in regions:\n lines = region.get_TextLine()\n\n for line in lines:\n try:\n llang = line.primaryLanguage\n linelang[llang] += 1\n except TypeError:\n pass\n\n try:\n lfont = line.fontFamily\n linefont[lfont] += 1\n except TypeError:\n pass\n\n words = line.get_Word()\n for word in words:\n try:\n wlang = word.language\n wordlang[wlang] += 1\n except TypeError:\n pass\n\n try:\n wfont = word.get_TextStyle().fontFamily\n wordfont[wfont] += 1\n except TypeError:\n pass\n\n #predominant language\n try:\n lang = max(linelang, key=lambda k: linelang[k])\n except TypeError:\n try:\n lang = max(wordlang, key=lambda k: wordlang[k])\n except TypeError:\n lang = 'German'\n\n #predominant font\n try:\n font = max(linefont, key=lambda k: linefont[k])\n except TypeError:\n try:\n font = max(wordfont, key=lambda k: wordfont[k])\n except TypeError:\n font = 'Antiqua'\n\n\n print(lang)\n print(font)", "def process(self):\n # Opening and preprocessing of the input file\n if self.options.mbtiles_fromdisk or self.options.mbtiles_todisk:\n if self.options.mbtiles_fromdisk:\n i_parm=10\n if self.options.mbtiles_todisk:\n i_parm=11\n if self.options.verbose:\n print \"GDAL2MbTiles :mbtiles from/to disk [\",i_parm,\"] mbtiles_fromdisk[\",self.options.mbtiles_fromdisk,\"] mbtiles_todisk[\",self.options.mbtiles_todisk,\"]\"\n self.mbtiles_setup(i_parm)\n return\n else:\n if self.options.verbose:\n print \"GDAL2MbTiles :tile creation mbtiles[\",self.options.mbtiles,\"]\"\n self.open_input()\n # Generation of main metadata files and HTML viewers\n self.generate_metadata()\n # Generation of the lowest tiles\n self.generate_base_tiles()\n # Generation of the overview tiles (higher in the pyramid)\n self.generate_overview_tiles()\n # Generating of KML\n self.generate_kml()", "def main():\n file = \"http://icarus.cs.weber.edu/~hvalle/hafb/words.txt\"\n words = fetch_words(file)\n print_items(words)", "def gnumed_hl7_importer(file_name):\n #TODO ERROR for opening a file\n file = open(file_name)\n\n message = ''\n\n find_message_header = re.compile(r\"^MSH\")\n #TODO Have it be able to process both a directory or an individual file\n for n in file:\n if re.match(find_message_header, n):\n if message != '':\n process_lab_message(message)\n message = n + '\\r'\n else:\n message = message + n + '\\r'", "def main():\n if sys.argv[1] == 'train':\n build_training_data()\n main_trainer()\n\n if sys.argv[1] == 'predict' and len(sys.argv) > 2:\n from predict import classification\n from AdaboostPredict import decision_stumps\n input_file = open(sys.argv[2])\n data = input_file.readlines()\n print(\"Decision Tree prediction\")\n for i in data:\n print(classification(i, i.strip().split()))\n\n print(\"\\nAdaboost prediction\")\n\n for i in data:\n print(decision_stumps(i, i.strip().split()))\n\n elif sys.argv[1] == 'predict':\n print('Wrong usage for prediction. 
Please supply a file after predict')", "def parse_input():\n with open(\"input_07.txt\", \"r\") as f:\n for line in f:\n Bag(line)", "def main(training_file_name):\n attribute, inverse = build_classifier(training_file_name)\n trained_file = open(TRAINED_FILE_NAME, mode='w')\n prolog(trained_file)\n write_body(trained_file, attribute, inverse)\n epilog(trained_file)", "def process_from_file():\r\n global default_input_path\r\n print \"JoomFind v 1.0\"\r\n print \"\\n\\nTrying to read URL(s) form \" + default_input_path + \" file...\\n\"\r\n try:\r\n if not default_input_path:\r\n f = open(\"urls.txt\")\r\n else:\r\n f=open(default_input_path)\r\n cwd=os.getcwd()\r\n file_path = cwd + path_slash + f.name\r\n\t# extracting url's to list from file\r\n start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#',' ',\"\\n\"]]\r\n if not start_urls:\r\n print \"File is empty. Add some URL(s) first.\\n\"\r\n f.close()\r\n return 0\r\n except:\r\n print \"File not found. Make sure it exists.\\n\"\r\n return 0\r\n #print start_urls\r\n \r\n num=str(len(start_urls))\r\n print \"Found \" + num + \" URL(s) on \" + time.asctime(time.localtime(time.time())) + \"\\n\"\r\n \r\n of=open(default_output_path,'a+')\r\n of.write(\"\\n\\n\\tScanning \" + num + \" URL(s) \")\r\n of.write(\"\\n\\n\\tDate\\Time : \" + time.asctime(time.localtime(time.time())) )\r\n of.write(\"\\n\\n\\tInput file path : \" + default_input_path + \"\\n\\n\")\r\n of.close()\r\n \r\n for url in start_urls:\r\n global provided_url\r\n provided_url=url\r\n print \"\\nWorking on URL \" + str(start_urls.index(url)+1) + \": \" + provided_url\r\n processing()\r\n print \"\\nAll done! Check '\" + default_output_path +\"' file for results.\\n\"", "def train_model(filename):\n counts = get_file_counts(filename)\n new_file = open(filename, \"r\")\n num_lines = 0\n for line in new_file:\n num_lines += 1 \n #number of lines in file\n return counts_to_probs(counts, num_lines)", "def pre_process(fname, num_ex, alt_speaker):\n conversation = []\n with PathManager.open(fname) as f:\n lines = f.readlines()\n random.shuffle(lines)\n lines = lines[:num_ex]\n for line in lines:\n data = json.loads(line)\n dialogue = data[\"dialog\"]\n for item in dialogue:\n speaker = item[0]['id']\n text = item[0]['text']\n conversation += [(speaker, text)]\n conversation += [(END_OF_CONVO, END_OF_CONVO)]\n\n return conversation", "def parse_results_file(filename):\n\tfile = open(filename, 'r')\n\tpretext=[line for line in file.readlines() if line.strip()]\n\tfile.close()\n\n\ttext = []\n\tprocessed = []\n\tlanguages = 'NONE'\n\tID = 'NONE'\n\t\n\tmoreheader = raw_input('Extra header labels from question field (e.g.: item,condition,factor1,factor2): ')\n\tstim_type = raw_input('What type are your stims? (i.e. AcceptabilityJudgment): ')\n\toutput_loc = raw_input('Where would you like to put your parsed file? 
(enter filename path): ')\n\t\n\t#takes out comments\n\tfor line in pretext:\n\t\tif re.match('#', line):\n\t\t\tcontinue\n\t\telse:\n\t\t\ttext.append(line)\n\n\tfirst = 1;\n\n\tfor line in range(len(text)):\n\t\t#get their info\n\t\tif re.search('Form', text[line]):\n\t\t\tif re.search('number', text[line]):\n\t\t\t\tID = re.split('number,', text[line])[1].strip()\n\t\t\telif re.search('age', text[line]):\n\t\t\t\tlanguages = re.split('age,', text[line])[1].strip()\n\n\t\t#looks for the main stimulus type, as entered earlier\t\t\n\t\tif re.search(stim_type, text[line]):\n\t\t\tif first:\n\t\t\t\t#print 'first'\n\t\t\t\tprocessed.append(str(ID+ ','+languages+','+text[line]))\n\t\t\t\tfirst=0\n\t\t\telse:\n\t\t\t\ttoAmend = processed.pop()\n\t\t\t\t#print str('toAmend: ' + toAmend)\n\t\t\t\ttoAdd=''\n\t\t\t\tsplits = re.split('NULL,', text[line])\n\t\t\t\tfor thing in splits[1:]:\n\t\t\t\t\tif thing is not '':\n\t\t\t\t\t\ttoAdd = str(toAdd + ',' + thing.strip(','))\n\t\t\t\t#print str('toAdd: ' + toAdd)\n\t\t\t\tprocessed.append(str(toAmend.strip()+ toAdd))\n\t\t\t\tfirst = 1\n\n\t\t#if the line is a question line, there's more to append\n\t\tif re.search('Question', text[line]):\n\t\t\ttoAmend = processed.pop()\n\t\t\tpart = re.split('\\$', text[line])[1]\n\t\t\tpart.strip('$')\n\t\t\tparts = part.split('%2C')\n\t\t\tprocessed.append(str(toAmend.strip()+ ','+ string.join(parts, ',')+'\\n'))\n\t\t\t\n\toutput = open(output_loc, 'w')\n\n\theader = 'ID,Languages,Time sent,MD5 Hash of IP Address,Controller,Item Number,Element Number,Type,Group,Stimulus,Answer,RT,'\n\n\toutput.write(str(header+moreheader+'\\n'))\n\n\t#put it all into a text file\n\tfor line in processed:\n\t\toutput.write(line)\n\toutput.close()", "def process(self, message, **kwargs):\n if self.classifier is None:\n self.train()\n\n if message.get(\"text\") is not None:\n sid = SentimentIntensityAnalyzer()\n res = sid.polarity_scores(message.get(\"text\"))\n key, value = max(res.items(), key=lambda x: x[1])\n\n if key == \"pos\":\n key = \"Positive\"\n elif key == \"neg\":\n key = \"Negative\"\n else:\n key = \"Neutral\"\n\n custom_tokens = self.remove_noise(word_tokenize(message.get(\"text\")))\n t = self.classifier.prob_classify(dict([token, True] for token in custom_tokens))\n\n sentiment = 'Positive' if t.prob('Positive') > t.prob('Negative') else 'Negative'\n confidence = max(t.prob('Positive'), t.prob('Negative'))\n\n found, entry = self.manager.getMovieName(message.get(\"text\"))\n movie = str(entry['original_title'].item())\n \n genre_entry, aux_found_genre = self.manager.fuzzy_find_genre(message.get(\"text\"), with_ratio=True)[0]\n genre = genre_entry\n \n\n if len(message.get(\"text\")) > 20:\n entity = self.convert_to_rasa(sentiment, confidence, name=\"our_sentiment_extractor\")\n else:\n entity = self.convert_to_rasa(key, value, name=\"builtin_sentiment_extractor\")\n\n message.set(\"sentiment\", [entity], add_to_output=True)\n\n entity = self.convert_movie_to_rasa(movie, found)\n message.set(\"movies\", [entity], add_to_output=True)\n\n if message.get(\"text\").strip() == \"no\":\n found_genre = False\n else:\n found_genre = True if aux_found_genre > 80 else False\n\n entity = self.convert_movie_to_rasa(genre, found_genre, entity=\"genres_detected\")\n print(entity)\n message.set(\"genres\", [entity], add_to_output=True)", "def process_file(file_name):\n pass # delete this line and replace with your code here" ]
[ "0.5945443", "0.5409753", "0.5362085", "0.5329677", "0.52767473", "0.5266814", "0.5265608", "0.52539533", "0.5241925", "0.5217411", "0.5205404", "0.51909417", "0.5187099", "0.51860255", "0.5160676", "0.5133867", "0.5133248", "0.51240563", "0.5112366", "0.51108074", "0.5107463", "0.51022255", "0.5090593", "0.5090475", "0.5085295", "0.5069151", "0.50685585", "0.5063926", "0.50522864", "0.5047008" ]
0.6416924
0
This function parses the predictions in one individual message. If the message contains predictions, they will be inserted into the oracle database.
def ParsePredictions(odb, message, contest, round_nums):
  duel = 0
  if contest['CompetitorsPerMatch'] == 2:
    duel = 1
  user_id = odb.GetUserId(message['User'])
  if user_id is None:
    user_id = GetUserId(odb, message['User'], add_alt=1)
  # This enables admins to enter predictions for other users.
  # TODO: Make this a flag or something
  # if user_id in (1,2):
  #   user_id = PromptForId(odb, message, user_id)
  # Split the message into lines so we can examine each for predictions.
  pattern = ('^\s*(.*?)\s*'
             '(?:over .*?)?'
             '(?:with\s*)?'
             '(?:w/\s*)?'
             '(?:\W+\s*)?'
             '(\d{1,3}[,\.]?\d*)\s*%')
  prediction_re = re.compile(pattern)
  lines = re.split('(?:<br />)+', message['Text'])
  for line in lines:
    match = prediction_re.search(line)
    if match is not None:
      winner_name = match.group(1)
      # Eliminate double quotes because they will cause problems with MySQL.
      winner_name = winner_name.replace('"', '')
      percent = match.group(2)
      percent = percent.replace(',', '.')
      percent = float(percent)
      if percent > 100:
        percent = 100.0
      if duel and (float(percent) < 50):
        # This is an invalid prediction
        print 'Invalid prediction from %s: %s with %s' % (message['User'], winner_name, percent)
        continue
      winner_id = DecipherName(odb, winner_name, line, contest['Type'], contest['ContestId'], round_nums)
      if winner_id is None:
        continue
      match_id = odb.LookupMatchId(winner_id, round_nums, contest['ContestId'], message['Timestamp'])
      if match_id is None:
        print '\n%s predicted a competitors who isn\'t in this round:\n%s\n' \
            % (message['User'], line)
        continue
      if duel:
        old_prediction = odb.GetPredictions(user_id=user_id, match_id=match_id)
      else:
        old_prediction = odb.GetPredictions(user_id=user_id, match_id=match_id, character_id=winner_id)
      if not old_prediction or \
         old_prediction[0]['LastUpdated'] <= message['Timestamp']:
        # Check if the prediction is late.
        match_info = odb.GetMatches(match_id=match_id)
        time_margin = datetime.timedelta(minutes=0)
        if message['Timestamp'] + time_margin >= match_info[0]['MatchDate']:
          print '\nAccept late prediction from %s posted at %s?' \
              % (message['User'], message['Timestamp'])
          print '%s with %s' % (winner_name, percent)
          accept_late = raw_input('(y/n): ')
          if accept_late != 'y':
            continue
        odb.SetPrediction(user_id, match_id, winner_id, percent, message['Timestamp'], duel=duel)
        print '%s predicts match %s: %s with %s' % (message['User'], match_id, winner_name, percent)
      else:
        # We already have a newer prediction.
        print "Ignoring old prediction from %s at %s" % (message['User'], message['Timestamp'])
        # TODO(dscotton): Check if the prediction is being submitted too late.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_raw_predictions(self, raw_predictions):\n result = []\n for line in raw_predictions.split(\"\\n\"):\n line_parts = line.split(\"|\")\n type = line_parts[0]\n assert type.lstrip(\"*\") in (\n \"FP\", \"TP\", \"TN\", \"FN\"), 'Expected {} to be in (FP, TP, TN, FN), {}'.format(line[0], line)\n\n docid = line_parts[1]\n start_end = line_parts[2]\n entity_name = line_parts[3]\n alt_gene = None\n alt_gene_start_end = None\n\n if type.lstrip(\"*\") == \"TP\":\n start_end = line_parts[3]\n entity_name = line_parts[2]\n alt_gene = line_parts[4]\n alt_gene_start_end = line_parts[5]\n\n result.append({\n \"type\": type,\n \"docid\": docid,\n \"start_end\": start_end,\n \"entity_name\": entity_name,\n \"alt_gene\": alt_gene,\n \"alt_gene_start_end\": alt_gene_start_end,\n })\n return result", "def process(self, message, **kwargs):\r\n # type: (Message, **Any) -> None\r\n\r\n start = time.time()\r\n intent = {\"name\": None, \"confidence\": 0.0}\r\n intent_ranking = []\r\n\r\n if self.predictor is None:\r\n logger.error(\"There is no trained tf.session: \"\r\n \"component is either not trained or \"\r\n \"didn't receive enough training data\")\r\n\r\n else:\r\n\r\n X = message.get(\"text_features\").tolist()\r\n examples = []\r\n feature = {}\r\n # convert input x to tf.feature with float feature spec\r\n feature['a_in'] = tf.train.Feature(float_list=tf.train.FloatList(value=X))\r\n # build tf.example for prediction\r\n example = tf.train.Example(\r\n features=tf.train.Features(\r\n feature=feature\r\n )\r\n )\r\n examples.append(example.SerializeToString())\r\n\r\n # Make predictions.\r\n logger.info(\"estimator prediction finished\")\r\n result_dict = self.predictor({'inputs': examples})\r\n result_score_list = result_dict['scores'][0]\r\n max_score = np.max(result_dict['scores'][0])\r\n max_index = np.argmax(result_dict['scores'][0])\r\n\r\n\r\n # if X contains all zeros do not predict some label\r\n\r\n\r\n\r\n if len(X)>0:\r\n intent = {\r\n \"name\": self.inv_intent_dict[max_index], \"confidence\": float(max_score)\r\n }\r\n ranking = result_score_list[:len(result_score_list)]\r\n intent_ranking = [{\"name\": self.inv_intent_dict[intent_idx],\r\n \"confidence\": float(score)}\r\n for intent_idx, score in enumerate(ranking)]\r\n\r\n\r\n intent_ranking = sorted(intent_ranking, key=lambda s: s['confidence'], reverse=True)\r\n\r\n message.set(\"intent\", intent, add_to_output=True)\r\n message.set(\"intent_ranking\", intent_ranking, add_to_output=True)\r\n end = time.time()\r\n logger.info(\"bert intent classifier time cost %.3f s\" % (end - start))", "def parse_prediction(self, predictions):\n\t\tusers = list()\n\t\tprint(predictions)\n\t\tfor prediction in predictions:\n\t\t\tfor email in prediction:\n\t\t\t\tusers.append(email)\n\t\t\t\t\n\t\treturn users", "def prediction(name=None, message=''):", "def process(self, message, **kwargs):\n if self.classifier is None:\n self.train()\n\n if message.get(\"text\") is not None:\n sid = SentimentIntensityAnalyzer()\n res = sid.polarity_scores(message.get(\"text\"))\n key, value = max(res.items(), key=lambda x: x[1])\n\n if key == \"pos\":\n key = \"Positive\"\n elif key == \"neg\":\n key = \"Negative\"\n else:\n key = \"Neutral\"\n\n custom_tokens = self.remove_noise(word_tokenize(message.get(\"text\")))\n t = self.classifier.prob_classify(dict([token, True] for token in custom_tokens))\n\n sentiment = 'Positive' if t.prob('Positive') > t.prob('Negative') else 'Negative'\n confidence = max(t.prob('Positive'), t.prob('Negative'))\n\n found, 
entry = self.manager.getMovieName(message.get(\"text\"))\n movie = str(entry['original_title'].item())\n \n genre_entry, aux_found_genre = self.manager.fuzzy_find_genre(message.get(\"text\"), with_ratio=True)[0]\n genre = genre_entry\n \n\n if len(message.get(\"text\")) > 20:\n entity = self.convert_to_rasa(sentiment, confidence, name=\"our_sentiment_extractor\")\n else:\n entity = self.convert_to_rasa(key, value, name=\"builtin_sentiment_extractor\")\n\n message.set(\"sentiment\", [entity], add_to_output=True)\n\n entity = self.convert_movie_to_rasa(movie, found)\n message.set(\"movies\", [entity], add_to_output=True)\n\n if message.get(\"text\").strip() == \"no\":\n found_genre = False\n else:\n found_genre = True if aux_found_genre > 80 else False\n\n entity = self.convert_movie_to_rasa(genre, found_genre, entity=\"genres_detected\")\n print(entity)\n message.set(\"genres\", [entity], add_to_output=True)", "def _postprocess(self, preds):\n ntok = preds.pop(\"ntok\")\n ids = preds.pop(\"input_ids\")[:ntok]\n preds[\"tokens\"] = self._detokenize(ids)\n\n # Decode predicted top-k tokens.\n # token_topk_preds will be a List[List[(word, prob)]]\n # Initialize prediction for 0th token as N/A.\n token_topk_preds = [[(\"N/A\", 1.)]]\n pred_ids = preds.pop(\"top_k_indices\")[:ntok] # <int>[num_tokens, k]\n pred_probs = preds.pop(\"top_k_probs\")[:ntok] # <float32>[num_tokens, k]\n for token_pred_ids, token_pred_probs in zip(pred_ids, pred_probs):\n token_pred_words = self._detokenize(token_pred_ids)\n token_topk_preds.append(list(zip(token_pred_words, token_pred_probs)))\n preds[\"pred_tokens\"] = token_topk_preds\n\n # Process attention.\n for key in preds:\n if not re.match(r\"layer_(\\d+)/attention\", key):\n continue\n # Select only real tokens, since most of this matrix is padding.\n # <float32>[num_heads, max_seq_length, max_seq_length]\n # -> <float32>[num_heads, num_tokens, num_tokens]\n preds[key] = preds[key][:, :ntok, :ntok].transpose((0, 2, 1))\n # Make a copy of this array to avoid memory leaks, since NumPy otherwise\n # keeps a pointer around that prevents the source array from being GCed.\n preds[key] = preds[key].copy()\n\n return preds", "def process(self, message: \"Message\", **kwargs: Any) -> None:\n\n label, label_ranking = self.predict_label(message)\n\n message.set(\"intent\", label, add_to_output=True)\n message.set(\"intent_ranking\", label_ranking, add_to_output=True)", "def test_service_api_predict_multiple_raw_classified(service_app):\n response = service_app.post('/predict',\n data=json.dumps(data),\n content_type='application/json')\n response_data = json.loads(response.data)\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert response_data['message'] == 'Records successfully classified'\n assert len(response_data['prediction'].keys()) == 102\n assert response_data['prediction']['business_outcome'] == [4, 5]\n assert response_data['prediction']['phat'] == [0.8228085289874678, 0.753958838418463]\n assert all(len(value) == 2 for value in response_data['prediction'].values())", "def postprocess_relation_predictions(predictions):\n \n output = {}\n word_pairs = [wp.split(\" -> \") for wp in predictions.keys()]\n for wp in word_pairs:\n fp = \" -> \".join([wp[0], wp[1]])\n bp = \" -> \".join([wp[1], wp[0]])\n \n if predictions[fp][\"relations\"][0] == predictions[bp][\"relations\"][0]:\n if predictions[fp][\"confidence\"][0] > predictions[bp][\"confidence\"][0]:\n output[fp] = predictions[fp]\n else:\n output[bp] 
= predictions[bp]\n else:\n output[fp] = predictions[fp]\n output[bp] = predictions[bp]\n return output", "def test_service_api_predict_single_raw_classified(service_app):\n response = service_app.post('/predict',\n data=json.dumps(data[1:2]),\n content_type='application/json')\n\n response_data = json.loads(response.data)\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert response_data['message'] == 'Records successfully classified'\n assert len(response_data['prediction'].keys()) == 102\n assert response_data['prediction']['business_outcome'] == [4]\n assert response_data['prediction']['phat'] == [0.8228085289874678]\n assert all(len(value) == 1 for value in response_data['prediction'].values())", "def process_classified_message(self, message, classification): \n pass", "def predict(request):\n request_json = request.get_json()\n if request_json and 'review_body' in request_json:\n content = request_json['review_body'] # TODO add review_summary\n prediction = get_prediction(\n content, 'projects/207895552307/locations/us-central1/models/TCN5004391989450375168')\n classifications = []\n return MessageToJson(prediction)\n else:\n return f'ERROR: Missing review_body!'", "def postprocess(self, data):\n all_predictions, all_nbest_json, scores_diff_json = predictions(self._dev_dataset,\n data,\n self._tokenizer)\n\n if len(all_nbest_json) == 0 or len(all_nbest_json[0]) == 0:\n return [{'predicted': '',\n 'confidence': 0}]\n\n return [{'predicted': all_nbest_json[0][0]['text'],\n 'confidence': all_nbest_json[0][0]['probability']}]", "def validate(self, validate_data):\n with open(validate_data, 'r') as validate_data:\n true_positive = 0\n true_negative = 0\n false_positive = 0\n false_negative = 0\n result = {}\n for type in self.label_type_map:\n result[type] = []\n while True:\n tokens = validate_data.readline().split()\n pos = validate_data.readline().split()\n labels = validate_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Classify all named entities in a sentence 85\n curr_results = self.viterbi(tokens)\n for i in range(0, len(labels)):\n if curr_results[i] != 'O':\n if labels[i] == 'O':\n false_positive += 1 # Not 'O', but should be 'O'\n else:\n if self.label_type_map[labels[i]] == self.label_type_map[curr_results[i]]:\n true_positive += 1 # Correct prediction\n else:\n if labels[i] == 'O':\n true_negative += 1 # Correct prediction of 'O'\n else:\n false_negative += 1 # Predicted 'O', not 'O'\n # Calculate precision - TP / (TP + FP)\n precision = float(true_positive) / float(true_positive + false_positive)\n # Calculate recall - TP / (TP + FN)\n recall = float(true_positive) / float(true_positive + false_negative)\n # Calculate F-Score - 2 * P * R / (P + R)\n f_score = float(2*precision * recall) / float(precision + recall)\n print \"Precision: \" + str(precision)\n print \"Recall: \" + str(recall)\n print \"F-score: \" + str(f_score)", "def rank_pre_extract(self, mention_data, predictions):\n mdata = pd.DataFrame(mention_data)\n\n\n\n\n pass", "def model_evaluate(query):\n engine = create_engine('sqlite:///data/DisasterResponse.db')\n df = pd.read_sql_table('Messages', engine)\n\n model = joblib.load(\"models/model.pkl\")\n \n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n return classification_labels, classification_results", "def _process_prediction(\n prediction: Mapping[Text, np.ndarray],\n text_examples: 
Mapping[Text, eval_utils.OpenKpTextExample],\n writer_tfrecord,\n writer_jsonl,\n metrics: Optional[Mapping[int, _MetricAverager]] = None) -> None:\n # [kp_max_length, long_max_length] shape.\n ngram_logits = prediction['ngram_logits']\n\n features = collections.OrderedDict()\n features['ngram_logits'] = input_utils.create_float_feature(\n ngram_logits.flatten())\n\n position_predictions = eval_utils.logits_to_predictions(\n ngram_logits, max_predictions=FLAGS.max_position_predictions)\n # Sort predictions for convenience.\n position_predictions.sort(key=lambda x: x.logit, reverse=True)\n features['top_pos_logit'] = input_utils.create_float_feature(\n x.logit for x in position_predictions)\n features['top_pos_start_idx'] = input_utils.create_int_feature(\n x.start_idx for x in position_predictions)\n features['top_pos_phrase_len'] = input_utils.create_int_feature(\n x.phrase_len for x in position_predictions)\n\n url = ''.join(chr(x) for x in prediction['url_code_points'] if x != -1)\n features['url'] = input_utils.create_bytes_feature([url])\n\n if url in text_examples:\n text_example = text_examples[url]\n kp_predictions = text_example.get_key_phrase_predictions(\n position_predictions, max_predictions=FLAGS.max_kp_predictions)\n if len(kp_predictions) < FLAGS.max_kp_predictions:\n tf.logging.warn(f'Made fewer than `max_kp_predictions` for URL: {url}')\n writer_jsonl.write(\n json.dumps({\n 'url': url,\n 'KeyPhrases': [[kp] for kp in kp_predictions]\n }) + '\\n')\n\n features['kp_predictions'] = input_utils.create_bytes_feature(\n kp_predictions)\n\n if metrics is not None:\n precision, recall, f1 = text_example.get_score_full(kp_predictions)\n for i in (1, 3, 5):\n p = precision[i - 1]\n r = recall[i - 1]\n f = f1[i - 1]\n features[f'p_at_{i}'] = input_utils.create_float_feature([p])\n features[f'r_at_{i}'] = input_utils.create_float_feature([r])\n features[f'f1_at_{i}'] = input_utils.create_float_feature([f])\n metrics[i].add_example(precision=p, recall=r, f1=f)\n else:\n tf.logging.error(f'No text example found for URL: {url}')\n\n writer_tfrecord.write(\n tf.train.Example(features=tf.train.Features(\n feature=features)).SerializeToString())", "def tag2predictions(ote_tag_sequence, ts_tag_sequence):\n n_tag = len(ote_tag_sequence)\n # opinion target sequence and targeted sentiment sequence\n ot_sequence, ts_sequence = [], []\n beg, end = -1, -1\n for i in range(n_tag):\n tag = ote_tag_sequence[i]\n if tag == 'S':\n ot_sequence.append((i, i))\n elif tag == 'B':\n beg = i\n elif tag == 'E':\n end = i\n if end > beg and beg != -1:\n ot_sequence.append((beg, end))\n beg, end = -1, -1\n sentiments = []\n beg, end = -1, -1\n for i in range(n_tag):\n ts_tag = ts_tag_sequence[i]\n # current position and sentiment\n eles = ts_tag.split('-')\n if len(eles) == 2:\n pos, sentiment = eles\n else:\n pos, sentiment = 'O', 'O'\n if sentiment != 'O':\n # current word is a subjective word\n sentiments.append(sentiment)\n if pos == 'S':\n # singleton\n ts_sequence.append((i, i, sentiments[0]))\n sentiments = []\n elif pos == 'B':\n beg = i\n elif pos == 'E':\n end = i\n # schema1: only the consistent sentiment tags are accepted\n # that is, all of the sentiment tags are the same\n if end > beg > -1 and len(set(sentiments)) == 1:\n ts_sequence.append((beg, end, sentiment))\n sentiments = []\n beg, end = -1, -1\n\n # schema2: only consider the sentiment at the beginning of the aspect span\n # if end > beg > -1:\n # ts_sequence.append((beg, end, sentiments[0]))\n # sentiments = []\n # beg, end = -1, -1\n 
return ot_sequence, ts_sequence", "def parse(self, message):\n resp = json.loads((self.send_api_request(message)).decode('utf-8'))\n\n nlu_response = NLUResponse()\n nlu_response.text = message\n intent_schema = IntentSchema()\n if resp[\"result\"][\"metadata\"]:\n intent_schema.name = resp[\"result\"][\"metadata\"][\"intentName\"]\n intent_schema.confidence = resp[\"result\"][\"score\"]\n else: # fallback if no intent is given by the nlu\n intent_schema.name = \"greet\"\n intent_schema.confidence = 0.0\n nlu_response.intent = intent_schema\n print(\"Recognized Intent by Dialogflow {}\".format(intent_schema.name ))\n\n pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(resp)\n\n try:\n nlu_response.entities = []\n entities = resp[\"result\"][\"parameters\"]\n resolved_query = resp[\"result\"][\"resolvedQuery\"]\n\n for key, value in entities.items():\n if value:\n entity_schema = EntitiesSchema()\n entity_schema.start = resolved_query.find(value)\n entity_schema.end = resolved_query.find(value) + len(value)\n entity_schema.entity = key\n entity_schema.value = value\n nlu_response.entities.append(entity_schema)\n #print(\"Key: {}, Value: {}\".format(key, value))\n except Exception as err:\n logging.warning('No Entites extracted {}'.format(err))\n\n schema = RasaNLUSchema()\n data, error = schema.dump(nlu_response)\n\n return data", "def insult_me(\n message : str \n ):\n \n #load model\n model = Detoxify('original')\n \n #predict toxicity\n results = model.predict(message)\n \n #echo results\n click.echo(pd.Series(results))", "def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result", "def predict(self, sentences: List[str]) -> List[Dict[str, Any]]:\n inputs, offset_mapping = self.tokenize(sentences=sentences)\n outputs = self.model(**inputs)\n logits = outputs[\"logits\"]\n predicted_intents = self.convert_intent_logits(intent_logits=logits[1])\n predicted_entities = self.convert_entities_logits(entities_logits=logits[0], offset_mapping=offset_mapping)\n predicted_outputs = []\n for sentence, intent_sentence, entities_sentence in zip(sentences, predicted_intents, predicted_entities):\n predicted_outputs.append({})\n predicted_outputs[-1].update(intent_sentence)\n predicted_outputs[-1].update({\"entities\": entities_sentence})\n for entity in predicted_outputs[-1][\"entities\"]:\n entity[\"text\"] = sentence[entity[\"start\"]: entity[\"end\"]]\n\n if self.synonym_dict.get(entity[\"text\"], None):\n entity[\"original_text\"] = entity[\"text\"]\n entity[\"text\"] = self.synonym_dict[entity[\"text\"]]\n\n predicted_outputs[-1][\"text\"] = sentence\n\n return predicted_outputs", "def make_text_predictions(self, text, preprocessor):\n text = preprocessor.preprocess(text)\n y_pred = self.sgd_pipeline.predict_proba([text])\n return y_pred", "def update_predictions(data):\n # TODO: Priority 1 - update predictions with inference results\n # TODO: Understand from a research team exactly what the data is going to look like\n trackID = data[0]\n prediction = data[1]\n confidence = data[2]\n to_Insert_Array = [trackID, prediction, confidence]\n OutPut_Data[trackID] = to_Insert_Array", "def make_predictions(model, 
tokenizer, paragraphs):\n \n prediction_dataloader = bert_preprocess(paragraphs, tokenizer) \n predictions = eval_cpu(prediction_dataloader, model) \n malware_ids = get_pred_ids(predictions) \n return malware_ids", "def _parse_fit_and_predict_result(result):\n if len(result) > 1 and result[1] and not isinstance(result[1], str):\n # Scores object does not resemble a label prediction (always string)\n y = result[0]\n scores = result[1]\n else:\n y = result\n scores = None\n return y, scores", "def post(self):\n # use parser and find the user's query\n args = parser.parse_args()\n title = args['title']\n author = model.encode_author(args['author'])\n text = args['text']\n\n X = model.vector_and_stack(title=title, text=text, author=author)\n\n prediction = model.predict(X)\n\n # Output either 'Negative' or 'Positive' along with the score\n if round(prediction[0]) == 0:\n pred_text = 'Reliable News'\n else:\n pred_text = 'Unreliable News'\n\n # round the predict proba value and set to new variable\n confidence = round(prediction[0], 3)\n\n # create JSON object\n output = {'prediction': pred_text, 'fake_rate': confidence}\n\n return output, 200", "def predict(self):\n\n global pos\n pos = (pos + 1) % len(ue_data) # iterate through entire list one by one in cycle manner and will be updated when live feed will be coming through KPIMON to influxDB\n sample = ue_data[pos]\n ue_df = pd.DataFrame([sample], columns=db.data.columns)\n val = predict_anomaly(self, ue_df)\n if (val is not None) and (len(val) > 2):\n msg_to_ts(self, val)", "def postprocess(self, prediction_dict, **params):\r\n pass", "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()" ]
[ "0.61501306", "0.6139516", "0.60400605", "0.5975302", "0.5914858", "0.5826979", "0.582425", "0.57627505", "0.5746949", "0.5707061", "0.56706715", "0.5667077", "0.5660581", "0.5609151", "0.56070286", "0.5595583", "0.55625534", "0.5559505", "0.5535425", "0.55325687", "0.55043447", "0.5487266", "0.5471245", "0.54620105", "0.5452101", "0.5431067", "0.54127175", "0.54061115", "0.5394219", "0.5383141" ]
0.6788969
0
Prompt for user input to figure out who predictions are for. This function is written so that the Oracle host can post predictions in the topic for other users.
def PromptForId(odb, message, orig_id=1):
  print 'Is this prediction for someone other than the poster?\n\n%s\n\n' % \
      (message['Text'])
  diff_user = raw_input('(y/n): ')
  if diff_user == 'n':
    return orig_id
  user_name = raw_input('Username this prediction is for? ')
  user_id = odb.GetUserId(user_name)
  if user_id is None:
    print 'Unrecognized username, try again.\n'
    return PromptForId(odb, message, orig_id)
  else:
    return user_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_prompt(self):\n # we need _something_ in the dictionary even if the user decides to use all defaults\n # otherwise for some unknown reason it won't work\n user_in = {'__meta__': '__user_input__'}\n\n print('Please enter the information asked for in the following prompts in order to configure your deployment')\n # get the config information from the user\n for p in self.prompts:\n answer = input(p['prompt'])\n if len(answer.strip()) > 0 and 'variable' in p.keys():\n user_in[p['variable']] = answer\n\n # return the data\n return user_in", "def main():\n\n google_email = raw_input('Email: ')\n google_password = getpass('Password: ')\n auth = GetAuthentication(google_email, google_password)\n model = raw_input('Model: ')\n\n query = []\n message = 'Enter feature for classification. Type quit when done: '\n while True:\n feature = raw_input(message)\n if feature == 'quit':\n break\n try:\n float(feature)\n query.append(float(feature))\n except ValueError:\n query.append(feature)\n print query\n print Predict(auth, model, query)", "def build_dataset_prompt():\n\n print(\"\")\n print(\"Let's start by choosing what features you'd like to look at/explore!\")", "def interactive_shell():\n print(\"\"\"\nTo exit, enter 'EXIT'.\nEnter a sentence like \ninput> wth is it????\"\"\")\n\n while True:\n try:\n # for python 2\n sentence = raw_input(\"input> \")\n except NameError:\n # for python 3\n sentence = input(\"input> \")\n\n words_raw = sentence.strip()\n\n if words_raw == \"EXIT\":\n break\n \n words_raw = Encoder.str2uni(words_raw)\n label_prob, label_pred = predict(words_raw)\n if label_pred[0] == 0:\n print(\"OUTPUT> Subversive \\t\\t PROB> %.2f\"%(100*(1-label_prob.data[0])))\n else:\n print(\"OUTPUT> None \\t\\t PROB> %.2f\"%(100*label_prob.data[0]))", "def user_question():\n return input('What would you like? (espresso/latte/cappuccino): ')", "def prompt():\n\n\t# Inform the user on what price data has been taken\n\tprint(\"\\nCurrent available historical data for calibration: \")\n\tdata_files = listdir('call_data/')\n\tfor i in range(len(data_files)):\n\t\tprint(data_files[i])\n\t# Ask the user if they would like to sample more points\n\tdone = False\n\twhile done != True:\n\t\tinp = input(\"\\nWould you like to sample more historical price data? (y/n) \")\n\t\ttry:\n\t\t\tif inp.lower() == 'y':\n\t\t\t\tsample_calls.random_calls()\n\t\t\tif inp.lower() == 'n':\n\t\t\t\tprint(\"Continuing to analysis.\\n\")\n\t\t\t\tdone = True\n\t\t\telse:\n\t\t\t\tprint(\"Invalid input.\")\n\t\texcept ValueError:\n\t\t\tprint(\"\\nUnable to interpret input. Please try again.\")", "def get_input(self):\n if self.console_label is None:\n self.text_area.delete('1.0', END)\n print(\"The training has finished and the training file was created and sent to the server! 
Go Back.\")\n return\n\n valid_responses = {'y', 'n', 'u', 'f'}\n\n user_input = self.user_input.get()\n\n self.user_input.delete(0, END)\n\n if user_input not in valid_responses:\n return\n\n self.console_label.label_record_pair(user_input, self.current_record_pair)\n\n if user_input == 'f':\n self.upload_training_file()\n self.current_record_pair = None\n self.console_label = None\n self.text_area.delete('1.0', END)\n return\n\n self.text_area.yview(END)\n\n self.current_record_pair = self.console_label.get_uncertain_pair()", "def response_training_prompt(self):\n self._last_response = \"training_prompt\"\n print(\"Training Prompt\")\n self.user_stats.set_followup(self._last_worst_grip)\n return Mglove_str_gen.training_prompt(self.user_stats.get_followup())", "def __ask_query(self):\n self.__output = list()\n return input(form('What do you want to search?\\n> '))", "def use_random_keyword_classifier():\n while True:\n utterance = input(\"\\n\\nEnter utterance you want to classify, \\ntype menu or exit to go back:\\n-> \").lower()\n if utterance == \"menu\" or utterance == \"exit\":\n break\n else:\n try:\n label_pred = random_keyword_classifier(utterance)\n print(\"Prediction: {0}\".format(*label_pred))\n except ValueError:\n print(\"Prediction: {0}\".format(\"null\"))", "def prediction(name=None, message=''):", "def get_user_input(hparams=None):\n utterance = input()\n # End the chat\n if utterance == 'end()':\n return utterance, utterance\n # Authorization not present\n if not hparams or not hparams.UNAME or not hparams.TOKEN:\n return utterance, utterance\n\n results = get_results(hparams, utterance)\n textual_errors = results['textual_errors']\n\n correct_sent = correct_utterance(textual_errors, utterance)\n if not correct_sent == utterance:\n print_result(results)\n print(\"The correct form is: \", correct_sent)\n print()\n # Return the correct sentence and the original utterance\n return correct_sent, utterance", "def ask(self):\n keyword = input(self.foretext)\n\n self.input_asked = True\n\n if keyword in self.keywords:\n self.retrieved_input = keyword\n if keyword in self.functions:\n function, args, kwargs = self.functions[keyword]\n return function(*args, **kwargs)\n\n else:\n return keyword\n else:\n return self.fallback(keyword)", "def print_recommendations_from_user_input(self):\n\n getting_name = True\n\n print(\"Please enter username and press enter:\\n\")\n\n while getting_name:\n username = input()\n\n redditor = self(username)\n\n if not redditor.username:\n print(\"Redditor does not exist. 
Please enter again.\\n\")\n continue\n\n break\n\n redditor.print_recommendations()", "def prompt(self):\n # TODO: fix null input\n print('Enter user input: ')\n userinput = input()\n print(f'User chose: {userinput}')\n return userinput", "def use_keyword_classifier():\n while True:\n utterance = input(\"\\n\\nEnter utterance you want to classify, \\ntype menu or exit to go back:\\n-> \").lower()\n if utterance == \"menu\" or utterance == \"exit\":\n break\n else:\n try:\n label_pred = keyword_classifier(utterance)\n print(\"Prediction: {0}\".format(*label_pred))\n except ValueError:\n print(\"Prediction: {0}\".format(\"null\"))", "def _ask(threshold, matcher, facts):\n io.reply('What do you want to ask me?')\n while True:\n message = io.prompt_without_parse('ask')\n response = ask(threshold, matcher, facts, message)\n type = response['type']\n if type == 'invalid':\n io.reply(response['validation_mesage'])\n elif type == 'no_match':\n # TODO: how to give detailed information on why no match was found?\n io.reply('Sorry. My digital brain is not yet fully evolved. I did not understand it.')\n io.reply('Could you rephrase the question, so I might be able to understand it?')\n return\n elif type == 'select_match':\n _select_match(response['matches'])\n else:\n io.reply(response['answer'])\n return", "def ask(self, prompt: str) -> str:\n raise NotImplementedError", "def question():\n print('Enter 1 to search database by habitat with detailed information\\nEnter 2 to search database by coordinates \\nEnter 3 to search by habitat in csv file for a quick overview without detail')\n print('habitat search options so far:\\n Alpenvorland, Niederrheinisches Tiefland, Oberrheinisches Tiefland')\n src = int(input('Enter here:'))\n\n if src == 1:\n habitat = input('Enter name of habitat\\n')\n query = \"habitat = '\" + habitat + \"'\"\n search_db_via_query(query)\n elif src == 2:\n search_by_coordinates()\n elif src == 3:\n search_by_habitat()\n else:\n print('no data')", "def displayPredictions(predictions):\n if predictions == []:\n print(\"\\n No predictions to display. Run manual game analysis and select games to predict first.\")\n print(\"\\nPress enter to return to previous menu.\\n\")\n input()\n return\n else:\n print(\"\\n Predictions\")\n print(\"-----------\\n\")\n for game in predictions:\n print(game[0],game[1],game[2],game[3])\n print(\"\\nPress enter to return to previous menu.\\n\")\n input()\n return", "def getUserInput():\n\n logging.info(\"\\nGood day! Press enter to accept default reduction options.\")\n\n fullReduction = getParam(\n \"Do a full data reduction with default parameters loaded from recipes/default_input.cfg? [no]: \",\n False,\n \"Type yes to start Nifty with data reduction input parameters loaded from recipes/default_input.cfg file.\"\n )\n if fullReduction == False:\n # \"Select in\". User has to turn individual steps on.\n # TODO(nat): Implement these steps.\n date = \"\"\n program = \"\"\n copy = \"\"\n\n sort = getParam(\n \"Sort data? [no]: \",\n False\n )\n rawPath = getParam(\n \"Path to raw files directory? [~/data]: \",\n \"~/data\"\n )\n tel = getParam(\n \"Apply a telluric correction? [no]: \",\n False\n )\n # See if we want to reduce the baseline calibrations. And if so, which substeps\n # to perform.\n calibrationReduction = getParam(\n \"Reduce baseline calibrations? [no]: \",\n False\n )\n # By default do all of them.\n rstart = getParam(\n \"Starting point of baseline calibration reductions? 
[1]: \",\n 1\n )\n rstop = getParam(\n \"Stopping point of baseline calibration reductions? [4]: \",\n 4\n )\n\n # Check for tellurics as well; by default do all reduction steps.\n telluricReduction = getParam(\n \"Reduce telluric data? [no]: \",\n False\n )\n telStart = getParam(\n \"Starting point of science and telluric reductions? [1]: \",\n 1\n )\n telStop = getParam(\n \"Stopping point of science and telluric reductions? [6]: \",\n 6\n )\n # Set the telluric application correction method. Choices are iraf.telluric and a python variant.\n # Set the h-line removal method with the vega() function in nifsReduce as default.\n hline_method = getParam(\n \"H-line removal method? [vega]: \",\n \"vega\"\n )\n # Set yes or no for interactive the h line removal, telluric correction, and continuum fitting\n hlineinter = getParam(\n \"Interative H-line removal? [no]: \",\n False\n )\n continuuminter = getParam(\n \"Interative telluric continuum fitting? [no]: \",\n False\n )\n telluric_correction_method = getParam(\n \"Telluric correction method? [python]: \",\n \"python\"\n )\n telinter = getParam(\n \"Interactive telluric correction? [no]: \",\n False\n )\n # Check for science as well.\n scienceReduction = getParam(\n \"Reduce science data? [no]: \",\n False\n )\n sciStart = getParam(\n \"Starting point of science and telluric reductions? [1]: \",\n 1\n )\n sciStop = getParam(\n \"Stopping point of science and telluric reductions? [6]: \",\n 6\n )\n efficiencySpectrumCorrection = getParam(\n \"Do a flux calibration? [no]: \",\n False\n )\n spectemp = getParam(\n \"Effective temperature in kelvin of telluric standard star? [\"\"]: \",\n \"\"\n )\n mag = getParam(\n \"Magnitude of standard star? [\"\"]: \",\n \"\"\n )\n merge = getParam(\n \"Produce one final 3D cube? [no]: \",\n False\n )\n use_pq_offsets = getParam(\n \"Use pq offsets to merge data cubes? [yes]: \",\n \"yes\"\n )\n im3dtran = getParam(\n \"Transpose cubes for faster merging? [no]: \",\n False\n )\n over = getParam(\n \"Overwrite old files? [no]: \",\n False\n )\n debug = getParam(\n \"Pause after each data reduction step? 
[yes]: \",\n \"yes\"\n )\n\n # Serialize and save the options as a .cfg file.\n options = ConfigObj(unrepr=True)\n options['date'] = date\n options['program'] = program\n options['rawPath'] = rawPath\n options['over'] = over\n options['copy'] = copy\n options['sort'] = sort\n options['calibrationReduction'] = calibrationReduction\n options['scienceReduction'] = scienceReduction\n options['merge'] = merge\n options['tel'] = tel\n options['telluricReduction'] = telluricReduction\n options['spectemp'] = spectemp\n options['mag'] = mag\n options['efficiencySpectrumCorrection'] = efficiencySpectrumCorrection\n options['rstart']= rstart\n options['rstop'] = rstop\n options['telStart'] = telStart\n options['telStop'] = telStop\n options['sciStart'] = sciStart\n options['sciStop'] = sciStop\n options['hline_method'] = hline_method\n options['hlineinter'] = hlineinter\n options['continuuminter'] = continuuminter\n options['telluric_correction_method'] = telluric_correction_method\n options['telinter'] = telinter\n options['use_pq_offsets'] = use_pq_offsets\n options['im3dtran'] = im3dtran\n options['debug'] = debug\n with open(RUNTIME_DATA_PATH+'/config.cfg', 'w') as outfile:\n options.write(outfile)\n\n return fullReduction", "def prompt_user(prompt):\r\n # raw_input returns the empty string for \"enter\"\r\n yes = set(['yes', 'y'])\r\n no = set(['no','n'])\r\n\r\n try:\r\n print(prompt)\r\n choice = raw_input().lower()\r\n # would like to test for exception here, but not sure if we can do that without importing IPython\r\n except:\r\n print('Stdin is not implemented.')\r\n print('You need to set')\r\n print('overide_manual_authorize=True')\r\n print('to proceed with the download. Please set that variable and continue.')\r\n raise\r\n\r\n\r\n if choice in yes:\r\n return True\r\n elif choice in no:\r\n return False\r\n else:\r\n print(\"Your response was a \" + choice)\r\n print(\"Please respond with 'yes', 'y' or 'no', 'n'\")\r\n #return prompt_user()\r", "def prompt(self, upstream_name):\n self.prompt_events[upstream_name].set()", "def ask_user_input(self, sentence):\n user_input = raw_input(sentence + \" : \")\n return user_input", "def nlu_cli(default_mood, user_id):\n mood = -1 # TODO currently superfulous while loop given default mood.\n while mood not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:\n mood = input(\n \"Enter your current mood on a scale of 1 to 10 where \"\n + \"1 is negative, 5 is neutral, and 10 is positive (default is \"\n + str(default_mood) + \"): \"\n )\n if mood == \"\" or not mood.isdigit():\n mood = default_mood\n else:\n mood = int(mood)\n mood = default_mood if mood == \"\" else int(mood)\n\n topic = \"\"\n while topic == \"\":\n topic = input(\"Enter Topic: \").strip().lower()\n\n #loop until they select correct dialogue act, show help after first fail\n dialogue_act = \"\"\n first = True\n da_names = [da.name for da in DA if da.name not in\n ['statement', 'question', 'response_action']\n ]\n while dialogue_act not in da_names:\n dialogue_act = input(\"Enter dialogue Act: \").strip().lower()\n\n # TODO add help print out descriptions\n if first and dialogue_act not in da_names:\n first = False\n # Help, details what each dialogue act means.\n print(\"Enter a dialogue act from list below:\\n\", da_names)\n\n question_type = None\n if is_question(DA[dialogue_act]):\n question_type = \"\"\n first = True\n question_types = [qt.name for qt in QT]\n while question_type not in question_types:\n question_type = input(\"Enter question type: \").strip().lower()\n\n # TODO add help 
print out descriptions\n if first and question_type not in question_types:\n first = False\n # Help, details what each dialogue act means.\n print(\"Enter a question type from list below:\\n\",\n question_types)\n\n text = input(\n \"Enter utterance text: \"\n ).strip()\n\n sentiment = -1\n while sentiment not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:\n sentiment = input(\n \"Enter utterance sentiment 1 to 10. \"\n + \"1 negative, 5 neutral, and 10 positive: \"\n )\n sentiment = -1 if sentiment == \"\" else int(sentiment)\n\n assertiveness = -1\n while assertiveness not in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:\n assertiveness = input(\n \"Enter utterance assertiveness 1 to 10. \"\n + \"1 passive/listening oriented, 5 neutral, and \"\n + \"10 assertive/leading conversation: \"\n )\n assertiveness = -1 if assertiveness == \"\" else int(assertiveness)\n\n return Utterance(\n user_id,\n DA[dialogue_act],\n topic,\n sentiment,\n assertiveness,\n text,\n question_type\n ), mood", "def topics_from_user_input(self):\n\n getting_name = True\n\n print(\"\\nPlease enter username and press enter:\\n\")\n\n while getting_name:\n username = input()\n\n redditor = self(username)\n\n if not redditor.username:\n print(\"Redditor does not exist. Please enter again.\\n\")\n continue\n\n break\n\n redditor.print_topics()", "def get_user_input(arg_pair: EviPair):\n global HUMAN_CORRECT_PRED\n\n while True:\n try:\n choice = int(raw_input())\n\n if choice in [1,2]:\n\n if choice == arg_pair.label:\n HUMAN_CORRECT_PRED += 1\n\n break\n else:\n print(WRONG_INPUT)\n except ValueError:\n print(WRONG_INPUT)\n\n return choice", "def UserInput(self, username, userinput):\n pass", "def getQuestion():\n cat = getCategories(True)\n print(\"--- Categories ---\")\n print(cat[0])\n print(cat[1])\n print(cat[2])\n userInput = input(\"Please, pick a subject: \\n\").casefold()\n\n\n \n if userInput != \"other\":\n print(\"You have chosen: \" + userInput.title())\n print(\"##############################\")\n #Input question here XD\n\n \n\n question, answer, w, t = getQuestion(userInput, \"easy\")\n \n userAnswer = input(question)\n if userAnswer.lower() == answer.lower():\n print(\"Wehey \\n\")\n elif userAnswer.lower() != answer.lower():\n print(\"This is not the answer fool!\")\n print(\"The answer is: \\n\")\n print(answer + \"\\n\")\n elif userInput == \"other\":\n print(\"You have chosen: \" + userInput.title())\n print(\"##############################\")\n userInput = input(\"Want to hear a joke, fun fact or something random? 
\\n\")\n if userInput == \"Yes\" or userInput == \"yes\":\n print(random.choice(randomStuff) + \"\\n\")", "def getInput():\t\n\tglobal active_\n\n\t#to disable the service \n\tactive_ = False \n\t\n\t# reading the previous input\n\tprev_input_ = rospy.get_param('/input')\n\tinput_ = prev_input_\n\t\n\t#in order to make the user to choose one of the 5 possible inputs\n\twhile (prev_input_ == input_) or (input_ > 5 or input_ < 1):\n\t\tif input_ > 5 or input_ < 1: \n\t\t\t#in the case in which the user make another selection\n\t\t\tprint \"Unknown input, please try again\" \n\t\t\n\t\t#propose to the user which are the real possibilities\n\t\tprint(\"Please select one of the following senteces\\n\")\n\t\tprint(\"1 - Move the robot randomly in the environment, by choosing one of six possible target positions\\n\")\n\t\tprint(\"2 - The user can chose the next target position\\n\")\n\t\tprint(\"3 - Start following the external walls\\n\")\n\t\tprint(\"4 - Stop the robot in the last position\\n\")\n\t\tprint(\"5 - Change the planning algorithm from move_base to bug0 and vice versa\\n\")\n\n\t\t#read the input typed by the user\t\n\t\tinput_ = (int(raw_input(\"Please select a number between 1 and 5: \")))\n\n\t#set the choice made by the user\n\tif input_ >= 1 and input_ <= 5:\n\t\trospy.set_param('/input', input_)" ]
[ "0.64296", "0.6354998", "0.63514704", "0.6275108", "0.6199466", "0.6192252", "0.6019146", "0.59782445", "0.5917745", "0.590989", "0.584681", "0.58215415", "0.5810385", "0.579001", "0.5771973", "0.576907", "0.5767045", "0.5765213", "0.5736743", "0.5734095", "0.57227224", "0.57066315", "0.5703555", "0.5700363", "0.5690616", "0.5664714", "0.5642506", "0.56071585", "0.55962694", "0.5566717" ]
0.67718136
0
tokenized_sentence = ["tu", "que", "tal"] all_words = ["tu", "yo", "soy", "que", "tal"] bag = [ 1, 0, 0, 1, 1]
def bag_of_words(tokenized_sentence, all_words): tokenized_sentence = [stem(w) for w in tokenized_sentence] #print(tokenized_sentence) bag = np.zeros_like(all_words, dtype=np.float32) for idx, w in enumerate(all_words): if w in tokenized_sentence: bag[idx] = 1.0 return bag
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bag_of_words(batch, TEXT):\n V = len(TEXT.vocab)\n X = torch.zeros(batch.text.size(0), V)\n ones = torch.ones(batch.text.size(1))\n for b in range(batch.text.size(0)):\n X[b].index_add_(0, batch.text.data[b], ones)\n X[b][TEXT.vocab.stoi['<pad>']] = 0\n X = Variable(X, requires_grad=False)\n return X", "def bagOfWords(self,phrase):\n return self._support.bagOfWords(phrase)", "def make_bag_words(document_tokenized):\n bag_words = dict()\n for token in document_tokenized:\n if token in bag_words.keys():\n bag_words[token] += 1\n else:\n bag_words[token] = 1\n return bag_words", "def featurize(self, data):\n \n bag_of_words = []\n\n tokens = data.split()\n\n for i in tokens:\n bag_of_words.append((i, True))\n\n return bag_of_words", "def bag_of_words_model(x, y):\n target = tf.one_hot(y, NUMBER_OF_CATEGORIES, 1, 0)\n word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,\n embedding_size=EMBEDDING_SIZE, name='words')\n features = tf.reduce_max(word_vectors, reduction_indices=1)\n prediction, loss = learn.models.logistic_regression(features, target)\n train_op = tf.contrib.layers.optimize_loss(\n loss, tf.contrib.framework.get_global_step(),\n optimizer='Adam', learning_rate=0.01)\n return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op", "def gen_bag_of_words_df(self):\n\t\tdef word_vector(doc_text):\n\t\t\tfreqs = pd.Series(collections.Counter(doc_text.split()))\n\t\t\treturn freqs.loc[set(freqs.index.values)|set(self.stems)]\n\t\tself.bagofwords = self.dataframe.text.apply(word_vector).replace({np.nan:0})", "def process(self, sentence):\n\n # selects onlt alphanumeric words\n words = self.tokenizer.tokenize(sentence)\n\n # lemmatize the words\n words = [self.lemmatizer.lemmatize(word) for word in words]\n\n # lowercase all the words and remove single characters\n words = [word.lower() for word in words if len(word) > 1]\n\n # remove the stopwords using NLTK\n words = [word for word in words if word not in stopwords.words('english')]\n\n return words", "def sent_tagged(novel_text):\n novel = []\n novel_tagged = NER_tagger.tag(word_tokenize(novel_text))\n novel_sent_tokenized = sent_tokenize(novel_text)\n novel_tokenized = [word_tokenize(novel_sent_tokenized[i]) for i in range(len(novel_sent_tokenized))]\n sent_length = [len(novel_tokenized[i]) for i in range(len(novel_sent_tokenized))]\n \n a = 0\n b = 0\n for i in sent_length:\n b += i\n novel.append(novel_tagged[a:b])\n a = b\n \n return(novel)", "def tokenizer(sentence):\n words = []\n for phrase in sentence.split('.'):\n for piece in phrase.split(','):\n for word in piece.split(' '):\n words.append(word)\n return words", "def tokenize_tweets_by_word(self):\n sent_tok_list = []\n all_tok_list = []\n\n for t in range(len(self.all_tweets) - 1):\n sent_list = sent_tokenize(self.all_tweets[t][0][1]) # all_tweets[tweet][look at tuple][look at text]\n all_tok_list.append([])\n for s in sent_list: # for each sentence\n word_tok_list = word_tokenize(s)\n pos_tag_list = pos_tag(word_tok_list)\n all_tok_list[t].append(pos_tag_list)\n print(pos_tag_list)\n\n return all_tok_list", "def Viterbi_Most_Common_Tag(tagged_seq:Sequence[str],most_common_tag:str, train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Sequence[Tuple[str, str]]:\n V = list(set([pair[0] for pair in train_bag]))\n words = [pair[0] for pair in tagged_seq]\n tags = [pair[1] for pair in tagged_seq]\n\n for word_index, word in enumerate(words):\n if word not in V:\n tags[word_index] = most_common_tag\n\n return list(zip(words, tags))", 
"def bigrams(sentence):\n return [word+'_'+sentence[i+1] \n if word+'_'+sentence[i+1] in word_ids else None \n for i, word in enumerate(sentence[:-1])] if config.USE_BIGRAMS else []", "def apply_bag_of_words(values, max_df=1.0, min_df=1):\n bw = TfidfVectorizer(stop_words=\"english\", max_df=max_df, min_df=min_df)\n features = bw.fit_transform(values)\n return (bw.get_feature_names(), features)", "def bag_of_words_model(features, target):\n target = tf.one_hot(target, 2, 1, 0)\n features = tf.contrib.layers.bow_encoder(\n features, vocab_size=n_words, embed_dim=Embedding_size, scope=\"input_layer\")\n hidden_layer1 = tf.contrib.layers.fully_connected(features, 100, scope=\"hidden_layer1\")\n logits = tf.contrib.layers.fully_connected(hidden_layer1, 2, scope=\"output_layer\",\n activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n train_op = tf.contrib.layers.optimize_loss(\n loss, tf.contrib.framework.get_global_step(),\n optimizer='Adam', learning_rate=0.01)\n return (\n {'class': tf.argmax(logits, 1),\n 'prob': tf.nn.softmax(logits)},\n loss, train_op)", "def fit(self, tagged_sents):\n\n X = []\n y_true = []\n for sent in tagged_sents:\n #frase=list\n frase = [word[0] for word in sent]\n #print(sent)\n #for w in sent:\n # frase.append(w[0])\n for i in range(0,len(sent)):\n self._palabrasvistas.add(sent[i][0]) # como es set, si ya esta va a obviarla\n x = feature_dict(frase, i)\n X.append(x)\n y_true.append(sent[i][1])\n\n #print(X)\n #print(y_true)\n self.pipeline.fit(X, y_true)", "def tokenize_tag(tag):\n sentences = nltk.sent_tokenize(tag.text)\n sentence_words = []\n for sentence in sentences:\n words = nltk.casual_tokenize(sentence)\n lower_words = [w.lower() for w in words]\n filtered_words = [w for w in lower_words if w not in stop_words and not w.isdigit() and len(w) > 2]\n sentence_words += filtered_words\n return sentence_words", "def sents_to_tokens(sents, wordset):\n padded_sentences = ([\"<s>\", \"<s>\"] + s + [\"</s>\"] for s in sents)\n # This will canonicalize words, and replace anything not in vocab with <unk>\n return np.array([utils.canonicalize_word(w, wordset=wordset) \n for w in utils.flatten(padded_sentences)], dtype=object)", "def bag_of_words(files_data_train,files_data_test):\n\tcount_vector = sklearn.feature_extraction.text.CountVectorizer()\n\t#print count_vector.fit(files_data)\n\tword_train = count_vector.fit_transform(files_data_train)\n\tword_test = count_vector.transform(files_data_test)\n\tprint len(count_vector.get_feature_names())\n\treturn word_train,word_test", "def Viterbi(words:Sequence[str], train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Sequence[Tuple[str, str]]:\n state = []\n tags_set = list(set([pair[1] for pair in train_bag]))\n\n for key, word in enumerate(words):\n # initialise list of probability column for a given observation\n p = []\n for tag in tags_set:\n if key == 0:\n transition_p = tags_df.loc['.', tag]\n else:\n transition_p = tags_df.loc[state[-1], tag]\n\n # compute emission and state probabilities\n emission_p_parts = word_given_tag(word, tag)\n emission_p = emission_p_parts[0]/emission_p_parts[1]\n state_probability = emission_p * transition_p\n p.append(state_probability)\n\n p_max = max(p)\n # getting state for which probability is maximum\n state_max = tags_set[p.index(p_max)]\n state.append(state_max)\n return list(zip(words, state))", "def word2features(self,sent, i):\n word = sent[i][0]\n #postag = sent[i][1]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(),\n 
'word.isupper()': word.isupper(),\n 'word.istitle()': word.istitle(),\n 'word.isdigit()': word.isdigit(),\n 'word.shape()':self.shape(word),\n 'word.isalnum()':word.isalnum(),\n 'word.isalpha()':word.isalpha(),\n # 'postag': postag,\n # 'postag[:2]': postag[:2],\n }\n if i > 0:\n word1 = sent[i - 1][0]\n #postag1 = sent[i - 1][1]\n features.update({\n '-1:word.lower()': word1.lower(),\n '-1:word.istitle()': word1.istitle(),\n '-1:word.isupper()': word1.isupper(),\n '-1:word.isdigit()': word1.isdigit(),\n '-1:word.isalnum()':word1.isalnum(),\n '-1:word.isalpha()':word1.isalpha(),\n # '-1:postag': postag1,\n # '-1:postag[:2]': postag1[:2],\n })\n else:\n features['BOS'] = True\n\n if i > 1:\n word2 = sent[i - 2][0]\n #postag2 = sent[i - 2][1]\n features.update({\n '-2:word.lower()': word2.lower(),\n '-2:word.istitle()': word2.istitle(),\n '-2:word.isupper()': word2.isupper(),\n '-2:word.isdigit()': word2.isdigit(),\n '-2:word.isalnum()': word2.isalnum(),\n '-2:word.isalpha()': word2.isalpha(),\n # '-2:postag': postag2,\n # '-2:postag[:2]': postag2[:2],\n })\n else:\n features['BOS1'] = True\n if i > 2:\n word3 = sent[i - 3][0]\n #postag3 = sent[i - 3][1]\n features.update({\n '-3:word.lower()': word3.lower(),\n '-3:word.istitle()': word3.istitle(),\n '-3:word.isupper()': word3.isupper(),\n '-3:word.isdigit()': word3.isdigit(),\n '-3:word.isalnum()': word3.isalnum(),\n '-3:word.isalpha()': word3.isalpha(),\n # '-3:postag': postag3,\n # '-3:postag[:2]': postag3[:2],\n })\n else:\n features['BOS2'] = True\n\n if i > 3:\n word4 = sent[i - 4][0]\n #postag4 = sent[i - 4][1]\n features.update({\n '-4:word.lower()': word4.lower(),\n '-4:word.istitle()': word4.istitle(),\n '-4:word.isupper()': word4.isupper(),\n '-4:word.isdigit()': word4.isdigit(),\n '-4:word.isalnum()': word4.isalnum(),\n '-4:word.isalpha()': word4.isalpha(),\n # '-4:postag': postag4,\n # '-4:postag[:2]': postag4[:2],\n })\n else:\n features['BOS2'] = True\n\n if i < len(sent) - 1:\n word1 = sent[i + 1][0]\n features.update({\n '+1:word.lower()': word1.lower(),\n '+1:word.istitle()': word1.istitle(),\n '+1:word.isupper()': word1.isupper(),\n '+1:word.isdigit()': word1.isdigit(),\n '+1:word.isalnum()': word1.isalnum(),\n '+1:word.isalpha()': word1.isalpha(),\n # '+1:postag': postag1,\n # '+1:postag[:2]': postag1[:2],\n })\n else:\n features['EOS'] = True\n if i < len(sent) - 2:\n word12 = sent[i + 2][0]\n #postag12 = sent[i + 2][1]\n features.update({\n '+2:word.lower()': word12.lower(),\n '+2:word.istitle()': word12.istitle(),\n '+2:word.isupper()': word12.isupper(),\n '+2:word.isdigit()': word12.isdigit(),\n '+2:word.isalnum()': word12.isalnum(),\n '+2:word.isalpha()': word12.isalpha(),\n # '+2:postag': postag12,\n # '+2:postag[:2]': postag12[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 3:\n word13 = sent[i + 3][0]\n #postag13 = sent[i + 3][1]\n features.update({\n '+3:word.lower()': word13.lower(),\n '+3:word.istitle()': word13.istitle(),\n '+3:word.isupper()': word13.isupper(),\n '+3:word.isdigit()': word13.isdigit(),\n '+3:word.isalnum()': word13.isalnum(),\n '+3:word.isalpha()': word13.isalpha(),\n # '+3:postag': postag13,\n # '+3:postag[:2]': postag13[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 4:\n word14 = sent[i + 4][0]\n #postag14 = sent[i + 4][1]\n features.update({\n '+4:word.lower()': word14.lower(),\n '+4:word.istitle()': word14.istitle(),\n '+4:word.isupper()': word14.isupper(),\n '+4:word.isdigit()': word14.isdigit(),\n '+4:word.isalnum()': word14.isalnum(),\n 
'+4:word.isalpha()': word14.isalpha(),\n # '+4:postag': postag14,\n # '+4:postag[:2]': postag14[:2],\n })\n else:\n features['EOS2'] = True\n return features", "def tag(self, sent):\n return [self.tag_word(w) for w in sent]", "def tag(self, sent):\n return [self.tag_word(w) for w in sent]", "def pos_tag(self, sentence):\n tags = []\n tokens = sentence.split(\" \")\n for i in range(len(tokens)):\n tags.append('')\n for i in range (len(tokens)):\n feat = []\n feat.append(self.features(tokens,tags,i))\n tag_predicted = self.postagger.predict(feat)[0]\n tags[i] = tag_predicted\n return tags", "def word_given_tag(word, tag, train_bag): # train_bag=train_tagged_words\n tag_list = [pair for pair in train_bag if pair[1] == tag]\n count_tag = len(tag_list) # total number of times the passed tag occurred in train_bag\n w_given_tag_list = [pair[0] for pair in tag_list if pair[0] == word]\n # now calculate the total number of times the passed word occurred as the passed tag.\n count_w_given_tag = len(w_given_tag_list)\n return count_w_given_tag, count_tag", "def get_tokens(sent):\n return word_tokenize(sent)", "def word_given_tag(word:str, tag:str, train_bag:Sequence[Tuple[str, str]]=train_tagged_words)-> Tuple[int, int]: \n tag_list = [pair for pair in train_bag if pair[1] == tag]\n count_tag = len(tag_list)\n w_given_tag_list = [pair[0] for pair in tag_list if pair[0] == word]\n count_w_given_tag = len(w_given_tag_list)\n\n return (count_w_given_tag, count_tag)", "def process_words(texts, bigram_mod,trigram_mod,stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\r\n texts = [bigram_mod[doc] for doc in texts]\r\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\r\n texts_out = []\r\n nlp = spacy.load('en_core_web_sm')\r\n for sent in texts:\r\n doc = nlp(\" \".join(sent)) \r\n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\r\n texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out] \r\n return texts_out", "def _tag_tokens(self, targets: list, tokens: list, tags: dict=BIO_TAGS, bert_tokenizer=None, verbose: bool=False):\n if bert_tokenizer is not None:\n tokenizer = bert_tokenizer\n\n if len(targets) > 0:\n tags_list = []\n for tgt in targets:\n t_list = []\n inside = False\n found = False\n if bert_tokenizer is not None:\n tgt_terms = tokenizer.tokenize(tgt[1]) \n else:\n tgt_terms = self._tokenize_line(tgt[1])\n\n if verbose:\n print(tgt_terms)\n\n for i in range(len(tokens)):\n if tokens[i] == tgt_terms[0] and not found: \n # token is the beginning (B) of target terms sequence\n t_list.append(tags[\"B\"])\n if len(tgt_terms) > 1 and tokens[i:i+len(tgt_terms)] == tgt_terms:\n # check if the matching token is not a repetition of the term\n # and is the actual target term, if so the correct sequence is found \n inside = True\n found = True\n\n elif inside == True:\n # multi words terms\n if tokens[i] in tgt_terms[1:-1] and len(tgt_terms) > 2:\n # token is inside (I) the target terms sequence\n t_list.append(tags[\"I\"])\n\n elif tokens[i] == tgt_terms[-1]:\n # token is the last (L) target term\n t_list.append(tags[\"I\"]) # tags[\"L\"] \n inside = False\n\n # when the last tgt_word is repeated inside the tgt_terms \n inside = False\n\n else:\n # token is outside (O) the target terms sequence\n t_list.append(tags[\"O\"])\n\n tags_list.append(torch.Tensor(t_list))\n\n # merge tags\n 
tags_tensor = torch.stack(tags_list)\n res = torch.min(tags_tensor, dim=0)\n if verbose:\n print(\"targets:\", targets)\n print(\"tokens:\", tokens, \"-- len:\", len(tokens))\n print(\"tags:\", tags_list)\n #print(\"tags:\", tags_tensor.size())\n #print(\"res:\", res.values.size())\n \n return res.values\n\n else:\n return [tags[\"O\"] for t in tokens]", "def average_one_hots(sent, word_to_ind):\n known_words = 0\n size = len(word_to_ind.keys())\n sum_vec = np.zeros((size,))\n for token in sent.text: #going over all tokens and summing their embeddings\n if (token in word_to_ind):\n sum_vec += get_one_hot(size, word_to_ind[token])\n known_words += 1\n if (known_words != 0):\n return sum_vec / known_words\n else:\n return sum_vec", "def _gen_words(sentence, labels):\r\n word = \"\"\r\n words = []\r\n for token, label in zip(sentence, labels):\r\n word += token\r\n if label in [1, 3]:\r\n words.append(word)\r\n word = \"\"\r\n return words" ]
[ "0.7064958", "0.6989011", "0.6753921", "0.66266555", "0.6409904", "0.63231957", "0.6267341", "0.62106866", "0.6205491", "0.6204041", "0.6201656", "0.61629564", "0.61408895", "0.60988027", "0.6071161", "0.60488117", "0.60472023", "0.59949446", "0.59798604", "0.5959912", "0.59515476", "0.59515476", "0.59323484", "0.5925336", "0.592347", "0.59178835", "0.5916029", "0.5913357", "0.59004587", "0.59002924" ]
0.8335468
0
A decorator that registers a coroutine as a pre-invoke hook. A pre-invoke hook is called directly before the command is called. This makes it a useful function to set up database connections or any other type of setup required.
def before_invoke(self, coro): if not asyncio.iscoroutinefunction(coro): raise TypeError('The pre-invoke hook must be a coroutine.') self._before_invoke = coro return coro
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_invoke(coro) -> Callable[[T], T]:\n def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:\n if isinstance(func, Command):\n func.before_invoke(coro)\n else:\n func.__before_invoke__ = coro\n return func\n return decorator # type: ignore", "def on_start(self):\n\n def decorator(coro):\n self._hooks.append((\"start\", coro))\n return coro\n\n return decorator", "def command(*args, **kwargs):\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n new_func = CommandParent(func, **kwargs)\r\n _HANDLER.commands[new_func.name] = new_func\r\n return new_func\r\n return decorator", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def on_pre_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __pre_exec_callbacks)\n for callback in __pre_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on pre-execution callback using %s\", callback)", "def preloop(self):\n super(CoreCommand, self).preloop() # sets up command completion", "def before_worker_start(func):\n _func_only(func)\n worker_methods_db.register_before_start(func)\n return func", "def at_pre_cmd(self):\n pass", "def before_call(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> None:", "def before(hook_name, methods, kwargs):\n for hookimpl in methods:\n self._plugin2calls[hookimpl.plugin].add(hook_name)", "def pre_execute(self):", "def precmd(self, line):\n return cmd.Cmd.precmd(self, line)", "async def pre_action_init(self) -> None:", "def set_asyncgen_hooks(*args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def uses_before_args(self, args):\n self.pod_args['uses_before'] = args", "def register_pre_exec_callback(action_logger):\n logging.debug(\"Adding %s to pre execution callback\", action_logger)\n __pre_exec_callbacks.append(action_logger)", "def preloop(self):\n cmd.Cmd.preloop(self) ## sets up command completion\n self._hist = [] ## No history yet\n self._locals = {} ## Initialize execution namespace for user\n self._globals = {}", "def before_request(self, func: typing.Callable):\n return self.add_hook(type_=\"pre\", hook=func)", "def setup_stage(method):\n def decorator(self):\n name = method.func_name\n if should_run_stage(name):\n say(\"Setup.%s\" % name)\n method(self)\n set_stage(name)\n decorator.__doc__ = method.__doc__\n return decorator", "def preloop(self):\n cmd.Cmd.preloop(self) ## sets up command completion\n self._hist = [] ## No history yet\n self._locals = {} ## Initialize execution namespace for user\n self._globals = {}", "def before(self, before: Route.Decorator):\n pass", "def precmd(self, statement):\n return statement", "def wrap_before(before, condition=lambda *args, **kwargs: True):\n def decorator(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n if condition(*args, **kwargs):\n before()\n return func(*args, **kwargs)\n return wrapped\n return decorator", "def monkey_patch():\n tornado.gen.coroutine = coroutine", "def pre(self, pre):\n\n self._pre = pre", "def premain(self):\r\n return self._premain", "def task_prerequisite(pre_req_task: PromiseProxy, key: str=None, trigger: callable=bool) -> callable:\n if not callable(trigger):\n raise Exception(\"trigger must be a function returning a bool\")\n\n def 
decorator(task_needing_pre_req: PromiseProxy)->PromiseProxy:\n def maybe_run(kwargs):\n if not key:\n trigger_arg = kwargs\n else:\n trigger_arg = kwargs.get(key)\n if not trigger(trigger_arg):\n from celery import current_task\n current_task.enqueue_child(pre_req_task.s(**kwargs), block=True)\n\n maybe_run.__name__ = task_needing_pre_req.__name__ + \"Needs\" + pre_req_task.__name__\n\n dependencies = ConverterRegister.list_converters(task_name=task_needing_pre_req.__name__, pre_task=True)\n ConverterRegister.register_for_task(task_needing_pre_req, True, *dependencies)(maybe_run)\n return task_needing_pre_req\n return decorator" ]
[ "0.711384", "0.59254164", "0.59139216", "0.57862735", "0.57862735", "0.57862735", "0.57862735", "0.5783765", "0.56661844", "0.55336636", "0.54575396", "0.5412497", "0.535912", "0.5343143", "0.5328596", "0.5258286", "0.5252766", "0.5236413", "0.5234541", "0.5177922", "0.5162681", "0.5116589", "0.51105386", "0.5107895", "0.5097092", "0.50829715", "0.50817966", "0.5081604", "0.5064644", "0.5060832" ]
0.7430255
0
A decorator that registers a coroutine as a post-invoke hook. A post-invoke hook is called directly after the command is called. This makes it a useful function to clean up database connections or any other type of cleanup required.
def after_invoke(self, coro): if not asyncio.iscoroutinefunction(coro): raise TypeError('The post-invoke hook must be a coroutine.') self._after_invoke = coro return coro
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def after_invoke(coro) -> Callable[[T], T]:\n def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:\n if isinstance(func, Command):\n func.after_invoke(coro)\n else:\n func.__after_invoke__ = coro\n return func\n return decorator # type: ignore", "def post_hook(config, final=False):\n if config.post_hook:\n if final or config.verb != \"renew\":\n logger.info(\"Running post-hook command: %s\", config.post_hook)\n _run_hook(config.post_hook)", "def postloop(self):\n super(CoreCommand, self).postloop() # Clean up command completion", "def Postcall(function_to_call_later): \n def postcall_inside(fun): \n @functools.wraps(fun)\n def relay(*args, **kwargs):\n return function_to_call_later(fun(*args, **kwargs))\n return relay\n return postcall_inside", "def on_post_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __post_exec_callbacks)\n for callback in __post_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on post-execution callback using %s\", callback)", "def on_shutdown(self):\n\n def decorator(coro):\n self._hooks.append((\"shutdown\", coro))\n return coro\n\n return decorator", "def _post_run_hook(self, runtime):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def post_process(cls, *args, **kwargs):\n pass", "def postprocess(self, postprocess_args: dict) -> \"Handle\":\n return self.apply_call(postprocess_args[\"pre_call_hash\"])", "def post(self, *args):\n event = self\n if args:\n event = copy.copy(self)\n event._set_args(args)\n if not is_mainthread():\n return MainThreadCallback(manager.post, event)()\n else:\n return manager.post(event)", "def command(*args, **kwargs):\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n new_func = CommandParent(func, **kwargs)\r\n _HANDLER.commands[new_func.name] = new_func\r\n return new_func\r\n return decorator", "def cmdfinalization_hook(self, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:\n self.called_cmdfinalization += 1\n return data", "def at_post_cmd(self):\n pass", "def _run_callable_with_postamble(postamble, callable_, *args, **kwargs):\n def fn():\n try:\n return callable_(*args, **kwargs)\n finally:\n postamble()\n return fn", "async def post_behavior_run(self) -> None:", "def postloop(self):\n cmd.Cmd.postloop(self) ## Clean up command completion", "def with_post_function(self, post_fcn):\n old_post = self._post\n self._post = lambda loss: post_fcn(old_post(loss))\n return self", "def _post_construct(self, func, *args, **kwargs):\r\n ParseContext.locate().on_context_exit(func, *args, **kwargs)", "def register_post_iteration_hook(self, hook):\n handle = hooks.RemovableHandle(self._hooks)\n self._post_iteration_hooks[handle.id] = hook\n return handle", "async def post_launch(self, **kwargs: Any) -> None:\n pass", "def postloop(self):\n cmd.Cmd.postloop(self) ## Clean up command completion\n print(\"Exiting...\")", "def register_post_exec_callback(action_logger):\n logging.debug(\"Adding %s to post execution callback\", action_logger)\n __post_exec_callbacks.append(action_logger)", "def after_test(self, func, *args, **kwargs):\n pass", "def post_mortem(*args, debug_fn: Optional[Callable] = None, **kwargs) -> None:\n if debug_fn is None:\n import pdb\n\n debug_fn = pdb.post_mortem\n\n debug_fn()", "def add_cmd_handler(self, 
cmd, func):\n len_args = len(inspect.getargspec(func)[0])\n def add_meta(f):\n def decorator(*args, **kwargs):\n f(*args, **kwargs)\n decorator.bytes_needed = len_args - 1 # exclude self\n decorator.__name__ = f.__name__\n return decorator\n func = add_meta(func)\n self._command_handlers[cmd] = func", "def post_init_func(fn):\n fn.__has_run__ = False\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n if fn.__has_run__:\n cui.message('Warning: executing post_init_func %s more than once.' % fn)\n\n result = fn(*args, **kwargs)\n fn.__has_run__ = True\n return result\n\n Core.__post_init_functions__.append(wrapper_fn)\n return wrapper_fn", "def postcmd(self, stop, line):\n stop = super(Pdb, self).postcmd(stop, line)\n if self.sticky:\n if stop and not self.commands_defining:\n self._sticky_handle_cls()\n else:\n self._flush_sticky_messages()\n return stop" ]
[ "0.7060543", "0.59133196", "0.5863492", "0.57352626", "0.56779206", "0.563653", "0.55416155", "0.5520004", "0.5520004", "0.5520004", "0.5520004", "0.5489191", "0.5475493", "0.54560333", "0.54542017", "0.5424057", "0.5418961", "0.5323915", "0.5317458", "0.5298286", "0.5239417", "0.5172448", "0.5140746", "0.5138076", "0.5105538", "0.50862706", "0.5026589", "0.5003015", "0.49877232", "0.4962875" ]
0.7256072
0
An iterator that recursively walks through all commands and subcommands. Yields
def walk_commands(self) -> typing.Generator[Command, None, None]: for command in self.commands: yield command if isinstance(command, Group): yield from command.walk_commands()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n for node in self.grammar.walk():\n yield node", "def HierarchyIterator(obj):\n while obj:\n yield obj\n for opChild in SplineInputGeneratorHelper.HierarchyIterator(obj.GetDown()):\n yield opChild\n obj = obj.GetNext()", "def __iter__(self):\n for child in self.children:\n yield child", "def walk(self):\n current = self\n yield current\n while current.parent:\n current = current.parent\n yield current", "def __next__(self):\n for child in self.children:\n yield child", "def __iter__(self):\n for tree in self._tree.subTrees():\n yield self.__class__(tree)", "def iter_tree(self):\n yield self\n for c in self.children:\n for ci in c.iter_tree:\n yield ci", "def walk(self):\n yield self\n for child in self.children:\n for descendant in child.walk():\n yield descendant", "def __iter__(self):\n\n for i in self._children:\n yield i", "def __next__(self):\n for child in self.children:\n yield child\n return\n #self.parent.next()", "def RecurseKeys(self):\n yield self\n for subkey in self.GetSubkeys():\n for key in subkey.RecurseKeys():\n yield key", "def walk(self):\n for name, child in sorted(self._children.items()):\n if isinstance(child, PackageEntry):\n yield name, child\n else:\n yield from child._walk(f'{name}/')", "def RecurseKeys(self):\n root_key = self.GetRootKey()\n if root_key:\n for registry_key in root_key.RecurseKeys():\n yield registry_key", "def traverse(self):\n yield self\n for k in self._kids:\n for kk in k.traverse():\n yield kk", "def iterate_item(tree_item):\n if not tree_item.is_construct():\n return\n for index, arg in enumerate(tree_item.construct.args):\n if isinstance(arg, syntax.Construct):\n yield from iterate_item(TreeItem(arg, tree_item, index))\n elif isinstance(arg, list):\n for i, litem in enumerate(arg):\n yield from iterate_item(TreeItem(litem, tree_item, index, i))\n yield tree_item", "def __next__(self):\n for child in self.children:\n yield child\n next(self.parent)", "def __next__(self):\n for child in self.children:\n yield child\n next(self.parent)", "def __iter__(self):\n leaf_paths, leaf_vals = self._find_combinatorial_leaves()\n return self._combinations_generator(leaf_paths, leaf_vals)", "def __iter__(self):\n\n yield from self._traverse_forward(self.root)", "def _traverse_tree(self):\n if not self.children:\n yield self\n for child in self.children:\n yield from child._traverse_tree()", "def walk(self):\n if self.left is not None:\n yield from self.left.walk()\n yield self.item\n if self.right is not None:\n yield from self.right.walk()", "def __iter__(self):\n handle = self.parent.handle\n cur = getattr(gv, \"first%s\" % self.type)(handle)\n nextitem = getattr(gv, \"next%s\" % self.type)\n while gv.ok(cur):\n yield self.get(gv.nameof(cur))\n cur = nextitem(handle, cur)", "def walk(self): # FileObj.walk\n yield self", "def walk(self): # DirObj.walk\n for name, subdir in self.subdirs.iteritems():\n for e in subdir.walk():\n yield e\n for name, fileEntry in self.files.iteritems():\n yield fileEntry\n yield self", "def iter(self):\n for elem in self:\n if isinstance(elem, Tree):\n for elem2 in elem.iter:\n yield elem2\n else:\n yield elem", "def __iter__(self):\n yield self\n if not self.is_leaf():\n yield from self.left_subtree\n yield from self.right_subtree", "def __iter__(self):\n for key in self._ctx:\n yield key", "def subtrees(self):\n yield from subtrees(self)", "def iter_submodules(self, *args: Any, **kwargs: Any) -> Iterator[Submodule]:\n return RootModule(self).traverse(*args, **kwargs)", "def __iter__(self):\n if 
self:\n if self.hasLeftChild():\n for elem in self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem" ]
[ "0.69049597", "0.6855705", "0.67742354", "0.67373574", "0.6698454", "0.6655534", "0.6623079", "0.65826356", "0.65400165", "0.650673", "0.63782173", "0.63522226", "0.63168263", "0.63032573", "0.63010323", "0.62850904", "0.62850904", "0.626276", "0.6249921", "0.62281144", "0.6198699", "0.6186187", "0.6163791", "0.6154762", "0.6150242", "0.6129784", "0.6127721", "0.61209995", "0.61205065", "0.6104184" ]
0.75511664
0
A decorator that registers a coroutine as a pre-invoke hook. This allows you to refer to one before-invoke hook for several commands that do not have to be within the same cog. Example
def before_invoke(coro) -> Callable[[T], T]: def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]: if isinstance(func, Command): func.before_invoke(coro) else: func.__before_invoke__ = coro return func return decorator # type: ignore
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_invoke(self, coro):\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError('The pre-invoke hook must be a coroutine.')\n\n self._before_invoke = coro\n return coro", "def wrap_before(before, condition=lambda *args, **kwargs: True):\n def decorator(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n if condition(*args, **kwargs):\n before()\n return func(*args, **kwargs)\n return wrapped\n return decorator", "def before_call(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> None:", "def uses_before_args(self, args):\n self.pod_args['uses_before'] = args", "def on_start(self):\n\n def decorator(coro):\n self._hooks.append((\"start\", coro))\n return coro\n\n return decorator", "def before(hook_name, methods, kwargs):\n for hookimpl in methods:\n self._plugin2calls[hookimpl.plugin].add(hook_name)", "def on_pre_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __pre_exec_callbacks)\n for callback in __pre_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on pre-execution callback using %s\", callback)", "def command(*args, **kwargs):\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n new_func = CommandParent(func, **kwargs)\r\n _HANDLER.commands[new_func.name] = new_func\r\n return new_func\r\n return decorator", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def pre_process(cls, *args, **kwargs):\n pass", "def coroutine(func):\n @wraps(func)\n def primer(*args, **kwargs):\n gen = func(*args, **kwargs)\n next(gen)\n return gen\n return primer", "def before_request(self, func: typing.Callable):\n return self.add_hook(type_=\"pre\", hook=func)", "def before(n):\n\n def decorate(fn):\n i = 0\n\n @wraps(fn)\n def wrapped(*args, **kwargs):\n nonlocal i\n i += 1\n if i < n:\n return fn(*args, **kwargs)\n\n return wrapped\n\n return decorate", "def before(self, before: Route.Decorator):\n pass", "def task_prerequisite(pre_req_task: PromiseProxy, key: str=None, trigger: callable=bool) -> callable:\n if not callable(trigger):\n raise Exception(\"trigger must be a function returning a bool\")\n\n def decorator(task_needing_pre_req: PromiseProxy)->PromiseProxy:\n def maybe_run(kwargs):\n if not key:\n trigger_arg = kwargs\n else:\n trigger_arg = kwargs.get(key)\n if not trigger(trigger_arg):\n from celery import current_task\n current_task.enqueue_child(pre_req_task.s(**kwargs), block=True)\n\n maybe_run.__name__ = task_needing_pre_req.__name__ + \"Needs\" + pre_req_task.__name__\n\n dependencies = ConverterRegister.list_converters(task_name=task_needing_pre_req.__name__, pre_task=True)\n ConverterRegister.register_for_task(task_needing_pre_req, True, *dependencies)(maybe_run)\n return task_needing_pre_req\n return decorator", "def precondition(precond):\n def decorator(f):\n def decorated(instance, data):\n try:\n precond(data)\n except UnmetPrecondition:\n # bypass the pipe\n return data\n else:\n return f(instance, data)\n return decorated\n return decorator", "def precmd(self, line):\n return cmd.Cmd.precmd(self, line)", "def before_worker_start(func):\n _func_only(func)\n worker_methods_db.register_before_start(func)\n return func", "def register_pre_exec_callback(action_logger):\n logging.debug(\"Adding %s to pre execution callback\", action_logger)\n __pre_exec_callbacks.append(action_logger)", "def 
preloop(self):\n super(CoreCommand, self).preloop() # sets up command completion", "def run_before(self):\n\n for path in self.hooks.get('before', []):\n self.run_module(path)", "def moo(func):\n def decorated(*args, **kwargs):\n print 'moo'\n return func(*args, **kwargs) # Run decorated function.\n return decorated", "def preoptimized(self, *args):\n return _ida_hexrays.Hexrays_Hooks_preoptimized(self, *args)", "def register_method_before(fn, phase): # type: (Callable, str) -> None\n PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn\n PackageMixinsMeta._add_method_before[phase].append(fn)", "def op_prepare_before_hook(\n self,\n op: Callable,\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:\n seen_q_op_info = self._get_cur_seen_q_op_info()\n\n def _maybe_observe(arg, tensor_info):\n tensor_id = tensor_info.id\n # TODO: do not run this twice on input and output\n if str(tensor_id) in self.tensor_id_to_observer:\n observer = self.tensor_id_to_observer[str(tensor_id)]\n return observer(arg)\n else:\n return arg\n\n args = iterate_and_apply(\n args, seen_q_op_info.input_tensor_infos, _maybe_observe)\n\n return args, kwargs", "def preprocess(generator):\n\n def preprocess_decorator(method):\n\n @wrapper(method)\n def preprocess_wrapper(self, *args, **kwargs):\n self.increment_pc()\n pc = tuple(self.program_counter)\n try:\n return self._pool.pop(pc), True\n except KeyError:\n key = (generator, args)\n pcs = self._needed_data.setdefault(key, [])\n pcs.append(pc)\n self.fork_pc()\n try:\n return method(self, *args, **kwargs), False\n finally:\n self.unfork_pc()\n\n return preprocess_wrapper\n return preprocess_decorator", "def before_test(self, func, *args, **kwargs):\n pass", "def before_call(\n self, cb: CircuitBreaker, func: Callable[..., T], *args: Any, **kwargs: Any\n ) -> None:" ]
[ "0.7762231", "0.61355424", "0.6063686", "0.5962607", "0.58490914", "0.5809584", "0.5795975", "0.5628273", "0.55104494", "0.55104494", "0.55104494", "0.55104494", "0.54770267", "0.5420125", "0.53899693", "0.53138167", "0.53052264", "0.52775484", "0.52747756", "0.5244982", "0.5242406", "0.5233766", "0.5225506", "0.5223654", "0.52235746", "0.52158237", "0.5200052", "0.51995105", "0.5151784", "0.51484436" ]
0.7769411
0
A decorator that registers a coroutine as a post-invoke hook. This allows you to refer to one after-invoke hook for several commands that do not have to be within the same cog.
def after_invoke(coro) -> Callable[[T], T]: def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]: if isinstance(func, Command): func.after_invoke(coro) else: func.__after_invoke__ = coro return func return decorator # type: ignore
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def after_invoke(self, coro):\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError('The post-invoke hook must be a coroutine.')\n\n self._after_invoke = coro\n return coro", "def Postcall(function_to_call_later): \n def postcall_inside(fun): \n @functools.wraps(fun)\n def relay(*args, **kwargs):\n return function_to_call_later(fun(*args, **kwargs))\n return relay\n return postcall_inside", "def post_hook(config, final=False):\n if config.post_hook:\n if final or config.verb != \"renew\":\n logger.info(\"Running post-hook command: %s\", config.post_hook)\n _run_hook(config.post_hook)", "def on_post_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __post_exec_callbacks)\n for callback in __post_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on post-execution callback using %s\", callback)", "def uses_after_args(self, args):\n self.pod_args['uses_after'] = args", "def on_shutdown(self):\n\n def decorator(coro):\n self._hooks.append((\"shutdown\", coro))\n return coro\n\n return decorator", "def post(self, *args):\n event = self\n if args:\n event = copy.copy(self)\n event._set_args(args)\n if not is_mainthread():\n return MainThreadCallback(manager.post, event)()\n else:\n return manager.post(event)", "def post_event(self, func, *args, **kwargs):\n if not callable(func):\n assert(len(func) == 5)\n self._events.append(func + (log.get_tb(1), time.time()))\n else:\n self._events.append((func, args, kwargs, None, 0, log.get_tb(), time.time()))", "def with_post_function(self, post_fcn):\n old_post = self._post\n self._post = lambda loss: post_fcn(old_post(loss))\n return self", "def register_method_after(fn, phase): # type: (Callable, str) -> None\n PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn\n PackageMixinsMeta._add_method_after[phase].append(fn)", "def before_invoke(coro) -> Callable[[T], T]:\n def decorator(func: Union[Command, CoroFunc]) -> Union[Command, CoroFunc]:\n if isinstance(func, Command):\n func.before_invoke(coro)\n else:\n func.__before_invoke__ = coro\n return func\n return decorator # type: ignore", "def wrap_after(after, condition=lambda *args, **kwargs: True):\n def decorator(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n result = func(*args, **kwargs)\n if condition(*args, **kwargs):\n after()\n return result\n return wrapped\n return decorator", "def register_post_exec_callback(action_logger):\n logging.debug(\"Adding %s to post execution callback\", action_logger)\n __post_exec_callbacks.append(action_logger)", "def _run_callable_with_postamble(postamble, callable_, *args, **kwargs):\n def fn():\n try:\n return callable_(*args, **kwargs)\n finally:\n postamble()\n return fn", "def command(*args, **kwargs):\r\n def decorator(func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(f'<{func.__qualname__}> must be a coroutine function')\r\n new_func = CommandParent(func, **kwargs)\r\n _HANDLER.commands[new_func.name] = new_func\r\n return new_func\r\n return decorator", "def _timestep_after_hook(self, *args, **kwargs):\n pass", "def post_init_func(fn):\n fn.__has_run__ = False\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n if fn.__has_run__:\n cui.message('Warning: executing post_init_func %s more than once.' 
% fn)\n\n result = fn(*args, **kwargs)\n fn.__has_run__ = True\n return result\n\n Core.__post_init_functions__.append(wrapper_fn)\n return wrapper_fn", "def after_test(self, func, *args, **kwargs):\n pass", "def register_post_iteration_hook(self, hook):\n handle = hooks.RemovableHandle(self._hooks)\n self._post_iteration_hooks[handle.id] = hook\n return handle", "def end_of_wrappers(args, wrapper):\n if not len(args):\n return wrapper\n elif len(args) == 1 and callable(args[0]):\n return wrapper(args[0])\n raise IncorrectUseOfTheDecoratorError('You used the awaitable decorator incorrectly. Read the documentation.')", "def set_asyncgen_hooks(*args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def postloop(self):\n super(CoreCommand, self).postloop() # Clean up command completion", "def test_initialize_post_save_execution(monkeypatch, caplog):\n caplog.set_level(logging.DEBUG)\n\n jupyter_config_obj = Config(FileContentsManager=FileContentsManager())\n\n def mocked_post_save(model, os_path, contents_manager):\n \"\"\"Append a token to os_path to certify that function ran.\"\"\"\n os_path.append(\"nbautoexport\")\n\n monkeypatch.setattr(nbautoexport_root, \"post_save\", mocked_post_save)\n\n # Initialize post_save hook\n jupyter_config.initialize_post_save_hook(jupyter_config_obj)\n\n assert caplog_contains(\n caplog,\n level=logging.INFO,\n in_msg=\"nbautoexport | Successfully registered post-save hook\",\n )\n assert isinstance(jupyter_config_obj.FileContentsManager, FileContentsManager)\n assert callable(jupyter_config_obj.FileContentsManager.post_save_hook)\n\n # Execute post_save hook\n os_path_list = []\n jupyter_config_obj.FileContentsManager.run_post_save_hook(model=None, os_path=os_path_list)\n assert os_path_list == [\"nbautoexport\"]", "def at_post_cmd(self):\n pass", "def on_completion(self):\n\n def decorator(coro):\n self._hooks.append((\"completion\", coro))\n return coro\n\n return decorator", "def postprocess(self, postprocess_args: dict) -> \"Handle\":\n return self.apply_call(postprocess_args[\"pre_call_hash\"])", "def _post_run_hook(self, runtime):\n pass", "def add_cmd_handler(self, cmd, func):\n len_args = len(inspect.getargspec(func)[0])\n def add_meta(f):\n def decorator(*args, **kwargs):\n f(*args, **kwargs)\n decorator.bytes_needed = len_args - 1 # exclude self\n decorator.__name__ = f.__name__\n return decorator\n func = add_meta(func)\n self._command_handlers[cmd] = func", "def after_request(self, func: typing.Callable):\n return self.add_hook(type_=\"post\", hook=func)", "def wrapper(self, *args, **kwargs):\n if self.afterid:\n self.master.after_cancel(self.afterid)\n function(self, *args, **kwargs)\n self.afterid = self.master.after(5000, self.cycle)" ]
[ "0.7621359", "0.6003703", "0.590855", "0.580963", "0.56406933", "0.5633439", "0.5529226", "0.551219", "0.5412582", "0.53565824", "0.5354295", "0.5315982", "0.53044456", "0.5290917", "0.52646506", "0.5262968", "0.5256531", "0.52481365", "0.52188635", "0.5216198", "0.5194705", "0.5188315", "0.51808304", "0.51497084", "0.5149054", "0.5103507", "0.5091547", "0.5079638", "0.50781643", "0.49885142" ]
0.77887046
0
Initializes a storage merge reader.
def __init__(self, session, storage_writer, task_storage_reader): super(StorageMergeReader, self).__init__() self._active_container_type = None self._active_generator = None self._container_types = [] self._event_data_identifier_mappings = {} self._event_data_parser_mappings = {} self._event_data_stream_identifier_mappings = {} self._session = session self._storage_writer = storage_writer self._task_storage_reader = task_storage_reader self.number_of_containers = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n self.reader = reader.Reader()", "def __init__(self, storage_writer, task, redis_client=None):\n super(RedisMergeReader, self).__init__(storage_writer)\n self._active_container_type = None\n self._container_types = []\n self._active_cursor = 0\n self._add_active_container_method = None\n self._store = redis_store.RedisStore(\n definitions.STORAGE_TYPE_TASK,\n session_identifier=task.session_identifier,\n task_identifier=task.identifier)\n self._store.Open(redis_client=redis_client)\n self._event_data_identifier_mappings = {}\n self._event_data_stream_identifier_mappings = {}\n self._add_container_type_methods = {}\n self._active_extra_containers = []\n\n # Create a runtime lookup table for the add container type method. This\n # prevents having to create a series of if-else checks for container types.\n # The table is generated at runtime as there are no forward function\n # declarations in Python.\n for container_type, method_name in self._ADD_CONTAINER_TYPE_METHODS.items():\n method = getattr(self, method_name, None)\n if not method:\n raise RuntimeError(\n 'Add method missing for container type: {0:s}'.format(\n container_type))\n\n self._add_container_type_methods[container_type] = method", "def __init__(self):\n config = self.read_config()\n self.deployment = config['deployment']\n self.deployment_config = config[self.deployment]\n logger.info(f'Initializing storage client with the {self.deployment} deployment config {pformat(self.deployment_config)}')\n\n # get the MLOS config from the user else default it from the deployment config file\n # self.mlos_config = config['MLOS']\n # logger.info(f'Initializing storage client with the MLOS config {pformat(self.mlos_config)}')\n\n # setup the mount path\n if self.deployment == \"LOCAL\":\n self.mount_dir = self.setup_mount()\n logger.info(f'Mount directory setup completed: {self.mount_dir}')", "def _OpenRead(self):\n has_storage_metadata = self._ReadStorageMetadata()\n if not has_storage_metadata:\n # TODO: remove serializer.txt stream support in favor\n # of storage metadata.\n if self._read_only:\n logging.warning('Storage file does not contain a metadata stream.')\n\n stored_serialization_format = self._ReadSerializerStream()\n if stored_serialization_format:\n self.serialization_format = stored_serialization_format\n\n if self.serialization_format != definitions.SERIALIZER_FORMAT_JSON:\n raise IOError('Unsupported serialization format: {0:s}'.format(\n self.serialization_format))\n\n self._serializer = json_serializer.JSONAttributeContainerSerializer\n\n for container_type, stream_name_prefix in (\n self._STREAM_NAME_PREFIXES.items()):\n stream_name_prefix = '{0:s}_data.'.format(stream_name_prefix)\n self._last_stream_numbers[container_type] = self._GetLastStreamNumber(\n stream_name_prefix)\n\n self._analysis_report_stream_number = self._GetLastStreamNumber(\n 'analysis_report_data.')\n self._last_preprocess = self._GetLastStreamNumber('preprocess.')\n\n last_session_start = self._GetLastStreamNumber('session_start.')\n last_session_completion = self._GetLastStreamNumber('session_completion.')\n\n # TODO: handle open sessions.\n if last_session_start != last_session_completion:\n logging.warning('Detected unclosed session.')\n\n self._last_session = last_session_completion\n\n last_task_start = self._GetLastStreamNumber('task_start.')\n last_task_completion = self._GetLastStreamNumber('task_completion.')\n\n # TODO: handle open tasks.\n if last_task_start != last_task_completion:\n logging.warning('Detected 
unclosed task.')\n\n self._last_task = last_task_completion", "def _CreateTaskStorageMergeReader(self, path):\n return gzip_file.GZIPStorageMergeReader(self, path)", "def _init_storage(self):\n if self._ is None:\n self._ = Parameters(self)", "def __init__(self, *args, **kwargs):\n logger.debug(\"SlokaReader: Initialize\")\n super(SlokaReader, self).__init__(*args, **kwargs)", "def initialize_file_readers():\n savefile_path = os.path.join(os.getcwd()+ \"/../data/\", SAVE_FILE)\n file_reader_list = []\n for file in os.listdir(savefile_path):\n file_reader = open(os.path.join(savefile_path,file), \"r\")\n file_reader_list.append({\"file_reader\": file_reader, \"last_read\": { \"word\": \"\", \"doc_score_list\": []}})\n return file_reader_list", "def _init(self):\n if os.path.exists(self.fname):\n with open(self.fname, \"rb\") as fh:\n self.db = pickle.load(fh)\n else:\n self.db = {}\n print(\"DB loaded, len\", len(self.db))", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def __init__(self, fileStore, sqlStore, uid=None, gid=None, merge=False):\n\n self.fileStore = fileStore\n self.sqlStore = sqlStore\n self.uid = uid\n self.gid = gid\n self.merge = merge", "def __init__(self, iReader):\n self.__index_reader = iReader", "def __init__(self, reader, ack_queue, settings_json, db_lock):\n\t\tsuper().__init__()\n\t\tself._reader = reader\n\t\tself._settings = settings_json\n\t\tself.progress = DownloaderProgress()\n\t\tself._session = None\n\t\tself._db_lock = db_lock\n\t\tself._ack_queue = ack_queue\n\t\tself.daemon = True", "def __init__(self, path):\n super(ZIPStorageFileReader, self).__init__(path)\n self._storage_file = ZIPStorageFile()\n self._storage_file.Open(path=path)", "def __init__(self, storage_config: Dict[str, Any], storage_paths: List[str], local_dir: str):\n _ = storage_config\n self.local_dir = local_dir\n self.storage_paths = storage_paths\n self._file_records = {} # type: Dict[str, datetime.datetime]", "def __init__(self,file_reader):\n self.file_reader = file_reader", "def init_storage(self):\n\n # Create the average rotation matrix (first order).\n self.first_frame_order = zeros((INC+1, 3, 3), float64)\n\n # Create the frame order matrix (each element is ensemble averaged and corresponds to a different time step).\n self.second_frame_order = zeros((INC+1, 9, 9), float64)\n\n # Init the rotation matrix.\n self.rot = zeros((3, 3), float64)\n\n # Some data arrays.\n self.full = zeros(INC+1)\n self.count = zeros(INC+1)", "def __init__(self, genomeReader=None):\n self.genomeReader = genomeReader", "def __init__(self, max_readers=2):\n if not isinstance(max_readers, int):\n raise TypeError(\"SyncedDictionary.__init__: expected max_readers to be of type int\")\n\n self.__dict = {}\n self.max_readers = max_readers\n self.semaphore_lock = Semaphore(value=self.max_readers)\n self.write_lock = Lock()", "def __init__(self):\n self.database = None\n self.dataset = None\n \n self._database_temp_cache_dir = None\n self.path_of_pickle_file=None\n # open default config\n self.default_config = YamlConfig(DEXNET_API_DEFAULTS_FILE)\n # Resolve gripper_dir and cache_dir relative to dex-net root\n self.default_config['cache_dir'] = None\n for key in ['gripper_dir']:\n if not os.path.isabs(self.default_config[key]):\n self.default_config[key] = os.path.realpath(DEXNET_DIR + self.default_config[key])", "def __init__(self):\n\n self.storage: list = Storage()\n\n # Start for get data in API and set in storage\n 
self._set_proxies_in_storage()", "def __init__(self, iReader):\n self.ireader = iReader\n self.num_of_doc = iReader.getNumberOfDocuments()", "def __init__(self):\n print ('Initializing Data reader object...')\n data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels = self.readDataFromFile()\n test_10k_x, test_10k_y, training_55k_x, training_55k_y, validation_5k_x, validation_5k_y = self.dataTransform(\n data_Test_Image, data_Test_Labels, data_Train_Images, data_Train_Labels)\n self.train = zip(training_55k_x, training_55k_y)\n self.valid = zip(validation_5k_x, validation_5k_y)\n self.test = zip(test_10k_x, test_10k_y)\n\n self.train_position = 0\n print ('Initialized!')", "def init(self, *, update=False):\n if self.initialized:\n return\n\n if not self.need_update is None:\n update |= self.need_update\n\n # TODO: add more useful data in lock file\n msg = 'Enzi:init: this project has dependencies, launching LockLoader'\n if update:\n msg = 'Enzi:init: this project has dependencies, launching LockLoader'\n else:\n msg = 'Enzi:init: launching LockLoader'\n \n logger.debug(msg)\n locked = LockLoader(self, self.work_dir).load(update)\n\n if locked.cache and 'git' in locked.cache:\n self.git_db_records = locked.cache['git']\n\n self.locked = locked\n\n dep_msg = pprint.pformat(locked.dep_dumps())\n cache_msg = pprint.pformat(locked.cache)\n logger.debug('Enzi:init: locked deps:\\n{}'.format(dep_msg))\n logger.debug('Enzi:init: locked caches:\\n{}'.format(cache_msg))\n\n if self.config_mtime != locked.config_mtime:\n self.non_lazy_configure = True\n\n if not self.config.dependencies:\n logger.debug('Enzi:init: this project has no dependencies')\n\n self.init_deps_graph()\n self.initialized = True", "def __init__(self, reader_schema, writer_schema=None, input_file=None):\n\n if writer_schema is None:\n writer_schema = reader_schema\n self._reader_schema = reader_schema\n self._writer_schema = writer_schema\n self._reader_schema_json = json.loads(str(self._reader_schema))\n self._writer_schema_json = json.loads(str(self._writer_schema))\n self._input_file = input_file\n self._set_avro_readers()", "def setup_read(self, batch_id=None):\n self.load_config()\n self.setup_log()\n self.setup_db()\n\n if batch_id:\n self.batch_id = batch_id\n else:\n self.batch_id = self.db.max_batch_id()", "def initialize_storage(cfg: ElasticBlastConfig, query_files: List[str] = [], wait=ElbExecutionMode.WAIT) -> None:\n use_local_ssd = cfg.cluster.use_local_ssd\n if use_local_ssd:\n initialize_local_ssd(cfg, query_files, wait)\n else:\n initialize_persistent_disk(cfg, query_files, wait)", "def __init__(self, directory, mode=Mode.READONLY):\n if not os.path.exists(directory):\n raise IOError(\"Directory %s for raw store does not exist\"%\n directory)\n self.directory = directory\n with open(os.path.join(directory, \"__rawformat__\"), 'rb') as rawformat:\n self.__dict__.update(pickle.load(rawformat))\n \n fname = self.fname = os.path.join(directory, \"__store___\")\n colCache = self.colCacheDir = os.path.join(directory, \"__colstore___\")\n\n self._f = None\n self.mode = mode\n self._openfile()", "def __int__(self, storage):\r\n self._storage = storage\r\n self.tables={}", "def __init__(self):\n this = _libsbml.new_SBMLReader()\n try: self.this.append(this)\n except: self.this = this" ]
[ "0.62885654", "0.58362806", "0.57856005", "0.5741826", "0.5728234", "0.5686032", "0.5671583", "0.55968064", "0.55951667", "0.55562574", "0.5551261", "0.5526546", "0.5510841", "0.54997456", "0.54607743", "0.54495597", "0.54392564", "0.54389495", "0.5437427", "0.5426284", "0.5393658", "0.53568137", "0.5324049", "0.5313718", "0.5308236", "0.53009635", "0.5294963", "0.52890724", "0.5283382", "0.5273112" ]
0.70713705
0
Handle page head and create dict with different content
def handle_page_head(self, head_content): return_dict = {} return_dict['title'] = self.find_in_content(r'title:.+', head_content) return_dict['permalink'] = self.find_in_content(r'permalink:.+', head_content) return return_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ParsePageHead(read):\n page_head = {}\n result = struct.unpack_from(PageHeadFormat, read, 0)\n for i in xrange(len(PageHeadProperties)):\n page_head[PageHeadProperties[i]] = result[i]\n return page_head", "def handle_page_body(self, body_content):\n return_dict = {}\n return_dict['content'] = self.markdown_to_html(body_content)\n return return_dict", "def parse(self, response):\n yield{\n 'url': response.url,\n 'title': response.css(\"h1.article-main-title::text\").get(),\n 'sub_title': response.css(\"h2.article-sub-title::text\").get(),\n 'article_image': (response.css(\"div.article-image img::attr(src)\").get()),\n 'body': '\\n\\n'.join(response.css(\"div.article-body-container p::text\").getall()),\n 'published_date': (response.css(\"div.article-credit::text\").get().replace('|','').replace('\\r',''))[1:],\n 'source': 'One'\n }", "def get_meta():\n meta = {\n 'pages': _get_pages()\n }\n return meta", "def _get_new_data(self, page_url, soup):\n data = {}\n data['url'] = page_url\n title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')\n data['title'] = title.get_text()\n summary = soup.find('div', class_='lemma-summary')\n data['summary'] = summary.get_text()\n return data", "def parse_header(self):\n bodystart=re.compile(r\"<body>\", re.IGNORECASE).search(self.lines).span()[1]\n oldheader=self.lines[0:bodystart]\n start=re.compile(\"<title>\", re.IGNORECASE).search(oldheader).span()[1]\n finish=re.compile(\"</title>\", re.IGNORECASE).search(oldheader).span()[0]\n titles=oldheader[start:finish].split(\"--\")\n # Duplicate if needed\n if len(titles)==1: titles.append(titles[0])\n self.title, self.header= {}, {}\n for i, lang in enumerate(LANGS):\n self.title[lang]=titles[i]\n self.header[lang]=\"%s%s%s\" % (oldheader[:start], self.title[lang], oldheader[finish:],)", "def get_pages_data(title: str) -> dict: \n data_object = layout_data.objects.get(title = title)\n data = {\n 'title': data_object.title,\n 'main_consistion' : data_object.main_consistion,\n 'list_of_links' : data_object.data['link'][:3],\n 'main_consistion_2' : data_object.main_consistion_2,\n 'list_of_links_2' : data_object.data['link'][3:]\n }\n return data", "def parse_html_content(self, data):\n result = {}\n if data == '':\n return result\n dom = lh.fromstring(data)\n result['css_links'] = {\n urljoin(\n self.base_url,\n href) for href in dom.xpath('//link[@rel=\"stylesheet\"]/@href')}\n result['js_links'] = {urljoin(self.base_url, src)\n for src in dom.xpath('//script/@src')}\n result['img_links'] = {urljoin(self.base_url, src)\n for src in dom.xpath('//img/@src')}\n result['icon_links'] = {\n urljoin(\n self.base_url,\n src) for src in dom.xpath('//link[contains(@rel,\"icon\")]/@href')}\n return result", "def set_header(cls, response):\n head = {key: response[key] for key in (response.keys() & cls.HEAD_KEYS)}\n\n for key_name, key_path in cls.HEAD_EXTRA:\n value = response\n try:\n for key in key_path:\n value = value[key]\n except KeyError:\n continue\n head[key_name] = value\n\n return head", "def scrape_detail_pages(h, s):\n n = list()\n for i, header in enumerate(h):\n notice = dict()\n notice['header'] = header\n print notice['header']['link']\n notice['detail'] = create_detail(notice['header']['link'])\n notice['detail']['postcode'] = (\n extract_postcode(notice['detail']['address'])\n )\n notice['search_details'] = s\n n.append(notice)\n return n", "def header(node):\n\n (title, description) = get_page_contents(node)\n return '''<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta 
charset=\"utf-8\" />\n <meta name=\"author\" content=\"''' + AUTHOR + '''\" />\n <meta name=\"generator\" content=\"minimalsite-%%%VERSION%%%\" />\n <meta name=\"description\" content=\"''' + description + '''\" />\n <title>''' + title + '''</title>\n <link rel=\"shortcut icon\" href=\"/favicon.ico\" />\n <style type=\"text/css\">\n #container {\n width: 80%;\n margin: 30px auto;\n }\n #content {\n margin-left: 170px;\n text-align: justify;\n }\n #edit {\n clear: both;\n text-align: right;\n font-size: small;\n }\n footer {\n clear: both;\n }\n nav {\n float: left;\n width: 160px;\n }\n nav li a.current {\n background-color: blue;\n color: #ffffff;\n font-weight: bold;\n }\n nav ul {\n margin: 0;\n padding: 0;\n list-style: none;\n width: 150px; /* Width of Menu Items */\n border-bottom: 1px solid #ccc;\n }\n nav ul li {\n position: relative;\n }\n nav li ul {\n position: absolute;\n left: 149px; /* Set 1px less than menu width */\n top: 0;\n display: none;\n }\n /* Styles for Menu Items */\n nav ul li a {\n display: block;\n text-decoration: none;\n color: #777;\n background: #fff; /* IE6 Bug */\n padding: 5px;\n border: 1px solid #ccc; /* IE6 Bug */\n border-bottom: 0;\n }\n /* Holly Hack. IE Requirement \\*/\n * html ul li { float: left; height: 1%; }\n * html ul li a { height: 1%; }\n /* End */\n nav li:hover ul, li.over ul { display: block; } /* The magic */\n </style>\n </head>\n <body>\n <div id=\"container\">\n <header>\n <h1><a href=\"''' + '../' * node.page.level + '''index.''' + DST_EXT + '''\">''' + SITE_NAME + '''</a></h1>\n </header>\n <div id=\"path\">\n You are here: %%%PATH%%%\n </div>\n <div id=\"main\">\n <nav>\n ''' + menu(node) + '''\n </nav>\n <div id=\"content\">\n'''", "def gather_headlines(urls):\n pass", "def all_headlines_from(url):\n pass", "def create_head(css, title):\r\n doc = \"<!DOCTYPE html>\\n<html>\\n\"\r\n head = \"<head>\\n<title>\" + title + \"\\n</title>\" + css + \"</head>\\n\"\r\n header = doc + head + \"<body>\\n<h1>\" + title + \"\\n</h1>\\n<hr/>\\n\"\r\n return header", "def page_header(self, request, title):\n request['body'].append('''\\\n<html>\n <head><title>%s</title></head>\n <style type=\"text/css\">\n * {\n font-family: verdana,sans-serif;\n }\n body {\n width: 50em;\n margin: 1em;\n }\n div {\n padding: .5em;\n }\n table {\n margin: none;\n padding: none;\n }\n .alert {\n border: 1px solid #e7dc2b;\n background: #fff888;\n }\n .error {\n border: 1px solid #ff0000;\n background: #ffaaaa;\n }\n #verify-form {\n border: 1px solid #777777;\n background: #dddddd;\n margin-top: 1em;\n padding-bottom: 0em;\n }\n </style>\n <body>\n <h1>%s</h1>\n <p>\n This example consumer uses the <a\n href=\"http://openid.schtuff.com/\">Python OpenID</a> library. 
It\n just verifies that the URL that you enter is your identity URL.\n </p>\n''' % (title, title))", "def initialPage():\n\treturn header() + footer()", "def show_header():\n return {};", "def _handle_head_request(self):\n self._header_only = True\n self._handle_get_request()", "def Header(page):\n style = {\n \"background-color\": \"var(--red_color)\",\n \"color\": \"black\",\n \"text-shadow\": \"0 0 10px #ffffff\"\n }\n\n button1 = html.A(id=\"btn1box\", children=[\n html.Button(\"OVERVIEW\", id=\"btn1\", className=\"btn\")\n ], href=\"/dashboard/overview\")\n\n button2 = html.A(id=\"btn2box\", children=[\n html.Button(\"BOOK\", id=\"btn2\", className=\"btn\")\n ], href=\"/dashboard/book\")\n\n button3 = html.A(id=\"btn3box\", children=[\n html.Button(\"WORD\", id=\"btn3\", className=\"btn\")\n ], href=\"/dashboard/word\")\n\n if page == \"overview\":\n button1.children[0].style = style\n if page == \"book\":\n button2.children[0].style = style\n if page == \"word\":\n button3.children[0].style = style\n\n return html.Div(id=\"header\", children=[\n get_title(),\n get_subtitle(),\n button1,\n button2,\n button3\n ])", "def _send_regenerated_head(self, content):\n self.send_response(200)\n self.send_header(\"Content-type\", 'text/html')\n self.send_header(\"Content-Length\", len(content))\n self.send_header(\"Last-Modified\", self.date_time_string())\n self.end_headers()", "def parse_article_html(page_resp):\n article_url = page_resp.url\n \n article_page_soup = bs4.BeautifulSoup(page_resp.text, \"lxml\")\n \n title_html = article_page_soup.find_all(\"h1\")[0]\n title_text = title_html.contents[0]\n \n date = article_page_soup.find_all(\"small\", {'class': 'gray'})[0]\n date_text = date.contents[4].replace(\" \", \"\").split(\"\\n\")[3][:10]\n \n article_content = article_page_soup.find_all(\"div\", {'class': 'rich_media_content'})[0]\n article_text = article_content.get_text('\\n')\n is_original = check_if_original(article_content) or '[原创]' in title_text\n \n return {\n 'title': title_text,\n 'date': date_text,\n 'url': article_url,\n 'is_original': is_original,\n 'text': article_text\n \n}", "def build_head(self, root):\n head = ET.SubElement(root, \"head\")\n for key, val, attr in self.headers:\n if val:\n ET.SubElement(head, # pylint: disable-msg=W0142\n key, **attr).text = val\n else:\n ET.SubElement(head, key, **attr) # pylint: disable-msg=W0142\n ET.SubElement(head, \"title\").text = self.title", "def init_meta():\n meta = {}\n meta[\"title\"] = None\n meta[\"authors\"] = []\n meta[\"date\"] = None\n meta[\"abstract\"] = None\n meta[\"notes\"] = [] \n return meta", "def all_headlines(html_root_node):\n pass", "def _build_header_dictionary(self):\n start = 0\n #print self.raw_data\n for a in range(20):\n redatapuller = re.compile(\"\\r\\n\\r\\n\\r\\n(?P<word>.*?)\\t.*?\\n\", re.DOTALL)\n m = redatapuller.search(self.raw_data[start:])\n if not(m):\n break\n self.header_dictionary[m.group(\"word\")] = start + m.end()\n if a==0:\n self.header_dictionary[\"main\"] = start + m.end()\n start += m.end()", "def handle_head(self, tag, attrs):\n self.head = 'open'", "def get_headlines(newssource):\n \n \n newssource_dict = {}\n url = 'https://newsapi.org/v1/articles?source=' + newssource + '&sortBy=top&apiKey=' + api\n request = http.request('GET',url,timeout=4.0)\n\n headline = json.loads(request.data)\n \n if not headline['articles']:\n return \"NewsAPI can not receive information from\" + newsource + \"right now\"\n \n newssource_dict['url'] = headline['articles'][0]['url']\n 
newssource_dict['title']= headline['articles'][0]['title']\n newssource_dict['description'] = headline['articles'][0]['description']\n \n \n return newssource_dict", "def _extract_page_info(article: dict, url: str) -> dict:\n\n if not article:\n return {}\n language = detect(article.get('content_text'))\n if len(language) > 2 and len(language[2]) > 1:\n language_code = language[2][0][1]\n else:\n language_code = None\n return {'url': url, 'language': language_code}", "def common_header_part2(outfile: TextIO, indexpath: str = \"\", include_map: bool = False) -> None:\n outfile.write(\" </head>\\n\")\n outfile.write(\"\\n\")\n if include_map:\n outfile.write(\" <body onload=\\\"initialize()\\\">\\n\")\n else:\n outfile.write(\" <body>\\n\")\n outfile.write(\" <div id=\\\"skip-links\\\" role=\\\"complementary\\\" aria-label=\\\"Skip links menu\\\">\")\n outfile.write(\"<a href=\\\"#Main\\\" tabindex=\\\"1\\\">Skip to content</a></div>\\n\")\n outfile.write(\" <div id=\\\"home\\\">\\n\")\n outfile.write(\" <a href=\\\"\" + indexpath + \"index.html\\\" class=\\\"home-title\\\">Fiddler Crabs</a>\\n\")\n outfile.write(\" <a href=\\\"\" + indexpath +\n \"index.html\\\" class=\\\"home-link\\\">\" + fetch_fa_glyph(\"home\") + \"Home</a>\\n\")\n # outfile.write(\" <a href=\\\"\" + indexpath +\n # \"blog\\\" class=\\\"home-link\\\">\" + fetch_fa_glyph(\"blog\") + \"Blog</a>\\n\")\n outfile.write(\" </div>\\n\")", "def initialise_har(_page_ref):" ]
[ "0.65806377", "0.6083205", "0.59432834", "0.59213173", "0.5893205", "0.58655334", "0.5822796", "0.5818106", "0.58166486", "0.57890755", "0.57307625", "0.5705943", "0.5701683", "0.5639737", "0.5633458", "0.56205946", "0.557755", "0.5573049", "0.5549238", "0.5528069", "0.55255145", "0.54993224", "0.54980236", "0.5486533", "0.5480765", "0.54735017", "0.5465648", "0.54609907", "0.5441623", "0.5431237" ]
0.8366756
0
Handle page body and create page dict
def handle_page_body(self, body_content): return_dict = {} return_dict['content'] = self.markdown_to_html(body_content) return return_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_mapping_page(self, id, body):\n info = {}\n info['original'] = self.__re_search(body, *self.regx['original'])\n info['save'] = self.__re_search(body, *self.regx['save'])\n info['price'] = self.__re_search(body, *self.regx['price'])\n info['rebate'] = self.__re_search(body, *self.regx['rebate'])\n return info", "def create_page(self):", "def parse(self, response):\n yield{\n 'url': response.url,\n 'title': response.css(\"h1.article-main-title::text\").get(),\n 'sub_title': response.css(\"h2.article-sub-title::text\").get(),\n 'article_image': (response.css(\"div.article-image img::attr(src)\").get()),\n 'body': '\\n\\n'.join(response.css(\"div.article-body-container p::text\").getall()),\n 'published_date': (response.css(\"div.article-credit::text\").get().replace('|','').replace('\\r',''))[1:],\n 'source': 'One'\n }", "def prepare_wiki_page(self, req, page, fields):\r\n pass", "def _get_new_data(self, page_url, soup):\n data = {}\n data['url'] = page_url\n title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')\n data['title'] = title.get_text()\n summary = soup.find('div', class_='lemma-summary')\n data['summary'] = summary.get_text()\n return data", "def handle_page_head(self, head_content):\n return_dict = {}\n return_dict['title'] = self.find_in_content(r'title:.+', head_content)\n return_dict['permalink'] = self.find_in_content(r'permalink:.+',\n head_content)\n return return_dict", "def _page_call(self, url, request) -> Dict:\n response = self._post(url, request)\n raise_on_error(response)\n return response.json()", "def _DoPageProcessing(self, mr, nonce):\n with mr.profiler.Phase('common request data'):\n self._DoCommonRequestProcessing(self.request, mr)\n self._MaybeRedirectToBrandedDomain(self.request, mr.project_name)\n page_data = self.GatherBaseData(mr, nonce)\n\n with mr.profiler.Phase('page processing'):\n page_data.update(self.GatherPageData(mr))\n page_data.update(mr.form_overrides)\n template_helpers.ExpandLabels(page_data)\n self._RecordVisitTime(mr)\n\n return page_data", "def parse_webpage(self, response):\n item = response.meta['item']\n print(\"Request url {}, actual requested url {}\".format(item['url'], response.request.url))\n # website url\n item['website_url'] = response.request.url\n\n item['name'] = self.guess_company_name(response)\n item['domain'] = self.get_domain(response)\n\n # get website title\n item['website_title'] = self.get_webpage_title(response)\n # get description from website\n item['website_desc'] = self.get_webpage_description(response)\n\n # get keywords from website\n item['keywords'] = self.get_webpage_keywords(response)\n\n # try to get email and phones\n item['email'] = self.extract_email(response)\n item['phone'] = self.extract_phone(response)\n\n if not item['email']:\n # try to get contact info\n # check if there is kontakt link on the page\n item = self.check_webpage_for_contact_details(item, response, \"impressum\")\n\n if not item['email']:\n try:\n # try Contact\n item = self.check_webpage_for_contact_details(item, response, \"kontakt\")\n\n except Exception as e:\n print(\"Exception\", e)\n\n if item['email']:\n item['email'] = item['email'].replace(\"(at)\", \"@\")\n yield item", "def create_page_objects(self, data):\n for page in data['pages']:\n self.create_page(page)", "def get_pages(epObject, fileDict):\r\n homePage = DOMAIN + epObject.ViewLink\r\n soup = make_soup(homePage)\r\n fileDict['pageUrls'].append(homePage)\r\n fileDict['pageFileNames'].append('index.html')\r\n 
fileDict['pageIds'].append(str(epObject.ObjectId))\r\n for a in soup.find_all('a', {'href': 'javascript://'}):\r\n if a['onclick'].find('GotoPage') > 0:\r\n pageId = get_page_id(str(a['onclick']), str(epObject.ObjectId))\r\n if pageId not in fileDict['pageIds']:\r\n address = homePage + \"&pageId={0}\".format(pageId)\r\n fileName = a.string.replace(' ', '').lower() + \".html\"\r\n fileDict['pageUrls'].append(address)\r\n fileDict['pageFileNames'].append(fileName)\r\n fileDict['pageIds'].append(pageId)\r\n return fileDict", "def get_new_page_data(self, draft=False):\n page_data = {\n 'title': 'test page %d' % self.counter,\n 'slug': 'test-page-%d' % self.counter, 'language': 'en',\n 'sites': [1], 'status': Page.DRAFT if draft else Page.PUBLISHED,\n # used to disable an error with connected models\n 'document_set-TOTAL_FORMS': 0, 'document_set-INITIAL_FORMS': 0,\n }\n self.counter = self.counter + 1\n return page_data", "def get_pages_data(title: str) -> dict: \n data_object = layout_data.objects.get(title = title)\n data = {\n 'title': data_object.title,\n 'main_consistion' : data_object.main_consistion,\n 'list_of_links' : data_object.data['link'][:3],\n 'main_consistion_2' : data_object.main_consistion_2,\n 'list_of_links_2' : data_object.data['link'][3:]\n }\n return data", "def setMITPageBody(self, context, fdata):\n fns = context.source.listFiles()\n for fn in fns:\n import os\n mimetype = mimetypes.guess_type(fn)\n textDoc = ''\n if mimetype:\n if mimetype[0]:\n textDoc = mimetype[0].split('/')[0]\n\n if fn[-1] != os.sep and textDoc == 'text':\n data = context.source.readFile(fn)\n from BeautifulSoup import BeautifulSoup\n soup = BeautifulSoup(data)\n \n ftext = ''\n if soup.findAll('div',attrs={'class':'maincontent'}):\n bc = soup.findAll('div',attrs={'class':'bread-crumb'})\n if bc:\n titleTag = bc[0].nextSibling.nextSibling\n bc[0].extract()\n if titleTag.name == 'h1':\n titleTag.extract()\n ftext = str(soup.findAll('div',attrs={'class':'maincontent'})[0])\n \n if not ftext:\n tbls = soup('table')\n for tbl in tbls:\n if tbl.has_key('summary'):\n summary = tbl['summary']\n if summary.find('Main Content Header') > 0:\n ftext = str(tbl)\n\n if ftext:\n fdata[fn] = ftext", "def create_page(self, data):\n env = Environment(loader=FileSystemLoader(self.template_folder), trim_blocks=True, lstrip_blocks=True)\n template = env.get_template(self.template_file_name)\n template_vars = {'class_name': self.get_class_name(data['name']), 'page': data}\n output = template.render(template_vars)\n formatted_output = output.encode('utf8').strip()\n file_name = data['name'] + self.get_output_file_type()\n result_html = open(os.path.join(self.output_folder, file_name), 'w')\n result_html.write(formatted_output)\n result_html.close()", "def generate_page_sections_dict(self, project_page_data: dict):\n wiki_obj = WikiService()\n\n short_description = (\n f\"\\n{project_page_data['project']['shortDescription']}\\n\"\n )\n\n created_date = wiki_obj.format_date_text(\n project_page_data['project']['created']\n )\n # created_date_text = f\"\\n{created_date}\\n\" \n # due_date = wiki_obj.format_date_text(\n # project_page_data['project']['due_date']\n # )\n timeframe = (\n f\"\\n* '''Start Date:''' {created_date}\\n\"\n # f\"\\n* '''End Date:''' Estimate {due_date}\\n\"\n )\n\n\n project_url = (\n f\"\\n{project_page_data['project']['url']}\\n\"\n )\n\n hashtag = (\n project_page_data['project']['changesetComment']\n )\n hashtag = (\n project_page_data['project']['changesetComment'].replace(\n \"#\", 
\"<nowiki>#</nowiki>\"\n )\n )\n hashtag_text = (\n f\"\\n{hashtag}\\n\"\n )\n\n instructions_text = (\n project_page_data['project']\n ['externalSource']\n ['instructions']\n )\n instructions = (\n f\"\\n{instructions_text}\\n\"\n )\n\n per_task_instructions_text = (\n project_page_data['project']\n ['externalSource']\n ['perTaskInstructions']\n )\n per_task_instructions = (\n f\"\\n{per_task_instructions_text}\\n\"\n )\n\n imagery_text = (\n project_page_data['project']\n ['externalSource']\n ['imagery']\n )\n imagery = (\n f\"\\n{imagery_text}\\n\"\n )\n\n license_text = (\n project_page_data['project']\n ['externalSource']\n ['license']\n )\n license = (\n f\"\\n{license_text}\\n\"\n )\n\n # metrics = (\n # f\"\\n* {project_page_data.instructions}\\n\"\n # )\n # quality_assurance = (\n # f\"\\n* {project_page_data.quality_assurance}\\n\"\n # )\n\n users = project_page_data['project'][\"users\"]\n project_users = \"\"\n for user in users:\n project_users += (\n f\"\\n| {user['userId']}\\n| {user['userName']}\\n|-\"\n )\n\n \n\n project_page_sections = {\n self.short_description_section: short_description,\n self.timeframe_section: timeframe,\n # self.timeframe_section: {\n # self.created_section: created_date_text\n # }, # if choose use subsection for timeframe \n self.url_section: project_url,\n self.external_sources_section: {\n self.instructions_section: instructions,\n self.per_task_instructions_section: per_task_instructions,\n self.imagery_section: imagery,\n self.license_section: license\n },\n self.hashtag_section: hashtag_text,\n # self.instructions_section: instructions,\n # self.metrics_section: metrics,\n # self.quality_assurance_section: quality_assurance,\n self.team_user_section: {\n self.users_list_section: project_users\n }\n }\n return project_page_sections", "def prepare_body(self, page_instructions):\n template = loader.get_template(str(page_instructions.body))\n self.prepared_instructions['body'] = unicode(\n template.render(self.context))", "def create_page(self, space, title, body, parent_id=None, update_message=None):\n page_structure = {\n 'title': title,\n 'type': 'page',\n 'space': {\n 'key': space\n },\n 'body': {\n 'storage': {\n 'value': body,\n 'representation': 'storage'\n }\n }\n }\n\n if parent_id is not None:\n if type(parent_id) is str:\n parent_id = int(parent_id)\n page_structure['ancestors'] = [{'id': parent_id}]\n\n if update_message is not None:\n page_structure['version'] = {'message': update_message}\n\n print(json.dumps(page_structure))\n return self.api.content.post(json=page_structure)", "def page_data():\n return scrape()", "def parse_detail_page(self, response):\n self.logger.info('Parse Detail Page function called on %s', response.url)\n item = response.meta.get('item', {})\n item['url'] = response.url\n item['title'] = response.css(TITLE_SELECTOR).extract_first(\"\").strip()\n item['price'] = self.get_price(response)\n return item", "def olive_parser(text: str) -> dict:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n page_no = root['page_no']\n identifier = root['id']\n language = root['language']\n title = soup.meta['name']\n entity_type = root['entity_type']\n issue_date = soup.meta['issue_date']\n\n out = {\n \"meta\": {\n \"language\": None,\n \"type\": {}\n },\n \"r\": [],\n \"stats\": {},\n \"legacy\": {\"continuation_from\": None, \"continuation_to\": None},\n }\n out[\"meta\"][\"title\"] = title\n out[\"meta\"][\"page_no\"] = [int(page_no)]\n out[\"meta\"][\"language\"] = normalize_language(language)\n 
out[\"meta\"][\"type\"][\"raw\"] = entity_type\n out[\"meta\"][\"issue_date\"] = issue_date\n\n new_region = {\n \"c\": [],\n \"p\": []\n }\n\n new_paragraph = {\n \"l\": []\n }\n\n new_line = {\n \"c\": [],\n \"t\": []\n }\n\n new_token = {\n \"c\": [],\n \"tx\": \"\"\n }\n\n for primitive in soup.find_all(\"primitive\"):\n\n # store coordinate of text areas (boxes) by page\n # 1) page number, 2) coordinate list\n region = copy.deepcopy(new_region)\n region[\"c\"] = [int(i) for i in primitive.get('box').split(\" \")]\n\n para = None\n line = None\n line_counter = 0\n\n for tag in primitive.find_all(recursive=False):\n\n if tag.name == \"l\":\n\n if para is None and line is None:\n para = copy.deepcopy(new_paragraph)\n line = copy.deepcopy(new_line)\n\n if line_counter > 0 and line is not None:\n line = normalize_line(line, out[\"meta\"][\"language\"])\n para[\"l\"].append(line)\n\n if tag.get(\"p\") in [\"S\", \"SA\"] and line_counter > 0:\n region[\"p\"].append(para)\n para = copy.deepcopy(new_paragraph)\n\n line = copy.deepcopy(new_line)\n line[\"c\"] = [\n int(i)\n for i in tag.get('box').split(\" \")\n ]\n line_counter += 1\n\n if tag.name in [\"w\", \"q\"]:\n\n # store coordinates of each token\n # 1) token, 2) page number, 3) coordinate list\n t = copy.deepcopy(new_token)\n t[\"c\"] = [int(i) for i in tag.get('box').split(\" \")]\n t[\"tx\"] = tag.string\n t[\"s\"] = int(tag.get('style_ref'))\n\n if tag.name == \"q\" and tag.get('qid') is not None:\n qid = tag.get('qid')\n normalized_form = soup.find('qw', qid=qid).text\n t[\"nf\"] = normalized_form\n t[\"qid\"] = qid\n\n # append the token to the line\n line[\"t\"].append(t)\n\n # append orphan lines\n if line is not None:\n line = normalize_line(line, out[\"meta\"][\"language\"])\n para[\"l\"].append(line)\n\n region[\"p\"].append(para)\n\n if para is not None:\n out[\"r\"].append(region)\n\n out[\"legacy\"][\"id\"] = identifier\n out[\"legacy\"][\"source\"] = soup.link['source']\n \"\"\"\n # I suspect this could be deleted\n out[\"legacy\"][\"word_count\"] = int(soup.meta['wordcnt'])\n out[\"legacy\"][\"chars_count\"] = int(soup.meta['total_chars_count'])\n suspicious_chars_count = int(soup.meta['suspicious_chars_count'])\n out[\"legacy\"][\"suspicious_chars_count\"] = int(suspicious_chars_count)\n \"\"\"\n out[\"legacy\"][\"first_id\"] = soup.link['first_id']\n out[\"legacy\"][\"last_id\"] = soup.link['last_id']\n out[\"legacy\"][\"next_id\"] = soup.link['next_id']\n out[\"legacy\"][\"prev_id\"] = soup.link['prev_id']\n\n if root.has_attr('continuation_from'):\n out[\"legacy\"][\"continuation_from\"] = root['continuation_from']\n\n if root.has_attr('continuation_to'):\n out[\"legacy\"][\"continuation_to\"] = root['continuation_to']\n\n return out", "def _parse_page_contents (self, page_soup):\n netflix_page_data = self.extract_inline_netflix_page_data(page_soup=page_soup)\n self.user_data = self._parse_user_data(netflix_page_data=netflix_page_data)\n self.esn = self._parse_esn_data(netflix_page_data=netflix_page_data)\n self.api_data = self._parse_api_base_data(netflix_page_data=netflix_page_data)\n self.profiles = self._parse_profile_data(netflix_page_data=netflix_page_data)\n self.log(msg='Found ESN \"' + self.esn + '\"')\n return netflix_page_data", "def populate_file_dict(epObject, uc, fileDict):\r\n fileDict = get_pages(epObject, fileDict)\r\n for url in fileDict['pageUrls']:\r\n soup = make_soup(url)\r\n fileDict = get_embedded_object(soup, fileDict, uc)\r\n fileDict = get_css(soup, fileDict)\r\n fileDict = get_img(soup, 
fileDict, uc)\r\n return fileDict", "def parse(self, response):\n announcement_urls = response.css('#TD1 > table > tbody > tr > td.tdline2 > a::attr(href)').extract()\n for announcement_url in announcement_urls:\n yield Request(url=parse.urljoin(response.url, announcement_url), callback=self.parse_detail)\n\n # next page\n total_num_text = response.css('#Table1 > tbody > tr > td:nth-child(1)::text').extract()[-1]\n match_re = re.match('.*?共(\\d+)页', total_num_text)\n if not match_re:\n print('extract total page number error, please check the page source.')\n return\n total_num = int(match_re.group(1))\n if self.current_page <= total_num:\n form_request_text = re.match(\".*'(.*)?'\", response.css(\n '#Table1 > tbody > tr > td:nth-child(3) > input.cls-navigate-next::attr(onclick)').extract_first()).group(1)\n next_page_url = form_request_text.split('?')[0]\n form_data = form_request_text.split('?', 1)[1].split('&')\n yield scrapy.FormRequest(\n url=parse.urljoin(response.url, next_page_url),\n formdata={\n 'ISAJAXLOAD': form_data[0].split('=')[1],\n 'displayContentId': form_data[1].split('=')[1],\n 'SHOWTYPE': form_data[2].split('=')[1],\n 'CATALOGTYPE': form_data[3].split('=')[1],\n 'ORIGINAL_CATALOGID': form_data[4].split('=')[1],\n 'HEAD': '本所公告', # todo 第二页返回时发现乱码 经测试该字段是固定的 先这样处理\n 'CATALOGID': form_data[6].split('=')[1],\n 'TYPE': form_data[7].split('=')[1],\n 'COUNT': form_data[8].split('=')[1],\n 'ARTICLESOURCE': form_data[9].split('=')[1],\n 'LANGUAGE': form_data[10].split('=')[1],\n 'REPETITION': form_data[11].split('=')[1],\n 'DATESTYLE': form_data[12].split('=')[1],\n 'DATETYPE': form_data[13].split('=')[1],\n 'SEARCHBOXSHOWSTYLE': form_data[14].split('=')[1],\n 'INHERIT': form_data[15].split('=')[1],\n 'USESEARCHCATALOGID': form_data[16].split('=')[1],\n 'REPORT_ACTION': form_data[17].split('=')[1],\n 'PAGESIZE': form_data[18].split('=')[1],\n 'PAGECOUNT': form_data[19].split('=')[1],\n 'RECORDCOUNT': form_data[20].split('=')[1],\n 'PAGENO': form_data[21].split('=')[1],\n },\n callback=self.parse\n )\n self.current_page += 1", "def parse_post_content(self, response):\n post = Post()\n post['title'] = response.xpath('//h2/a/text()')[0].extract()\n post['image_url'] = response.xpath(\"//div[@class='cont group']//img/@src\")[0].extract()\n yield post", "def content_creator():\n with temporary_url_for_logger(app) as logger:\n with logger:\n content = page.render_html(\n solution=solution,\n static_url=static_url,\n lesson_url=lesson_url,\n subpage_url=subpage_url,\n vars=variables\n )\n absolute_urls = [url_for(logged[0], **logged[1]) for logged in logger.logged_calls]\n\n relative_urls = [get_relative_url(request.path, x) for x in absolute_urls]\n\n return {\"content\": content, \"urls\": relative_urls}", "def GatherPageData(self, mr):\n with mr.profiler.Phase('get issue, comment, and attachment'):\n try:\n attachment, issue = tracker_helpers.GetAttachmentIfAllowed(\n mr, self.services)\n except exceptions.NoSuchIssueException:\n webapp2.abort(404, 'issue not found')\n except exceptions.NoSuchAttachmentException:\n webapp2.abort(404, 'attachment not found')\n except exceptions.NoSuchCommentException:\n webapp2.abort(404, 'comment not found')\n\n content = []\n if attachment.gcs_object_id:\n bucket_name = app_identity.get_default_gcs_bucket_name()\n full_path = '/' + bucket_name + attachment.gcs_object_id\n logging.info(\"reading gcs: %s\" % full_path)\n with cloudstorage.open(full_path, 'r') as f:\n content = f.read()\n\n filesize = len(content)\n\n # This servlet only displays 
safe textual attachments. The user should\n # not have been given a link to this servlet for any other kind.\n if not attachment_helpers.IsViewableText(attachment.mimetype, filesize):\n self.abort(400, 'not a text file')\n\n u_text, is_binary, too_large = filecontent.DecodeFileContents(content)\n lines = prettify.PrepareSourceLinesForHighlighting(u_text.encode('utf8'))\n\n config = self.services.config.GetProjectConfig(mr.cnxn, mr.project_id)\n granted_perms = tracker_bizobj.GetGrantedPerms(\n issue, mr.auth.effective_ids, config)\n page_perms = self.MakePagePerms(\n mr, issue, permissions.DELETE_ISSUE, permissions.CREATE_ISSUE,\n granted_perms=granted_perms)\n\n page_data = {\n 'issue_tab_mode': 'issueDetail',\n 'local_id': issue.local_id,\n 'filename': attachment.filename,\n 'filesize': template_helpers.BytesKbOrMb(filesize),\n 'file_lines': lines,\n 'is_binary': ezt.boolean(is_binary),\n 'too_large': ezt.boolean(too_large),\n 'code_reviews': None,\n 'page_perms': page_perms,\n }\n if is_binary or too_large:\n page_data['should_prettify'] = ezt.boolean(False)\n else:\n page_data.update(prettify.BuildPrettifyData(\n len(lines), attachment.filename))\n\n return page_data", "def _parse(self):\n soup = BS(self._current_html, 'lxml')\n for item in soup.select('div.c'):\n temp = {}\n # main content\n ctt = item.select('span.ctt')\n if not ctt:\n continue\n weibo_body = item.select('div')\n if len(weibo_body) > 1:\n temp['content'] = weibo_body[0].text\n btn_group = weibo_body[1].text\n else:\n temp['content'] = weibo_body[0].select('span.ctt')[0].text\n btn_group = weibo_body[0].text\n temp['is_repost'] = True if REPO_TEST_PATTERN.match(\n temp['content']) else False\n try:\n temp['like_num'] = LIKE_NUM_PATTERN.findall(btn_group)[0]\n temp['cmt_num'] = COMMENT_NUM_PATTERN.findall(btn_group)[0]\n temp['repo_num'] = REPO_NUM_PATTERN.findall(btn_group)[0]\n except Exception:\n pass\n cmt = item.select('.cmt')\n # visibility\n if cmt:\n try:\n temp['visibility'] = VISIBILITY_PATTERN.findall(\n cmt[0].text)[0]\n except Exception:\n pass\n\n # img in main content\n img = item.select('div a img')\n img_src = img[0].attrs['src'] if img else None\n temp['img_src'] = img_src\n LOGGER.debug('img_src: {}'.format(img_src))\n # time & source device\n ct = item.select('span.ct')\n if ct:\n ct = ct[0]\n text = ct.text\n reg_result = TIME_PATTERN.findall(text)[0]\n\n temp['time'] = ar(\n '{}年{}'.format(self._current_year, reg_result[0]),\n DATE_FMTS[0]\n ).naive if reg_result[0] else ar(\n reg_result[1], DATE_FMTS[1]\n ).naive\n temp['source'] = SOURCE_DEVICE_PATTERN.findall(text)[0]\n self._post_item = Post(**temp)\n self._attachment_item = Attachment(\n uri=img_src, post=self._post_item)\n self._store()", "def _build_page(page, config, site_navigation, env, dirty=False):\n\n # Run the `pre_page` plugin event\n page = config['plugins'].run_event(\n 'pre_page', page, config=config, site_navigation=site_navigation\n )\n\n page.read_source(config=config)\n\n # Run `page_markdown` plugin events.\n page.markdown = config['plugins'].run_event(\n 'page_markdown', page.markdown, page=page, config=config, site_navigation=site_navigation\n )\n\n page.render(config, site_navigation)\n\n # Run `page_content` plugin events.\n page.content = config['plugins'].run_event(\n 'page_content', page.content, page=page, config=config, site_navigation=site_navigation\n )\n\n context = get_context(site_navigation, config, page)\n\n # Allow 'template:' override in md source files.\n if 'template' in page.meta:\n template = 
env.get_template(page.meta['template'])\n else:\n template = env.get_template('main.html')\n\n # Run `page_context` plugin events.\n context = config['plugins'].run_event(\n 'page_context', context, page=page, config=config, site_navigation=site_navigation\n )\n\n # Render the template.\n output_content = template.render(context)\n\n # Run `post_page` plugin events.\n output_content = config['plugins'].run_event(\n 'post_page', output_content, page=page, config=config\n )\n\n # Write the output file.\n if output_content.strip():\n utils.write_file(output_content.encode('utf-8'), page.abs_output_path)\n else:\n log.info(\"Page skipped: '{}'. Generated empty output.\".format(page.title))", "def _getParentPage(self):\n page = {}\n tag=[]\n \n data= self.soup.findAll('div','span8')\n for d in data:\n tag=d.findAll('div','pd-comment')\n \n try:\n # page['title'] = stripHtml(self.soup.find('div','breadcrumbs')\\\n # .findAll('a')[-1].renderContents())\n for t in tag:\n title=(t.find('h4'))\n page['title'] = title\n log.info(self.log_msg(\"title:%s\"%page['title']))\n except:\n log.exception(self.log_msg(\"Title not fetched\"))\n return False\n \n try:\n#==============================================================================\n# rating_tag = self.soup.find('div','reviews-ratingcombined')\n# page['ef_product_rating_overall'] = float(rating_tag.b.renderContents())\n# for each in rating_tag.findParent('div').findAll('div','reviews-rating'):\n# key = 'ef_product_rating_' + stripHtml(each.label.renderContents\\\n# ()).lower().split('/')[0].replace(' ','_')\n# page[key] = float(each.b.renderContents())\n#==============================================================================\n for r in tag:\n rating_tag=(r.find('div','badge pd-review-score')).replace('Overall','')\n page['rating_tag'] = rating_tag\n \n except:\n log.exception(self.log_msg(\"Specifications not found!!\"))\n \n try:\n self.updateParentExtractedEntities(page) \n if checkSessionInfo(self.genre, self.session_info_out, \\\n self.task.instance_data['uri'],self.task.instance_data.get('update')):\n log.info(self.log_msg('Check Session info return True'))\n return False\n result = updateSessionInfo(self.genre, self.session_info_out,\\\n self.task.instance_data['uri'], get_hash(page) ,'Post',\\\n self.task.instance_data.get('update'))\n if not result['updated']:\n return False\n page['uri'] = self.task.instance_data['uri']\n page['data'] = ''\n page['path'] = [self.task.instance_data['uri']]\n page['parent_path'] = []\n page['uri_domain'] = unicode(urlparse.urlparse(page['uri'])[1])\n page['priority'] = self.task.priority\n page['level'] = self.task.level\n page['last_updated_time'] = page['posted_date'] = page['pickup_date'] = \\\n datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\")\n page['connector_instance_log_id'] = self.task.connector_instance_log_id\n page['connector_instance_id'] = self.task.connector_instance_id\n page['workspace_id'] = self.task.workspace_id\n page['client_id'] = self.task.client_id\n page['client_name'] = self.task.client_name\n page['versioned'] = False\n page['task_log_id'] = self.task.id\n page['entity'] = 'Post'\n page['category'] = self.task.instance_data.get('category','')\n self.pages.append(page)\n log.info(self.log_msg('Parent Page added'))\n return True\n except:\n log.exception(self.log_msg(\"Exception while adding parent Page info\"))\n return False" ]
[ "0.64590585", "0.6278418", "0.6139448", "0.5948847", "0.5872562", "0.5811106", "0.57711333", "0.57591534", "0.5752565", "0.57495147", "0.5747665", "0.5722854", "0.5712628", "0.571118", "0.5696923", "0.5694322", "0.5614354", "0.56108755", "0.56095475", "0.5606405", "0.5568982", "0.55583835", "0.5549157", "0.5501079", "0.5497534", "0.54887444", "0.548851", "0.5474049", "0.5466202", "0.5463767" ]
0.7880545
0
Read pages and save the instance into database
def read_pages(self): for file in os.listdir(self.repo_path): if file.endswith('.md'): if str(file) is not ('README.md' or '404.md'): with open(self.repo_path + file, 'r') as page_file: file_data = page_file.read() content_dict = self.page_call_scrapers(file_data) content_dict['repo'] = RepoDbIO().get_repo( self.user, self.repo_name) PageDbIO().save_db_instance(content_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, *args, **kwargs):\n created = False\n if self.pk is None:\n created = True\n super(Base, self).save(*args, **kwargs)\n if created is True:\n for i in range(self.page_count):\n page = Page(work=self, number=i+1)\n page.save()", "def ingest(cls):\n for html in cls.query.all():\n session.add(html.parse())\n\n session.commit()", "def save_pr(self):\r\n\t\tsql = \"drop table if exists pagelink\"\r\n\t\tself.cur.execute(sql)\r\n\t\tsql = \"create table pagelink(urlid integer, fromids text, toids text, pagerank real)\"\r\n\t\tself.cur.execute(sql)\r\n\t\tfor urlid in self.url_ids:\r\n\t\t\tfromids = ' '.join([str(v) for v in self.from_ids[urlid]])\r\n\t\t\ttoids = ' '.join([str(v) for v in self.to_ids[urlid]])\r\n\t\t\tsql = \"insert into pagelink values(%d,'%s','%s',%f)\" \\\r\n\t\t\t\t % (urlid, fromids, toids, self.all_scores[urlid])\r\n\t\t\tself.cur.execute(sql)\r\n\t\tself.conn.commit()", "def save_page(self,bk=None):\n self.put()\n if not bk:\n bk = SuiBook.seek_by_id(self.book)\n bk.update_page_entity(self.key().id(),self)", "def load_pages(self):\n if not hasattr(self,'_pages_entities'):\n pids = self.get_pages_list()\n# logging.info('SuiBook.load_pages: pids = %s'%pids)\n self._pages_entities = SuiPage.get_by_id([int(pid) for pid in pids if pid])\n return self._pages_entities", "def create_page_objects(self, data):\n for page in data['pages']:\n self.create_page(page)", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def create_page(self):", "def _add_page_to_storage(page: Page):\n if page:\n if page.valid_mime:\n CrawlerStorageManager.create_file_from_page(page)\n page.save_to_json_file()", "def read_pages(self, repo, extension, exception_list):\n for file in os.listdir(self.repo_path):\n if file.endswith('.'.join(['', extension])):\n if file not in exception_list:\n file_handler = FileHandler(self.repo_path, file)\n content = file_handler.read_file()\n head_data, body_content = (\n file_handler.read_wrapped_content(content, '---'))\n head_dict = YAMLHandler().read_yaml(head_data)\n # will have to intialize full_dict as the content of dict\n # never changes.\n full_dict = {}\n full_dict = dict(copy.deepcopy(head_dict))\n full_dict['content'] = body_content\n full_dict['repo'] = repo\n PageDbIO().save_db_instance(full_dict)", "def store(self):\n print(\"Please take a break, this will take a while :).\")\n\n wiki_db = GenericLookup(\n \"entity_word_embedding\",\n os.path.join(self.base_url, self.wiki_version, \"generated\"),\n table_name=\"wiki\",\n columns={\"p_e_m\": \"blob\", \"lower\": \"text\", \"freq\": \"INTEGER\"},\n )\n\n wiki_db.load_wiki(self.p_e_m, self.mention_freq, batch_size=50000, reset=True)", "def save_pages_to_onprogress_pages(db, pages, domain):\r\n cursor = db.cursor()\r\n #get domain id\r\n try:\r\n cursor.execute(\"SELECT domain_id, max_pages FROM on_progress_domains WHERE url = %(d)s\", {'d':domain})\r\n domain_id = cursor.fetchone()\r\n if domain_id == None:\r\n print \"foriegn key not found\"\r\n return\r\n except:\r\n cursor.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n\r\n for page in pages:\r\n #if max pages reached\r\n if domain_id[1] < 0:\r\n print 
\"max pages reached zero, Terminating crawler for the domain:\", domain, \"after waiting pages are done processing\"\r\n return\r\n max_pages_updated = int(domain_id[1]) - 1\r\n sql_string1 = \"INSERT INTO on_progress_pages(domain_id, page_url) VALUES(%s, %s)\"\r\n sql_string2 = \"UPDATE on_progress_domains SET max_pages = %s WHERE domain_id = %s\"\r\n try:\r\n\r\n cursor.execute(sql_string1, (int(domain_id[0]), page))\r\n cursor.execute(sql_string2, (max_pages_updated, domain_id[0]))\r\n db.commit()\r\n except:\r\n cursor.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()", "def _visit_pages(self, seed_url):\n\n # for single_url in seed_url:\n # update_sql = \" UPDATE fetch_list SET times = times+1 WHERE url = '{}'and source_id =17\".format(\n # single_url[0])\n # Dao.execute_dmls(update_sql)\n # self._base_url = single_url[0]\n # self._now_url = single_url[0]\n # html = self.get_page_content_str(single_url[0])\n # try:\n # self._extract_data(html)\n # except Exception as e:\n # print(e)\n # update_sql = \" UPDATE fetch_list SET status = 1 WHERE url = '{}'and source_id =17\".format(\n # single_url[0])\n # Dao.execute_dmls(update_sql)\n\n # 单个url\n # html = self.get_page_content_str(self._seed_url[0]) #用数据库的时候\n seed_url = self._root_url + seed_url[seed_url.rindex(\"?\"):]\n html = self.get_page_content_str(seed_url) #单个URL\n self.findEachBuilding(html)\n # b = set(self._resualt)\n # self._resualt=[i for i in b]\n # # dao=Dao()\n # insert_sql=\"\"\n # for res1 in b :\n # insert_sql = \"INSERT INTO merchant_tmp (description,url )VALUES ( '{}', 'http://www.youlin.me/category/407')\".format(res1)\n # print( insert_sql )\n # dao = Dao()\n # dao.execute_dmls(insert_sql)", "def process(self):\r\n if self.file_handle.closed:\r\n self.file_handle = open(self.file_handle.name)\r\n\r\n self.file_handle.seek(0)\r\n parsed = etree.parse(self.file_handle)\r\n count = 0\r\n\r\n ids = []\r\n for post in parsed.findall('post'):\r\n if 'javascript:' in post.get('href'):\r\n continue\r\n\r\n add_date = dateparser.parse(post.get('time'))\r\n\r\n try:\r\n bmark = self.save_bookmark(\r\n unicode(post.get('href')),\r\n unicode(post.get('description')),\r\n unicode(post.get('extended')),\r\n unicode(post.get('tag')),\r\n dt=add_date)\r\n count = count + 1\r\n if bmark:\r\n bmark.stored = bmark.stored.replace(tzinfo=None)\r\n DBSession.flush()\r\n except InvalidBookmark:\r\n bmark = None\r\n\r\n if bmark:\r\n ids.append(bmark.bid)\r\n\r\n if count % COMMIT_SIZE == 0:\r\n transaction.commit()\r\n\r\n # Commit any that are left since the last commit performed.\r\n transaction.commit()\r\n\r\n from bookie.bcelery import tasks\r\n # For each bookmark in this set that we saved, sign up to\r\n # fetch its content.\r\n for bid in ids:\r\n tasks.fetch_bmark_content.delay(bid)\r\n\r\n # Start a new transaction for the next grouping.\r\n transaction.begin()", "def save(self):\n for page in self.pages.get_published_pages():\n site_path = page.path_to_page.replace('.md', '').replace(\n self.source_path, '').strip('/')\n save_path = self.output_path\n\n # ensure we are not creating a directory for the index file that\n # that lives at the source_path\n if page.full_path() != f'{self.source_path}{os.sep}index.md':\n site_path = slugify_path(site_path)\n save_path = os.path.join('', self.output_path, site_path)\n\n try:\n os.makedirs(save_path, exist_ok=True)\n except Exception as e:\n log((f'unable to create directories: {save_path}'\n f' because: {e}'), 
True)\n continue\n\n try:\n save_file = os.path.join(save_path, 'index.html')\n log(f'saving {save_file}')\n\n published = self.pages.get_published_pages()\n prev_page = self.pages.get_previous_page(page)\n next_page = self.pages.get_next_page(page)\n content = page.render(published_pages=published,\n previous_page=prev_page, next_page=next_page)\n write(save_file, content)\n except Exception as e:\n log(f'unable to save file: {save_file} -- {e}', True)\n\n unpublished = self.pages.get_unpublished_pages()\n if len(unpublished):\n log('')\n log('these pages were unpublished and not rendered:', True)\n for up in unpublished:\n log(up.path_to_page, True)\n log('')\n\n # build the _tags pages\n for tag, pages in self.tags.pages.items():\n content = self.tags.render(tag, pages)\n tag_index_dir = f'{self.tag_dir}/{slugify(tag)}'\n tag_index = f'{tag_index_dir}/index.html'\n os.makedirs(tag_index_dir, exist_ok=True)\n write(tag_index, content)\n\n log('finished builidng site')", "def class_to_db(self):", "def __init__(self, settings):\n\n \tself.redis = Redis(host=settings.redis.bind_address, port=settings.redis.port)\n \tself.store = Store(settings.content.path)\n self.get_all_pages() # page modification times\n self.get_all_aliases() # page aliases", "def get(self):\n query = WikiPage.all()\n \n for record in query:\n record.delete()\n \n # set up default data\n homepage_text = \"Currently this demo site is under testing.\\n\\nThis wiki is made for personal study for Google App Engine, especially to learn user api and datastore.\\n\\nAttention:\\n\\n- you can not delete home page, but it does not alert anything.\\n- the page contents will be reset once a day\"\n \n markdownpage_text = \"> Markdown is a text-to-HTML conversion tool for web writers. Markdown allows you to write using an easy-to-read, easy-to-write plain text format, then convert it to structurally valid XHTML (or HTML).\\n\\n[Markdown](http://daringfireball.net/projects/markdown/)\\n\\nThis wiki renders its content with [Showdown](http://attacklab.net/showdown/) which is JavaScript port of Markdown.\"\n \n testpage_text = \"Try to edit this page.\"\n \n page_content = WikiPage(key_name=hashlib.sha256(\"home\").hexdigest(), title=\"home\", content=homepage_text)\n page_content.put()\n page_content = WikiPage(key_name=hashlib.sha256(\"markdown\").hexdigest(), title=\"markdown\", content=markdownpage_text)\n page_content.put()\n page_content = WikiPage(key_name=hashlib.sha256(\"test page\").hexdigest(), title=\"test page\", content=testpage_text)\n page_content.put()\n \n self.response.out.write(\"finished to reset the datastore.\")", "def _getParentPage(self):\n page = {}\n tag=[]\n \n data= self.soup.findAll('div','span8')\n for d in data:\n tag=d.findAll('div','pd-comment')\n \n try:\n # page['title'] = stripHtml(self.soup.find('div','breadcrumbs')\\\n # .findAll('a')[-1].renderContents())\n for t in tag:\n title=(t.find('h4'))\n page['title'] = title\n log.info(self.log_msg(\"title:%s\"%page['title']))\n except:\n log.exception(self.log_msg(\"Title not fetched\"))\n return False\n \n try:\n#==============================================================================\n# rating_tag = self.soup.find('div','reviews-ratingcombined')\n# page['ef_product_rating_overall'] = float(rating_tag.b.renderContents())\n# for each in rating_tag.findParent('div').findAll('div','reviews-rating'):\n# key = 'ef_product_rating_' + stripHtml(each.label.renderContents\\\n# ()).lower().split('/')[0].replace(' ','_')\n# page[key] = 
float(each.b.renderContents())\n#==============================================================================\n for r in tag:\n rating_tag=(r.find('div','badge pd-review-score')).replace('Overall','')\n page['rating_tag'] = rating_tag\n \n except:\n log.exception(self.log_msg(\"Specifications not found!!\"))\n \n try:\n self.updateParentExtractedEntities(page) \n if checkSessionInfo(self.genre, self.session_info_out, \\\n self.task.instance_data['uri'],self.task.instance_data.get('update')):\n log.info(self.log_msg('Check Session info return True'))\n return False\n result = updateSessionInfo(self.genre, self.session_info_out,\\\n self.task.instance_data['uri'], get_hash(page) ,'Post',\\\n self.task.instance_data.get('update'))\n if not result['updated']:\n return False\n page['uri'] = self.task.instance_data['uri']\n page['data'] = ''\n page['path'] = [self.task.instance_data['uri']]\n page['parent_path'] = []\n page['uri_domain'] = unicode(urlparse.urlparse(page['uri'])[1])\n page['priority'] = self.task.priority\n page['level'] = self.task.level\n page['last_updated_time'] = page['posted_date'] = page['pickup_date'] = \\\n datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\")\n page['connector_instance_log_id'] = self.task.connector_instance_log_id\n page['connector_instance_id'] = self.task.connector_instance_id\n page['workspace_id'] = self.task.workspace_id\n page['client_id'] = self.task.client_id\n page['client_name'] = self.task.client_name\n page['versioned'] = False\n page['task_log_id'] = self.task.id\n page['entity'] = 'Post'\n page['category'] = self.task.instance_data.get('category','')\n self.pages.append(page)\n log.info(self.log_msg('Parent Page added'))\n return True\n except:\n log.exception(self.log_msg(\"Exception while adding parent Page info\"))\n return False", "def insert_page_to_db(page_data):\n\tprint('-> Insert page data to database')\n\tfor i in range(len(page_data[0])):\n\t\tsql = \"\"\"INSERT INTO selenium (title, description, tags, time, language, rate)\n\t\t\t\tVALUES (%s, %s, %s, %s, %s, %s)\"\"\"\n\t\t# values = (title[i], description[i], tags[i], time[i], language[i], rate[i])\n\t\tvalues = (page_data[0][i], page_data[1][i], page_data[2][i], page_data[3][i], page_data[4][i], page_data[5][i])\n\t\tcursor.execute(sql, values)\n\tdb_connection.commit()", "def save(self):\n num_pages = len(self._saved_page_states)\n for state in self._saved_page_states:\n self.__dict__.update(state)\n self.draw_page_number(num_pages)\n canvas.Canvas.showPage(self)\n canvas.Canvas.save(self)", "def save(self):\n num_pages = len(self._saved_page_states)\n for state in self._saved_page_states:\n self.__dict__.update(state)\n self.draw_page_number(num_pages)\n canvas.Canvas.showPage(self)\n canvas.Canvas.save(self)", "def updateFileData(self):\n with open(pagePath(self.pageName)) as f:\n self.fileData = f.read()\n self.lastUpdated = time.time()", "def process(self):\r\n soup = BeautifulSoup(self.file_handle)\r\n count = 0\r\n\r\n ids = []\r\n for tag in soup.findAll('dt'):\r\n if 'javascript:' in str(tag):\r\n continue\r\n\r\n # if we have a dd as next sibling, get it's content\r\n if tag.nextSibling and tag.nextSibling.name == 'dd':\r\n extended = tag.nextSibling.text\r\n else:\r\n extended = u\"\"\r\n\r\n link = tag.a\r\n\r\n # Skip any bookmarks with an attribute of PRIVATE.\r\n if link.has_key('PRIVATE'): # noqa\r\n continue\r\n\r\n import_add_date = float(link['add_date'])\r\n\r\n if import_add_date > 9999999999:\r\n # Remove microseconds from the timestamp\r\n 
import_add_date = import_add_date / 1000\r\n add_date = datetime.fromtimestamp(import_add_date)\r\n\r\n try:\r\n bmark = self.save_bookmark(\r\n unicode(link['href']),\r\n unicode(link.text),\r\n unicode(extended),\r\n u\" \".join(unicode(link.get('tags', '')).split(u',')),\r\n dt=add_date)\r\n count = count + 1\r\n DBSession.flush()\r\n except InvalidBookmark:\r\n bmark = None\r\n\r\n if bmark:\r\n ids.append(bmark.bid)\r\n\r\n if count % COMMIT_SIZE == 0:\r\n transaction.commit()\r\n\r\n # Commit any that are left since the last commit performed.\r\n transaction.commit()\r\n\r\n from bookie.bcelery import tasks\r\n # For each bookmark in this set that we saved, sign up to\r\n # fetch its content.\r\n for bid in ids:\r\n tasks.fetch_bmark_content.delay(bid)\r\n\r\n # Start a new transaction for the next grouping.\r\n transaction.begin()", "def page_instances(self):\n return self._open(self.app.page_instances)", "def _parse_page_contents (self, page_soup):\n netflix_page_data = self.extract_inline_netflix_page_data(page_soup=page_soup)\n self.user_data = self._parse_user_data(netflix_page_data=netflix_page_data)\n self.esn = self._parse_esn_data(netflix_page_data=netflix_page_data)\n self.api_data = self._parse_api_base_data(netflix_page_data=netflix_page_data)\n self.profiles = self._parse_profile_data(netflix_page_data=netflix_page_data)\n self.log(msg='Found ESN \"' + self.esn + '\"')\n return netflix_page_data", "def handle(self, *args, **options):\n\n # they look strange but are what comes over from wordpress API\n # im giessing there are redirects in place to make this work\n SOURCES = {\n 'sample-page': 'aac',\n 'home-2': 'commissioning',\n 'nhs-england-and-nhs-improvement-corona-virus': 'coronavirus',\n 'greener-nhs': 'greenernhs',\n 'improvement-knowledge-hub': 'improvement-hub',\n 'tbc': 'non-executive-opportunities',\n 'nhs-rightcare': 'rightcare',\n }\n # for BasePage models\n pages = BasePage.objects.all().order_by('-depth')\n\n for page in pages:\n first_published = page.first_published_at\n last_published = page.last_published_at\n latest_revision_created = page.latest_revision_created_at\n\n if page.slug in SOURCES.keys():\n # print(SOURCES[page.wp_slug])\n sys.stdout.write('\\n✅ {} is fixed'.format(SOURCES[page.wp_slug]))\n slug = SOURCES[page.wp_slug]\n page.slug = slug\n \"\"\"\n running save_revision() as it seems like a good idea to not break page paths\n just to be safe...\n try to keep revision dates to match whats in wordpress as our\n revisions reset that at the save()\n \"\"\"\n try:\n rev = page.save_revision()\n page.first_published_at = first_published\n page.last_published_at = last_published\n page.latest_revision_created_at = latest_revision_created\n # probably not the best way to do this but need to update the dates on the page record\n # to keep in sync with wordpress at the import stage\n # futher imports will collect new data and new dates.\n page.save()\n rev.publish()\n except ValidationError:\n print('⚠️ {} slug cannot be updated!!!'.format(page))\n time.sleep(2)\n\n # for ComponentsPage models\n # pages = ComponentsPage.objects.all().order_by('-depth')\n\n # for page in pages:\n # first_published = page.first_published_at\n # last_published = page.last_published_at\n # latest_revision_created = page.latest_revision_created_at\n\n # if page.slug in SOURCES.keys():\n # # print(SOURCES[page.wp_slug])\n # sys.stdout.write('\\n✅ {} is fixed'.format(SOURCES[page.wp_slug]))\n # slug = SOURCES[page.wp_slug]\n # page.slug = slug\n # \"\"\"\n # running 
save_revision() as it seems like a good idea to not break page paths\n # just to be safe...\n # try to keep revision dates to match whats in wordpress as our\n # revisions reset that at the save()\n # \"\"\"\n # try:\n # rev = page.save_revision()\n # page.first_published_at = first_published\n # page.last_published_at = last_published\n # page.latest_revision_created_at = latest_revision_created\n # # probably not the best way to do this but need to update the dates on the page record\n # # to keep in sync with wordpress at the import stage\n # # futher imports will collect new data and new dates.\n # page.save()\n # rev.publish()\n # except ValidationError:\n # print('⚠️ {} slug cannot be updated!!!'.format(page))\n # time.sleep(2)\n\n sys.stdout.write('\\n✅ Done\\n')", "def load(self):\n db = CrawlDBI.DBI(dbtype='crawler')\n if self.rowid is not None:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"rowid = ?\",\n data=(self.rowid,))\n else:\n rows = db.select(table='checkables',\n fields=['rowid',\n 'path',\n 'type',\n 'cos',\n 'cart',\n 'ttypes',\n 'checksum',\n 'last_check',\n 'fails',\n 'reported'],\n where=\"path = ?\",\n data=(self.path,))\n if 0 == len(rows):\n self.in_db = False\n elif 1 == len(rows):\n self.in_db = True\n rz = list(rows[0])\n self.rowid = rz.pop(0)\n self.path = rz.pop(0)\n self.type = rz.pop(0)\n self.cos = rz.pop(0)\n self.cart = rz.pop(0)\n self.ttypes = rz.pop(0)\n self.checksum = rz.pop(0)\n self.last_check = rz.pop(0)\n try:\n self.fails = rz.pop(0)\n except IndexError:\n self.fails = 0\n try:\n self.reported = rz.pop(0)\n except IndexError:\n self.reported = 0\n self.dirty = False\n else:\n raise StandardError(\"There appears to be more than one copy \" +\n \"of %s in the database\" % self)\n\n db.close()", "def procesPage(self, page):\n item = pywikibot.ItemPage.fromPage(page)\n pywikibot.output('Processing %s' % page)\n if not item.exists():\n pywikibot.output('%s doesn\\'t have a wikidata item :(' % page)\n #TODO FIXME: We should provide an option to create the page\n else:\n pagetext = page.get()\n templates = pywikibot.extract_templates_and_params(pagetext)\n for (template, fielddict) in templates:\n # We found the template we were looking for\n if template.replace(u'_', u' ')==self.templateTitle:\n for field, value in fielddict.items():\n # This field contains something useful for us\n if field in self.fields:\n # Check if the property isn't already set\n claim = pywikibot.Claim(self.repo, self.fields[field])\n if claim.getID() in item.get().get('claims'):\n pywikibot.output(u'A claim for %s already exists. 
Skipping' % (claim.getID(),))\n #TODO FIXME: This is a very crude way of dupe checking\n else:\n # Try to extract a valid page\n match = re.search(pywikibot.link_regex, value)\n if match:\n try:\n link = pywikibot.Link(match.group(1))\n linkedPage = pywikibot.Page(link)\n if linkedPage.isRedirectPage():\n linkedPage = linkedPage.getRedirectTarget()\n linkedItem = pywikibot.ItemPage.fromPage(linkedPage)\n claim.setTarget(linkedItem)\n pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget().getID()))\n item.addClaim(claim)\n if self.source:\n claim.addSource(self.source, bot=True)\n except pywikibot.exceptions.NoPage:\n pywikibot.output('[[%s]] doesn\\'t exist so I can\\'t link to it' % (linkedItem.title(),))", "def loaddata(self):\n # Connect to the db\n self.conn, self.c = self.connect_db(self.dbname)\n # create the bdefile table to \n self.c.execute(oeeutil.sql_create_bdefile_table)\n # Delete any previous records\n self.c.execute('DELETE FROM bdefile')\n # hold the content for analysis\n for item in self.content:\n self.c.execute('INSERT INTO bdefile VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)', item)\n self.c.executescript(oeeutil.sql_create_bdefile_view)\n self.conn.commit()" ]
[ "0.6114496", "0.60416394", "0.59844977", "0.59610665", "0.5876366", "0.5847255", "0.57638466", "0.5758273", "0.57403046", "0.57091016", "0.5637539", "0.5621401", "0.55633825", "0.5521344", "0.5509448", "0.55065763", "0.5492751", "0.54720575", "0.5468598", "0.54553676", "0.5405154", "0.5405154", "0.54045534", "0.53856575", "0.5385307", "0.5374102", "0.53687745", "0.53653115", "0.5362891", "0.5359291" ]
0.6453269
0
read all files that we want at any time. we have to pass the extension and exception_list. exception_list will contain the list of all the files that shouldn't be scanned. repo is the Repo db instance
def read_pages(self, repo, extension, exception_list): for file in os.listdir(self.repo_path): if file.endswith('.'.join(['', extension])): if file not in exception_list: file_handler = FileHandler(self.repo_path, file) content = file_handler.read_file() head_data, body_content = ( file_handler.read_wrapped_content(content, '---')) head_dict = YAMLHandler().read_yaml(head_data) # will have to intialize full_dict as the content of dict # never changes. full_dict = {} full_dict = dict(copy.deepcopy(head_dict)) full_dict['content'] = body_content full_dict['repo'] = repo PageDbIO().save_db_instance(full_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_files_with_extension(self, extension=sys.argv[1]) -> list:\n if extension == \"\":\n raise EnvironmentError(\"No extension provided!\")\n\n result = []\n for idx, file in enumerate(self.file_list):\n if re.search(extension + \"$\", file):\n result.append(file)\n\n if len(result) == 0:\n raise Exception(\"No {} files found.\".format(extension))\n\n return result", "def initFileList(self,extension):\r\n self.listExec.Clear()\r\n for fname in os.listdir(\"data\"):\r\n #print 'testing file ' , fname\r\n \r\n if extension in fname :\r\n #print fname\r\n self.listExec.Append(fname)\r\n self.Refresh()", "def _readFiles(self):\n template_files = []\n for file in os.listdir(self.template_folder):\n if file.endswith(\".xml\"):\n template_files.append(file)\n return template_files", "def read_files(folder):\n print_header(\"READING FILES FROM FOLDER (RECURSIVE)\", \"=\")\n files = []\n for dirpath, dirnames, filenames in os.walk(folder):\n if not dirpath.endswith(\"updates\"):\n for filename in filenames:\n root, ext = os.path.splitext(filename)\n if ext.lower() == \".sql\":\n full_path = os.path.join(dirpath, filename)\n with open(full_path, \"r\") as f:\n sql = f.read()\n sql = sql.decode(\"latin-1\")\n\n files.append((filename, sql))\n return files", "def list_files(file, folder, extension = '*.evtx'):\r\n if file:\r\n return [file]\r\n elif folder:\r\n return [ y for x in os.walk(folder) for y in glob(os.path.join(x[0], extension))]\r\n else:\r\n return []", "def open_files(self, listed_files):\n for file_ in listed_files:\n try: \n with codecs.open(file_, \"r\", encoding=\"utf-8\", errors=\"ignore\") as f:\n yield f\n except Exception as error:\n print(error, file=sys.stderr)\n exit(0)", "def list_files(folder_path):\n try:\n for name in os.listdir(folder_path):\n base, ext = os.path.splitext(name)\n if ext != '.rst':\n continue\n yield os.path.join(folder_path, name)\n except OSError as ex:\n log.error('Exception occured in list_files: {0}'.format(ex))", "def listFiles(self):\n pass", "def get_files_to_be_indexed(self):\n\t\tfiles = self.get_all_files()\n\t\tfiles_list = []\n\t\tfor name in files:\n\t\t\tif(name.split('.')[-1] in self.accepted_formats and os.stat(os.path.join(self.root, name)).st_size < 5000000):\n\t\t\t\tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def read_files_to_list(fdir, expression='*.json'):\n filelist = []\n for file in glob.glob(path.join(fdir, expression)):\n filelist.append(file)\n return filelist", "def test_filter_file_exceptions_early_onlyall():\n exceptions = Exceptions(os.path.join(os.path.dirname(__file__),\n 'early_exceptions.yaml'))\n\n package = Package('test', os.path.dirname(__file__))\n files = [os.path.join(os.path.dirname(__file__),\n 'uncommontext')]\n\n filtered_files = exceptions.filter_file_exceptions_early(package, files)\n\n assert filtered_files == files", "def opendir() -> list:\n fileexr = [nf for nf in listdir(path=format(getcwd()))\n if search(pattern=r'.exr$', string=nf) and (not search(pattern=r'^L', string=nf))]\n if fileexr:\n for nf in fileexr: \n yield nf\n else:\n print('Exr file not found!')", "def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e", "def _find_files(research_structure, raise_on_all_missing=True):\n found = []\n filenames = []\n paths_searched = []\n ## config file lookup resolution\n for enforce_file_existence, cascaded, fun in 
research_structure:\n candidate = fun()\n if candidate is None:\n continue\n paths_searched.append(candidate)\n filenames.append((cascaded, candidate))\n if os.path.exists(candidate):\n found.append(candidate)\n if cascaded is False:\n break\n else:\n if enforce_file_existence:\n raise ValueError(\"File %r does not exists.\" % candidate)\n if not found and raise_on_all_missing:\n raise ValueError(\"No config file was found in those paths: %s.\"\n % ', '.join(paths_searched))\n return filenames", "def _get_files(repo, patterns, options):\n ctx = repo[None]\n match = match_func(repo, ctx, patterns, options)\n try:\n status = ctx.status(listclean=True, listignored=True, listunknown=True)\n except TypeError:\n # Compatibility with older Mercurial versions.\n status = ctx.status(clean=True, ignored=True, unknown=True)\n modified = status[0]\n added = status[1]\n unknown = status[4]\n ignored = status[5]\n clean = status[6]\n files = []\n for file_list in [clean, modified, added]:\n for filename in file_list:\n if match(filename):\n files.append(filename)\n for file_list in [ignored, unknown]:\n for filename in file_list:\n if match.exact(filename):\n files.append(filename)\n return files", "def extract_files(self) -> list:\n pass", "def find_files():\n \n p = re.compile(REGEX_PART_NUMBER)\n job_files = []\n \n for root, dirs, files in os.walk(project_path): # r at start of string need to prevent unicode error\n for filename in files:\n re_part_number = p.match(filename)\n if re_part_number:\n file_ext = filename.split(\".\")[-1].lower() # extract file extension \n file_size = os.path.getsize((os.path.join(root, filename))) # filesize in bytes \n \n part_number = re_part_number.group() # extract part number from regular expression match\n part_code = part_number.split(\"-\")[0]\n \n destinations = [] # destinations is a list in case a filetype is both a source and output filetype\n \n if (file_ext in EXTS_SOURCE_FILES) and flag_find_source_files:\n destinations.append(os.path.join(target_source_path,part_code,part_number)) \n \n if (file_ext in EXTS_OUTPUT_FILES) and flag_find_output_files:\n destinations.append(os.path.join(target_source_path,part_code,part_number)) \n \n if destinations: \n job_files.append(File(filename,root,file_size,destinations,part_number,part_code))\n print(f\"Found: {filename}\")\n \n return job_files", "def _collect_files(folders, extention='Default'):\r\n if isinstance(extention, str):\r\n if extention.lower() == 'default':\r\n extention = ['.*']\r\n else:\r\n extention = [extention]\r\n files = []\r\n for f in folders:\r\n for e in extention:\r\n files += glob(os.path.join(f, f'*{e}'))\r\n return files", "def load_file_list(path=None, regx='\\.npz'):\n if path == False:\n path = os.getcwd()\n file_list = os.listdir(path)\n return_list = []\n for idx, f in enumerate(file_list):\n if re.search(regx, f):\n return_list.append(f)\n # return_list.sort()\n print('Match file list = %s' % return_list)\n print('Number of files = %d' % len(return_list))\n return return_list", "def process_all_files():\n src_files = get_doc_files()\n\n for src_pathname in src_files:\n if src_pathname.suffix in MARKDOWN_EXTENSIONS:\n process_file_markdown(src_pathname)\n elif src_pathname.suffix in STATIC_ASSET_EXTENSIONS:\n process_file_copytodest(src_pathname)", "def find_files(extensions):\n\n return [fname for fname in os.listdir('.') if fname.endswith(extensions)]", "def get_files(data_dir, extension):\n files = []\n if data_dir and os.path.exists(data_dir):\n files = glob.glob(data_dir + 
\"/*.\" + extension)\n return files", "def search(self):\n files = os.listdir(self.filePath)\n txt_file = []\n for f in files:\n f_ext = f.split('.')[-1]\n if f_ext == self.flag:\n if self.flag == 'txt':\n txt_file.append(FileTxt(os.sep.join([self.filePath, f])))\n\n if self.flag == 'csv':\n txt_file.append(FileCsv(os.sep.join([self.filePath, f])))\n\n return txt_file", "def read_file_list(filename):\n\n # hint: when you read lines of files, there will be a \"newline\"\n # (end-of-line character) at the end of each line, and you want to\n # strip that off before you print it. Do some research on that!\n\n # with open(filename, 'r') as file:\n # print(file.read())\n #cwd = os.getcwd() # This gets the visual studio code opened location\n cwd = os.path.dirname(os.path.realpath(__file__))\n print(cwd)\n try:\n file_contents = Path(cwd + \"\\\\\" + filename).read_text()\n except:\n return \"File not found\"\n return file_contents", "def collect_files_with_extensions(self, extension: str) -> List[str]:\n occurrences = []\n for position in os.listdir(self.directory):\n if os.path.isdir(position):\n for file in os.listdir(position):\n if os.path.isfile(os.path.join(position, file)) and file.endswith(\n extension\n ):\n occurrences.append(os.path.join(self.directory, position, file))\n return occurrences", "def load(self, ignored_exts: Optional[Set[str]] = None) -> List[Document]:\n # Read file paths\n file_paths = self._get_file_paths(ignored_exts=ignored_exts)\n if not file_paths:\n raise FileNotFoundError('The directory seems to be empty')\n # Read all files\n docs: List[Document] = []\n invalid_doc_paths: List[str] = []\n for file_path in file_paths:\n with open(file_path, mode='r') as fp:\n try:\n content = fp.read().replace('\\n', '')\n except UnicodeDecodeError:\n invalid_doc_paths.append(file_path)\n docs.append(Document(file_path, content))\n\n if invalid_doc_paths:\n listed_paths = '\\n'.join(invalid_doc_paths)\n msg = f'Files in the following paths seem to be binary files:\\n{listed_paths}'\n logging.warning(msg)\n\n return docs", "def get_all_filenames_from_dir(directory,suffex, filename_allowed_list = None):\n\n files_list = list()\n if filename_allowed_list == None:\n for item in glob.glob(directory+'*'+suffex): # Example /datasets/Stock_dataset/Stocks/*.txt\n files_list.append(item) \n else:\n filename_allowed_list = [v.lower() for v in filename_allowed_list] # To avoid case sensitve\n for item in glob.glob(directory+'*'+suffex):\n if item.split(\"/\")[-1].split('.')[0].lower() in filename_allowed_list: # Since linux is case sensitive, then so is this function, make sure the names match correctly\n files_list.append(item)\n if not len(files_list) == len(filename_allowed_list):\n print 'Some Stocks files are missing'\n return files_list", "def get_files(self, extension, recurs=True):\n if self._dirname is None:\n return []\n\n if os.path.exists(self._dirname) is False:\n raise NoDirectoryError(dirname=self._dirname)\n\n return sppasDirUtils.dir_entries(self._dirname, extension, recurs)", "def find_files(self,start_dir=None,pattern=\"*\",file_extention=\"*.fif\",recursive=True,debug=False,abspath=False,\n ignore_case=False):\n pattern = self.update_pattern(pattern,ignore_case=ignore_case)\n \n if not isinstance(file_extention,(list)):\n s = file_extention\n file_extention = list()\n file_extention.append(s)\n \n if debug or self.debug:\n logger.debug(\"start dir : {}\\n\".format(start_dir) +\n \" -> glob pattern : {}\\n\".format(pattern) +\n \" -> file extention : 
{}\\n\".format(file_extention) +\n \" -> glob recursive : {}\\n\".format(recursive) +\n \" -> adding abs path: {}\\n\".format(abspath)\n )\n files_found = []\n with self.working_directory(start_dir):\n for fext in file_extention: # ToDo fext re /\\.vhdr|vmrk|eeg$/\n for f in glob.iglob(pattern + fext,recursive=recursive):\n #print(f)\n if abspath:\n files_found.append(os.path.abspath(os.path.join(start_dir,f)))\n else:\n files_found.append(f)\n \n files_found.sort()\n return files_found", "def _list_files(basePath, validExts=(\".jpg\", \".jpeg\", \".png\", \".bmp\", \".tif\", \".tiff\"), contains=None):\n for (rootDir, dirNames, filenames) in os.walk(basePath):\n # loop over the filenames in the current directory\n for filename in filenames:\n # if the contains string is not none and the filename does not contain\n # the supplied string, then ignore the file\n if contains is not None and filename.find(contains) == -1:\n continue\n\n # determine the file extension of the current file\n ext = filename[filename.rfind(\".\"):].lower()\n\n # check to see if the file is an image and should be processed\n if ext.endswith(validExts):\n # construct the path to the image and yield it\n imagePath = os.path.join(rootDir, filename).replace(\" \", \"\\\\ \")\n yield imagePath" ]
[ "0.6206179", "0.6107929", "0.6043444", "0.593927", "0.58915067", "0.58556247", "0.57860583", "0.57807255", "0.5777453", "0.5773534", "0.5771638", "0.57592523", "0.57556206", "0.5750356", "0.5739557", "0.57273585", "0.57255244", "0.57110775", "0.5685773", "0.56543374", "0.56475216", "0.5642602", "0.5633059", "0.56317985", "0.5627203", "0.56150335", "0.56080693", "0.56073314", "0.55956423", "0.55956405" ]
0.72571635
0
Update status in the database
def _update_status(self): self._db_update({'status': self.status})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateStatus(self, status):\n pass", "def UpdateStatus(self, status):\r\n self.status.update(status)", "def updatestatus(self):\n self.status = self.query()\n if self.status['success']:\n return True\n else:\n return False", "def update_status(request_id, status):\n pass", "def update_status(self, id, status):\n sql = f\"UPDATE incidences SET status = \\'{status}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def _status_btn_clicked(root, item):\n sql_status_update = 'UPDATE job SET Job_Status = \"Complete\" WHERE Job_ID = '+str(item[0])+';'\n print (sql_status_update)\n conn = pymysql.connect(host='localhost', user='root', password='#######', db='######')\n a = conn.cursor()\n a.execute(sql_status_update)\n conn.commit()\n a.close()\n conn.close()", "def change_status(self, status, application_id):", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def update(self, **kwargs):\n self.status = status.parse(status.get(host=self._host, port=self._port))", "def _set_status(self):\n result = self._get_status()\n if result and result[0]['state'] == 'aborted':\n raise Exception(\"Aborted because the status flag is set to 'aborted' in dynamodb\")\n\n # record the status\n self.status['timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n self.db_handler.update_item({'api_version': TsV2CatalogHandler.api_version}, self.status)", "def update_status(request):\n return 0", "def change_status(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('delivered' , id)\n db.insert(query, tuple)", "def set_status(self, p_id, status):\n try:\n cursor = self.conn.cursor()\n command = '''\n UPDATE Player\n SET Status = ?\n WHERE P_ID = ?\n '''\n cursor.execute(command, (status, p_id))\n self.conn.commit()\n except BaseException as e:\n self.log.log_error('Fehler beim setzen des Status', e)\n raise e", "def update_status(self) -> None:\n try:\n (rc, mid) = self.mqttc.publish(\n self.config.status_topic, json.dumps(self.status), qos=0, retain=False\n )\n if rc == mqtt.MQTT_ERR_SUCCESS:\n logging.info(\n f\"The request for a status update has been successfully accepted: mid={mid}\"\n )\n else:\n logging.warning(\"The request for a status update has been rejected\")\n except ValueError as e:\n logging.warning(f\"Cannot send status update: {e}\")", "def _write_status(self, status, cls=MySQLStatus):", "def _update_status(self, status: dict):\n with generate_retry_session() as session:\n session.headers.update({\n 'Authorization': 'Bearer {}'.format(self.platform_auth_token)\n })\n url = '{}/training/definitions/{}/jobs/{}/status'.format(\n ORGANIZATION_ENDPOINT, self.job_definition_name, self.training_job_id)\n res = session.put(url, json=status)\n res.raise_for_status()", "def update(self, new_status: Status) -> None:\n self._status = new_status", "def on_status_update(self, data):\n # TODO: Update User/Client object with this info\n print ('Status Update: %s' % data)", "def set_server_status():\n set_to_db(key='status', str_value=json.dumps(initial_server_status))", "def set_status(self, scenario_id, status):\n self.cur.execute(\n \"UPDATE execute_list SET status = %s WHERE id = %s\",\n (status, scenario_id),\n )", "def 
UpdateStatus(self,pid):\n\t\tb1=Rents.objects.filter(paymentid_id=pid).first()\n\t\tamount=Payment.objects.filter(paymentid=pid).values('amount')\n\t\tb=b1.__dict__\n\t\tquant=b['quantity']\n\t\tbookid=b['bookid_id']\n\t\tprice=amount[0]['amount']/quant\n\t\t#price=float(\"{.2f}\".format(amount[0]['amount']))/float(\"{0:.2f}\".format(quant))\n\t\tRents.objects.filter(paymentid_id=pid).update(status='r')\n\t\tBook.objects.filter(bookid=bookid).update(quantity=F('quantity')+quant)\n\t\tStatus.objects.filter(ISBN=b['ISBN'],rentprice=price).update(quantity=F('quantity')+quant)\n\t\tUpload.objects.filter(owner_id_id=b['owner_id_id'],sellprice=price).update(qtyavailable=F('qtyavailable')+quant)\n\t\tself.notifyBuyer(b['ISBN'])", "def updateAccountStatus(login:str, status:int=0)->bool:\n\n query = f\"UPDATE {Account.tablename} SET {Account.statusCol} = ? WHERE {Account.loginCol} = ?\"\n\n try:\n db = DataBaseConnection()\n db.cursor.execute(query, status, login)\n\n if status == 1: # activation\n newActivationDate = datetime.now().date()\n newExpirationDate = (newActivationDate + datetimePack.timedelta(days=155)).date() # warning : + 5 mois\n\n newActivationDate = str(newActivationDate)\n newExpirationDate = str(newExpirationDate)\n\n query = f\"UPDATE {Account.tablename} SET {Account.activationDateCol} = ?, {Account.expirationDateCol} = ? WHERE {Account.loginCol} = ?\"\n\n db.cursor.execute(query, newActivationDate, newExpirationDate, login)\n\n except Exception as error:\n return {\"flag\": \"queryError\", \"message\": f\"{error}\"}\n else:\n db.conn.commit()\n return True", "async def async_update(self):\n\n await self.status_request()", "def update_status(conn, episode_info, status=\"watched_status\"):\n\tp_key = get_p_key(episode_info)\n\t\n\tstatus_update = f'UPDATE shows SET watched_status = {episode_info[status]} WHERE p_key = \"{p_key}\";'\n\t\n\texecute_sql(conn, status_update)", "async def change_status(self, status: str) -> int:\n data = {'status': str(status)}\n r = await self.request.request(url='https://www.roblox.com/home/updatestatus', method='POST', data=j.dumps(data))\n return r.status_code", "def updatestatus(id, status):\n username = os.getlogin()\n res = requests.put('{}update/{}/'.format(base_url, id),\n data={\"keyword_fetching_status\": status, \"user_fetched\": username})\n res = res.json()\n return res", "def update_status(request):\n tasklist = request.GET.get(\"tasklist\")\n pk = request.GET.get(\"pk\")\n status = request.GET.get(\"status\")\n qs = Todo.objects.get(pk=pk)\n qs.status = status\n if status == \"Done\":\n qs.close()\n elif status == \"Undone\":\n qs.reopen()\n elif status == \"In-Progress\":\n qs.in_progress()\n qs.save()\n return redirect(\"tasks\", tasklist=tasklist)", "def update_status(self, id, status):\n\n records = self.db.get_table()\n index = -1\n\n for i in range(0, len(records)):\n if str(records[i][\"id\"]) == str(id):\n index = i\n\n if index == -1:\n return False\n\n records[index][\"status\"] = status\n self.db.update_cell(index, 'status', status)\n\n return records[index]", "def refresh_status(self):\n\n pass", "def update(self):\n db.session.commit()" ]
[ "0.8104977", "0.7823668", "0.7671553", "0.74507135", "0.7403075", "0.73636246", "0.72525454", "0.725253", "0.72357446", "0.7171517", "0.7166486", "0.70517457", "0.7043152", "0.7029378", "0.7019931", "0.6978809", "0.69682807", "0.6892425", "0.68785965", "0.6876388", "0.6852951", "0.6809229", "0.6797506", "0.67923355", "0.67908597", "0.67906153", "0.6767138", "0.6749058", "0.6727241", "0.6712441" ]
0.90569335
0
Function to generate distributed training scripts.
def generate_disttrain_scipts(self): train_py = "/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py" py = self.global_setting.get('python', sys.executable) ex_options = self.global_setting.get('train_options', str()) if not os.access(py, os.X_OK): py = "/home/haihuam/anaconda3/envs/RepPoints/bin/python" if os.access(py, os.X_OK): content = "set -e \n" content += "export CUDA_VISIBLE_DEVICES=" + \ ",".join(self.selected_gpus)+ " \n" content += "cd %s \n"%(self.run_dir) content += "%s -m torch.distributed.launch "%(py) content += "--nproc_per_node=%s "%(self.setting['train_num_gpu']) content += "--master_port %s "%(self.dist_train_port) content += "%s %s --launcher pytorch "%(train_py, self.setting['config_file']) content += "--work_dir %s "%(self.run_dir) content += "--validate %s &> %s.log \n"%(ex_options, self.stage) content += "touch train.done \n" # return content self.script_content = content else: print("Error: %s is not executable."%py) sys.exit(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def script_generator(self):\n\n self._get_free_tcp_port()\n\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n \n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s -m torch.distributed.launch \"%(py)\n content += \"--nproc_per_node=%s \"%(self.setting['train_num_gpu'])\n content += \"--master_port %s \"%(self.dist_train_port)\n content += \"%s %s --launcher pytorch \"%(train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--resume_from latest.pth \"\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n # return content\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def script_generator(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('evaluate_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n \n content += \"%s %s %s --work_dir %s --validate %s &> train.log \\n\"%(py, \n train_py,\n self.setting['config_file'],\n self.run_dir,\n ex_options)\n content += \"touch evaluate.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def generate_singletrain_scipts(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s %s \"%(py, train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def generateParallelScript(hub, user_name, server_list):\n all_tasks = []\n slot_names = hub['SlotIO'].keys()\n\n for slot_name in slot_names:\n vivado = f'VIV_VER={args.vivado_version} vivado -mode batch -source {slot_name}_synth.tcl'\n \n # broadcast the results\n transfer = []\n for server in server_list:\n transfer.append(f'rsync_with_retry.sh --target-server {server} --user-name {user_name} --dir-to-sync {synth_dir}/{slot_name}/')\n transfer_str = \" && \".join(transfer)\n\n command = f'cd {synth_dir}/{slot_name} && {vivado} && {transfer_str}'\n all_tasks.append(command)\n\n num_job_server = math.ceil(len(all_tasks) / len(server_list) ) \n for i, server in enumerate(server_list):\n local_tasks = all_tasks[i * num_job_server: (i+1) * num_job_server]\n open(f'{synth_dir}/parallel_slot_synth_{server}.txt', 
'w').write('\\n'.join(local_tasks))", "def gen_script(model: onnx.ModelProto, output_file: str = None) -> str:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(current_dir + '/templates/'))\n model_header_render = gen_model_header(env, model)\n imports, main_function, sub_functions = gen_graph_functions(env, model.graph)\n\n wdir = \"\"\n if len(imports) > 0:\n # need to set wdir to enable imports\n wdir = util.resolve_systemds_root() + \"/scripts\"\n\n main_template = env.get_template(\"main.dml.jinja\")\n result_render = main_template.render(\n title=\"This file was generated by onnx-systemds\",\n model_header_render=model_header_render,\n wdir=wdir,\n imports=imports,\n main_function=main_function,\n sub_functions=sub_functions\n )\n if output_file:\n directory = os.path.dirname(output_file)\n if len(directory) > 0:\n os.makedirs(directory, exist_ok=True)\n with open(output_file, 'w') as f:\n f.write(result_render)\n\n return result_render", "def generate(train_data_path, trained_model_path, num_output_files):\n # load the notes used to train the model\n\n train_data = data_preprocess.load_from_pickle(train_data_path)\n training_notes = train_data[\"data\"]\n note_translator = train_data[\"note_translator\"]\n\n net = networks.TransformerNet.load_checkpoint(trained_model_path)\n\n for i in range(num_output_files):\n prediction_output = generate_notes(net, training_notes, note_translator)\n create_midi(prediction_output, file_suffix=i)", "def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()", "def build_job_scripts(model_list, scenario_list, output_dir, cassandra_config_dir, cassandra_log_dir,\n cassandra_main_script, sbatch_account, sbatch_partition='slurm', sbatch_walltime='01:00:00',\n sbatch_ntasks=3, sbatch_nodes=3, sbatch_jobname='cassie', sbatch_logdir='.', template=None):\n\n # use default configuration template file if user does not give one\n if template is None:\n template = pkg_resources.resource_filename('cassie', 'data/sbatch_template.sh')\n\n # existing tags to replace in the template file\n model_tag = '<model>'\n scenario_tag = '<scenario>'\n account_tag = '<account>'\n partition_tag = '<partition>'\n ntasks_tag = '<ntasks>'\n nodes_tag = '<nodes>'\n time_tag = '<walltime>'\n jobname_tag = '<jobname>'\n logdir_tag = '<logdir>'\n cassandra_configdir_tag = '<cassconfigdir>'\n cassandra_logdir_tag = '<casslogdir>'\n cassandra_script_tag = '<cassmainscript>'\n\n for model in model_list:\n for scenario in scenario_list:\n\n output_file = os.path.join(output_dir, f'run_{model.lower()}_{scenario}.sh')\n\n with open(output_file, 'w') as out:\n with open(template) as get:\n\n f = get.read()\n\n # replace tag names with dynamic content\n fx = f.replace(model_tag, model)\n fx = fx.replace(scenario_tag, scenario)\n\n fx = fx.replace(account_tag, sbatch_account)\n fx = fx.replace(partition_tag, sbatch_partition)\n fx = fx.replace(ntasks_tag, str(sbatch_ntasks))\n fx = fx.replace(nodes_tag, str(sbatch_nodes))\n fx = fx.replace(time_tag, 
sbatch_walltime)\n fx = fx.replace(jobname_tag, sbatch_jobname)\n fx = fx.replace(logdir_tag, sbatch_logdir)\n\n fx = fx.replace(cassandra_configdir_tag, cassandra_config_dir)\n fx = fx.replace(cassandra_logdir_tag, cassandra_log_dir)\n fx = fx.replace(cassandra_script_tag, cassandra_main_script)\n\n out.write(fx)", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def main(args, base_dir):\n for i in range(args.n_training):\n # value of the next seed\n seed = args.seed + i\n\n # The time when the current experiment started.\n now = strftime(\"%Y-%m-%d-%H:%M:%S\")\n\n # Create a save directory folder (if it doesn't exist).\n if args.log_dir is not None:\n dir_name = args.log_dir\n else:\n dir_name = os.path.join(base_dir, '{}/{}'.format(\n args.env_name, now))\n ensure_dir(dir_name)\n\n # Get the policy class.\n if args.alg == \"TD3\":\n from hbaselines.multiagent.td3 import MultiFeedForwardPolicy\n elif args.alg == \"SAC\":\n from hbaselines.multiagent.sac import MultiFeedForwardPolicy\n elif args.alg == \"PPO\":\n from hbaselines.multiagent.ppo import MultiFeedForwardPolicy\n elif args.alg == \"TRPO\":\n from hbaselines.multiagent.trpo import MultiFeedForwardPolicy\n else:\n raise ValueError(\"Unknown algorithm: {}\".format(args.alg))\n\n # Get the hyperparameters.\n hp = get_hyperparameters(args, MultiFeedForwardPolicy)\n\n # add the seed for logging purposes\n params_with_extra = hp.copy()\n params_with_extra['seed'] = seed\n params_with_extra['env_name'] = args.env_name\n params_with_extra['policy_name'] = \"MultiFeedForwardPolicy\"\n params_with_extra['algorithm'] = args.alg\n params_with_extra['date/time'] = now\n\n # Add the hyperparameters to the folder.\n with open(os.path.join(dir_name, 'hyperparameters.json'), 'w') as f:\n json.dump(params_with_extra, f, sort_keys=True, indent=4)\n\n run_exp(\n env=args.env_name,\n policy=MultiFeedForwardPolicy,\n hp=hp,\n dir_name=dir_name,\n evaluate=args.evaluate,\n seed=seed,\n eval_interval=args.eval_interval,\n log_interval=args.log_interval,\n save_interval=args.save_interval,\n initial_exploration_steps=args.initial_exploration_steps,\n ckpt_path=args.ckpt_path,\n )", "def generate_files_from_network(id):\n\tfolder_prefix = \"results/\"+id+\"/\"\n\tnetwork_prefix = \"results/\"+id+\"_\"\n\tg = open(network_prefix+'network.json', 'r')\n\tdata = json.load(g)\n\tnames = []\n\tfor node in data:\n\t\tmy_name = data[node]['my_name']\n\t\tnames.append(my_name)\n\t\ttargets = data[node]['target']\n\t\tn_receive = data[node]['receivers']\n\n\t\t#generate_python_file_from_node(folder_prefix, my_name, targets, n_receive)\n\n\tg.close()\n\n\n\n\twith open(folder_prefix+'run.sh', 'w') as f:\n\t\tfor name in names:\n\t\t\tif name!=names[-1]:\n\t\t\t\tf.write('python ../../run_node.py '+name+' '+id+' &\\n')\n\t\t\telse:\n\t\t\t\tf.write('python ../../run_node.py '+name+' '+id+' \\n')\n\n\n\twith open(folder_prefix+'start.sh', 'w') as f:\n\t\tf.write('simulaqron reset\\nsimulaqron set backend qutip\\nsimulaqron start --nodes ')\n\t\tfor name in names:\n\t\t\tif 
name!=names[-1]:\n\t\t\t\tf.write(name+',')\n\t\t\telse:\n\t\t\t\tf.write(name)\n\treturn", "def task_generate_virtual_samples():\n metadata_files = Path(__file__).parent.glob('*_meta.yaml')\n data_files = Path(__file__).parent.glob('*_data.yaml')\n\n script = Path(__file__).parents[0] / \"virtual_experiment.py\"\n\n return {\n \"actions\": [f\"{PYTHON_EXE} {script}\"],\n \"file_dep\": [script, *metadata_files],\n \"verbosity\": 2, # show stdout\n \"targets\": [*data_files],\n \"setup\": [\"generate_virtual_metadata\"],\n \"clean\": [clean_targets]\n }", "def main():\n\t# import training data\n\tfiles = [INPATH + f for f in os.listdir(INPATH) if \".json\" in f]\n\n\t# import books\n\tprint(\"Loading training data...\")\n\tbookList = loadBooks(files)\n\tprint(\"Load complete.\")\n\n\t# loop through element types and store data structure\n\tfor key, value in ELEMENTS.items():\n\t\tprint(\"Generating: %s\" % key)\n\n\t\t# set file outpath\n\t\toutfile = \"%s.json\" % key\n\t\toutpath = OUTPATH % outfile\n\n\t\tgenerateTrain(bookList, key, value, outpath)", "def train_distributed():\n # Distributed stuff learnt from this repo: https://github.com/GoogleCloudPlatform/cloudml-dist-\n # mnist-example/blob/master/trainer/task.py\n\n # For Distributed TensorFlow\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_info = env.get('cluster')\n cluster_spec = tf.train.ClusterSpec(cluster_info)\n task_info = env.get('task')\n job_name, task_index = task_info['type'], task_info['index']\n\n device_fn = tf.train.replica_device_setter(\n cluster=cluster_spec,\n worker_device='/job:%s/task:%d' % (job_name, task_index))\n\n print(\"Start job:%s, index:%d\" % (job_name, task_index))\n\n server = tf.train.Server(cluster_spec,\n job_name=job_name, task_index=task_index)\n\n # Start a parameter server node\n if job_name == 'ps':\n server.join()\n\n # Start a master/worker node\n if job_name == 'master' or job_name == 'worker':\n is_chief = (job_name == 'master')\n\n with tf.Graph().as_default() as graph: # TODO necessary?\n with tf.device(device_fn):\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create the model\n print(\"(%s,%d) Creating %d layers of %d units.\" %\n (job_name, task_index, FLAGS.num_layers, FLAGS.size))\n model = create_model(False)\n\n # Create train_dir\n if is_chief:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n\n # TensorBoard summaries\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n # Create supervisor\n init_op = tf.global_variables_initializer()\n\n # Create Supervisor. 
Disabling checkpoints and summaries, because we do that manually\n sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op,\n init_fn=lambda session: after_init(session, model, embeddings_file),\n saver=model.saver, global_step=model.global_step,\n save_model_secs=0, save_summaries_secs=0, summary_op=None,\n summary_writer=None)\n\n with sv.managed_session(server.target) as sess:\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders,\n is_chief, job_name, task_index, sv.should_stop)\n sv.stop()", "def gen_cluster_script(\n crop,\n scheduler,\n batch_ids=None,\n *,\n mode=\"array\",\n num_procs=None,\n num_threads=None,\n num_nodes=None,\n num_workers=None,\n mem=None,\n mem_per_cpu=None,\n gigabytes=None,\n time=None,\n hours=None,\n minutes=None,\n seconds=None,\n conda_env=True,\n launcher=\"python\",\n setup=\"#\",\n shell_setup=\"\",\n mpi=False,\n temp_gigabytes=1,\n output_directory=None,\n debugging=False,\n **kwargs,\n):\n\n scheduler = scheduler.lower() # be case-insensitive for scheduler\n\n if scheduler not in (\"sge\", \"pbs\", \"slurm\"):\n raise ValueError(\"scheduler must be one of 'sge', 'pbs', or 'slurm'.\")\n\n if mode not in (\"array\", \"single\"):\n raise ValueError(\"mode must be one of 'array' or 'single'.\")\n\n # parse the number of threads\n if num_threads is None:\n if num_workers is None:\n # default to 1 thread per core for no workers\n num_threads = num_procs\n else:\n # default to 1 thread per worker\n num_threads = round(num_procs / num_workers)\n\n # parse the time requirement\n if hours is minutes is seconds is None:\n if time is not None:\n if isinstance(time, (int, float)):\n hours = time\n minutes, seconds = 0, 0\n elif isinstance(time, str):\n hours, minutes, seconds = time.split(\":\")\n else:\n hours, minutes, seconds = 1, 0, 0\n else:\n if time is not None:\n raise ValueError(\n \"Cannot specify both time and hours, minutes, seconds.\"\n )\n hours = 0 if hours is None else int(hours)\n minutes = 0 if minutes is None else int(minutes)\n seconds = 0 if seconds is None else int(seconds)\n\n if scheduler == \"slurm\":\n # only supply specified header options\n # TODO: same with PBS and SGE\n\n if num_nodes is not None:\n kwargs[\"nodes\"] = num_nodes\n if num_procs is not None:\n kwargs[\"cpus-per-task\"] = num_procs\n\n if gigabytes is not None:\n if mem is not None:\n raise ValueError(\"Cannot specify both gigabytes and mem.\")\n mem = gigabytes\n\n if mem is not None:\n if isinstance(mem, int):\n mem = f\"{mem}G\"\n kwargs[\"mem\"] = mem\n\n if mem_per_cpu is not None:\n if isinstance(mem_per_cpu, int):\n mem_per_cpu = f\"{mem_per_cpu}G\"\n kwargs[\"mem-per-cpu\"] = mem_per_cpu\n\n else:\n # pbs, sge\n # parse memory to gigabytes\n if (gigabytes is not None) and (mem is not None):\n raise ValueError(\"Cannot specify both gigabytes and mem.\")\n\n if mem is not None:\n # take gigabytes from mem\n gigabytes = int(mem)\n\n if output_directory is None:\n from os.path import expanduser\n\n home = expanduser(\"~\")\n output_directory = os.path.join(home, \"Scratch\", \"output\")\n\n if conda_env is True:\n # automatically set conda environment to be the\n # same as the one that's running this function\n conda_env = os.environ.get(\"CONDA_DEFAULT_ENV\", False)\n if conda_env:\n # but only if we are in a conda environment\n if (\"conda activate\" in shell_setup) or (\n \"mamba activate\" in shell_setup\n ):\n # and user is not 
already explicitly activating\n conda_env = False\n\n if isinstance(conda_env, str):\n # should now be a string\n shell_setup += f\"\\nconda activate {conda_env}\"\n elif conda_env is not False:\n raise ValueError(\n \"conda_env must be either ``False``, \"\n f\"``True`` or a string, not {conda_env}\"\n )\n\n crop.calc_progress()\n\n if kwargs:\n if scheduler == \"slurm\":\n header_options = \"\\n\".join([\n f\"#SBATCH --{k}\"\n if (v is None or v is True) else\n f\"#SBATCH --{k}={v}\"\n for k, v in kwargs.items()\n ])\n elif scheduler == \"pbs\":\n header_options = \"\\n\".join([\n f\"#PBS -l {k}\"\n if (v is None or v is True) else\n f\"#PBS -l {k}={v}\"\n for k, v in kwargs.items()\n ])\n elif scheduler == \"sge\":\n header_options = \"\\n\".join([\n f\"#$ -l {k}\"\n if (v is None or v is True) else\n f\"#$ -l {k}={v}\"\n for k, v in kwargs.items()\n ])\n else:\n header_options = \"\"\n\n if num_threads is None:\n if mpi:\n # assume single thread per rank\n num_threads = 1\n else:\n if num_workers is None:\n # assume all multithreading over all cores\n num_threads = num_procs\n else:\n # assume each worker has equal number of threads\n num_threads = max(1, num_procs // num_workers)\n\n if num_workers is not None:\n if num_workers * num_threads != num_procs:\n warnings.warn(\n f\"num_workers * num_threads ({num_workers} * {num_threads}) \"\n f\"!= num_procs ({num_procs}), may not be computationally \"\n \"efficient.\"\n )\n\n # get absolute path\n full_parent_dir = str(pathlib.Path(crop.parent_dir).expanduser().resolve())\n\n opts = {\n \"hours\": hours,\n \"minutes\": minutes,\n \"seconds\": seconds,\n \"gigabytes\": gigabytes,\n \"name\": crop.name,\n \"parent_dir\": full_parent_dir,\n \"num_procs\": num_procs,\n \"num_threads\": num_threads,\n \"num_nodes\": num_nodes,\n \"num_workers\": num_workers,\n \"launcher\": launcher,\n \"setup\": setup,\n \"shell_setup\": shell_setup,\n \"pe\": \"mpi\" if mpi else \"smp\",\n \"temp_gigabytes\": temp_gigabytes,\n \"output_directory\": output_directory,\n \"working_directory\": full_parent_dir,\n \"header_options\": header_options,\n \"debugging\": debugging,\n }\n\n if batch_ids is not None:\n # grow specific ids\n opts[\"batch_ids\"] = tuple(batch_ids)\n array_mode = \"partial\"\n elif crop.num_results == 0:\n # grow all ids\n opts[\"batch_ids\"] = range(1, crop.num_batches + 1)\n array_mode = \"all\"\n else:\n # find missing ids and grow them\n opts[\"batch_ids\"] = crop.missing_results()\n array_mode = \"partial\"\n\n # build the script!\n\n if scheduler == \"sge\":\n script = _SGE_HEADER\n if mode == \"array\":\n script += _SGE_ARRAY_HEADER\n elif scheduler == \"pbs\":\n script = _PBS_HEADER\n if mode == \"array\":\n script += _PBS_ARRAY_HEADER\n elif scheduler == \"slurm\":\n script = _SLURM_HEADER\n if mode == \"array\":\n script += _SLURM_ARRAY_HEADER\n\n script += _BASE\n\n if mode == \"array\":\n opts[\"run_start\"] = 1\n\n if array_mode == \"all\":\n opts[\"run_stop\"] = crop.num_batches\n if scheduler == \"sge\":\n script += _CLUSTER_SGE_GROW_ALL_SCRIPT\n elif scheduler == \"pbs\":\n script += _CLUSTER_PBS_GROW_ALL_SCRIPT\n elif scheduler == \"slurm\":\n script += _CLUSTER_SLURM_GROW_ALL_SCRIPT\n\n elif array_mode == \"partial\":\n opts[\"run_stop\"] = len(opts[\"batch_ids\"])\n if scheduler == \"sge\":\n script += _CLUSTER_SGE_GROW_PARTIAL_SCRIPT\n elif scheduler == \"pbs\":\n script += _CLUSTER_PBS_GROW_PARTIAL_SCRIPT\n elif scheduler == \"slurm\":\n script += _CLUSTER_SLURM_GROW_PARTIAL_SCRIPT\n\n elif mode == \"single\":\n if 
batch_ids is None:\n # grow all missing, but compute the list dynamically\n # this allows the job to be restarted\n opts[\"batch_ids\"] = \"crop.missing_results()\"\n script += _BASE_CLUSTER_GROW_SINGLE\n\n script += _BASE_CLUSTER_SCRIPT_END\n script = script.format(**opts)\n\n if (scheduler == \"pbs\") and len(opts[\"batch_ids\"]) == 1:\n # PBS can't handle arrays jobs of size 1...\n script = script.replace(\"#PBS -J 1-1\\n\", \"\").replace(\n \"$PBS_ARRAY_INDEX\", \"1\"\n )\n\n return script", "def write_mgen_scripts(tgen_names, traffic_profile, num_bots, bot_msg_size, bot_msg_rate,\n num_comps, comp_msg_size, comp_msg_rate, duration):\n\n mgen_scripts_by_tgen_node = {}\n bot_flows = {}\n bot_listen_ports = []\n comp_flows = {}\n comp_listen_ports = []\n\n if traffic_profile == 'leaky-udp-bucket':\n\n bot_flows, comp_flows = generate_simple_flows(tgen_names=tgen_names,\n num_bots=num_bots,\n bot_msg_size=bot_msg_size,\n bot_msg_rate=bot_msg_rate,\n num_comps=num_comps,\n comp_msg_size=comp_msg_size,\n comp_msg_rate=comp_msg_rate)\n\n bot_listen_ports = [TGEN_PORT_BASE + i for i in range(num_bots)]\n comp_listen_ports = [TGEN_PORT_BASE + i for i in range(num_bots, num_bots+num_comps)]\n\n # generate mgen scripts for all bots and store mapping between traffic generator name and file\n # path\n for bot_num, flow_dict in bot_flows.items():\n\n file_name = \"{}{}\".format(flow_dict[\"tgen_name\"], \"_traffic.mgn\")\n write_mgen_script(file_name=file_name,\n traffic_profile=traffic_profile,\n flows=flow_dict[\"flows\"],\n listen_ports=bot_listen_ports,\n duration=duration)\n\n # store which script goes to what tgen node\n mgen_scripts_by_tgen_node[flow_dict[\"tgen_name\"]] = file_name\n\n # generate mgen scripts for all competitor nodes and store mapping between traffic generator\n # name and file path\n for comp_num, flow_dict in comp_flows.items():\n\n file_name = \"{}{}\".format(flow_dict[\"tgen_name\"], \"_traffic.mgn\")\n write_mgen_script(file_name=file_name,\n traffic_profile=traffic_profile,\n flows=flow_dict[\"flows\"],\n listen_ports=comp_listen_ports,\n duration=duration)\n\n # store which script goes to what tgen node\n mgen_scripts_by_tgen_node[flow_dict[\"tgen_name\"]] = file_name\n\n return mgen_scripts_by_tgen_node", "def main():\n parser = make_argument_parser()\n args = parser.parse_args()\n\n input_dirs = args.inputdirs\n tf = args.factor\n valid_chroms = args.validchroms\n valid_input_dirs = args.validinputdirs\n test_chroms = args.testchroms\n epochs = args.epochs\n patience = args.patience\n learningrate = args.learningrate\n seed = args.seed\n utils.set_seed(seed)\n dropout_rate = args.dropout\n L = args.seqlen\n w = args.motifwidth\n utils.L = L\n utils.w = w\n utils.w2 = w/2\n negatives = args.negatives\n assert negatives > 0\n meta = args.meta\n gencode = args.gencode\n motif = args.motif\n\n num_motifs = args.kernels\n num_recurrent = args.recurrent\n num_dense = args.dense\n \n features = ['bigwig'] \n\n if tf:\n print 'Single-task training:', tf\n singleTask = True\n if meta:\n print 'Including metadata features'\n features.append('meta')\n if gencode:\n print 'Including genome annotations'\n features.append('gencode')\n else:\n print 'Multi-task training'\n singleTask = False\n #Cannot use any metadata features\n assert not meta\n assert not gencode\n\n if args.outputdir is None:\n clobber = True\n output_dir = args.outputdirc\n else:\n clobber = False\n output_dir = args.outputdir\n\n try: # adapted from dreme.py by T. 
Bailey\n os.makedirs(output_dir)\n except OSError as exc:\n if exc.errno == errno.EEXIST:\n if not clobber:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'but you specified not to clobber it') % output_dir\n sys.exit(1)\n else:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'so it will be clobbered') % output_dir\n\n print 'Loading genome'\n genome = utils.load_genome()\n if valid_input_dirs:\n print 'You specified at least one validation input directory'\n assert singleTask # This option only works for single-task training\n print 'Loading ChIP labels'\n if singleTask:\n chip_bed_list, nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(input_dirs, tf)\n if valid_input_dirs:\n valid_chip_bed_list, valid_nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(valid_input_dirs, tf)\n num_tfs = 1\n else:\n assert len(input_dirs) == 1 # multi-task training only supports one cell line\n input_dir = input_dirs[0]\n tfs, positive_windows, y_positive, nonnegative_regions_bed = \\\n utils.load_chip_multiTask(input_dir)\n num_tfs = len(tfs)\n print 'Loading bigWig data'\n bigwig_names, bigwig_files_list = utils.load_bigwigs(input_dirs)\n num_bigwigs = len(bigwig_names)\n if valid_input_dirs:\n valid_bigwig_names, valid_bigwig_files_list = utils.load_bigwigs(valid_input_dirs)\n assert valid_bigwig_names == bigwig_names\n if not singleTask:\n bigwig_files = bigwig_files_list[0]\n if meta:\n print 'Loading metadata features'\n meta_names, meta_list = utils.load_meta(input_dirs)\n if valid_input_dirs:\n valid_meta_names, valid_meta_list = utils.load_load(valid_input_dirs)\n assert valid_meta_names == meta_names\n else:# meta option was not selected, pass empty metadata features to the functions\n meta_list = [[] for bigwig_files in bigwig_files_list]\n if valid_input_dirs:\n valid_meta_list = [[] for bigwig_files in valid_bigwig_files_list]\n \n print 'Making features'\n if singleTask:\n if not valid_input_dirs: #validation directories not used, must pass placeholder values\n valid_chip_bed_list = None\n valid_nonnegative_regions_bed_list = None\n valid_bigwig_files_list = None\n valid_meta_list = None \n datagen_train, datagen_valid = \\\n utils.make_features_singleTask(chip_bed_list,\n nonnegative_regions_bed_list, bigwig_files_list, bigwig_names,\n meta_list, gencode, genome, epochs, negatives, valid_chroms, test_chroms, \n valid_chip_bed_list, valid_nonnegative_regions_bed_list, \n valid_bigwig_files_list, valid_meta_list)\n else:\n datagen_train, datagen_valid = \\\n utils.make_features_multiTask(positive_windows, y_positive,\n nonnegative_regions_bed, bigwig_files, bigwig_names,\n genome, epochs, valid_chroms, test_chroms)\n print 'Building model'\n if num_recurrent == 0:\n print 'You specified 0 LSTM units. Omitting BLSTM layer'\n if num_recurrent < 0:\n print 'You specified less than 0 LSTM units. 
Replacing BLSTM layer with global max-pooling layer'\n if meta or gencode:\n num_meta = 0\n if meta:\n num_meta = len(meta_names)\n if gencode:\n num_meta += 6\n model = utils.make_meta_model(num_tfs, num_bigwigs, num_meta, num_motifs, num_recurrent, num_dense, dropout_rate)\n else:\n model = utils.make_model(num_tfs, num_bigwigs, num_motifs, num_recurrent, num_dense, dropout_rate)\n\n if motif:\n assert singleTask # This option only works with single-task training\n motifs_db = utils.load_motif_db('resources/HOCOMOCOv9.meme')\n if tf in motifs_db:\n print 'Injecting canonical motif'\n pwm = motifs_db[tf]\n pwm += 0.001\n pwm = pwm / pwm.sum(axis=1)[:, np.newaxis]\n pwm = np.log2(pwm/0.25)\n utils.inject_pwm(model, pwm)\n output_tf_file = open(output_dir + '/chip.txt', 'w')\n if singleTask:\n output_tf_file.write(\"%s\\n\" % tf)\n else:\n for tf in tfs:\n output_tf_file.write(\"%s\\n\" % tf)\n output_tf_file.close()\n output_feature_file = open(output_dir + '/feature.txt', 'w')\n for feature in features:\n output_feature_file.write(\"%s\\n\" % feature)\n output_feature_file.close()\n output_bw_file = open(output_dir + '/bigwig.txt', 'w')\n for bw in bigwig_names:\n output_bw_file.write(\"%s\\n\" % bw)\n output_bw_file.close()\n if meta:\n output_meta_file = open(output_dir + '/meta.txt', 'w')\n for meta_name in meta_names:\n output_meta_file.write(\"%s\\n\" % meta_name)\n output_meta_file.close()\n model_json = model.to_json()\n output_json_file = open(output_dir + '/model.json', 'w')\n output_json_file.write(model_json)\n output_json_file.close()\n train(datagen_train, datagen_valid, model, epochs, patience, learningrate, output_dir)", "def _UpdateScripts(benchmark_spec, vm):\n benchmark = benchmark_spec.benchmark\n vm = vm or benchmark_spec.vms[0]\n\n config_sed = []\n config_sed += [(r'DGXSYSTEM=.*', fr'DGXSYSTEM=\\\"{DGXSYSTEM}\\\"')]\n gpus_per_node = nvidia_driver.QueryNumberOfGpus(vm)\n config_sed.append((\n r'DGXNGPU=.*', fr'DGXNGPU={gpus_per_node}\\n'\n fr'export CUDA_VISIBLE_DEVICES={\",\".join([str(gpu_number) for gpu_number in range(gpus_per_node)])}'\n ))\n config_sed += [(r'DGXNSOCKET=.*',\n fr'DGXNSOCKET={vm.CheckLsCpu().socket_count}')]\n config_sed += [(r'DGXSOCKETCORES=.*',\n fr'DGXSOCKETCORES={vm.CheckLsCpu().cores_per_socket}')]\n\n model = 'maskrcnn' if MASK in benchmark else benchmark\n framework = 'mxnet' if RESNET in benchmark else 'pytorch'\n script_path = (\n fr'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks/{model}/'\n fr'implementations/{framework}')\n\n config_files = [CONFIG]\n\n if MASK in benchmark:\n config_sed = _GetChangesForMask(config_sed)\n config_files = ['config_DGXA100.sh']\n\n elif RESNET in benchmark:\n config_sed = _GetChangesForResnet(config_sed)\n config_files = ['config_DGXA100_common.sh', 'config_DGXA100.sh']\n UpdateScriptForSmallGpuMem(vm)\n\n elif BERT in benchmark:\n config_sed = _GetChangesForBert(config_sed)\n config_files = ['config_DGXA100_common.sh', 'config_DGXA100_1x8x56x1.sh']\n\n vm.RemoteCommand(\n f'cd {script_path} && '\n f'sed \"{SedPairsToString(config_sed)}\" '\n f'{\" \".join(config_files)} > {CONFIG} && '\n f'chmod 755 {CONFIG} && '\n f'sed -i \"2 i source {CONFIG}\" run_and_time.sh && '\n f'sed -i \"2 i source {CONFIG}\" run_with_docker.sh')", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def generate():", "def test_generate_all_training(self):\n facade = 
ChatetteFacade.get_or_create()\n\n input_dir_path = \"tests/system-testing/inputs/generate-all/\"\n input_filenames = [\n \"simplest.chatette\", \"only-words.chatette\",\n \"words-and-groups.chatette\", \"alias.chatette\", \"include.chatette\",\n \"slot.chatette\", \"slotrolegroup.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n if not TestSystem.check_no_duplicates(facade.train_examples):\n pytest.fail(\n \"Some examples were generated several times \" +\n \"when dealing with file '\" + filename + \"'.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n if len(legal_examples) != len(facade.train_examples):\n training_texts = [ex.text for ex in facade.train_examples]\n for legal_ex in legal_examples:\n if legal_ex[\"text\"] not in training_texts:\n pytest.fail(\n \"Example '\" + legal_ex[\"text\"] + \\\n \"' was not generated.\"\n )\n pytest.fail(\n \"An unknown example was not generated (\" + \\\n str(len(facade.train_examples)) + \\\n \" generated instead of \" + str(len(legal_examples)) + \\\n \").\\nGenerated: \" + str(facade.train_examples)\n )\n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )", "def main(politician, epochs):\n train_path = f\"..\\\\data\\\\{politician}\\\\training_data.txt\"\n val_path = f\"..\\\\data\\\\{politician}\\\\validation_data.txt\"\n\n tokenizer = AutoTokenizer.from_pretrained(\"anonymous-german-nlp/german-gpt2\")\n\n special_tokens_dict = {\n 'bos_token': '<BOS>',\n 'eos_token': '<EOS>',\n 'pad_token': '<PAD>',\n 'additional_special_tokens': ['<EOQ>']\n }\n tokenizer.add_special_tokens(special_tokens_dict)\n\n train_dataset, test_dataset, data_collator = load_dataset(train_path, val_path, tokenizer)\n\n model = AutoModelWithLMHead.from_pretrained(\"anonymous-german-nlp/german-gpt2\")\n model.resize_token_embeddings(len(tokenizer))\n\n training_args = TrainingArguments(\n output_dir=f\".\\\\output-models\\\\gpt2-{politician}-{epochs}\", # output directory\n overwrite_output_dir=True, # overwrite the content of the output directory\n num_train_epochs=epochs, # number of training epochs\n per_device_train_batch_size=32, # batch size for training\n per_device_eval_batch_size=64, # batch size for evaluation\n eval_steps=400, # Number of update steps between two evaluations.\n save_steps=800, # after # steps model is saved\n warmup_steps=500, # number of warmup steps for learning rate scheduler\n )\n\n trainer = Trainer(\n model=model,\n args=training_args,\n data_collator=data_collator,\n train_dataset=train_dataset,\n eval_dataset=test_dataset,\n prediction_loss_only=True,\n )\n\n trainer.train()\n trainer.save_model()", "def main(args):\n\n shared_policy_net = build_policy_net(args).share_memory()\n shared_value_net = build_value_net(args).share_memory()\n\n start_training_processes(\n args,\n shared_policy_net,\n 
shared_value_net,\n )", "def main():\n # get config and processing of clauses\n config = Config(load=False)\n\n # Generators\n dev = Dataset(config.filename_dev)\n test = Dataset(config.filename_test)\n train = Dataset(config.filename_train)\n\n # Build tags vocab\n vocab_tags = get_tag_vocab([train, dev, test])\n vocab_tags.add(UNK)\n\n # Save vocab\n write_vocab(vocab_tags, config.filename_tags)\n\n\n # Build and save char vocab\n train = Dataset(config.filename_train)\n vocab_chars = get_char_vocab(train)\n write_vocab(vocab_chars, config.filename_chars)", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def test_generate_nb_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \\\n \"tests/system-testing/inputs/generate-nb/training-only/\"\n input_filenames = [\n \"only-words.chatette\", \"words-and-groups.chatette\",\n \"alias.chatette\", \"include.chatette\", \"slot.chatette\",\n \"bugfixes/bug-22-slot-position.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n # if not TestSystem.check_no_duplicates(facade.train_examples): # TODO: make sure there are no duplicates in this case\n # pytest.fail(\"Some examples were generated several times \"+\n # \"when dealing with file '\"+filename+\"'.\\n\"+\n # \"Generated: \"+str(facade.train_examples))\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n \n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )\n\n filename_zero = \"zero-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_zero)\n facade.run(file_path)\n if len(facade.train_examples) != 0:\n pytest.fail(\n \"When dealing with file 'zero-ex.chatette', no examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n\n filename_one = \"one-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_one)\n facade.run(file_path)\n print(\"TRAIN EX: \" + str(facade.train_examples))\n if len(facade.train_examples) != 1:\n pytest.fail(\n \"When dealing with file 'one-ex.chatette', one examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )", "def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, 
num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')", "def generate_synthetic_dataset(args):\n logger = logging.getLogger(\"GACM\")\n logger.info('Checking the data files...')\n for data_path in args.train_dirs + args.dev_dirs + args.test_dirs:\n assert os.path.exists(data_path), '{} file does not exist.'.format(data_path)\n assert len(args.test_dirs) > 0, 'No test files are provided.'\n dataset = Dataset(args, train_dirs=args.train_dirs, dev_dirs=args.dev_dirs, test_dirs=args.test_dirs)\n logger.info('Initialize the model...')\n model = Agent(args, len(dataset.qid_query), len(dataset.uid_url), len(dataset.vid_vtype))\n logger.info('model.global_step: {}'.format(model.global_step))\n assert args.load_model > -1\n logger.info('Restoring the model...')\n model.load_model(model_dir=args.load_dir, model_prefix=args.algo, global_step=args.load_model, load_optimizer=False)\n\n synthetic_types = ['deterministic', 'stochastic']\n shuffle_splits = [None, [1, 11], [1, 6, 11]]\n amplifications = [1, 7]\n for synthetic_type in synthetic_types:\n for shuffle_split in shuffle_splits:\n for amplification in amplifications:\n #synthetic_type = 'deterministic'\n #shuffle_split = None\n #amplification = 1\n file_path = os.path.join(args.load_dir, '..', 'synthetic')\n model.generate_synthetic_dataset('test', dataset, file_path, \n 'synthetic_{}_{}_{}.txt'.format(synthetic_type[0].upper(), str(shuffle_split), amplification), \n synthetic_type=synthetic_type, shuffle_split=shuffle_split, amplification=amplification)\n # exit()\n logger.info('Done with click sequence generation.')", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def main():\n args = parse_args()\n args.seed = init_rand(seed=args.seed)\n\n _, log_file_exist = initialize_logging(\n logging_dir_path=args.save_dir,\n logging_file_name=args.logging_file_name,\n script_args=args,\n log_packages=args.log_packages,\n log_pip_packages=args.log_pip_packages)\n\n batch_size = prepare_ke_context(\n 
num_gpus=args.num_gpus,\n batch_size=args.batch_size)\n\n net = prepare_model(\n model_name=args.model,\n use_pretrained=args.use_pretrained,\n pretrained_model_file_path=args.resume.strip())\n num_classes = net.classes if hasattr(net, \"classes\") else 1000\n input_image_size = net.in_size if hasattr(net, \"in_size\") else (args.input_size, args.input_size)\n\n train_data, val_data = get_data_rec(\n rec_train=args.rec_train,\n rec_train_idx=args.rec_train_idx,\n rec_val=args.rec_val,\n rec_val_idx=args.rec_val_idx,\n batch_size=batch_size,\n num_workers=args.num_workers,\n input_image_size=input_image_size,\n resize_inv_factor=args.resize_inv_factor)\n train_gen = get_data_generator(\n data_iterator=train_data,\n num_classes=num_classes)\n val_gen = get_data_generator(\n data_iterator=val_data,\n num_classes=num_classes)\n\n net = prepare_trainer(\n net=net,\n optimizer_name=args.optimizer_name,\n momentum=args.momentum,\n lr=args.lr,\n num_gpus=args.num_gpus,\n state_file_path=args.resume_state)\n\n train_net(\n net=net,\n train_gen=train_gen,\n val_gen=val_gen,\n train_num_examples=1281167,\n val_num_examples=50048,\n num_epochs=args.num_epochs,\n checkpoint_filepath=os.path.join(args.save_dir, \"imagenet_{}.h5\".format(args.model)),\n start_epoch1=args.start_epoch)" ]
[ "0.7570193", "0.724141", "0.7049878", "0.6241078", "0.6189553", "0.6151004", "0.6111944", "0.6034311", "0.60096145", "0.59790266", "0.59271824", "0.58651984", "0.5852421", "0.5842843", "0.58210456", "0.58143574", "0.5811275", "0.58090484", "0.5808338", "0.58027387", "0.5800938", "0.5791731", "0.5762171", "0.5747953", "0.57390904", "0.5735502", "0.5723848", "0.5722253", "0.5713102", "0.57057106" ]
0.7527086
1
Function to generate the scripts to analyze the log.
def script_generator(self):
    analyze_tool = "/home/haihuam/Projects/RepPoints/mmdetection/tools/analyze_logs.py"
    ex_options = self.global_setting.get('analyze_options', str())
    py = self.global_setting.get('python', sys.executable)

    if os.access(py, os.X_OK):
        content = "set -e \n"
        content += "cd %s \n"%(self.run_dir)
        content += "%s %s plot_curve *.log.json "%(py, analyze_tool)
        content += "--keys loss loss_cls loss_pts_init "
        content += "loss_pts_refine "
        content += "--out losses.pdf %s &> analyze.log \n"%(ex_options)
        content += "touch analyze.done \n"
        self.script_content = content
    else:
        print("Error: %s is not executable."%py)
        sys.exit(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def script_generator(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('evaluate_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n \n content += \"%s %s %s --work_dir %s --validate %s &> train.log \\n\"%(py, \n train_py,\n self.setting['config_file'],\n self.run_dir,\n ex_options)\n content += \"touch evaluate.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def script(self):", "def main():\r\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ldc_analysis.settings\")\r\n # hourly_sm_reading_histogram()\r\n # reading_count_histogram()\r\n sm_reading_exception_count_histogram()", "def main():\n config_file = get_conf(get_config_name())\n if not config_file:\n sys.exit(1)\n log = get_last_file(config_file[\"LOG_DIR\"])\n MAIN_LOGGER.info(\"we've got log file named %s\", log.path)\n file_name = os.path.join(os.path.dirname(__file__), config_file['REPORT_DIR'],\n \"report-{}.html\".format(log.date))\n if os.path.exists(file_name):\n MAIN_LOGGER.info(\"%s already exists\", file_name)\n sys.exit()\n res = gen_parse_log(log, config_file['PERCENT_FAILS'])\n if not res:\n sys.exit(1)\n MAIN_LOGGER.info(\"log parsed\")\n report = []\n for _ in range(int(config_file[\"REPORT_SIZE\"])):\n try:\n report.append(next(res))\n except StopIteration:\n pass\n MAIN_LOGGER.info(\"report file name %s\", file_name)\n\n if report:\n save_report(report, config_file['TEMPLATE_FILE'], file_name)", "def main():\n\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config[\"database\"])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n\n rendered_report = render_report(show_years=show_years,\n panelists=panelists,\n report_settings=app_config[\"report\"])\n\n generate_output_files(rendered_report=rendered_report,\n report_settings=app_config[\"report\"])", "def main(): \n # Parse Arguments\n args = parse_arguments()\n\n # Print outdir\n print(\"Writing output to \" + args.outdir)\n\n # Print start statement\n print('Starting script for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)\n\n # Put all the files in a function that will further handle the files as dataframe\n create_df(args.file, args.outdir)\n\n # Script is finished\n print('All done for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)", "def prepare_afd_script():\n\n\t## get the variable to explain\n\tvar_to_explain = \"undef\"\n\tcmpt = 0\n\tdata_file = open(\"data/data.csv\", \"r\")\n\tfor line in data_file:\n\t\tif(cmpt == 0):\n\t\t\tline_in_array = line.split(\",\")\n\t\t\tvar_to_explain = line_in_array[0]\n\t\tcmpt += 1\n\tdata_file.close()\n\tvar_to_explain = var_to_explain.replace(\"\\\\\", \".\")\n\tif(var_to_explain[0] == \".\"):\n\t\tvar_to_explain = \"X\" + var_to_explain\n\n\t## write the script\n\ttemplate_file = open(\"scripts/afd_template.R\", \"r\")\n\tafd_script = open(\"scripts/afd_script.R\", \"w\")\n\tcmpt = 1\n\tfor line in template_file:\n\t\tif(cmpt == 108):\n\t\t\tline_to_write = \"data.lda <- lda(\"+str(var_to_explain)+\" ~ ., data=data)\"\n\t\t\tafd_script.write(line_to_write+\"\\n\")\n\t\telif(cmpt == 123):\n\t\t\t 
line_to_write = \"ldahist(data = data.lda.values$x[,comp], g=\"+str(var_to_explain)+\")\"\n\t\t\t afd_script.write(line_to_write+\"\\n\")\n\t\telif(cmpt == 132):\n\t\t\tline_to_write = \"text(data.lda.values$x[,1],data.lda.values$x[,2],\"+str(var_to_explain)+\",cex=0.7,pos=4,col=\\\"red\\\")\"\n\t\t\tafd_script.write(line_to_write+\"\\n\")\n\t\telse:\n\t\t\tafd_script.write(line)\n\t\tcmpt += 1\n\tafd_script.close()\n\ttemplate_file.close()", "def main():\n create_all_views()\n outlog_q1 = question_1()\n outlog_q2 = question_2()\n outlog_q3 = question_3()\n\n db.close()\n\n generateLog(outlog_q1, outlog_q2, outlog_q3)", "def generate(env, daos_prefix, comp_prefix, args):\n analyzer = Analyzer(env, daos_prefix, comp_prefix, args)\n analyzer.analyze_on_exit()", "def _reportFileAnalytics(self, sourceFiles, outputFile, language):\n \n #is this a single file or a set of files?\n bSingleFile = len(sourceFiles) == 1\n \n #open the output file for appending\n f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.HEADER_COLOR1 + '\">')\n f.write ('<br>\\n=======================================================<br>\\n')\n if bSingleFile:\n f.write(sourceFiles[0]) #if this is a single file, simply output its name\n else: #if these are multiple files, list the directory name in bold\n f.write('<b>' + os.path.split(sourceFiles[0])[0] + '</b>') #directory name in bold\n f.write ('<br>\\n=======================================================<br>\\n</font>')\n\n #for each file, report the analytics\n for sourceFile in sourceFiles:\n if bSingleFile == False: #only print the filename if we have more than 1 file in the list\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.HEADER_COLOR1 + '\">')\n f.write(os.path.split(sourceFile)[1] + '</font><br>\\n')\n \n if language == 'C++':\n numLines, numComments = self.analyzeCppCode(sourceFile)\n f.write ('<font face=\"courier\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR1 + '\">Code Lines: ' + str(numLines))\n f.write ('<br>\\n~#Comments: ' + str(numComments) + '<br>\\n')\n \n if language == 'Python':\n numLines, numDocStr, numComments, numDefs, numClasses = self.analyzePythonCode(sourceFile)\n f.write ('<font face=\"courier\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR1 + '\">Code Lines: ' + str(numLines))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#Functions: ' + str(numDefs))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#Classes: ' + str(numClasses))\n f.write ('<br>\\n~#Comments: ' + str(numComments))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#DocStrs: ' + str(numDocStr) + '<br>\\n')\n \n f.write('</font><br>') #skip a line between entries\n f.close()", "def run_report_generation(**kwargs):\n out = run_python_script_helper(\n os.path.dirname(__file__), \"report_generation_example.py\", **kwargs\n )\n return out", "def build_scripts_report(**kwargs):\n # All report functions support kwargs to support a unified interface,\n # even if they don't use them.\n _ = kwargs\n jss_connection = JSSConnection.get()\n all_policies = jss_connection.Policy().retrieve_all(\n subset=[\"general\", \"scripts\"])\n all_configs = jss_connection.ComputerConfiguration().retrieve_all()\n all_scripts = [(script.id, script.name) for script in\n jss_connection.Script()]\n if not all_scripts:\n report = Report(\"Script\", [], \"Script Usage Report\", {})\n else:\n policy_xpath = \"scripts/script\"\n config_xpath = \"scripts/script\"\n report = build_container_report(\n [(all_policies, 
policy_xpath), (all_configs, config_xpath)],\n all_scripts)\n report.get_result_by_name(\"Used\").description = (\n \"All scripts which are installed by policies or imaging \"\n \"configurations.\")\n report.get_result_by_name(\"Unused\").description = (\n \"All scripts which are not installed by any policies or imaging \"\n \"configurations.\")\n\n report.heading = \"Script Usage Report\"\n\n return report", "def setup_script_logging():\n #handlers = [logbook.NullHandler()]\n format_str = (\"[{record.time:%Y-%m-%dT%H:%MZ}] \"\n \"{record.level_name}: {record.message}\")\n\n #handler = logbook.StreamHandler(sys.stderr, format_string=format_str,\n # level=\"DEBUG\")\n #handler.push_thread()\n #return handler", "def _add_uuids(self, script):\n start = f\"print('{self.LOGSTART%(self.session.uuid, self.session.run_counter)}')\\n\"\n end = f\"\\n\\nprint('{self.LOGEND%(self.session.uuid, self.session.run_counter)}')\\n\"\n return start + script + end", "def analyse_screening_setup(self):\n\n control = self.control\n logger: LoggerProperties\n\n # Perform some input checks\n # Check project path exists\n if control.project_path == \"\":\n msg = \"Cannot process: Project location not set.\"\n raise LoggerWarning(msg)\n\n # Check at least one logger exists\n if not control.loggers:\n msg = \"Cannot process: No loggers exist in setup.\"\n raise LoggerWarning(msg)\n\n # Check all ids are unique\n control.check_logger_ids()\n\n # Check logging durations and sample lengths are positive\n enabled_loggers = (logger for logger in control.loggers if logger.enabled)\n for logger in enabled_loggers:\n if logger.duration <= 0:\n msg = f\"Cannot process: Logging duration for logger {logger.logger_id} is {logger.duration}.\\n\"\n f\"Logging duration must be greater than zero.\"\n raise LoggerWarning(msg)\n\n # TODO: Move to logger properties as a setup function\n if control.global_process_stats is True and logger.process_stats is True:\n if logger.stats_interval <= 0:\n msg = f\"Cannot process: Statistics sample length for logger \"\n f\"{logger.logger_id} is {logger.stats_interval}.\\n\"\n f\"Statistics sample length must be greater than zero.\"\n raise LoggerWarning(msg)\n\n if control.global_process_spect is True and logger.process_spect is True:\n if logger.spect_interval <= 0:\n msg = f\"Cannot process: Spectral sample length for logger \"\n f\"{logger.logger_id} is {logger.spect_interval}.\\n\"\n f\"Spectral sample length must be greater than zero.\"\n raise LoggerWarning(msg)\n\n # Paths to output folders\n control.set_output_paths()\n\n # Get raw filenames, check timestamps and select files in processing datetime range\n enabled_loggers = (logger for logger in control.loggers if logger.enabled)\n for logger in enabled_loggers:\n # Store logger filenames and check file timestamps\n self.statusbar.showMessage(\n f\"Checking setup: Checking file names for {logger.logger_id}. 
Please wait...\"\n )\n self.repaint()\n logger.get_filenames()\n\n # Select files to process and, if applicable, check file timestamps are valid\n logger.set_files_to_process()\n\n # Store expected file length\n logger.expected_data_points = logger.freq * logger.duration\n\n # Get all channel names and units if not already stored in logger object\n if len(logger.all_channel_names) == 0 and len(logger.all_channel_units) == 0:\n logger.get_all_columns()\n\n # Update column list in config dashboard if this logger is the one selected\n if logger.logger_id == self.inputDataModule.loggerList.currentItem().text():\n self.inputDataModule.set_logger_columns_list(logger)\n\n # Check requested channels exist\n # Connect warning signal to warning message box in DataLab class\n try:\n # Disconnect any existing connection to prevent repeated triggerings\n logger.logger_warning_signal.disconnect()\n except TypeError:\n pass\n logger.logger_warning_signal.connect(self.warning)\n\n # Set processed channel names and units as user values, if supplied, or file header values\n logger.set_selected_column_and_units_names()\n\n # Check for any columns without any units set and see if the units is embedded in the channel name;\n # if so extract units from channel name and add to units list\n logger.check_if_units_in_channel_name()\n\n # Check number of headers match number of columns to process\n # TODO: This should already have been enforced earlier so perhaps no longer required?\n logger.check_headers()", "def write_shell_scripts(airfoils, qsh_template, nsetup, ntype, out_dir):\n for nairfoil, sim_setup in airfoils.iteritems():\n for aoa in sim_setup['aoas']:\n # Create simulation name\n sim_name = create_sim_name(nairfoil, ntype, nsetup, aoa)\n # Create fluent journal file\n with open(qsh_template, 'r') as f:\n qtxt = f.read()\n # Start to replace parameters inside the journal\n qtxt = qtxt.replace('SIMNAME', sim_name)\n qtxt = qtxt.replace('in.jou', sim_name + '.jou')\n qtxt = qtxt.replace('fluent.out', sim_name + '.out')\n # Write new shell script to out_dir\n qout = sim_name + '.qsh'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n with open(os.path.join(out_dir, qout), 'w') as f:\n f.write(qtxt)\n return True", "def analyze_run():\n file_datas_dict = load_datas(Args.data_files)\n plotables_dict = dict()\n for file_name, datas in file_datas_dict.viewitems():\n analized_datas = analyze_datas(datas,Args.analysis_attributes)\n plotables = ana_results_to_plotables(\n analized_datas,\n Args.analysis_attributes\n )\n if Args.dm_file_out:\n analysis_save_dm(\n analized_datas,\n plotables,\n Args.analysis_attributes,\n Args.dm_file_out\n )\n if Args.mat_file_out:\n analysis_save(\n plotables,\n Args.analysis_attributes,\n Args.mat_file_out\n )\n if Args.verbose:\n plotables_dict[file_name] = plotables\n if Args.verbose:\n ana_plot_figures(plotables_dict,Args.analysis_attributes)", "def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')", "def main():\n p = Path.cwd()\n path = str(p)\n\n files = tracked_files()\n scripts = search_dir(p, path, files, '.py')\n scripts = [i for i in scripts if 'tests/' not in i[:7]]\n scripts = list(map(partial(process, p), scripts))\n\n for script in scripts:\n script['display'] = script['name'].replace('_', '\\_')\n 
write_readme(scripts)", "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)", "def reports_cli():", "def generate(self, fileName):\n self.preProcess()\n styleFile = open(fileName, 'w')\n # write head part\n head = \"\"\"#!/usr/bin/env python\n\nimport os\n\nfrom WMQuality.Code import Code\n\n# output of the log files\n# prefix of the files in cvs\n# quality script for using pylint:\nqualityScript = '%s'\n# output file:\nqualityReport = '%s'\n# rating threshold (min: 0, max 10)\nthreshold = %s\n\npackages = {\\\\\n \"\"\" % (self.script, self.report, self.threshold)\n styleFile.writelines(head)\n styleFile.writelines('\\n')\n\n for moduleName in self.module.keys():\n # find the one with the most votes per module:\n # register this.\n styleFile.writelines(\" '\" + moduleName + \"':'\" + self.module[moduleName] + \"',\\\\\\n\")\n styleFile.writelines('}\\n')\n tail = \"\"\"\ncode = Code(qualityScript, qualityReport, WMCore.WMInit.getWMBASE(), threshold, packages)\ncode.run()\ncode.summaryText()\n \"\"\"\n styleFile.writelines(tail)\n styleFile.close()", "def generateReportAndLog(xmlFiles, reportFile, logFile): \n rebotCommand = \"rebot --log %s --report %s --reporttitle \\\"%s\\\" --name ' ' %s*.xml\" % (logFile, reportFile, suiteName, payload)\n print 'rebotCommand: ' + rebotCommand\n rc = os.system(rebotCommand)\n return rc", "def build_custom_log(\n dp_shell_history: Path,\n fp_results: Path,\n *,\n daterange: List[str],\n username: str = None,\n wdir: Path = None,\n hostname: str = None,\n regexp: str = None,\n unique: bool = False,\n) -> None:\n dt_start, dt_end = get_daterange(daterange)\n\n log.trace(\"dt_start: {}\", dt_start) # type: ignore\n log.trace(\"dt_end: {}\", dt_end) # type: ignore\n\n hostname = os.uname().nodename if hostname is None else hostname\n regexp = \".*\" if regexp is None else regexp\n\n with fp_results.open(\"w\") as f:\n f.write(f\"# vim: filetype={SCRIPTNAME}\\n\\n\")\n\n dt_tmp = dt_start\n entry_count = 0\n while date_ym_value(dt_tmp) <= date_ym_value(dt_end):\n fp_log = Path(\n f\"{dp_shell_history}/{hostname}/{dt_tmp.year}/\"\n f\"{str(dt_tmp.month).zfill(2)}.log\"\n )\n\n try:\n if hostname.lower() == \"all\":\n fp_log = merge_hosts(\n dp_shell_history, dt_tmp.year, dt_tmp.month\n )\n\n skip_date_check = (\n dt_tmp.month != dt_start.month or dt_tmp.year != dt_start.year\n ) and (dt_tmp.month != dt_end.month or dt_tmp.year != dt_end.year)\n\n log_lines = process_logfile(\n fp_log,\n dt_start=dt_start,\n dt_end=dt_end,\n regexp=regexp,\n username=username,\n wdir=wdir,\n unique=unique,\n skip_date_check=skip_date_check,\n )\n\n with fp_results.open(\"a+\") as f:\n f.writelines(log_lines)\n\n entry_count += len(log_lines)\n except LogsNotFound:\n log.debug(f\"No Log Files for {dt_tmp.month}-{dt_tmp.year} Exist.\")\n finally:\n dt_tmp = dt_tmp + relativedelta(months=1)\n\n with fp_results.open(\"a+\") as f:\n f.write(\n f\"# Number of shell commands matched by {SCRIPTNAME} query: \"\n f\"{entry_count}\"\n )", "def 
run_script(self):\n pass", "def script_generator(self):\n\n self._get_free_tcp_port()\n\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n \n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s -m torch.distributed.launch \"%(py)\n content += \"--nproc_per_node=%s \"%(self.setting['train_num_gpu'])\n content += \"--master_port %s \"%(self.dist_train_port)\n content += \"%s %s --launcher pytorch \"%(train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--resume_from latest.pth \"\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n # return content\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')", "def startLogFiles():\n #global techLog\n global userl\n _generic_startLogFiles(True)", "def run_all(self):\n\n self.run_mash() ###Run MASH analysis\n self.filter_query() ###Filter fasta sequences out based on p value\n self.build_index(self.filtered_out_path) ###Build index for off-target analysis\n os.remove(self.filtered_out_path) ###Clean up intermediate fasta file\n self.format_gRNA(self.path1) ###Format everything in the right order\n self.run_OTF() ###Run off-target analysis\n self.output_parse() ###Parse output values and update table", "def main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-c', '--conf', required=True, help='Path to config yml file')\n parser_interval = parser.add_mutually_exclusive_group()\n parser_interval.add_argument(\n '--hourly',\n action='store_true',\n help='Use rules configured as \"hourly\"'\n )\n parser_interval.add_argument(\n '--daily',\n action='store_true',\n help='Use rules configured as \"daily\"'\n )\n parser.add_argument(\n 'path',\n nargs='+',\n help='Path to folder with log'\n )\n args = parser.parse_args()\n with open(args.conf) as conf_file:\n conf = yaml.safe_load(conf_file)\n interval = 'daily' if args.daily else 'hourly' if args.hourly else ''\n compressors = []\n now = local_tz_now()\n for path in args.path:\n compressors += process_path(now, conf, interval, os.path.abspath(path))\n if compressors:\n run_compressors(compressors)" ]
[ "0.6372324", "0.6196502", "0.58817005", "0.5872832", "0.5843183", "0.57949096", "0.5750669", "0.5748845", "0.57101727", "0.56722856", "0.5642168", "0.56294507", "0.5594446", "0.55805284", "0.552635", "0.55260634", "0.5525768", "0.5515622", "0.5510869", "0.5489981", "0.5473564", "0.5468409", "0.54418916", "0.5420824", "0.54070115", "0.53906447", "0.535757", "0.53369313", "0.53326344", "0.5328118" ]
0.81086046
0
Function to generate distributed training scripts.
def script_generator(self):

    self._get_free_tcp_port()

    train_py = "/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py"
    py = self.global_setting.get('python', sys.executable)
    ex_options = self.global_setting.get('train_options', str())

    if not os.access(py, os.X_OK):
        py = "/home/haihuam/anaconda3/envs/RepPoints/bin/python"

    if os.access(py, os.X_OK):
        content = "set -e \n"
        content += "export CUDA_VISIBLE_DEVICES=" + \
                   ",".join(self.selected_gpus)+ " \n"
        content += "cd %s \n"%(self.run_dir)
        content += "%s -m torch.distributed.launch "%(py)
        content += "--nproc_per_node=%s "%(self.setting['train_num_gpu'])
        content += "--master_port %s "%(self.dist_train_port)
        content += "%s %s --launcher pytorch "%(train_py, self.setting['config_file'])
        content += "--work_dir %s "%(self.run_dir)
        content += "--resume_from latest.pth "
        content += "--validate %s &> %s.log \n"%(ex_options, self.stage)
        content += "touch train.done \n"
        # return content
        self.script_content = content
    else:
        print("Error: %s is not executable."%py)
        sys.exit(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_disttrain_scipts(self):\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n \n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s -m torch.distributed.launch \"%(py)\n content += \"--nproc_per_node=%s \"%(self.setting['train_num_gpu'])\n content += \"--master_port %s \"%(self.dist_train_port)\n content += \"%s %s --launcher pytorch \"%(train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n # return content\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def script_generator(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('evaluate_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n \n content += \"%s %s %s --work_dir %s --validate %s &> train.log \\n\"%(py, \n train_py,\n self.setting['config_file'],\n self.run_dir,\n ex_options)\n content += \"touch evaluate.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def generate_singletrain_scipts(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n\n if not os.access(py, os.X_OK):\n py = \"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s %s \"%(py, train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def generateParallelScript(hub, user_name, server_list):\n all_tasks = []\n slot_names = hub['SlotIO'].keys()\n\n for slot_name in slot_names:\n vivado = f'VIV_VER={args.vivado_version} vivado -mode batch -source {slot_name}_synth.tcl'\n \n # broadcast the results\n transfer = []\n for server in server_list:\n transfer.append(f'rsync_with_retry.sh --target-server {server} --user-name {user_name} --dir-to-sync {synth_dir}/{slot_name}/')\n transfer_str = \" && \".join(transfer)\n\n command = f'cd {synth_dir}/{slot_name} && {vivado} && {transfer_str}'\n all_tasks.append(command)\n\n num_job_server = math.ceil(len(all_tasks) / len(server_list) ) \n for i, server in enumerate(server_list):\n local_tasks = all_tasks[i * num_job_server: (i+1) * num_job_server]\n open(f'{synth_dir}/parallel_slot_synth_{server}.txt', 'w').write('\\n'.join(local_tasks))", "def gen_script(model: onnx.ModelProto, 
output_file: str = None) -> str:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(current_dir + '/templates/'))\n model_header_render = gen_model_header(env, model)\n imports, main_function, sub_functions = gen_graph_functions(env, model.graph)\n\n wdir = \"\"\n if len(imports) > 0:\n # need to set wdir to enable imports\n wdir = util.resolve_systemds_root() + \"/scripts\"\n\n main_template = env.get_template(\"main.dml.jinja\")\n result_render = main_template.render(\n title=\"This file was generated by onnx-systemds\",\n model_header_render=model_header_render,\n wdir=wdir,\n imports=imports,\n main_function=main_function,\n sub_functions=sub_functions\n )\n if output_file:\n directory = os.path.dirname(output_file)\n if len(directory) > 0:\n os.makedirs(directory, exist_ok=True)\n with open(output_file, 'w') as f:\n f.write(result_render)\n\n return result_render", "def generate(train_data_path, trained_model_path, num_output_files):\n # load the notes used to train the model\n\n train_data = data_preprocess.load_from_pickle(train_data_path)\n training_notes = train_data[\"data\"]\n note_translator = train_data[\"note_translator\"]\n\n net = networks.TransformerNet.load_checkpoint(trained_model_path)\n\n for i in range(num_output_files):\n prediction_output = generate_notes(net, training_notes, note_translator)\n create_midi(prediction_output, file_suffix=i)", "def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()", "def build_job_scripts(model_list, scenario_list, output_dir, cassandra_config_dir, cassandra_log_dir,\n cassandra_main_script, sbatch_account, sbatch_partition='slurm', sbatch_walltime='01:00:00',\n sbatch_ntasks=3, sbatch_nodes=3, sbatch_jobname='cassie', sbatch_logdir='.', template=None):\n\n # use default configuration template file if user does not give one\n if template is None:\n template = pkg_resources.resource_filename('cassie', 'data/sbatch_template.sh')\n\n # existing tags to replace in the template file\n model_tag = '<model>'\n scenario_tag = '<scenario>'\n account_tag = '<account>'\n partition_tag = '<partition>'\n ntasks_tag = '<ntasks>'\n nodes_tag = '<nodes>'\n time_tag = '<walltime>'\n jobname_tag = '<jobname>'\n logdir_tag = '<logdir>'\n cassandra_configdir_tag = '<cassconfigdir>'\n cassandra_logdir_tag = '<casslogdir>'\n cassandra_script_tag = '<cassmainscript>'\n\n for model in model_list:\n for scenario in scenario_list:\n\n output_file = os.path.join(output_dir, f'run_{model.lower()}_{scenario}.sh')\n\n with open(output_file, 'w') as out:\n with open(template) as get:\n\n f = get.read()\n\n # replace tag names with dynamic content\n fx = f.replace(model_tag, model)\n fx = fx.replace(scenario_tag, scenario)\n\n fx = fx.replace(account_tag, sbatch_account)\n fx = fx.replace(partition_tag, sbatch_partition)\n fx = fx.replace(ntasks_tag, str(sbatch_ntasks))\n fx = fx.replace(nodes_tag, str(sbatch_nodes))\n fx = fx.replace(time_tag, sbatch_walltime)\n fx = fx.replace(jobname_tag, sbatch_jobname)\n fx = 
fx.replace(logdir_tag, sbatch_logdir)\n\n fx = fx.replace(cassandra_configdir_tag, cassandra_config_dir)\n fx = fx.replace(cassandra_logdir_tag, cassandra_log_dir)\n fx = fx.replace(cassandra_script_tag, cassandra_main_script)\n\n out.write(fx)", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def main(args, base_dir):\n for i in range(args.n_training):\n # value of the next seed\n seed = args.seed + i\n\n # The time when the current experiment started.\n now = strftime(\"%Y-%m-%d-%H:%M:%S\")\n\n # Create a save directory folder (if it doesn't exist).\n if args.log_dir is not None:\n dir_name = args.log_dir\n else:\n dir_name = os.path.join(base_dir, '{}/{}'.format(\n args.env_name, now))\n ensure_dir(dir_name)\n\n # Get the policy class.\n if args.alg == \"TD3\":\n from hbaselines.multiagent.td3 import MultiFeedForwardPolicy\n elif args.alg == \"SAC\":\n from hbaselines.multiagent.sac import MultiFeedForwardPolicy\n elif args.alg == \"PPO\":\n from hbaselines.multiagent.ppo import MultiFeedForwardPolicy\n elif args.alg == \"TRPO\":\n from hbaselines.multiagent.trpo import MultiFeedForwardPolicy\n else:\n raise ValueError(\"Unknown algorithm: {}\".format(args.alg))\n\n # Get the hyperparameters.\n hp = get_hyperparameters(args, MultiFeedForwardPolicy)\n\n # add the seed for logging purposes\n params_with_extra = hp.copy()\n params_with_extra['seed'] = seed\n params_with_extra['env_name'] = args.env_name\n params_with_extra['policy_name'] = \"MultiFeedForwardPolicy\"\n params_with_extra['algorithm'] = args.alg\n params_with_extra['date/time'] = now\n\n # Add the hyperparameters to the folder.\n with open(os.path.join(dir_name, 'hyperparameters.json'), 'w') as f:\n json.dump(params_with_extra, f, sort_keys=True, indent=4)\n\n run_exp(\n env=args.env_name,\n policy=MultiFeedForwardPolicy,\n hp=hp,\n dir_name=dir_name,\n evaluate=args.evaluate,\n seed=seed,\n eval_interval=args.eval_interval,\n log_interval=args.log_interval,\n save_interval=args.save_interval,\n initial_exploration_steps=args.initial_exploration_steps,\n ckpt_path=args.ckpt_path,\n )", "def generate_files_from_network(id):\n\tfolder_prefix = \"results/\"+id+\"/\"\n\tnetwork_prefix = \"results/\"+id+\"_\"\n\tg = open(network_prefix+'network.json', 'r')\n\tdata = json.load(g)\n\tnames = []\n\tfor node in data:\n\t\tmy_name = data[node]['my_name']\n\t\tnames.append(my_name)\n\t\ttargets = data[node]['target']\n\t\tn_receive = data[node]['receivers']\n\n\t\t#generate_python_file_from_node(folder_prefix, my_name, targets, n_receive)\n\n\tg.close()\n\n\n\n\twith open(folder_prefix+'run.sh', 'w') as f:\n\t\tfor name in names:\n\t\t\tif name!=names[-1]:\n\t\t\t\tf.write('python ../../run_node.py '+name+' '+id+' &\\n')\n\t\t\telse:\n\t\t\t\tf.write('python ../../run_node.py '+name+' '+id+' \\n')\n\n\n\twith open(folder_prefix+'start.sh', 'w') as f:\n\t\tf.write('simulaqron reset\\nsimulaqron set backend qutip\\nsimulaqron start --nodes ')\n\t\tfor name in names:\n\t\t\tif name!=names[-1]:\n\t\t\t\tf.write(name+',')\n\t\t\telse:\n\t\t\t\tf.write(name)\n\treturn", "def task_generate_virtual_samples():\n 
metadata_files = Path(__file__).parent.glob('*_meta.yaml')\n data_files = Path(__file__).parent.glob('*_data.yaml')\n\n script = Path(__file__).parents[0] / \"virtual_experiment.py\"\n\n return {\n \"actions\": [f\"{PYTHON_EXE} {script}\"],\n \"file_dep\": [script, *metadata_files],\n \"verbosity\": 2, # show stdout\n \"targets\": [*data_files],\n \"setup\": [\"generate_virtual_metadata\"],\n \"clean\": [clean_targets]\n }", "def main():\n\t# import training data\n\tfiles = [INPATH + f for f in os.listdir(INPATH) if \".json\" in f]\n\n\t# import books\n\tprint(\"Loading training data...\")\n\tbookList = loadBooks(files)\n\tprint(\"Load complete.\")\n\n\t# loop through element types and store data structure\n\tfor key, value in ELEMENTS.items():\n\t\tprint(\"Generating: %s\" % key)\n\n\t\t# set file outpath\n\t\toutfile = \"%s.json\" % key\n\t\toutpath = OUTPATH % outfile\n\n\t\tgenerateTrain(bookList, key, value, outpath)", "def train_distributed():\n # Distributed stuff learnt from this repo: https://github.com/GoogleCloudPlatform/cloudml-dist-\n # mnist-example/blob/master/trainer/task.py\n\n # For Distributed TensorFlow\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_info = env.get('cluster')\n cluster_spec = tf.train.ClusterSpec(cluster_info)\n task_info = env.get('task')\n job_name, task_index = task_info['type'], task_info['index']\n\n device_fn = tf.train.replica_device_setter(\n cluster=cluster_spec,\n worker_device='/job:%s/task:%d' % (job_name, task_index))\n\n print(\"Start job:%s, index:%d\" % (job_name, task_index))\n\n server = tf.train.Server(cluster_spec,\n job_name=job_name, task_index=task_index)\n\n # Start a parameter server node\n if job_name == 'ps':\n server.join()\n\n # Start a master/worker node\n if job_name == 'master' or job_name == 'worker':\n is_chief = (job_name == 'master')\n\n with tf.Graph().as_default() as graph: # TODO necessary?\n with tf.device(device_fn):\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create the model\n print(\"(%s,%d) Creating %d layers of %d units.\" %\n (job_name, task_index, FLAGS.num_layers, FLAGS.size))\n model = create_model(False)\n\n # Create train_dir\n if is_chief:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n\n # TensorBoard summaries\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n # Create supervisor\n init_op = tf.global_variables_initializer()\n\n # Create Supervisor. 
Disabling checkpoints and summaries, because we do that manually\n sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op,\n init_fn=lambda session: after_init(session, model, embeddings_file),\n saver=model.saver, global_step=model.global_step,\n save_model_secs=0, save_summaries_secs=0, summary_op=None,\n summary_writer=None)\n\n with sv.managed_session(server.target) as sess:\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders,\n is_chief, job_name, task_index, sv.should_stop)\n sv.stop()", "def gen_cluster_script(\n crop,\n scheduler,\n batch_ids=None,\n *,\n mode=\"array\",\n num_procs=None,\n num_threads=None,\n num_nodes=None,\n num_workers=None,\n mem=None,\n mem_per_cpu=None,\n gigabytes=None,\n time=None,\n hours=None,\n minutes=None,\n seconds=None,\n conda_env=True,\n launcher=\"python\",\n setup=\"#\",\n shell_setup=\"\",\n mpi=False,\n temp_gigabytes=1,\n output_directory=None,\n debugging=False,\n **kwargs,\n):\n\n scheduler = scheduler.lower() # be case-insensitive for scheduler\n\n if scheduler not in (\"sge\", \"pbs\", \"slurm\"):\n raise ValueError(\"scheduler must be one of 'sge', 'pbs', or 'slurm'.\")\n\n if mode not in (\"array\", \"single\"):\n raise ValueError(\"mode must be one of 'array' or 'single'.\")\n\n # parse the number of threads\n if num_threads is None:\n if num_workers is None:\n # default to 1 thread per core for no workers\n num_threads = num_procs\n else:\n # default to 1 thread per worker\n num_threads = round(num_procs / num_workers)\n\n # parse the time requirement\n if hours is minutes is seconds is None:\n if time is not None:\n if isinstance(time, (int, float)):\n hours = time\n minutes, seconds = 0, 0\n elif isinstance(time, str):\n hours, minutes, seconds = time.split(\":\")\n else:\n hours, minutes, seconds = 1, 0, 0\n else:\n if time is not None:\n raise ValueError(\n \"Cannot specify both time and hours, minutes, seconds.\"\n )\n hours = 0 if hours is None else int(hours)\n minutes = 0 if minutes is None else int(minutes)\n seconds = 0 if seconds is None else int(seconds)\n\n if scheduler == \"slurm\":\n # only supply specified header options\n # TODO: same with PBS and SGE\n\n if num_nodes is not None:\n kwargs[\"nodes\"] = num_nodes\n if num_procs is not None:\n kwargs[\"cpus-per-task\"] = num_procs\n\n if gigabytes is not None:\n if mem is not None:\n raise ValueError(\"Cannot specify both gigabytes and mem.\")\n mem = gigabytes\n\n if mem is not None:\n if isinstance(mem, int):\n mem = f\"{mem}G\"\n kwargs[\"mem\"] = mem\n\n if mem_per_cpu is not None:\n if isinstance(mem_per_cpu, int):\n mem_per_cpu = f\"{mem_per_cpu}G\"\n kwargs[\"mem-per-cpu\"] = mem_per_cpu\n\n else:\n # pbs, sge\n # parse memory to gigabytes\n if (gigabytes is not None) and (mem is not None):\n raise ValueError(\"Cannot specify both gigabytes and mem.\")\n\n if mem is not None:\n # take gigabytes from mem\n gigabytes = int(mem)\n\n if output_directory is None:\n from os.path import expanduser\n\n home = expanduser(\"~\")\n output_directory = os.path.join(home, \"Scratch\", \"output\")\n\n if conda_env is True:\n # automatically set conda environment to be the\n # same as the one that's running this function\n conda_env = os.environ.get(\"CONDA_DEFAULT_ENV\", False)\n if conda_env:\n # but only if we are in a conda environment\n if (\"conda activate\" in shell_setup) or (\n \"mamba activate\" in shell_setup\n ):\n # and user is not 
already explicitly activating\n conda_env = False\n\n if isinstance(conda_env, str):\n # should now be a string\n shell_setup += f\"\\nconda activate {conda_env}\"\n elif conda_env is not False:\n raise ValueError(\n \"conda_env must be either ``False``, \"\n f\"``True`` or a string, not {conda_env}\"\n )\n\n crop.calc_progress()\n\n if kwargs:\n if scheduler == \"slurm\":\n header_options = \"\\n\".join([\n f\"#SBATCH --{k}\"\n if (v is None or v is True) else\n f\"#SBATCH --{k}={v}\"\n for k, v in kwargs.items()\n ])\n elif scheduler == \"pbs\":\n header_options = \"\\n\".join([\n f\"#PBS -l {k}\"\n if (v is None or v is True) else\n f\"#PBS -l {k}={v}\"\n for k, v in kwargs.items()\n ])\n elif scheduler == \"sge\":\n header_options = \"\\n\".join([\n f\"#$ -l {k}\"\n if (v is None or v is True) else\n f\"#$ -l {k}={v}\"\n for k, v in kwargs.items()\n ])\n else:\n header_options = \"\"\n\n if num_threads is None:\n if mpi:\n # assume single thread per rank\n num_threads = 1\n else:\n if num_workers is None:\n # assume all multithreading over all cores\n num_threads = num_procs\n else:\n # assume each worker has equal number of threads\n num_threads = max(1, num_procs // num_workers)\n\n if num_workers is not None:\n if num_workers * num_threads != num_procs:\n warnings.warn(\n f\"num_workers * num_threads ({num_workers} * {num_threads}) \"\n f\"!= num_procs ({num_procs}), may not be computationally \"\n \"efficient.\"\n )\n\n # get absolute path\n full_parent_dir = str(pathlib.Path(crop.parent_dir).expanduser().resolve())\n\n opts = {\n \"hours\": hours,\n \"minutes\": minutes,\n \"seconds\": seconds,\n \"gigabytes\": gigabytes,\n \"name\": crop.name,\n \"parent_dir\": full_parent_dir,\n \"num_procs\": num_procs,\n \"num_threads\": num_threads,\n \"num_nodes\": num_nodes,\n \"num_workers\": num_workers,\n \"launcher\": launcher,\n \"setup\": setup,\n \"shell_setup\": shell_setup,\n \"pe\": \"mpi\" if mpi else \"smp\",\n \"temp_gigabytes\": temp_gigabytes,\n \"output_directory\": output_directory,\n \"working_directory\": full_parent_dir,\n \"header_options\": header_options,\n \"debugging\": debugging,\n }\n\n if batch_ids is not None:\n # grow specific ids\n opts[\"batch_ids\"] = tuple(batch_ids)\n array_mode = \"partial\"\n elif crop.num_results == 0:\n # grow all ids\n opts[\"batch_ids\"] = range(1, crop.num_batches + 1)\n array_mode = \"all\"\n else:\n # find missing ids and grow them\n opts[\"batch_ids\"] = crop.missing_results()\n array_mode = \"partial\"\n\n # build the script!\n\n if scheduler == \"sge\":\n script = _SGE_HEADER\n if mode == \"array\":\n script += _SGE_ARRAY_HEADER\n elif scheduler == \"pbs\":\n script = _PBS_HEADER\n if mode == \"array\":\n script += _PBS_ARRAY_HEADER\n elif scheduler == \"slurm\":\n script = _SLURM_HEADER\n if mode == \"array\":\n script += _SLURM_ARRAY_HEADER\n\n script += _BASE\n\n if mode == \"array\":\n opts[\"run_start\"] = 1\n\n if array_mode == \"all\":\n opts[\"run_stop\"] = crop.num_batches\n if scheduler == \"sge\":\n script += _CLUSTER_SGE_GROW_ALL_SCRIPT\n elif scheduler == \"pbs\":\n script += _CLUSTER_PBS_GROW_ALL_SCRIPT\n elif scheduler == \"slurm\":\n script += _CLUSTER_SLURM_GROW_ALL_SCRIPT\n\n elif array_mode == \"partial\":\n opts[\"run_stop\"] = len(opts[\"batch_ids\"])\n if scheduler == \"sge\":\n script += _CLUSTER_SGE_GROW_PARTIAL_SCRIPT\n elif scheduler == \"pbs\":\n script += _CLUSTER_PBS_GROW_PARTIAL_SCRIPT\n elif scheduler == \"slurm\":\n script += _CLUSTER_SLURM_GROW_PARTIAL_SCRIPT\n\n elif mode == \"single\":\n if 
batch_ids is None:\n # grow all missing, but compute the list dynamically\n # this allows the job to be restarted\n opts[\"batch_ids\"] = \"crop.missing_results()\"\n script += _BASE_CLUSTER_GROW_SINGLE\n\n script += _BASE_CLUSTER_SCRIPT_END\n script = script.format(**opts)\n\n if (scheduler == \"pbs\") and len(opts[\"batch_ids\"]) == 1:\n # PBS can't handle arrays jobs of size 1...\n script = script.replace(\"#PBS -J 1-1\\n\", \"\").replace(\n \"$PBS_ARRAY_INDEX\", \"1\"\n )\n\n return script", "def write_mgen_scripts(tgen_names, traffic_profile, num_bots, bot_msg_size, bot_msg_rate,\n num_comps, comp_msg_size, comp_msg_rate, duration):\n\n mgen_scripts_by_tgen_node = {}\n bot_flows = {}\n bot_listen_ports = []\n comp_flows = {}\n comp_listen_ports = []\n\n if traffic_profile == 'leaky-udp-bucket':\n\n bot_flows, comp_flows = generate_simple_flows(tgen_names=tgen_names,\n num_bots=num_bots,\n bot_msg_size=bot_msg_size,\n bot_msg_rate=bot_msg_rate,\n num_comps=num_comps,\n comp_msg_size=comp_msg_size,\n comp_msg_rate=comp_msg_rate)\n\n bot_listen_ports = [TGEN_PORT_BASE + i for i in range(num_bots)]\n comp_listen_ports = [TGEN_PORT_BASE + i for i in range(num_bots, num_bots+num_comps)]\n\n # generate mgen scripts for all bots and store mapping between traffic generator name and file\n # path\n for bot_num, flow_dict in bot_flows.items():\n\n file_name = \"{}{}\".format(flow_dict[\"tgen_name\"], \"_traffic.mgn\")\n write_mgen_script(file_name=file_name,\n traffic_profile=traffic_profile,\n flows=flow_dict[\"flows\"],\n listen_ports=bot_listen_ports,\n duration=duration)\n\n # store which script goes to what tgen node\n mgen_scripts_by_tgen_node[flow_dict[\"tgen_name\"]] = file_name\n\n # generate mgen scripts for all competitor nodes and store mapping between traffic generator\n # name and file path\n for comp_num, flow_dict in comp_flows.items():\n\n file_name = \"{}{}\".format(flow_dict[\"tgen_name\"], \"_traffic.mgn\")\n write_mgen_script(file_name=file_name,\n traffic_profile=traffic_profile,\n flows=flow_dict[\"flows\"],\n listen_ports=comp_listen_ports,\n duration=duration)\n\n # store which script goes to what tgen node\n mgen_scripts_by_tgen_node[flow_dict[\"tgen_name\"]] = file_name\n\n return mgen_scripts_by_tgen_node", "def main():\n parser = make_argument_parser()\n args = parser.parse_args()\n\n input_dirs = args.inputdirs\n tf = args.factor\n valid_chroms = args.validchroms\n valid_input_dirs = args.validinputdirs\n test_chroms = args.testchroms\n epochs = args.epochs\n patience = args.patience\n learningrate = args.learningrate\n seed = args.seed\n utils.set_seed(seed)\n dropout_rate = args.dropout\n L = args.seqlen\n w = args.motifwidth\n utils.L = L\n utils.w = w\n utils.w2 = w/2\n negatives = args.negatives\n assert negatives > 0\n meta = args.meta\n gencode = args.gencode\n motif = args.motif\n\n num_motifs = args.kernels\n num_recurrent = args.recurrent\n num_dense = args.dense\n \n features = ['bigwig'] \n\n if tf:\n print 'Single-task training:', tf\n singleTask = True\n if meta:\n print 'Including metadata features'\n features.append('meta')\n if gencode:\n print 'Including genome annotations'\n features.append('gencode')\n else:\n print 'Multi-task training'\n singleTask = False\n #Cannot use any metadata features\n assert not meta\n assert not gencode\n\n if args.outputdir is None:\n clobber = True\n output_dir = args.outputdirc\n else:\n clobber = False\n output_dir = args.outputdir\n\n try: # adapted from dreme.py by T. 
Bailey\n os.makedirs(output_dir)\n except OSError as exc:\n if exc.errno == errno.EEXIST:\n if not clobber:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'but you specified not to clobber it') % output_dir\n sys.exit(1)\n else:\n print >> sys.stderr, ('output directory (%s) already exists '\n 'so it will be clobbered') % output_dir\n\n print 'Loading genome'\n genome = utils.load_genome()\n if valid_input_dirs:\n print 'You specified at least one validation input directory'\n assert singleTask # This option only works for single-task training\n print 'Loading ChIP labels'\n if singleTask:\n chip_bed_list, nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(input_dirs, tf)\n if valid_input_dirs:\n valid_chip_bed_list, valid_nonnegative_regions_bed_list = \\\n utils.load_chip_singleTask(valid_input_dirs, tf)\n num_tfs = 1\n else:\n assert len(input_dirs) == 1 # multi-task training only supports one cell line\n input_dir = input_dirs[0]\n tfs, positive_windows, y_positive, nonnegative_regions_bed = \\\n utils.load_chip_multiTask(input_dir)\n num_tfs = len(tfs)\n print 'Loading bigWig data'\n bigwig_names, bigwig_files_list = utils.load_bigwigs(input_dirs)\n num_bigwigs = len(bigwig_names)\n if valid_input_dirs:\n valid_bigwig_names, valid_bigwig_files_list = utils.load_bigwigs(valid_input_dirs)\n assert valid_bigwig_names == bigwig_names\n if not singleTask:\n bigwig_files = bigwig_files_list[0]\n if meta:\n print 'Loading metadata features'\n meta_names, meta_list = utils.load_meta(input_dirs)\n if valid_input_dirs:\n valid_meta_names, valid_meta_list = utils.load_load(valid_input_dirs)\n assert valid_meta_names == meta_names\n else:# meta option was not selected, pass empty metadata features to the functions\n meta_list = [[] for bigwig_files in bigwig_files_list]\n if valid_input_dirs:\n valid_meta_list = [[] for bigwig_files in valid_bigwig_files_list]\n \n print 'Making features'\n if singleTask:\n if not valid_input_dirs: #validation directories not used, must pass placeholder values\n valid_chip_bed_list = None\n valid_nonnegative_regions_bed_list = None\n valid_bigwig_files_list = None\n valid_meta_list = None \n datagen_train, datagen_valid = \\\n utils.make_features_singleTask(chip_bed_list,\n nonnegative_regions_bed_list, bigwig_files_list, bigwig_names,\n meta_list, gencode, genome, epochs, negatives, valid_chroms, test_chroms, \n valid_chip_bed_list, valid_nonnegative_regions_bed_list, \n valid_bigwig_files_list, valid_meta_list)\n else:\n datagen_train, datagen_valid = \\\n utils.make_features_multiTask(positive_windows, y_positive,\n nonnegative_regions_bed, bigwig_files, bigwig_names,\n genome, epochs, valid_chroms, test_chroms)\n print 'Building model'\n if num_recurrent == 0:\n print 'You specified 0 LSTM units. Omitting BLSTM layer'\n if num_recurrent < 0:\n print 'You specified less than 0 LSTM units. 
Replacing BLSTM layer with global max-pooling layer'\n if meta or gencode:\n num_meta = 0\n if meta:\n num_meta = len(meta_names)\n if gencode:\n num_meta += 6\n model = utils.make_meta_model(num_tfs, num_bigwigs, num_meta, num_motifs, num_recurrent, num_dense, dropout_rate)\n else:\n model = utils.make_model(num_tfs, num_bigwigs, num_motifs, num_recurrent, num_dense, dropout_rate)\n\n if motif:\n assert singleTask # This option only works with single-task training\n motifs_db = utils.load_motif_db('resources/HOCOMOCOv9.meme')\n if tf in motifs_db:\n print 'Injecting canonical motif'\n pwm = motifs_db[tf]\n pwm += 0.001\n pwm = pwm / pwm.sum(axis=1)[:, np.newaxis]\n pwm = np.log2(pwm/0.25)\n utils.inject_pwm(model, pwm)\n output_tf_file = open(output_dir + '/chip.txt', 'w')\n if singleTask:\n output_tf_file.write(\"%s\\n\" % tf)\n else:\n for tf in tfs:\n output_tf_file.write(\"%s\\n\" % tf)\n output_tf_file.close()\n output_feature_file = open(output_dir + '/feature.txt', 'w')\n for feature in features:\n output_feature_file.write(\"%s\\n\" % feature)\n output_feature_file.close()\n output_bw_file = open(output_dir + '/bigwig.txt', 'w')\n for bw in bigwig_names:\n output_bw_file.write(\"%s\\n\" % bw)\n output_bw_file.close()\n if meta:\n output_meta_file = open(output_dir + '/meta.txt', 'w')\n for meta_name in meta_names:\n output_meta_file.write(\"%s\\n\" % meta_name)\n output_meta_file.close()\n model_json = model.to_json()\n output_json_file = open(output_dir + '/model.json', 'w')\n output_json_file.write(model_json)\n output_json_file.close()\n train(datagen_train, datagen_valid, model, epochs, patience, learningrate, output_dir)", "def _UpdateScripts(benchmark_spec, vm):\n benchmark = benchmark_spec.benchmark\n vm = vm or benchmark_spec.vms[0]\n\n config_sed = []\n config_sed += [(r'DGXSYSTEM=.*', fr'DGXSYSTEM=\\\"{DGXSYSTEM}\\\"')]\n gpus_per_node = nvidia_driver.QueryNumberOfGpus(vm)\n config_sed.append((\n r'DGXNGPU=.*', fr'DGXNGPU={gpus_per_node}\\n'\n fr'export CUDA_VISIBLE_DEVICES={\",\".join([str(gpu_number) for gpu_number in range(gpus_per_node)])}'\n ))\n config_sed += [(r'DGXNSOCKET=.*',\n fr'DGXNSOCKET={vm.CheckLsCpu().socket_count}')]\n config_sed += [(r'DGXSOCKETCORES=.*',\n fr'DGXSOCKETCORES={vm.CheckLsCpu().cores_per_socket}')]\n\n model = 'maskrcnn' if MASK in benchmark else benchmark\n framework = 'mxnet' if RESNET in benchmark else 'pytorch'\n script_path = (\n fr'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks/{model}/'\n fr'implementations/{framework}')\n\n config_files = [CONFIG]\n\n if MASK in benchmark:\n config_sed = _GetChangesForMask(config_sed)\n config_files = ['config_DGXA100.sh']\n\n elif RESNET in benchmark:\n config_sed = _GetChangesForResnet(config_sed)\n config_files = ['config_DGXA100_common.sh', 'config_DGXA100.sh']\n UpdateScriptForSmallGpuMem(vm)\n\n elif BERT in benchmark:\n config_sed = _GetChangesForBert(config_sed)\n config_files = ['config_DGXA100_common.sh', 'config_DGXA100_1x8x56x1.sh']\n\n vm.RemoteCommand(\n f'cd {script_path} && '\n f'sed \"{SedPairsToString(config_sed)}\" '\n f'{\" \".join(config_files)} > {CONFIG} && '\n f'chmod 755 {CONFIG} && '\n f'sed -i \"2 i source {CONFIG}\" run_and_time.sh && '\n f'sed -i \"2 i source {CONFIG}\" run_with_docker.sh')", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def generate():", "def test_generate_all_training(self):\n facade = 
ChatetteFacade.get_or_create()\n\n input_dir_path = \"tests/system-testing/inputs/generate-all/\"\n input_filenames = [\n \"simplest.chatette\", \"only-words.chatette\",\n \"words-and-groups.chatette\", \"alias.chatette\", \"include.chatette\",\n \"slot.chatette\", \"slotrolegroup.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n if not TestSystem.check_no_duplicates(facade.train_examples):\n pytest.fail(\n \"Some examples were generated several times \" +\n \"when dealing with file '\" + filename + \"'.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n if len(legal_examples) != len(facade.train_examples):\n training_texts = [ex.text for ex in facade.train_examples]\n for legal_ex in legal_examples:\n if legal_ex[\"text\"] not in training_texts:\n pytest.fail(\n \"Example '\" + legal_ex[\"text\"] + \\\n \"' was not generated.\"\n )\n pytest.fail(\n \"An unknown example was not generated (\" + \\\n str(len(facade.train_examples)) + \\\n \" generated instead of \" + str(len(legal_examples)) + \\\n \").\\nGenerated: \" + str(facade.train_examples)\n )\n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )", "def main(politician, epochs):\n train_path = f\"..\\\\data\\\\{politician}\\\\training_data.txt\"\n val_path = f\"..\\\\data\\\\{politician}\\\\validation_data.txt\"\n\n tokenizer = AutoTokenizer.from_pretrained(\"anonymous-german-nlp/german-gpt2\")\n\n special_tokens_dict = {\n 'bos_token': '<BOS>',\n 'eos_token': '<EOS>',\n 'pad_token': '<PAD>',\n 'additional_special_tokens': ['<EOQ>']\n }\n tokenizer.add_special_tokens(special_tokens_dict)\n\n train_dataset, test_dataset, data_collator = load_dataset(train_path, val_path, tokenizer)\n\n model = AutoModelWithLMHead.from_pretrained(\"anonymous-german-nlp/german-gpt2\")\n model.resize_token_embeddings(len(tokenizer))\n\n training_args = TrainingArguments(\n output_dir=f\".\\\\output-models\\\\gpt2-{politician}-{epochs}\", # output directory\n overwrite_output_dir=True, # overwrite the content of the output directory\n num_train_epochs=epochs, # number of training epochs\n per_device_train_batch_size=32, # batch size for training\n per_device_eval_batch_size=64, # batch size for evaluation\n eval_steps=400, # Number of update steps between two evaluations.\n save_steps=800, # after # steps model is saved\n warmup_steps=500, # number of warmup steps for learning rate scheduler\n )\n\n trainer = Trainer(\n model=model,\n args=training_args,\n data_collator=data_collator,\n train_dataset=train_dataset,\n eval_dataset=test_dataset,\n prediction_loss_only=True,\n )\n\n trainer.train()\n trainer.save_model()", "def main(args):\n\n shared_policy_net = build_policy_net(args).share_memory()\n shared_value_net = build_value_net(args).share_memory()\n\n start_training_processes(\n args,\n shared_policy_net,\n 
shared_value_net,\n )", "def main():\n # get config and processing of clauses\n config = Config(load=False)\n\n # Generators\n dev = Dataset(config.filename_dev)\n test = Dataset(config.filename_test)\n train = Dataset(config.filename_train)\n\n # Build tags vocab\n vocab_tags = get_tag_vocab([train, dev, test])\n vocab_tags.add(UNK)\n\n # Save vocab\n write_vocab(vocab_tags, config.filename_tags)\n\n\n # Build and save char vocab\n train = Dataset(config.filename_train)\n vocab_chars = get_char_vocab(train)\n write_vocab(vocab_chars, config.filename_chars)", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def test_generate_nb_training(self):\n facade = ChatetteFacade.get_or_create()\n\n input_dir_path = \\\n \"tests/system-testing/inputs/generate-nb/training-only/\"\n input_filenames = [\n \"only-words.chatette\", \"words-and-groups.chatette\",\n \"alias.chatette\", \"include.chatette\", \"slot.chatette\",\n \"bugfixes/bug-22-slot-position.chatette\"\n ]\n for filename in input_filenames:\n file_path = os.path.join(input_dir_path, filename)\n facade.run(file_path)\n # if not TestSystem.check_no_duplicates(facade.train_examples): # TODO: make sure there are no duplicates in this case\n # pytest.fail(\"Some examples were generated several times \"+\n # \"when dealing with file '\"+filename+\"'.\\n\"+\n # \"Generated: \"+str(facade.train_examples))\n legal_examples = TestSystem.get_legal_examples(file_path)\n for ex in facade.train_examples:\n formatted_ex = {\"intent\": ex.intent_name, \"text\": ex.text}\n if formatted_ex not in legal_examples:\n pytest.fail(\n str(formatted_ex) + \" is not a legal example for '\" + \\\n file_path + \"'\"\n )\n \n legal_syn = TestSystem.get_legal_synonyms(file_path)\n if legal_syn is not None:\n synonyms = AST.get_or_create().get_entities_synonyms()\n for key in synonyms:\n if key not in legal_syn:\n pytest.fail(\n \"'\" + key + \"' shouldn't have any synonyms.\"\n )\n for syn in synonyms[key]:\n if syn not in legal_syn[key]:\n pytest.fail(\n \"'\" + syn + \"' shouldn't be a synonym of '\" + \\\n key + \"'\"\n )\n\n filename_zero = \"zero-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_zero)\n facade.run(file_path)\n if len(facade.train_examples) != 0:\n pytest.fail(\n \"When dealing with file 'zero-ex.chatette', no examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )\n\n filename_one = \"one-ex.chatette\"\n file_path = os.path.join(input_dir_path, filename_one)\n facade.run(file_path)\n print(\"TRAIN EX: \" + str(facade.train_examples))\n if len(facade.train_examples) != 1:\n pytest.fail(\n \"When dealing with file 'one-ex.chatette', one examples \" + \\\n \"should be generated.\\nGenerated: \" + \\\n str(facade.train_examples)\n )", "def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, 
num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')", "def generate_synthetic_dataset(args):\n logger = logging.getLogger(\"GACM\")\n logger.info('Checking the data files...')\n for data_path in args.train_dirs + args.dev_dirs + args.test_dirs:\n assert os.path.exists(data_path), '{} file does not exist.'.format(data_path)\n assert len(args.test_dirs) > 0, 'No test files are provided.'\n dataset = Dataset(args, train_dirs=args.train_dirs, dev_dirs=args.dev_dirs, test_dirs=args.test_dirs)\n logger.info('Initialize the model...')\n model = Agent(args, len(dataset.qid_query), len(dataset.uid_url), len(dataset.vid_vtype))\n logger.info('model.global_step: {}'.format(model.global_step))\n assert args.load_model > -1\n logger.info('Restoring the model...')\n model.load_model(model_dir=args.load_dir, model_prefix=args.algo, global_step=args.load_model, load_optimizer=False)\n\n synthetic_types = ['deterministic', 'stochastic']\n shuffle_splits = [None, [1, 11], [1, 6, 11]]\n amplifications = [1, 7]\n for synthetic_type in synthetic_types:\n for shuffle_split in shuffle_splits:\n for amplification in amplifications:\n #synthetic_type = 'deterministic'\n #shuffle_split = None\n #amplification = 1\n file_path = os.path.join(args.load_dir, '..', 'synthetic')\n model.generate_synthetic_dataset('test', dataset, file_path, \n 'synthetic_{}_{}_{}.txt'.format(synthetic_type[0].upper(), str(shuffle_split), amplification), \n synthetic_type=synthetic_type, shuffle_split=shuffle_split, amplification=amplification)\n # exit()\n logger.info('Done with click sequence generation.')", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def main():\n args = parse_args()\n args.seed = init_rand(seed=args.seed)\n\n _, log_file_exist = initialize_logging(\n logging_dir_path=args.save_dir,\n logging_file_name=args.logging_file_name,\n script_args=args,\n log_packages=args.log_packages,\n log_pip_packages=args.log_pip_packages)\n\n batch_size = prepare_ke_context(\n 
num_gpus=args.num_gpus,\n batch_size=args.batch_size)\n\n net = prepare_model(\n model_name=args.model,\n use_pretrained=args.use_pretrained,\n pretrained_model_file_path=args.resume.strip())\n num_classes = net.classes if hasattr(net, \"classes\") else 1000\n input_image_size = net.in_size if hasattr(net, \"in_size\") else (args.input_size, args.input_size)\n\n train_data, val_data = get_data_rec(\n rec_train=args.rec_train,\n rec_train_idx=args.rec_train_idx,\n rec_val=args.rec_val,\n rec_val_idx=args.rec_val_idx,\n batch_size=batch_size,\n num_workers=args.num_workers,\n input_image_size=input_image_size,\n resize_inv_factor=args.resize_inv_factor)\n train_gen = get_data_generator(\n data_iterator=train_data,\n num_classes=num_classes)\n val_gen = get_data_generator(\n data_iterator=val_data,\n num_classes=num_classes)\n\n net = prepare_trainer(\n net=net,\n optimizer_name=args.optimizer_name,\n momentum=args.momentum,\n lr=args.lr,\n num_gpus=args.num_gpus,\n state_file_path=args.resume_state)\n\n train_net(\n net=net,\n train_gen=train_gen,\n val_gen=val_gen,\n train_num_examples=1281167,\n val_num_examples=50048,\n num_epochs=args.num_epochs,\n checkpoint_filepath=os.path.join(args.save_dir, \"imagenet_{}.h5\".format(args.model)),\n start_epoch1=args.start_epoch)" ]
[ "0.7527086", "0.724141", "0.7049878", "0.6241078", "0.6189553", "0.6151004", "0.6111944", "0.6034311", "0.60096145", "0.59790266", "0.59271824", "0.58651984", "0.5852421", "0.5842843", "0.58210456", "0.58143574", "0.5811275", "0.58090484", "0.5808338", "0.58027387", "0.5800938", "0.5791731", "0.5762171", "0.5747953", "0.57390904", "0.5735502", "0.5723848", "0.5722253", "0.5713102", "0.57057106" ]
0.7570193
0
Validates and modifies the dictionary. Checks tube number against given radii, max tube lengths, and given q_dof. Checks that the lengths are all divisible by delta_x and raises an error if not.
def config_validation(configuration):
    tube_num = configuration.get('tube_number')
    q_dof = configuration.get('q_dof')
    radius = configuration.get('tube_radius')
    delta_x = configuration.get('delta_x')
    tube_lengths = configuration.get('tube_lengths')

    if isinstance(q_dof, int):
        configuration['q_dof'] = [q_dof] * tube_num
        print(f"Using {q_dof} as q_dof for every tube.\n")
    elif isinstance(q_dof, list) and len(q_dof) == tube_num:
        pass
    else:
        raise ValueError(f"Input for q_dof of {q_dof} is not suitable.\n")

    if isinstance(radius, list) and len(radius) == tube_num:
        inner = [rad - 0.1 for rad in radius]
        configuration['tube_radius'] = {'outer': radius, 'inner': inner}
    elif isinstance(radius, dict) and 'outer' in radius.keys() and len(radius.get('outer')) == tube_num:
        if 'inner' in radius.keys() and len(radius.get('inner')) == tube_num:
            pass
        else:
            radius['inner'] = [rad - 0.1 for rad in radius.get('outer')]
            configuration['tube_radius'] = radius
    else:
        raise ValueError(f"Input for radius of {radius} is not suitable.\n")

    if isinstance(tube_lengths, (int, float)):
        configuration['tube_lengths'] = [tube_lengths] * tube_num
        print(f"Using {tube_lengths} as length for every tube.\n")
    elif isinstance(tube_lengths, list) and len(tube_lengths) == tube_num:
        pass
    else:
        raise ValueError(f"Input for tube_lengths of {tube_lengths} is not suitable.\n")

    new_lengths = configuration.get('tube_lengths')
    for this_length in new_lengths:
        if this_length % delta_x != 0:
            raise ValueError(f"Length input {this_length} not divisible by delta_x: {delta_x}\n")
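A minimal usage sketch, assuming config_validation as defined above is in scope; the key names match the function and the numeric values are illustrative assumptions:

# Illustrative three-tube configuration (hypothetical values).
sample_config = {
    'tube_number': 3,
    'q_dof': 2,                        # an int is expanded to one entry per tube
    'tube_radius': [2.0, 1.5, 1.0],    # a list becomes {'outer': ..., 'inner': outer - 0.1}
    'delta_x': 0.5,
    'tube_lengths': [10.0, 8.0, 6.0],  # each length must be divisible by delta_x
}

config_validation(sample_config)
# sample_config is modified in place:
#   sample_config['q_dof']       -> [2, 2, 2]
#   sample_config['tube_radius'] -> {'outer': [2.0, 1.5, 1.0], 'inner': [1.9, 1.4, 0.9]}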
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self):\n # check for nonsense or missing mandatory parameters\n mdp = self.parameters.get( \"md\", [] )\n fp = self.parameters.get( \"files\", [] )\n ip = self.parameters.get( \"intervals\", [] )\n\n for keyword in (\"temperature\", \"steps\", \"stepsize\"):\n if keyword not in mdp:\n raise QDynInputError(\"Missing parameter '%s'\" % keyword)\n\n # fep file and lambdas require each other\n if (\"fep\" in fp and \"lambdas\" not in self.parameters) or \\\n (\"fep\" not in fp and \"lambdas\" in self.parameters):\n raise QDynInputError(\"Parameter 'fep' requires the 'lambdas' section and vice versa\")\n\n # when generating new velocities, both parms need to be present\n if (\"initial_temperature\" in mdp and \"random_seed\" not in mdp) or \\\n (\"initial_temperature\" not in mdp and \"random_seed\" in mdp):\n raise QDynInputError(\"Parameter 'initial_temperature' requires 'random_seed' and vice versa\")\n\n # if a restart file is not defined, we have to generate new velocities\n if \"restart\" not in fp and \"initial_temperature\" not in mdp:\n raise QDynInputError(\"No restart file, please set 'initial_temperature' and 'random_seed' to generate velocities\")\n\n # since energies are important let's not rely on default values in Q...\n # if an energy file is defined, energy interval must be defined\n # (there is no room for libertarian politics in stupidville)\n if (\"energy\" not in fp and \"energy\" in ip) or \\\n (\"energy\" in fp and \"energy\" not in ip):\n raise QDynInputError(\"'energy' must be defined in both 'intervals' and 'files' sections\")", "def check_qdof(base, q_dof):\n if base == 'linear_helix' or base == 'pure_helix':\n if q_dof != 2:\n raise ValueError(f'{base} should have 2 degrees of freedom, not {q_dof}.')\n elif base == 'quadratic' or base == 'linear':\n if q_dof < 2 or q_dof > 3:\n raise ValueError(f'{base} should have 2 or 3 degrees of freedom, not {q_dof}.')\n elif base == 'constant':\n if q_dof != 1:\n raise ValueError(f'{base} should have 1 degrees of freedom, not {q_dof}.')\n elif base == 'torsion_helix' or base == 'torsion_linear_helix':\n if q_dof != 3:\n raise ValueError(f'{base} should have 3 degrees of freedom, not {q_dof}.')\n elif base == 'full':\n if q_dof < 5 or q_dof > 8:\n raise ValueError(f'{base} should have 5-8 degrees of freedom, not {q_dof}.')\n else:\n print(f'{base} is not a defined strain base.')", "def verifyLengths( options, data ):\n types = [ 'maf', 'maf1e2', 'maf1e3', 'maf1e4',\n 'maf1e5', 'maf1e6', 'maf1e7', 'mafCpl1e2', \n 'mafCpl1e3', 'mafCpl1e4', 'mafCpl1e5', \n 'mafCpl1e6', 'mafCpl1e7', 'mafCtg1e2', \n 'mafCtg1e3', 'mafCtg1e4', 'mafCtg1e5', \n 'mafCtg1e6', 'mafCtg1e7', 'mafSpl1e2', \n 'mafSpl1e3', 'mafSpl1e4', 'mafSpl1e5', \n 'mafSpl1e6', 'mafSpl1e7', 'xAxis',\n 'mafCpEdgeCount', 'mafCpErrorCount', \n 'mafCpScafGapCount', 'blockEdgeCount' ]\n if len( data.chrNames ) != len( data.mafWigDict ): \n sys.stderr.write('the expected length of the data wig '\n 'dictionary is %d (i.e. 
number of chromosomes), but actual is %d\\n' \n % ( len( data.chrNames ), len( data.mafWigDict )))\n sys.exit( 1 )\n for c in data.chrNames:\n if len( types ) + 5 != len( data.mafWigDict[c] ): # extra 5 are from the *Max records\n sys.stderr.write('the expected length of the data wig '\n 'dictionary for %s is %d, but actual is %d\\n' \n % ( c, len( types ) + 5, len( data.mafWigDict[c] )))\n sys.stderr.write( '%s\\n' % str( data.mafWigDict[ c ].keys() ))\n sys.exit( 1 )\n sys.stderr.write('Verify number of records in data structure = %d, OK.\\n' % (len(types) + 4))\n for c in data.chrNames:\n for i in xrange(0, len( types ) - 1):\n if len( data.mafWigDict[c][ types[i] ] ) != len( data.mafWigDict[c][ types[i+1] ]):\n sys.stderr.write('the lengths of all vectors must the '\n 'same for a given chromosome. %s, %s (%d) != %s (%d)\\n' \n % ( c, types[i], len(data.mafWigDict[c][types[i]]), \n types[i+1], len(data.mafWigDict[c][types[i+1]]) ))\n sys.exit( 1 )\n sys.stderr.write('Verify length of records in data structure for chr %s are all %d, OK.\\n' \n % ( c, len(data.mafWigDict[c][ types[0] ])))\n sys.stderr.write('Verify lengths of arrays inside data structure, OK.\\n')", "def validate():\n with open(\"data/external/datasets/qanta.trick-no-edits.json\") as f:\n questions = [q for q in json.load(f)[\"questions\"]]\n with open(\"data/external/datasets/trickme-id-model.json\") as f:\n id_to_model = json.load(f)\n id_to_model = {int(k): v for k, v in id_to_model.items()}\n\n print(\"Lengths should be 946\")\n print(len(questions))\n print(len(id_to_model))\n print(Counter(id_to_model.values()))\n qid_to_page = {q[\"qanta_id\"]: q[\"page\"] for q in questions}\n qids = {q[\"qanta_id\"] for q in questions}\n id_to_model_qids = {k for k in id_to_model.keys()}\n print(len(qids.intersection(id_to_model_qids)))\n df = pd.read_json(\"output/tacl/all_rounds_df.json\")\n # This ID range is used for trickme questions\n df = df[df.qanta_id >= 2_000_000][[\"qanta_id\", \"page\"]]\n experiments_qid_to_page = {t.qanta_id: t.page for t in df.itertuples()}\n print(len(experiments_qid_to_page))\n for qid, page in experiments_qid_to_page.items():\n if qid_to_page[qid] != page:\n raise ValueError(f\"exp: {qid} {page} data: {qid_to_page[qid]}\")", "def is_valid(self):\n\n # Test whether every element in required_keys is in actual_keys\n actual_keys = set(self.fields.keys())\n required_keys = set(self.required_keys)\n has_required_keys = required_keys <= actual_keys\n if not has_required_keys:\n return False\n\n # TODO: Complete the following block. 
\n\n # Assume all is valid at first, then as soon as one invalid\n # is detected, whole thing becomes invalid.\n all_valid = True \n\n # Now iterate over each key-value pair to check\n for key, value in self.fields.items():\n if key == 'byr':\n this_key_valid = len(str(value)) == 4 and (1920 <= value <= 2002)\n all_valid = all_valid and this_key_valid\n if key == 'iyr':\n this_key_valid = len(str(value)) == 4 and (2010 <= value <= 2020)\n all_valid = all_valid and this_key_valid\n if key == 'eyr':\n this_key_valid = len(str(value)) == 4 and (2020 <= value <= 2030)\n all_valid = all_valid and this_key_valid\n if key == 'hgt':\n if len(str(value)) < 4:\n all_valid = False\n else:\n ending = value[-2:]\n num = int(value[:-2])\n this_key_valid = (ending == 'in' and (59 <= num <= 76)) or (ending == 'cm' and (150 <= num <= 193))\n all_valid = all_valid and this_key_valid\n if key == 'hcl':\n re_str = '#[0-9a-f]{6}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 7\n all_valid = all_valid and this_key_valid\n if key == 'ecl':\n this_key_valid = value in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n all_valid = all_valid and this_key_valid\n if key == 'pid':\n re_str = '[0-9]{9}'\n this_key_valid = re.search(re_str, str(value)) is not None and len(str(value)) == 9\n all_valid = all_valid and this_key_valid\n if key == 'cid':\n this_key_valid = True\n all_valid = all_valid and this_key_valid\n\n # If all fields are valid, return True\n return all_valid", "def Validate(self):\n \n hklmin = self.hklmin_txtCtrl.GetValue()\n hklmax = self.hklmax_txtCtrl.GetValue()\n hklsteps = self.hkl_steps_ctrl.GetValue()\n \n wmin = self.wmin_txtCtrl.GetValue()\n wmax = self.wmax_txtCtrl.GetValue()\n wsteps = self.w_steps_ctrl.GetValue()\n \n kx = self.kx_txtCtrl.GetValue()\n ky = self.ky_txtCtrl.GetValue()\n kz = self.kz_txtCtrl.GetValue()\n \n zmin = self.zmin_ctrl.GetValue()\n zmax = self.zmax_ctrl.GetValue()\n colorbar_bool = self.color_bar_box.GetValue()\n \n temp = self.temp_ctrl.GetValue()\n sphavg_bool = self.spherical_avg_box.GetValue()\n \n bgColor = \"pink\"\n failed = False\n \n #Validate hkl values\n num_hklmin = None\n num_hklmax = None\n try:\n num_hklmin = float(hklmin)*np.pi\n self.hklmin_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.hklmin_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_hklmax = float(hklmax)*np.pi\n self.hklmax_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.hklmax_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n \n #Validate w values\n num_wmin = None\n num_wmax = None\n try:\n num_wmin = float(wmin)\n self.wmin_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.wmin_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_wmax = float(wmax)\n self.wmax_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.wmax_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n \n #Validate kx,ky,kz,temp,zmin,zmax values\n num_kx = None\n num_ky = None\n num_kz = None\n num_temp = None\n num_zmin = None\n num_zmax = None\n try:\n num_kx = float(kx)\n self.kx_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.kx_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_ky = float(ky)\n self.ky_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.ky_txtCtrl.SetBackgroundColour(bgColor)\n failed = True \n try:\n num_kz = float(kz)\n self.kz_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.kz_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_temp = 
float(temp)\n self.temp_ctrl.SetBackgroundColour(\"white\")\n except:\n self.temp_ctrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_zmin = float(zmin)\n self.zmin_ctrl.SetBackgroundColour(\"white\")\n except:\n self.zmin_ctrl.SetBackgroundColour(bgColor)\n failed = True\n try:\n num_zmax = float(zmax)\n self.zmax_ctrl.SetBackgroundColour(\"white\")\n except:\n self.zmax_ctrl.SetBackgroundColour(bgColor)\n failed = True\n \n #Validate File Fields\n int_str = self.int_file_txtCtrl.GetValue()\n spin_str = self.spin_file_txtCtrl.GetValue()\n tau_str = self.tau_file_txtCtrl.GetValue()\n out_str = self.output_file_txtCtrl.GetValue()\n if int_str:\n self.int_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.int_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if spin_str:\n self.spin_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.spin_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if tau_str:\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n if out_str:\n self.output_file_txtCtrl.SetBackgroundColour(\"white\")\n else: \n self.output_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n \n direction = {}\n direction['kx'] = num_kx\n direction['ky'] = num_ky\n direction['kz'] = num_kz\n hkl_interval = [num_hklmin, num_hklmax, int(self.hkl_steps_ctrl.GetValue())]\n w_interval = [num_wmin, num_wmax, int(self.w_steps_ctrl.GetValue())]\n \n tau_text = ''\n try:\n tau_file = open(tau_str,'r')\n tau_text = tau_file.read()\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n\n items = tau_text.split()\n if len(items)%3 and not len(items):\n failed = True\n\n tau_list = []\n i = 0\n while not failed and i < len(items)-3:\n tau1, tau2, tau3 = None, None, None\n try:\n tau1 = float(items[i])\n tau2 = float(items[i+1])\n tau3 = float(items[i+2])\n self.tau_file_txtCtrl.SetBackgroundColour(\"white\")\n except:\n self.tau_file_txtCtrl.SetBackgroundColour(bgColor)\n failed = True\n tau_list.append([tau1,tau2,tau3])\n i+=3\n \n self.Refresh()\n# self.window.Show(True,True)\n \n plotstats = [zmin, zmax, colorbar_bool]\n \n return failed, hkl_interval, w_interval, tau_list, direction, num_temp, sphavg_bool, plotstats", "def test_get_Q(self):\n vect_length = 50\n x_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n y_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n\n self.ds.spw_Nfreqs = vect_length\n #Test if there is a warning if user does not pass the beam\n key1 = (0, 24, 38)\n key2 = (1, 24, 38)\n uvd = copy.deepcopy(self.uvd)\n ds_t = pspecdata.PSpecData(dsets=[uvd, uvd])\n\n for i in range(vect_length):\n try:\n Q_matrix = self.ds.get_Q(i)\n # Test that if the number of delay bins hasn't been set\n # the code defaults to putting that equal to Nfreqs\n self.assertEqual(self.ds.spw_Ndlys, self.ds.spw_Nfreqs)\n except IndexError:\n Q_matrix = np.ones((vect_length, vect_length))\n\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n 
self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n try:\n Q_matrix = self.ds.get_Q(vect_length/2)\n except IndexError:\n Q_matrix = np.ones((vect_length, vect_length))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n\n # Now do all the same tests from above but for a different number\n # of delay channels\n self.ds.set_Ndlys(vect_length-3)\n for i in range(vect_length-3):\n try:\n Q_matrix = self.ds.get_Q(i)\n except IndexError:\n Q_matrix = np.ones((vect_length,vect_length))\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n try:\n Q_matrix = self.ds.get_Q((vect_length-2)/2-1)\n except IndexError:\n Q_matrix = np.ones((vect_length,vect_length))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n\n # Make sure that error is raised when asking for a delay mode outside\n # of the range of delay bins\n pytest.raises(IndexError, self.ds.get_Q, vect_length-1)", "def is_valid_conf(self, q: np.ndarray) -> bool:\n return check_limits(q, self.lim_lo, self.lim_up)", "def _validate_qty(values: dict):\n\n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n\n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.lot_size_filter\n # if ONE :=1 and not filter.min_qty <= quantity <= filter.max_qty:\n # ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n quantity,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values", "def f_check_adr_parameters_correctness(dict):\n\n if int(dict[\"operation_mode_num\"]) not in (0, 1, 2, 3, 4, 5, 6):\n print('\\n Error!!! Operation mode is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"FFT_size_samples\"]) not in (2048, 4096, 8192, 16384, 32768):\n print('\\n Error!!! FFT size is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"spectra_averaging\"]) < 16 or int(dict[\"spectra_averaging\"]) > 32768:\n print('\\n Error!!! Spectra averaging number is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"start_line_freq\"]) not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16): # 0 … (SFFT-1024)/1024\n print('\\n Error!!! Start frequency line is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"width_line_freq\"]) not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16):\n print('\\n Error!!! Frequency width line is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"width_line_freq\"]) > ((int(dict[\"FFT_size_samples\"]) - int(dict[\"start_line_freq\"]) * 1024) / 1024): # 1 … (SFFT-SLINE*1024)/1024\n print('\\n Error!!! Frequency width is bigger than FFT size allows!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"clock_source\"]) not in (0, 1):\n print('\\n Error!!! 
Clock source is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"sum_diff_mode_num\"]) not in (0, 1):\n print('\\n Error!!! Sum-diff mode is wrong!\\n')\n sys.exit(' Program stopped!')\n\n if int(dict[\"data_file_size\"]) < -1 or int(dict[\"data_file_size\"]) > 4096:\n print('\\n Error!!! File size value is wrong!\\n')\n sys.exit(' Program stopped!')\n\n '''\n if (int(dict[\"chan_diff_delay\"]) < 0 or int(parameters_dict[\"chan_diff_dalay\"]) > 1024):\n print('\\n Error!!! Channel difference delay is wrong!\\n')\n sys.exit(' Program stopped!')\n '''\n\n # print('\\n ADR parameters from file are correct!\\n')\n\n return dict", "def check_consistency(self):\n assert len(self.shape) == len(self.qhape) == len(self.dirs)\n # Qnums must be unique within a qim and correspond one-to-one with\n # dimensions in dim.\n assert all(\n (\n len(dim) == len(qim) == len(set(qim))\n for dim, qim in zip(self.shape, self.qhape)\n )\n )\n assert all(d == 1 or d == -1 for d in self.dirs)\n assert all(q == self._qod_func(q) for q in sum(self.qhape, []))\n # Check that every sect has a valid key and the correct shape and\n # dtype.\n for k, v in self.sects.items():\n assert v.dtype == self.dtype\n assert self.is_valid_key(k)\n block_shp_real = v.shape\n qnum_inds = tuple(\n self.qhape[i].index(qnum) for i, qnum in enumerate(k)\n )\n block_shp_claimed = tuple(\n [self.shape[i][j] for i, j in enumerate(qnum_inds)]\n )\n assert block_shp_claimed == block_shp_real\n if self.invar and (self.charge != 0 or not self.isscalar()):\n assert self.defval == 0\n return True", "def _check_determinancy(self, values, errors, combo):\n val, err = self.used_vars(values, errors, combo)\n n, m = len(val), len(self.get_equations(combo))\n\n if n != m:\n if m > n:\n s = '>'\n t = 'remove'\n v = err\n else:\n s = '<'\n t = 'add'\n v = val\n\n a = abs(n - m)\n\n raise ValueError('Indeterminant system:: Number of equations ({}) '\n '{} number of unknowns ({}). 
To correct, {} ({}) errors in {} '\n 'or adjust the input equations.'.format(m, s, n, t, a, v))", "def validate(base_url, keys, throttle, mdrate, mderrors, cterrors, max_file_size, tmpdir):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise Validate; base_url:{a}, throttle:{b}, mdrate:{c}, mderrors:{d}, cterrors:{e}, max_file_size:{f}, tmpdir:{g}\".format(x=tname, a=base_url, b=throttle, c=mdrate, d=mderrors, e=cterrors, f=max_file_size, g=tmpdir))\n\n mdopts = [\"mdonly\", \"md+ct\"]\n mdprobs = [mdrate, 1 - mdrate]\n\n mderroropts = [\"error\", \"ok\"]\n mderrorprobs = [mderrors, 1 - mderrors]\n\n cterroropts = [\"error\", \"ok\"]\n cterrorprobs = [cterrors, 1 - cterrors]\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n # print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n mdtype = _select_from(mderroropts, mderrorprobs)\n # print \"MD: \" + mdtype\n\n # generate a notification which may or may not have an error\n note = _make_notification(error=mdtype==\"error\")\n # print note\n\n # determine whether we're going to send some content\n hasct = _select_from(mdopts, mdprobs)\n # print \"CT: \" + hasct\n file_handle = None\n filepath = None\n cterr = \"ok\"\n if hasct == \"md+ct\":\n # determine if the content should have an error\n cterr = _select_from(cterroropts, cterrorprobs)\n #print \"CTERR:\" + cterr\n filepath = _get_file_path(tmpdir, max_file_size, error=cterr==\"error\")\n #print \"File\" + filepath\n file_handle = open(filepath)\n\n app.logger.debug(\"Thread:{x} - Validate request for Account:{y} Type:{z} MD:{a} CT:{b}\".format(x=tname, y=api_key, z=hasct, a=mdtype, b=cterr))\n\n # make the validate request (which will throw an exception more often than not, because that's what we're testing)\n try:\n j.validate(note, file_handle)\n app.logger.debug(\"Thread:{x} - Validate request resulted in success\".format(x=tname))\n except:\n app.logger.error(\"Thread:{x} - Validate request resulted in expected exception\".format(x=tname))\n\n # cleanup after ourselves\n if filepath is not None:\n file_handle.close()\n os.remove(filepath)\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def consistency_check(self):\n\n for mode in [\"instantaneous\", \"infinite_duration\", \"fixed_duration\"]:\n for key, value in getattr(self.modes, mode).sources.items():\n dim = self.dimensions\n for axis in [\"x\", \"y\", \"z\"]:\n par = getattr(value, axis)\n bound = min(getattr(dim, axis))\n if isinstance(par, list):\n for item in par:\n if item < 0 or item > bound:\n raise ConsistencyError(\n f\"{mode} source {key} x position is \"\n f\"outside space domain (0, {bound}).\")\n else:\n if par < 0 or par > bound:\n raise ConsistencyError(\n f\"{mode} source {key} x position is \"\n f\"outside space domain (0, {bound}).\")\n\n for mode in [\"instantaneous\", \"infinite_duration\"]:\n for key, value in getattr(self.modes, mode).sources.items():\n if isinstance(value.time, list):\n for item in value.time:\n if item > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} time is \"\n f\"outside time domain [0, {self.total_time}).\")\n else:\n if value.time > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} time is \"\n f\"outside time domain [0, {self.total_time}).\")\n\n for key, value in 
getattr(self.modes, \"fixed_duration\").sources.items():\n if isinstance(value.start_time, list):\n for item in value.start_time:\n if item > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} start time is \"\n f\"outside time domain [0, {self.total_time}).\")\n else:\n if value.start_time > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} start time is \"\n f\"outside time domain [0, {self.total_time}).\")\n\n if isinstance(value.end_time, list):\n for item in value.end_time:\n if item > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} end time is \"\n f\"outside time domain [0, {self.total_time}).\")\n else:\n if value.end_time > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} end time is \"\n f\"outside time domain [0, {self.total_time}).\")\n \n dims = [\"x\", \"y\", \"z\"]\n for plane in self.models.eddy_diffusion.monitor_locations.planes.values():\n dim = [axis for axis in dims if axis not in str(plane.axis)][0]\n if isinstance(plane.distance, list):\n if max(plane.distance) > min(getattr(self.dimensions, dim)):\n raise ConsistencyError(f\"{plane} is outside the space domain\")\n else:\n if plane.distance > min(getattr(self.dimensions, dim)):\n raise ConsistencyError(f\"{plane} is outside the space domain\")\n\n for key, point in self.models.eddy_diffusion.monitor_locations.points.items():\n for dim in dims:\n if getattr(point, dim) > min(getattr(self.dimensions, dim)):\n raise ConsistencyError(\n f\"{key}'s {dim} value, is outside space domain \"\n f\"(0, {getattr(self.dimensions, dim)})\")\n\n for key, line in self.models.eddy_diffusion.monitor_locations.lines.items():\n for dim in dims:\n if getattr(line.point, dim) > min(getattr(self.dimensions, dim)):\n raise ConsistencyError(\n f\"{key}'s {dim} value, is outside space domain \"\n f\"(0, {getattr(self.dimensions, dim)})\")\n\n thresh = self.thresholds\n if len(thresh.concentration) > 5 or len(thresh.exposure) > 5:\n raise ConsistencyError(f\"Cannot exceed more than 5 thresholds\")\n \n line_number = self.models.eddy_diffusion.lines_plots.number\n if isinstance(line_number, list):\n for item in line_number:\n if item > self.time_samples:\n raise ConsistencyError(\n f\"The number of requested line plots ({line_number}) cannot exceed the \"\n f\"number of time samples ({self.time_samples}).\")\n else:\n if line_number > self.time_samples:\n raise ConsistencyError(\n f\"The number of requested line plots ({line_number}) cannot exceed the \"\n f\"number of time samples ({self.time_samples}).\")\n \n contour_number = self.models.eddy_diffusion.planes_plots.number\n if isinstance(contour_number, list):\n for item in contour_number:\n if item > self.time_samples:\n raise ConsistencyError(\n f\"The number of requested contour plots ({item}) cannot exceed the \"\n f\"number of time samples ({self.time_samples}.)\")\n else:\n if contour_number > self.time_samples:\n raise ConsistencyError(\n f\"The number of requested contour plots ({contour_number}) cannot exceed the \"\n f\"number of time samples ({self.time_samples}).\")", "def _check_many_qxz(count=1, tol=1e-8):\n qx = np.random.uniform(-2.6, 1.5, count)\n qz = np.random.uniform(-2.6, 2.6, count)\n lambda_i = np.random.uniform(4.0, 6.0, count)\n lambda_f = np.random.uniform(4.0, 6.0, count)\n for i in range(count):\n if i and i%10000 == 0: print(i, \"of\", count)\n qxz = qx[i], qz[i]\n wavelengths = lambda_i[i], lambda_f[i]\n #wavelengths = 4., 5.\n _check_qxz(qxz, wavelengths, tol)", "def 
test_depolarizing_error_2q_unitary(self):\n p_depol = 0.3\n error = depolarizing_error(p_depol, 2, standard_gates=False)\n X = standard_gate_unitary('x')\n Y = standard_gate_unitary('y')\n Z = standard_gate_unitary('z')\n target_unitaries = [X, Y, Z, # on qubit 0\n X, Y, Z, # on qubit 1\n np.kron(X, X), np.kron(X, Y), np.kron(X, Z),\n np.kron(Y, X), np.kron(Y, Y), np.kron(Y, Z),\n np.kron(Z, X), np.kron(Z, Y), np.kron(Z, Z)]\n for j in range(16):\n circ, p = error.error_term(j)\n name = circ[0]['name']\n self.assertIn(name, ('unitary', \"id\"))\n if name == \"unitary\":\n self.assertAlmostEqual(p, p_depol / 16)\n op = circ[0]['params'][0]\n qubits = circ[0]['qubits']\n if len(op) == 2:\n self.assertIn(qubits, [[0], [1]])\n else:\n self.assertEqual(qubits, [0, 1])\n self.remove_if_found(op, target_unitaries)\n else:\n self.assertAlmostEqual(p, 1 - p_depol + p_depol / 16)\n self.assertEqual(circ[0]['qubits'], [0])\n self.assertEqual(target_unitaries, [], msg=\"Incorrect unitaries\")", "def check_termination(self):\r\n \r\n # First check if we are doing termination based on running time\r\n if (self.options.time_limit):\r\n self.time = time.clock - self.time_start\r\n if (self.time >= self.options.maxtime):\r\n self.term_reason = 'Exceeded time limit'\r\n return\r\n \r\n # Now check if we are doing break by tolx\r\n if (self.options.use_tolx):\r\n if (np.sqrt(cua.dot(self.dx,self.dx).get())/\r\n np.sqrt(cua.dot(self.oldx,self.oldx).get()) < self.options.tolx):\r\n self.term_reason = 'Relative change in x small enough'\r\n return\r\n \r\n # Are we doing break by tolo (tol obj val)\r\n if (self.options.use_tolo and self.iter > 2):\r\n delta = abs(self.obj-self.oldobj)\r\n if (delta < self.options.tolo):\r\n self.term_reason ='Relative change in objvalue small enough'\r\n return\r\n\r\n # Check if change in x and gradient are small enough\r\n # we don't want that for now\r\n# if (np.sqrt((cua.dot(self.dx,self.dx).get())) < self.options.tolx) \\\r\n# or (np.sqrt(cua.dot(self.dg,self.dg).get()) < self.options.tolg):\r\n# self.term_reason = '|x_t+1 - x_t|=0 or |grad_t+1 - grad_t| < 1e-9'\r\n# return\r\n \r\n # Finally the plain old check if max iter has been achieved\r\n if (self.iter >= self.options.maxiter):\r\n self.term_reason = 'Maximum number of iterations reached'\r\n return\r\n \r\n # KKT violation\r\n if (self.options.use_kkt):\r\n if np.abs(np.sqrt(cua.dot(self.x,self.grad).get())) <= options.tolk:\r\n self.term_reason = '|x^T * grad| < opt.pbb_gradient_norm'\r\n return\r\n \r\n # Gradient check\r\n if (self.options.use_tolg):\r\n nr = cua.max(cua.fabs(self.grad)).get();\r\n if (nr < self.options.tolg):\r\n self.term_reason = '|| grad ||_inf < opt.tolg'\r\n return\r\n \r\n # No condition met, so return false\r\n self.term_reason = 0;", "def check_measure_fields(self, values: Set[str], tol: float = 0.7) -> bool:\n counter: int = 0\n if type(values) is list:\n if len(values) == 0:\n sc.message(\"WARNING: NER MAPPER HAS A FIELD WITH NO VALUES.\")\n return False\n\n for value in values:\n tmp = value.split()\n if len(tmp) == 2 and tmp[0].isnumeric():\n counter += 1\n return (counter / len(values)) > tol\n else:\n return False", "def check_matching_unit_dimension(\n ureg: UnitRegistry, base_units: str, units_to_check: List[str]\n) -> None:\n\n base_unit = getattr(ureg, base_units)\n\n for unit_string in units_to_check:\n unit = getattr(ureg, unit_string)\n if unit.dimensionality != base_unit.dimensionality:\n raise DimensionalityError(base_unit, unit)", "def is_valid_key(self, key):\n if not 
self.invar:\n return True\n if len(key) != len(self.qhape):\n return False\n key = map(opr.mul, self.dirs, key)\n s = sum(key)\n if self.qodulus is not None:\n s %= self.qodulus\n return s == self.charge", "def __init__(self,\n uDict,\n phiDict,\n testSpaceDict,\n matType,\n dofBoundaryConditionsDict,\n dofBoundaryConditionsSetterDict,\n coefficients,\n elementQuadrature,\n elementBoundaryQuadrature,\n fluxBoundaryConditionsDict=None,\n advectiveFluxBoundaryConditionsSetterDict=None,\n diffusiveFluxBoundaryConditionsSetterDictDict=None,\n stressTraceBoundaryConditionsSetterDict=None,\n stabilization=None,\n shockCapturing=None,\n conservativeFluxDict=None,\n numericalFluxType=None,\n TimeIntegrationClass=None,\n massLumping=False,\n reactionLumping=False,\n options=None,\n name='defaultName',\n reuse_trial_and_test_quadrature=True,\n sd = True,\n movingDomain=False):\n #\n #set the objects describing the method and boundary conditions\n #\n self.movingDomain=movingDomain\n self.tLast_mesh=None\n #\n self.name=name\n self.sd=sd\n self.Hess=False\n self.lowmem=True\n self.timeTerm=True#allow turning off the time derivative\n #self.lowmem=False\n self.testIsTrial=True\n self.phiTrialIsTrial=True\n self.u = uDict\n self.ua = {}#analytical solutions\n self.phi = phiDict\n self.dphi={}\n for ck,phi in phiDict.iteritems():\n if coefficients.potential.has_key(ck):\n for cj in coefficients.potential[ck].keys():\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n else:\n self.dphi[(ck,ck)] = FiniteElementFunction(phi.femSpace)\n #check for nonlinearities in the diffusion coefficient that don't match the potential\n for ci,ckDict in coefficients.diffusion.iteritems():\n #for ck,cjDict in coefficients.diffusion.iteritems(): #cek: bug?\n for ck,cjDict in ckDict.iteritems():\n for cj in cjDict.keys():\n if not self.dphi.has_key((ck,cj)):\n self.dphi[(ck,cj)] = FiniteElementFunction(phi.femSpace)\n self.matType = matType\n #try to reuse test and trial information across components if spaces are the same\n self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature#True#False\n if self.reuse_test_trial_quadrature:\n for ci in range(1,coefficients.nc):\n assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, \"to reuse_test_trial_quad all femSpaces must be the same!\"\n ## Simplicial Mesh\n self.mesh = self.u[0].femSpace.mesh #assume the same mesh for all components for now\n self.testSpace = testSpaceDict\n self.dirichletConditions = dofBoundaryConditionsDict\n self.dirichletNodeSetList=None #explicit Dirichlet conditions for now, no Dirichlet BC constraints\n self.coefficients = coefficients\n self.coefficients.initializeMesh(self.mesh)\n self.nc = self.coefficients.nc\n self.stabilization = stabilization\n self.shockCapturing = shockCapturing\n self.conservativeFlux = conservativeFluxDict #no velocity post-processing for now\n self.fluxBoundaryConditions=fluxBoundaryConditionsDict\n self.advectiveFluxBoundaryConditionsSetterDict=advectiveFluxBoundaryConditionsSetterDict\n self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict\n #determine whether the stabilization term is nonlinear\n self.stabilizationIsNonlinear = False\n #cek come back\n if self.stabilization != None:\n for ci in range(self.nc):\n if coefficients.mass.has_key(ci):\n for flag in coefficients.mass[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.advection.has_key(ci):\n for flag in 
coefficients.advection[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.diffusion.has_key(ci):\n for diffusionDict in coefficients.diffusion[ci].values():\n for flag in diffusionDict.values():\n if flag != 'constant':\n self.stabilizationIsNonlinear=True\n if coefficients.potential.has_key(ci):\n for flag in coefficients.potential[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.reaction.has_key(ci):\n for flag in coefficients.reaction[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n if coefficients.hamiltonian.has_key(ci):\n for flag in coefficients.hamiltonian[ci].values():\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear=True\n #determine if we need element boundary storage\n self.elementBoundaryIntegrals = {}\n for ci in range(self.nc):\n self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux != None) or\n (numericalFluxType != None) or\n (self.fluxBoundaryConditions[ci] == 'outFlow') or\n (self.fluxBoundaryConditions[ci] == 'mixedFlow') or\n (self.fluxBoundaryConditions[ci] == 'setFlow'))\n #\n #calculate some dimensions\n #\n self.nSpace_global = self.u[0].femSpace.nSpace_global #assume same space dim for all variables\n self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in self.u.values()]\n self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in self.phi.values()]\n self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in self.phi.values()]\n self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in self.testSpace.values()]\n self.nFreeDOF_global = [dc.nFreeDOF_global for dc in self.dirichletConditions.values()]\n self.nVDOF_element = sum(self.nDOF_trial_element)\n self.nFreeVDOF_global = sum(self.nFreeDOF_global)\n #\n NonlinearEquation.__init__(self,self.nFreeVDOF_global)\n #\n #build the quadrature point dictionaries from the input (this\n #is just for convenience so that the input doesn't have to be\n #complete)\n #\n elementQuadratureDict={}\n elemQuadIsDict = isinstance(elementQuadrature,dict)\n if elemQuadIsDict: #set terms manually\n for I in self.coefficients.elementIntegralKeys:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[I] = elementQuadrature[I]\n else:\n elementQuadratureDict[I] = elementQuadrature['default']\n else:\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[I] = elementQuadrature\n if self.stabilization != None:\n for I in self.coefficients.elementIntegralKeys:\n if elemQuadIsDict:\n if elementQuadrature.has_key(I):\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature[I]\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature['default']\n else:\n elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature\n if self.shockCapturing != None:\n for ci in self.shockCapturing.components:\n if elemQuadIsDict:\n if elementQuadrature.has_key(('numDiff',ci,ci)):\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature[('numDiff',ci,ci)]\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature['default']\n else:\n elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature\n if massLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('m',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = 
Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n if reactionLumping:\n for ci in self.coefficients.mass.keys():\n elementQuadratureDict[('r',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)\n elementBoundaryQuadratureDict={}\n if isinstance(elementBoundaryQuadrature,dict): #set terms manually\n for I in self.coefficients.elementBoundaryIntegralKeys:\n if elementBoundaryQuadrature.has_key(I):\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]\n else:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']\n else:\n for I in self.coefficients.elementBoundaryIntegralKeys:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature\n #\n # find the union of all element quadrature points and\n # build a quadrature rule for each integral that has a\n # weight at each point in the union\n #mwf include tag telling me which indices are which quadrature rule?\n (self.elementQuadraturePoints,self.elementQuadratureWeights,\n self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)\n self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]\n self.nQuadraturePoints_global = self.nQuadraturePoints_element*self.mesh.nElements_global\n #\n #Repeat the same thing for the element boundary quadrature\n #\n (self.elementBoundaryQuadraturePoints,\n self.elementBoundaryQuadratureWeights,\n self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)\n self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]\n self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global*\n self.mesh.nElementBoundaries_element*\n self.nElementBoundaryQuadraturePoints_elementBoundary)\n\n #\n #storage dictionaries\n self.scalars_element = set()\n #\n #simplified allocations for test==trial and also check if space is mixed or not\n #\n self.q={}\n self.ebq={}\n self.ebq_global={}\n self.ebqe={}\n self.phi_ip={}\n #mesh\n self.q['x'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')\n self.q['det(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['abs(det(J))'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q['J'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.q['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['x'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['g'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n self.ebqe['inverse(J)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global,self.nSpace_global),'d')\n self.ebqe['hat(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n self.ebqe['bar(x)'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')\n 
self.ebqe['sqrt(det(g))'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.ebqe[('n')] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n #shape\n self.q[('v',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w',0)] = self.q[('v',0)]\n self.q[('grad(v)',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)',0)] = self.q[('grad(v)',0)]\n self.q[('grad(w)*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.q[('grad(w)*dV_f',0)] = self.q[('grad(w)*dV',0)]\n #todo get rid of dV_{f,a}, etc\n self.q[('w*dV',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.q[('w*dV_m',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','grad(w)*dV','grad(w)*dV_f','w*dV','w*dV_m']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.q[key_ci] = self.q[key_0]\n #ELLAM weights stiffness, body integrals by dt\n for ci in range(self.nc):\n self.q[('dt*grad(w)*dV',ci)]= numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[ci],self.nSpace_global),'d')\n #\n self.ebqe[('v',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n self.ebqe[('w',0)] = self.ebqe[('v',0)]\n self.ebqe[('grad(v)',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0],self.nSpace_global),'d')\n self.ebqe[('w*dS_f',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nDOF_trial_element[0]),'d')\n #assume all components are the same space for now\n shapeKeysForAlias = ['v','w','grad(v)','w*dS_f']\n for ci in range(1,self.nc):\n for key in shapeKeysForAlias:\n key_ci = (key,ci)\n key_0 = (key,0)\n self.ebqe[key_ci] = self.ebqe[key_0]\n\n for ci in range(self.nc):\n self.q[('u',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('grad(u)',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n #f\n for ci in self.coefficients.advection.keys():\n self.q[('f',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.q[('df',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')\n self.ebqe[('f',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n for cj in self.coefficients.advection[ci].keys():\n self.ebqe[('df',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')\n\n #a, linear dispersion single component\n\n for ci,ckDict in 
self.coefficients.diffusion.iteritems():\n for ck,cjDict in ckDict.iteritems():\n for flag in cjDict.values():\n assert flag == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n\n if self.coefficients.sdInfo != None and (ci,ck) in self.coefficients.sdInfo.keys():\n self.q[('a',ci,ck)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.coefficients.sdInfo[(ci,ck)][0][self.nSpace_global]),\n 'd')\n\n else:\n self.q[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.q[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n self.ebqe[('a',ci,ck)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n for cj in cjDict.keys():\n self.ebqe[('da',ci,ck,cj)]=numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),\n 'd')\n #dense storage\n self.q[('grad(w)*dV_a',ci,ck)] = self.q[('grad(w)*dV_f',ci)]\n self.q[('dt*grad(w)*dV_a',ci,ck)]= self.q[('dt*grad(w)*dV',ci)]\n #ci,ckDict\n #linear potential only for now, need to change for e.g., Buckley Leverett\n for ck in self.phi.keys():\n self.phi[ck].dof[:]=self.u[ck].dof\n self.q[('grad(phi)',ck)] = self.q[('grad(u)',ck)]\n for key in self.dphi.keys():\n self.dphi[key].dof.fill(1.0)\n self.q[('dphi',key[0],key[1])] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n\n\n# if self.coefficients.diffusion.has_key(0):\n# for ck,flag in self.coefficients.diffusion[0][0].iteritems():\n# assert self.coefficients.diffusion[0][0][ck] == 'constant', \"Error potential %s LADRellam does not handle diffusion = %s yet\" % (ck,flag)\n# if self.coefficients.sdInfo != None and (0,0) in self.coefficients.sdInfo.keys():\n# self.q[('a',0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.q[('da',0,0,0)] = numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.coefficients.sdInfo[(0,0)][0][self.nSpace_global]),\n# 'd')\n\n# else:\n# 
self.q[('a',0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.q[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nElements_global,\n# self.nQuadraturePoints_element,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('a',0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# self.ebqe[('da',0,0,0)]=numpy.zeros(\n# (self.mesh.nExteriorElementBoundaries_global,\n# self.nElementBoundaryQuadraturePoints_elementBoundary,\n# self.nSpace_global,\n# self.nSpace_global),\n# 'd')\n# #\n# self.phi[0].dof[:]=self.u[0].dof\n# self.dphi[(0,0)].dof.fill(1.0)\n# self.q[('grad(phi)',0)] = self.q[('grad(u)',0)]\n# self.q[('dphi',0,0)] = numpy.ones((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n\n# self.q[('grad(w)*dV_a',0,0)] = self.q[('grad(w)*dV_f',0)]\n# self.q[('dt*grad(w)*dV_a',0,0)]= self.q[('dt*grad(w)*dV',0)]\n\n #r 'constant' ie not a function of solution but go ahead and include dr for now\n for ci,cjDict in self.coefficients.reaction.iteritems():\n self.q[('r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dr',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('w*dV_r',ci)] = self.q[('w*dV',ci)]\n self.q[('dt*w*dV_r',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nDOF_trial_element[0]),'d')\n self.ebqe[('r',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #m\n for ci,cjDict in self.coefficients.mass.iteritems():\n self.q[('m',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n for cj in cjDict.keys():\n self.q[('dm',ci,cj)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('mt',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_last',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_tmp',ci)] = self.q[('m',ci)]\n self.q[('cfl',ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('numDiff',ci,ci)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.ebqe[('m',ci)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n for cj in cjDict.keys():\n self.ebqe[('dm',ci,cj)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n\n ###ellam specific options with defauls here\n self.ellamDiscretization = ELLAMtools.ELLAMdiscretization(self,options)\n\n #\n self.needEBQ = options.needEBQ #could need for analytical velocity evaluation with RT0,BDM\n\n #beg normal stuff allocating things\n self.points_elementBoundaryQuadrature= set()\n self.scalars_elementBoundaryQuadrature= set([('u',ci) for ci in range(self.nc)])\n self.vectors_elementBoundaryQuadrature= set()\n self.tensors_elementBoundaryQuadrature= set()\n\n if self.needEBQ:\n for k in ['x','hat(x)']:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq['n'] = 
numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n self.ebq['inverse(J)'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global,\n self.nSpace_global),'d')\n #allocate the metric tensor\n self.ebq['g'] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n max(1,self.nSpace_global-1),\n max(1,self.nSpace_global-1)),\n 'd')\n log(memory(\"element boundary quadrature\",\"LADRellam\"),level=4)\n ebq_keys = ['sqrt(det(g))']\n ebq_keys.extend([('u',ci) for ci in range(self.nc)])\n for k in ebq_keys:\n self.ebq[k] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n\n #test and trial info\n self.ebq[('w',0)] = numpy.zeros((self.mesh.nElements_global,\n self.mesh.nElementBoundaries_element,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[0]),'d')\n for ci in range(1,self.nc):\n self.ebq[('w',ci)] = self.ebq[('w',0)]\n for ci in range(self.nc):\n self.ebq[('v',ci)] = self.ebq[('w',0)]\n\n #ebq_global info\n self.ebq_global['x'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),'d')\n self.ebq_global['n'] = numpy.zeros((self.mesh.nElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),'d')\n #\n # allocate residual and Jacobian storage\n #\n self.elementResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementSpatialResidual = [numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci]),\n 'd') for ci in range(self.nc)]\n self.elementJacobian = {}\n for ci in range(self.nc):\n self.elementJacobian[ci]={}\n for cj in range(self.nc):\n if cj in self.coefficients.stencil[ci]:\n self.elementJacobian[ci][cj] = numpy.zeros(\n (self.mesh.nElements_global,\n self.nDOF_test_element[ci],\n self.nDOF_trial_element[cj]),\n 'd')\n #\n self.fluxJacobian_exterior = {}\n for ci in range(self.nc):\n self.fluxJacobian_exterior[ci]={}\n for cj in self.coefficients.stencil[ci]:\n self.fluxJacobian_exterior[ci][cj] = numpy.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nDOF_trial_element[cj]),\n 'd')\n\n #\n #\n #\n #\n log(memory(\"element and element boundary Jacobians\",\"OneLevelTransport\"),level=4)\n self.inflowBoundaryBC = {}\n self.inflowBoundaryBC_values = {}\n self.inflowFlux = {}\n for cj in range(self.nc):\n self.inflowBoundaryBC[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,),'i')\n self.inflowBoundaryBC_values[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nDOF_trial_element[cj]),'d')\n self.inflowFlux[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')\n self.internalNodes = set(range(self.mesh.nNodes_global))\n #identify the internal nodes this is ought to be in mesh\n ##\\todo move this to mesh\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n eN_global = self.mesh.elementBoundaryElementsArray[ebN,0]\n ebN_element = 
self.mesh.elementBoundaryLocalElementBoundariesArray[ebN,0]\n for i in range(self.mesh.nNodes_element):\n if i != ebN_element:\n I = self.mesh.elementNodesArray[eN_global,i]\n self.internalNodes -= set([I])\n self.nNodes_internal = len(self.internalNodes)\n self.internalNodesArray=numpy.zeros((self.nNodes_internal,),'i')\n for nI,n in enumerate(self.internalNodes):\n self.internalNodesArray[nI]=n\n #\n del self.internalNodes\n self.internalNodes = None\n log(\"Updating local to global mappings\",2)\n self.updateLocal2Global()\n log(\"Building time integration object\",2)\n log(memory(\"inflowBC, internalNodes,updateLocal2Global\",\"OneLevelTransport\"),level=4)\n #mwf for interpolating subgrid error for gradients etc\n if self.stabilization and self.stabilization.usesGradientStabilization:\n self.timeIntegration = TimeIntegrationClass(self,integrateInterpolationPoints=True)\n else:\n self.timeIntegration = TimeIntegrationClass(self)\n\n if options != None:\n self.timeIntegration.setFromOptions(options)\n log(memory(\"TimeIntegration\",\"OneLevelTransport\"),level=4)\n log(\"Calculating numerical quadrature formulas\",2)\n self.calculateQuadrature()\n #lay out components/equations contiguously for now\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [self.offset[ci-1]+self.nFreeDOF_global[ci-1]]\n self.stride = [1 for ci in range(self.nc)]\n #use contiguous layout of components for parallel, requires weak DBC's\n comm = Comm.get()\n self.comm=comm\n if comm.size() > 1:\n assert numericalFluxType != None and numericalFluxType.useWeakDirichletConditions,\"You must use a numerical flux to apply weak boundary conditions for parallel runs\"\n self.offset = [0]\n for ci in range(1,self.nc):\n self.offset += [ci]\n self.stride = [self.nc for ci in range(self.nc)]\n #\n log(memory(\"stride+offset\",\"OneLevelTransport\"),level=4)\n if numericalFluxType != None:\n if options == None or options.periodicDirichletConditions == None:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict)\n else:\n self.numericalFlux = numericalFluxType(self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict,\n options.periodicDirichletConditions)\n else:\n self.numericalFlux = None\n #set penalty terms\n #cek todo move into numerical flux initialization\n if self.ebq_global.has_key('penalty'):\n for ebN in range(self.mesh.nElementBoundaries_global):\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebq_global['penalty'][ebN,k] = self.numericalFlux.penalty_constant/(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)\n #penalty term\n #cek move to Numerical flux initialization\n if self.ebqe.has_key('penalty'):\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebqe['penalty'][ebNE,k] = self.numericalFlux.penalty_constant/self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power\n log(memory(\"numericalFlux\",\"OneLevelTransport\"),level=4)\n self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray\n #use post processing tools to get conservative fluxes, None by default\n import PostProcessingTools\n self.velocityPostProcessor = 
PostProcessingTools.VelocityPostProcessingChooser(self)\n log(memory(\"velocity postprocessor\",\"OneLevelTransport\"),level=4)\n #helper for writing out data storage\n import Archiver\n self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n #TODO get rid of this\n for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():\n self.ebqe[('advectiveFlux_bc_flag',ci)] = numpy.zeros(self.ebqe[('advectiveFlux_bc',ci)].shape,'i')\n for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():\n if self.coefficients.advection.has_key(ci):\n self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)\n self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1\n\n if hasattr(self.numericalFlux,'setDirichletValues'):\n self.numericalFlux.setDirichletValues(self.ebqe)\n if not hasattr(self.numericalFlux,'isDOFBoundary'):\n self.numericalFlux.isDOFBoundary = {}\n for ci in range(self.nc):\n self.numericalFlux.isDOFBoundary[ci]= numpy.zeros(self.ebqe[('u',ci)].shape,'i')\n if not hasattr(self.numericalFlux,'ebqe'):\n self.numericalFlux.ebqe = {}\n for ci in range(self.nc):\n self.numericalFlux.ebqe[('u',ci)]= numpy.zeros(self.ebqe[('u',ci)].shape,'d')", "def test_invalid_num_timestep(self):\n sampler = Sampler()\n estimator = Estimator()\n fidelity_primitive = ComputeUncompute(sampler)\n pvqd = PVQD(\n fidelity_primitive,\n self.ansatz,\n self.initial_parameters,\n estimator,\n optimizer=L_BFGS_B(),\n num_timesteps=0,\n )\n problem = TimeEvolutionProblem(\n self.hamiltonian, time=0.01, aux_operators=[self.hamiltonian, self.observable]\n )\n\n with self.assertRaises(ValueError):\n _ = pvqd.evolve(problem)", "def check_error(self):\n refine_results = {}\n for phase_path, phase in self.phases.items():\n refine_results[phase_path] = {}\n\n # Save the original grid to the refine results\n tx = phase.options['transcription']\n gd = tx.grid_data\n num_nodes = gd.subset_num_nodes['all']\n numseg = gd.num_segments\n\n refine_results[phase_path]['num_segments'] = numseg\n refine_results[phase_path]['order'] = gd.transcription_order\n refine_results[phase_path]['segment_ends'] = gd.segment_ends\n refine_results[phase_path]['need_refinement'] = np.zeros(numseg, dtype=bool)\n refine_results[phase_path]['error'] = np.zeros(numseg, dtype=float)\n\n if isinstance(tx, dm.RungeKutta):\n continue\n\n outputs = phase.list_outputs(units=False, out_stream=None)\n\n out_values_dict = {k: v['value'] for k, v in outputs}\n\n prom_to_abs_map = phase._var_allprocs_prom2abs_list['output']\n\n num_scalar_states = 0\n for state_name, options in phase.state_options.items():\n shape = options['shape']\n size = np.prod(shape)\n num_scalar_states += size\n\n x = np.zeros([num_nodes, num_scalar_states])\n f = np.zeros([num_nodes, num_scalar_states])\n c = 0\n\n # Obtain the solution on the current grid\n for state_name, options in phase.state_options.items():\n prom_name = f'timeseries.states:{state_name}'\n abs_name = prom_to_abs_map[prom_name][0]\n rate_source_prom_name = f\"timeseries.state_rates:{state_name}\"\n rate_abs_name = prom_to_abs_map[rate_source_prom_name][0]\n x[:, c] = out_values_dict[prom_name].ravel()\n f[:, c] = out_values_dict[rate_source_prom_name].ravel()\n c += 1\n\n # Obtain the solution on the new grid\n # interpolate x at t_hat\n new_order = gd.transcription_order + 1\n # Gauss-Lobatto does 
not allow even orders so increase order by 2 instead\n if gd.transcription == 'gauss-lobatto':\n new_order += 1\n new_grid = GridData(numseg, gd.transcription, new_order, gd.segment_ends, gd.compressed)\n left_end_idxs = new_grid.subset_node_indices['segment_ends'][0::2]\n left_end_idxs = np.append(left_end_idxs, new_grid.subset_num_nodes['all'] - 1)\n\n L = interpolation_lagrange_matrix(gd, new_grid)\n I = integration_matrix(new_grid)\n\n # Call the ODE at all nodes of the new grid\n x_hat, x_prime = self.eval_ode(phase, new_grid, L, I)\n E = {}\n e = {}\n err_over_states = {}\n for state_name, options in phase.state_options.items():\n E[state_name] = np.absolute(x_prime[state_name] - x_hat[state_name])\n for k in range(0, numseg):\n e[state_name] = E[state_name]/(1 + np.max(x_hat[state_name][left_end_idxs[k]:left_end_idxs[k + 1]]))\n err_over_states[state_name] = np.zeros(numseg)\n\n for state_name, options in phase.state_options.items():\n for k in range(0, numseg):\n err_over_states[state_name][k] = np.max(e[state_name][left_end_idxs[k]:left_end_idxs[k + 1]])\n\n self.error[phase_path] = np.zeros(numseg)\n refine_results[phase_path]['error'] = np.zeros(numseg)\n refine_results[phase_path]['need_refinement'] = np.zeros(numseg, dtype=bool)\n\n # Assess the errors in each state\n for state_name, options in phase.state_options.items():\n for k in range(0, numseg):\n if err_over_states[state_name][k] > self.error[phase_path][k]:\n self.error[phase_path][k] = err_over_states[state_name][k]\n refine_results[phase_path]['error'][k] = err_over_states[state_name][k]\n if self.error[phase_path][k] > phase.refine_options['tolerance']:\n refine_results[phase_path]['need_refinement'][k] = True\n\n return refine_results", "def _sanityCheckKeySizes(other):\n if other.minKeySize < 512:\n raise ValueError(\"minKeySize too small\")\n if other.minKeySize > 16384:\n raise ValueError(\"minKeySize too large\")\n if other.maxKeySize < 512:\n raise ValueError(\"maxKeySize too small\")\n if other.maxKeySize > 16384:\n raise ValueError(\"maxKeySize too large\")\n if other.maxKeySize < other.minKeySize:\n raise ValueError(\"maxKeySize smaller than minKeySize\")\n # check also keys of virtual hosts\n for i in other.virtual_hosts:\n i.validate()", "def test_depolarizing_error_1q_unitary(self):\n p_depol = 0.3\n error = depolarizing_error(p_depol, 1, standard_gates=False)\n target_unitaries = [standard_gate_unitary('x'),\n standard_gate_unitary('y'),\n standard_gate_unitary('z')]\n for j in range(4):\n circ, p = error.error_term(j)\n name = circ[0]['name']\n self.assertIn(name, ('unitary', \"id\"))\n self.assertEqual(circ[0]['qubits'], [0])\n if name == \"unitary\":\n self.assertAlmostEqual(p, p_depol / 4, msg=\"Incorrect Pauli probability\")\n self.remove_if_found(circ[0]['params'][0], target_unitaries)\n else:\n self.assertAlmostEqual(p, 1 - p_depol + p_depol / 4,\n msg=\"Incorrect identity probability\")\n self.assertEqual(target_unitaries, [], msg=\"Incorrect unitaries\")", "def check_dhi_limits_qcrad(dhi, solar_zenith, dni_extra, limits=None):\n if not limits:\n limits = QCRAD_LIMITS\n\n dhi_ub = _qcrad_ub(dni_extra, solar_zenith, limits['dhi_ub'])\n\n dhi_limit_flag = quality.util.check_limits(dhi, limits['dhi_lb'], dhi_ub)\n\n return dhi_limit_flag", "def check_validity(self,check_derivative=True,verbose=True):\n is_valid = True\n if self.f0(0.0)!=0.0:\n is_valid = False\n if verbose:\n print(\"DynamicalSystem: Warning: The system does \"+\n \"not satisfy f0(0)=0 (f(0)=\",self.f0(0.0),\")\")\n if 
self.f1(1.0)!=1.0:\n is_valid = False\n if verbose:\n print(\"DynamicalSystem: Warning: The system does \"+\n \"not satisfy f0(1)=1 (f1(1)=\",self.f1(1.0),\")\")\n if self.rho_max<self.rho_min:\n is_valid = False\n if verbose:\n print(\"DynamicalSystem: Warning: The system does \"+\n \"not satisfy rho_max>=rho_min (rho_min,rho_max=\",\n rho_min,rho_max,\")\")\n if check_derivative:\n # Coarse check if the derivative is bounded below by some d>1\n # Note it is sufficient to check the restricted ranges given\n xs0 = np.linspace(0.0,self.rho_max,129)\n xs1 = np.linspace(self.rho_min,1.0,129)\n d = np.min(np.concatenate((self.df0(xs0),self.df1(xs1))))\n if d<=1.0:\n is_valid = False\n if verbose:\n print(\"DynamicalSystem: Warning: The system does \"+\n \"not satisfy: d>1 (d<=\",d,\")\")\n return is_valid", "def do_checks(self):\n # ## get valid experiment variables\n all_subexperiments = [1, 2, 3]\n all_plates = list(range(1, 19))\n all_cell_ids = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n all_samples = list(self.experiment.design['Sample'])\n all_genes = self.experiment.subexperiments[1].plates[1].samples[all_samples[0]].genes\n all_replicates = list(range(1, 7))\n all_time = [0.5, 1.0, 2.0, 3.0, 4.0, 8.0, 12.0, 24.0, 48.0, 72.0, 96.0]\n\n if self.time is None:\n if self.treatment is 'Baseline':\n self.time = [0.0, 96.0]\n else:\n self.time = all_time\n\n if self.cell_id is None:\n self.cell_id = all_cell_ids\n\n if self.gene is None:\n self.gene = all_genes\n\n if self.replicate is None:\n self.replicate = all_replicates\n\n if self.treatment is None:\n raise ValueError('treatment cannot be None. Specify one of \"TGFb\", \"Control\", \"Baseline\"')\n\n if not isinstance(self.treatment, str):\n raise ValueError('treatment must be a string. Got \"{}\" a \"{}\"'.format(\n self.treatment, type(self.treatment)\n ))\n\n if not isinstance(self.normed, bool):\n raise ValueError('normed argument should be boolean. Got \"{}\"'.format(\n type(self.normed)\n ))\n\n if not isinstance(self.time, list):\n self.time = [self.time]\n\n for time_point in self.time:\n if time_point not in sorted(list(set(self.data.columns.get_level_values(1)))):\n raise ValueError('\"{}\" is invalid time point. 
Valid time '\n 'points are: {}'.format(\n time_point, list(self.data.columns))\n )", "def test_limits_boundary_values(self):\n\n def check_error_msg(status, output, storagelimit=False):\n import json\n if status == False:\n content = json.loads(output)[\"errors\"]\n if storagelimit:\n actual_error = content[\"dataStorageLimit\"]\n expected_error = '\"dataStorageLimit\" must be an integer between -1 and 100000'\n else:\n actual_error = content[\"dataThrottleLimit\"]\n expected_error = '\"dataThrottleLimit\" must be an integer between -1 and 2147483647'\n self.assertEqual(actual_error, expected_error)\n else:\n self.fail(\"expected to fail but passsed\")\n\n bucket = self.cluster.buckets[0]\n server = random.choice(bucket.servers)\n bucket_helper = BucketHelper(server)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2)\n check_error_msg(status, content)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648)\n check_error_msg(status, content)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=-2)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n storage_limit=2147483648)\n check_error_msg(status, content, True)\n\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=-2,\n storage_limit=-2)\n check_error_msg(status, content)\n check_error_msg(status, content, True)\n status, content = bucket_helper.set_throttle_n_storage_limit(bucket.name,\n throttle_limit=2147483648,\n storage_limit=2147483648)\n check_error_msg(status, content)\n check_error_msg(status, content, True)", "def _check(self):\n for molname in self.options.keys():\n for key in self.options[molname].keys():\n if key in [\"Ncopies\"]:\n try:\n self.options[molname][key]=int(self.options[molname][key])\n except:\n raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))\n if key in [\"Cutoff\"]:\n try:\n self.options[molname][key]=float(self.options[molname][key])\n except:\n raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))\n if key in [\"Addon\"]: # test the addon part and convert variables\n for item in self.options[molname][key]: # Iterate over all attachments\n if item is not None:\n # attachment point\n dtypes={\"attachment\":int}\n try:\n item[\"attachment\"]=int(item[\"attachment\"])\n except:\n raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))\n # position\n #~ try:\n #~ print self.options[molname][key][\"position\"]\n #~ self.options[molname][key][\"position\"]=int(self.options[molname][key][\"position\"])\n #~ except:\n #~ raise BaseException(\"Wrong type of the variable in molecule {} section {}\".format(molname,key))" ]
[ "0.52189726", "0.5177317", "0.5169023", "0.51312196", "0.5052218", "0.50168866", "0.4954876", "0.48952448", "0.4873793", "0.4869507", "0.48316526", "0.48261344", "0.4816464", "0.47993284", "0.4798235", "0.47845814", "0.47829345", "0.47609234", "0.47100845", "0.46791354", "0.4672345", "0.46125296", "0.4603093", "0.45954186", "0.45786127", "0.45768863", "0.45709187", "0.4563125", "0.45627925", "0.45626926" ]
0.6384456
0
Takes an object and returns a list of names of all the methods in that class
def classmethods(class_object):
    fn_tuple_list = inspect.getmembers(class_object, predicate=inspect.ismethod)
    fn_names = [
        f_name for (f_name, method) in fn_tuple_list
        if not f_name.startswith("_")
    ]
    return fn_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def methods_of(obj):\r\n result = []\r\n for i in dir(obj):\r\n if callable(getattr(obj, i)) and not i.startswith('_'):\r\n result.append((i, getattr(obj, i)))\r\n return result", "def lookup(obj):\n objList = [method_name for method_name in dir(obj)\n if callable(getattr(obj, method_name))]\n return objList", "def get_method_names(cls, prefix):\n names = []\n for name in dir(cls):\n if name.startswith(prefix):\n func = getattr(cls, name)\n names.append(name)\n return names", "def get_method_list_from_classlist(self):\n method_list = []\n method_name_list = []\n for class_object in self.class_list:\n for name, obj in inspect.getmembers(class_object, inspect.ismethod):\n method_list.append(obj)\n method_name_list.append(name)\n return method_list", "def get_all_methods(instance):\n return [m for m in dir(instance) if callable(getattr(instance, m))]", "def _methods_of(cls):\n # The idea of unbound methods exists in Python 2 and was removed in\n # Python 3, so \"inspect.ismethod\" is used here for Python 2 and\n # \"inspect.isfunction\" for Python 3.\n all_methods = inspect.getmembers(\n cls, predicate=lambda x: inspect.ismethod(x) or inspect.isfunction(x))\n methods = [m for m in all_methods if not m[0].startswith(\"_\")]\n\n help_groups = {}\n for m in methods:\n group = getattr(m[1], \"help_group\", \"0\")\n help_groups.setdefault(group, []).append(m)\n\n if len(help_groups) > 1:\n # we should sort methods by groups\n methods = []\n for group in sorted(help_groups.items(), key=lambda x: x[0]):\n if methods:\n # None -> empty line between groups\n methods.append((None, None))\n methods.extend(group[1])\n return methods", "def __methods(cls):\n _dict = {}\n __methodDict(cls, _dict)\n return _dict.keys()", "def get_all():\n temp = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n temp = [x for x in temp if x[0] not in [\"Method\", \"Radpro\"]]\n return temp", "def lookup(obj):\n return list(dir(obj))", "def lookup(obj):\n return list(dir(obj))", "def find_funcs(obj):\n infos = {}\n for y in find_subobjects(obj):\n attr = getattr(obj, y)\n if callable(attr):\n infos[y] = attr.__doc__\n return infos", "def _all_names_on_object(obj: Any) -> Set[str]:\n nameset = set(obj.__dict__.keys())\n for cls in obj.__class__.__mro__:\n nameset = nameset.union(set(cls.__dict__.keys()))\n return nameset", "def list(cls):\n return [cls.__dict__.get(name) for name in dir(cls) if (\n not callable(getattr(cls, name)) and not name.startswith(\"_\")\n )]", "def _get_methods(self):\n\n methods = inspect.getmembers(self, predicate=callable)\n method_list = set()\n\n for name, _ in methods:\n if (name in ('proxy', 'start', 'stop', 'part', 'join',)\n or name[0] == '_'):\n continue\n\n method_list.add(name)\n\n return method_list", "def print_methods(obj: object) -> None:\n all_attributes = set(dir(obj))\n names_of_methods = set(\n filter(lambda atrr_name: callable(getattr(obj, atrr_name)), all_attributes)\n )\n methods = (getattr(obj, method_name) for method_name in names_of_methods)\n methods_names_and_docs = [(full_signature(method), brief_documentation(method))\n for method in methods]\n print_table(methods_names_and_docs, \"Name\", \"Description\")", "def method_names(method_type):\n global methods\n ret = []\n for (name, value) in globals().items():\n if method_name_regexp.match(name) and type(value) == type:\n method = value()\n if isinstance(method, MethodBase):\n if method.name not in [curr.name for curr in methods]:\n methods.append(method)\n if hasattr(method, method_type):\n 
ret.append(value.name)\n return ret", "def _get_all_classnames(\n module: ModuleType\n) -> List[str]:\n return list(map(lambda x: x[0], inspect.getmembers(module, inspect.isclass)))", "def _each_trait_method ( self, object ):\n dic = {}\n for klass in object.__class__.__mro__:\n for name, method in klass.__dict__.items():\n if (type( method ) is FunctionType) and (name not in dic):\n dic[ name ] = True\n yield name", "def method_list(self):\n\t\tmethod_names = list(self.methods.keys())\n\t\tmethod_names.sort()\n\t\tmethod_list = []\n\t\tfor mn in method_names:\n\t\t\tmethod_list += [self.methods[mn]]\n\t\treturn method_list", "def find_prefixed_funcs(obj: Any, prefix: str) -> Sequence[Tuple[str, Callable]]:\n\n results = []\n\n for sym in dir(obj):\n if sym.startswith(prefix):\n name = sym[len(prefix) :]\n func = getattr(obj, sym)\n if not callable(func):\n continue\n\n results.append((name, func))\n\n return results", "def list_methods(self):\n return list(self.methods.keys())", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)", "def lookup(obj):\n return dir(obj)" ]
[ "0.81180924", "0.77584505", "0.7427535", "0.7369807", "0.7367492", "0.7268082", "0.7225354", "0.70353705", "0.70313483", "0.70313483", "0.7011768", "0.6934401", "0.6904447", "0.68782264", "0.68726104", "0.6863157", "0.6825215", "0.6760233", "0.67479444", "0.67468894", "0.6688628", "0.66404366", "0.66404366", "0.66404366", "0.66404366", "0.66404366", "0.66404366", "0.66404366", "0.66404366", "0.66404366" ]
0.8122183
0
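Aside on the classmethods document above: inspect.ismethod matches bound methods, so in Python 3 the helper behaves differently when handed a class object versus an instance; applied to an instance it lists the instance's public methods. A minimal, self-contained sketch of the same filtering pattern follows; the Greeter class and its method names are illustrative assumptions, not taken from any row above.

import inspect

class Greeter:
    """Illustrative stand-in class; any object with methods works the same way."""

    def greet(self, name):
        return "hello " + name

    def wave(self):
        return "wave"

    def _internal(self):
        return None

# inspect.getmembers() yields (name, member) pairs sorted by name; the predicate
# keeps only bound methods, and the startswith("_") filter drops private names,
# mirroring the classmethods() helper shown above.
public_methods = [
    name
    for name, member in inspect.getmembers(Greeter(), predicate=inspect.ismethod)
    if not name.startswith("_")
]
print(public_methods)  # ['greet', 'wave']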
The Akaike information criterion (AIC) is a measure of the relative goodness of fit of a statistical model. Akaike, Hirotugu (1974). "A new look at the statistical model identification". IEEE Transactions on Automatic Control 19 (6): 716-723.
def calculate_AIC(self):
    hmm_ll_calculator = LikelihoodInfEngineHMM(
        dbn=self.model.dbn, hidden_node_index=0, check_dbn=False)
    ll_full = hmm_ll_calculator.calc_ll(self.seq_list, self.mismask_list)
    return 2 * ll_full - 2 * self._get_parameter_count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aic_ms(distribution):\n print(\"TESTING: AIC model selection for %s distribution\" % distribution.upper())\n params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]\n print(\" creating sample\")\n test_sample = dist.samples(distribution, params)\n print(\" calculating AIC for all distributions\")\n fit_results = {}\n aic = {}\n for d in dist.get():\n fit_results[d] = fit.fit_mle(d, test_sample)\n aic[d] = me.aic_measure(dist.log_likelihood(d, fit_results[d]['params'], test_sample, nonzero_only=True),\n len(fit_results[d]['params']))\n delta_aic = {d: aic[d]-min(aic.values()) for d in aic}\n weights = {d: float(exp(-delta_aic[d]/2)) for d in delta_aic}\n best_model = dist.get()[0]\n print(\" input parameters: %s\" % dist.get_params(params, distribution))\n for d in dist.get():\n if weights[d] > weights[best_model]:\n best_model = d\n weights[d] /= sum(weights.values())\n print(\" %s:\" % d.upper())\n print(\" %s\" % dist.get_params(fit_results[d]['params'], d))\n print(\" AIC = %.0f\" % aic[d])\n print(\" dAIC = %.0f\" % delta_aic[d])\n print(\" w = %r\" % weights[d])\n print(\" Most likely model: %s\" % best_model.upper())\n print_pmfs(test_sample, fit_results, 'TEST-AIC.CSV')", "def AIC(y,yhat,k):\r\n residuals = y-yhat\r\n sse = np.sum(residuals**2) #sum of squared errors\r\n N = len(y)\r\n Lhat = sse/N\r\n return(2*k + N*np.log(Lhat))", "def akaike_info_criterion(log_likelihood, n_params, n_samples):\n # Correction in case of small number of observations\n if n_samples / float(n_params) >= 40.0:\n aic = 2.0 * (n_params - log_likelihood)\n else:\n aic = 2.0 * (n_params - log_likelihood) + 2.0 * n_params * (n_params + 1.0) / (\n n_samples - n_params - 1.0\n )\n return aic", "def info_criteria(indep,\n dep,\n models,\n add_aicc=False):\n num_data = len(indep)\n\n bic_calc = BicCalculator(bic_type=BIC_TYPE.STANDARD)\n bic_calc_bkpt = BicCalculator(bic_type=BIC_TYPE.HYBRID)\n\n #bic_calc_bkpt = bic_calc\n #####bic_calc_bkpt = BicCalculator(bic_type = BIC_TYPE.HOS)\n\n bics = []\n aics = []\n aiccs = []\n for model in models:\n\n if model in [Model.ONE_BKPT, Model.TWO_BKPT]:\n bic_calc_to_use = bic_calc_bkpt\n else:\n bic_calc_to_use = bic_calc\n\n estimator = model.estimator(num_end_to_skip=NUM_END_TO_SKIP,\n num_between_to_skip=NUM_BETWEEN_TO_SKIP)\n estimator.fit(indep, dep)\n\n loglikelihood = estimator.loglikelihood\n num_params = estimator.num_params\n\n bic = bic_calc_to_use.bic(num_params=num_params,\n loglikelihood=loglikelihood,\n num_data=num_data)\n aic = stats_util.aic(num_params=num_params,\n loglikelihood=loglikelihood)\n aicc = stats_util.aicc(num_params=num_params,\n loglikelihood=loglikelihood,\n num_data=num_data)\n\n bics.append(bic)\n aics.append(aic)\n aiccs.append(aicc)\n ic_df = pd.DataFrame({\"BIC\": bics, \"AIC\": aics}, index=models)\n if add_aicc:\n ic_df[\"AICC\"] = aiccs\n\n wts_df = ic_df.apply(stats_util.bma_weights, axis=0)\n wts_cols = [x + \" Model Wt\" for x in wts_df.columns]\n wts_df.columns = wts_cols\n both = pd.concat([ic_df, wts_df], join=\"outer\", axis=1)\n return both", "def AIC(y_true, y_pred, n_features):\n ll = log_likelihood(y_true, y_pred)\n AIC = 2 * n_features - 2 * ll\n return AIC", "def aic(self):\n if hasattr(self, '_aic'):\n return self._aic\n else:\n self._aic = 2 * self.k + 2 * self.neg_ll()\n return self._aic", "def get_AIC_GLM(GLMMod): \r\n k = GLMMod.nVars \r\n aic = GLMMod.dev_res + 2.0 * k \r\n \r\n return aic", "def testAImode(self):\n import Intelligence\n res = Intelligence.Intelligence.AImodeHard(self)\n 
exp = 20\n self.assertEqual(res,exp)", "def akaike_information_criterion(ecov, p, m, Ntotal, corrected=False):\r\n\r\n AIC = (2 * (np.log(linalg.det(ecov))) +\r\n ((2 * (p ** 2) * m) / (Ntotal)))\r\n\r\n if corrected is None:\r\n return AIC\r\n else:\r\n return AIC + (2 * m * (m + 1)) / (Ntotal - m - 1)", "def BIC(y,yhat,k):\r\n y = np.asarray(y)\r\n yhat = np.asarray(yhat)\r\n \r\n N = yhat.size\r\n y = y.reshape(N,)\r\n yhat = yhat.reshape(N,)\r\n residuals = y-yhat\r\n sse = np.sum(residuals**2) #sum of squared errors\r\n N = len(y)\r\n Lhat = sse/(N-k)\r\n return(N*np.log(Lhat) + k*np.log(N))", "def learn_ICA(X, k):\n\n # TODO: YOUR CODE HERE", "def ce_fit(inp_image, ref_image, mask_image):\n\thist_res = Util.histc(ref_image, inp_image, mask_image)\n\targs = hist_res[\"args\"]\n\tscale = hist_res[\"scale\"]\n\tdata = [hist_res['data'], inp_image, hist_res[\"ref_freq_bin\"], mask_image, int(hist_res['size_img']), hist_res['hist_len']]\n\tres = amoeba(args, scale, hist_func, 1.e-4, 1.e-4, 500, data)\n\tresu = [\"Final Parameter [A,B]:\", res[0], \"Final Chi-square :\", -1*res[1], \"Number of Iteration :\", res[2]]\n\tcorrected_image = inp_image*res[0][0] + res[0][1]\n\tresult = [resu,\"Corrected Image :\",corrected_image]\n\tdel data[:], args[:], scale[:]\n\treturn result", "def test_IAC():\n L = 25 # Lattice size\n beta = 0.1 # Inverse temperature\n h = 0.5 # Step size\n Nsteps = int(1E4) # Number of MCMC steps\n metropolize = True # Do metropolize or not.\n\n if metropolize:\n [xy, rej_rate, mags] = sampleXY(L, beta, h, Nsteps, True, True)\n print(\"\\nMetropolized Scheme\")\n print(\"\\nRejection Rate = {:.2f}%\".format(100*rej_rate))\n\n else:\n print(\"\\nUn-Metropolized Scheme\")\n [xy, mags] = sampleXY(L, beta, h, Nsteps, False, True)\n\n acf = acor.function(mags)\n\n # Time for the correlation to first reach 0 (within the tolerance).\n cor_time = np.where(acf <= 1E-6)[0][0]\n\n plt.figure(3)\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.xlabel('Lag')\n plt.ylabel('Autocorrelation')\n if metropolize:\n plt.title('ACF of the Metropolized Scheme')\n else:\n plt.title('ACF of the Un-Metropolized Scheme')\n plt.plot(np.arange(cor_time+1), acf[:cor_time+1], 'b-')\n\n tau = acor.acor(mags, maxlag = cor_time)[0]\n print(\"\\nIAC = {:.1f}\\n\".format(tau))", "def profile_arnaud_bestfit(self):\n # Universal profile parameters\n self.P0 = 8.403 * (self.h70)**(-1.5)\n self.c500 = 1.177\n self.gamma = 0.3081\n self.alpha = 1.0510\n self.beta = 5.4905", "def get_AIC(self, LL):\n degrees_of_freedom = 0\n # initial prob. 
matrix\n if self.fpi is None:\n degrees_of_freedom += self.num_states - 1\n\n # transition matrix\n if self.fT is None:\n degrees_of_freedom += self.num_states * (self.num_states - 1)\n\n for ii in range(self.num_states):\n degrees_of_freedom += self.estimate.E[ii].dof()\n\n # the whole population has one AIC value.\n AIC_value = -2 * np.sum(LL) + 2 * degrees_of_freedom\n\n return AIC_value, degrees_of_freedom", "def get_AIC_GWGLM(GWGLMMod): \r\n k = GWGLMMod.tr_S \r\n aic = GWGLMMod.dev_res + 2.0 * k \r\n \r\n return aic", "def aic_c(self):\n if hasattr(self, '_aic_c'):\n return self._aic_c\n else:\n k = len(self.params)\n n = self.data['n'].sum()\n self._aic_c = self.aic() + (2*k**2 + 2*k)/(n - k - 1)\n return self._aic_c", "def compute_IC(self, n_params=1):\n self.IC = information_criterion(self, n_params=n_params)", "def get_AIA(self):\n\n return self.get_POW().getAIA()", "def test_bic_ms(distribution):\n print(\"TESTING: BIC model selection for %s distribution\" % distribution.upper())\n params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]\n print(\" creating sample\")\n test_sample = dist.samples(distribution, params)\n print(\" calculating BIC for all distributions\")\n fit_results = {}\n bic = {}\n for d in dist.get():\n fit_results[d] = fit.fit_mle(d, test_sample)\n bic[d] = me.bic_measure(dist.log_likelihood(d, fit_results[d]['params'], test_sample, nonzero_only=True),\n len(fit_results[d]['params']), len(test_sample))\n delta_bic = {d: bic[d]-min(bic.values()) for d in bic}\n weights = {d: float(exp(-delta_bic[d]/2)) for d in delta_bic}\n best_model = dist.get()[0]\n print(\" input parameters: %s\" % dist.get_params(params, distribution))\n for d in dist.get():\n if weights[d] > weights[best_model]:\n best_model = d\n weights[d] /= sum(weights.values())\n print(\" %s:\" % d.upper())\n print(\" %s\" % dist.get_params(fit_results[d]['params'], d))\n print(\" BIC = %.0f\" % bic[d])\n print(\" dBIC = %.0f\" % delta_bic[d])\n print(\" w = %r\" % weights[d])\n print(\" Most likely model: %s\" % best_model.upper())\n print_pmfs(test_sample, fit_results, 'TEST-BIC.CSV')", "def test_ds_1d(i, num_bins):\n np.random.seed(2191+i)\n simulated_dataset = simulate_direction(num_bins, ntaxa=47, nsamples=int(360/num_bins), Sigma_trace=1)\n X, K, sigma, mu = simulated_dataset\n y = np.zeros((X.shape[0]*X.shape[1], np.shape(X)[2])) #reformat data for model\n for i in range(len(X)):\n for j in range(len(X[0])):\n y[X.shape[1]*i+j] = X[i,j]\n no_struc = 1\n one_dim = fitModel_1d_util(y)\n for i in range(2):\n print([one_d_AIC(one_dim[1][i], y) for i in range(len(one_dim[1]))])\n #for i in range(2):\n # print([one_d_AWE(one_dim[1][i], y) for i in range(len(one_dim[1]))])\n #print(\"silhouette\")\n #for i in range(len(one_dim[1])):\n # mixing, sigma, delta, Q, Q_edge, edge_mean, mu, likelihoods, iterations = one_dim[1][i]\n # print(silhouette(mixing, sigma, mu, y))\n two_dim = fitModel_2d_util(y)\n for i in range(2):\n print([one_d_AIC(one_dim[1][i], y) for i in range(len(one_dim[1]))])\n print([AIC(two_dim[1][i], y) for i in range(len(two_dim[1]))])\n #one_dim_scores = one_dim[0] #Scores start at 2 bins\n #two_dim_scores = two_dim[0]\n selection = 1 #if selection is negative just assume i'm referring to the 2d case\n return simulated_dataset, one_dim, two_dim, selection", "def modelAVOAkiRichards3(interface):\n interface[:,6]=0.5*(interface[:,0]/interface[:,3]+ \\\n interface[:,2]/interface[:,5])\n interface[:,7]=(interface[:,0]/(2*interface[:,3]))- \\\n 
4*((interface[:,4]**2/interface[:,3]**2)* \\\n (interface[:,1]/interface[:,4]))- \\\n 2*(interface[:,4]**2/interface[:,3]**2)* \\\n (interface[:,2]/interface[:,5])", "def aic(y: np.ndarray, y_predict: np.ndarray, p: int) -> float:\n n = len(y)\n res = np.subtract(y, y_predict)\n rss = np.sum(np.power(res, 2))\n aic_score = n * np.log(rss / n) + 2 * p\n return aic_score", "def score(self, archi:ArchitectureNN):\n archi.fit_model(self.train_data, **self.train_params)\n \n return archi.compute_test_score(self.test_data)", "def analytic_model(ell,A_est,slope):\n\t\treturn total_Cl_noise(ell)+A_est*ell**(-slope)", "def extract_anisotropy_features (Parameters, image, mask=None):\n \n data_inputs = {}\n \n Ka, Kb, Kc = Parameters.kA, Parameters.kB, Parameters.kC\n \n \n h, w, channels = image.shape\n \n if channels == 2:\n channel_types = [\"Para\", \"Perp\"]\n elif channels == 3:\n channel_types = [\"Open\", \"Para\", \"Perp\"]\n \n \n for index, channel in enumerate(channel_types):\n \n data_inputs[channel] = np.sum(image[:,:, index])/np.count_nonzero(image[:,:, index])\n\n\n #Additional parameters\n para_value = data_inputs['Para']\n perp_value = data_inputs['Perp']\n data_inputs['AniAvg'] = (para_value - perp_value)/(para_value + 2*perp_value)\n \n #With corrections\n data_inputs['Ix'] = Ix = ((Ka+Kb)*perp_value - (Ka+Kc)*para_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['Iy'] = Iy = (Kb*para_value - Kc*perp_value)/(Ka*Kb + Kb*Kb - Ka*Kc - Kc*Kc)\n data_inputs['AniAvg'] = (Ix - Iy)/(Ix + 2*Iy)\n \n\n \n return (data_inputs)", "def BIC_edit(y,y_hat,k,vola_val = None,b=None):\r\n y = np.asarray(y)\r\n y_hat = np.asarray(y_hat)\r\n vola_val = np.asarray(vola_val)\r\n \r\n if y.size != y_hat.size or vola_val.size != y_hat.size:\r\n raise(ValueError(\"y and yhat and vola val should be of same size now\\n\\\r\n size(y) = %d and size(yhat) = %d\"%(y.size,y_hat.size)))\r\n N = y_hat.size\r\n y = y.reshape(N,)\r\n y_hat = y_hat.reshape(N,)\r\n vola_val = vola_val.reshape(N,)\r\n try:\r\n Tinv = np.diag(1/(np.sqrt(b[0] + b[1]*vola_val))) #cholesky factorication\r\n #of dispersion matrix\r\n except:\r\n raise(ValueError(\"Vola val not given\"))\r\n residuals = Tinv.dot(y - y_hat) #transformed data\r\n sse = np.sum(residuals**2) #sum of squared errors\r\n N = len(y)\r\n sighat = sse/(N-k)\r\n return(N*np.log(sighat) + k*np.log(N))", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()", "def test_IAC():\n L = 25 # Lattice size\n beta = 
0.1 # Inverse temperature\n h = 0.05 # Step size\n n = 5 # Number of velocity verlet steps.\n Nsteps = int(1E4) # Number of MCMC steps\n metropolize = False # Do metropolize or not.\n\n if metropolize:\n [xy, rej_rate, mags] = HybridMC(L, beta, h, n, Nsteps, True, True)\n print(\"\\nMetropolized Scheme\")\n print(\"\\nRejection Rate = {:.2f}%\".format(100*rej_rate))\n\n else:\n print(\"\\nUn-Metropolized Scheme\")\n [xy, mags] = HybridMC(L, beta, h, n, Nsteps, False, True)\n\n acf = acor.function(mags)\n\n # Time for the correlation to first reach 0 (within the tolerance).\n cor_time = np.where(acf <= 1E-6)[0][0]\n\n plt.figure(3)\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.xlabel('Lag')\n plt.ylabel('Autocorrelation')\n if metropolize:\n plt.title('ACF of the Metropolized Scheme')\n else:\n plt.title('ACF of the Un-Metropolized Scheme')\n plt.plot(np.arange(cor_time+1), acf[:cor_time+1], 'b-')\n\n tau = acor.acor(mags, maxlag = cor_time)[0]\n print(\"\\nIAC = {:.1f}\\n\".format(tau))", "def calcAWAIforOptim(slopeaz, latitude):\n\tdf = calcTotalInsolation(latitude, slopeaz[0], slopeaz[1])\n\treturn np.dot(\n\t\tnp.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]),\n\t\tdf['insolation_tilted']\n\t\t) / -365.0" ]
[ "0.7080638", "0.67313176", "0.66234607", "0.6486055", "0.61601174", "0.6079734", "0.6033591", "0.57279545", "0.57276684", "0.563323", "0.56095815", "0.555238", "0.5534736", "0.5459357", "0.5456556", "0.54511565", "0.5418131", "0.5415451", "0.5370663", "0.5346118", "0.5329103", "0.52440965", "0.5219358", "0.5181299", "0.5174665", "0.51656234", "0.51410425", "0.5140876", "0.513303", "0.51293784" ]
0.6961269
1
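For reference, the textbook forms behind the calculate_AIC document above and the calculate_BIC document below are AIC = 2*k - 2*ln(L) and BIC = k*ln(n) - 2*ln(L), where ln(L) is the maximized log-likelihood, k the number of free parameters and n the number of observations. Both documents return the sign-flipped expression (2*ln(L) minus the penalty), so under that convention larger values indicate the better model. A small standalone illustration follows; the log-likelihood, parameter count and sample size are made-up numbers, assumed purely for illustration.

import math

# Illustrative inputs only, not taken from any model in the rows above.
log_likelihood = -1234.5   # maximized log-likelihood ln(L)
num_params = 7             # number of free parameters k
num_obs = 500              # number of observations n

aic = 2 * num_params - 2 * log_likelihood                    # Akaike (1974)
bic = num_params * math.log(num_obs) - 2 * log_likelihood    # Schwarz (1978)
print(f"AIC = {aic:.1f}, BIC = {bic:.1f}")  # lower is better for both criteria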
The Bayesian information criterion (BIC) is a criterion for model selection among a finite set of models. Schwarz, Gideon E. (1978). "Estimating the dimension of a model". Annals of Statistics 6 (2): 461-464.
def calculate_BIC(self):
    hmm_ll_calculator = LikelihoodInfEngineHMM(
        dbn=self.model.dbn, hidden_node_index=0, check_dbn=False)
    ll_full = hmm_ll_calculator.calc_ll(self.seq_list, self.mismask_list)
    return 2 * ll_full - self._get_parameter_count() * math.log(
        self._get_observation_count())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bic_ms(distribution):\n print(\"TESTING: BIC model selection for %s distribution\" % distribution.upper())\n params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]\n print(\" creating sample\")\n test_sample = dist.samples(distribution, params)\n print(\" calculating BIC for all distributions\")\n fit_results = {}\n bic = {}\n for d in dist.get():\n fit_results[d] = fit.fit_mle(d, test_sample)\n bic[d] = me.bic_measure(dist.log_likelihood(d, fit_results[d]['params'], test_sample, nonzero_only=True),\n len(fit_results[d]['params']), len(test_sample))\n delta_bic = {d: bic[d]-min(bic.values()) for d in bic}\n weights = {d: float(exp(-delta_bic[d]/2)) for d in delta_bic}\n best_model = dist.get()[0]\n print(\" input parameters: %s\" % dist.get_params(params, distribution))\n for d in dist.get():\n if weights[d] > weights[best_model]:\n best_model = d\n weights[d] /= sum(weights.values())\n print(\" %s:\" % d.upper())\n print(\" %s\" % dist.get_params(fit_results[d]['params'], d))\n print(\" BIC = %.0f\" % bic[d])\n print(\" dBIC = %.0f\" % delta_bic[d])\n print(\" w = %r\" % weights[d])\n print(\" Most likely model: %s\" % best_model.upper())\n print_pmfs(test_sample, fit_results, 'TEST-BIC.CSV')", "def info_criteria(indep,\n dep,\n models,\n add_aicc=False):\n num_data = len(indep)\n\n bic_calc = BicCalculator(bic_type=BIC_TYPE.STANDARD)\n bic_calc_bkpt = BicCalculator(bic_type=BIC_TYPE.HYBRID)\n\n #bic_calc_bkpt = bic_calc\n #####bic_calc_bkpt = BicCalculator(bic_type = BIC_TYPE.HOS)\n\n bics = []\n aics = []\n aiccs = []\n for model in models:\n\n if model in [Model.ONE_BKPT, Model.TWO_BKPT]:\n bic_calc_to_use = bic_calc_bkpt\n else:\n bic_calc_to_use = bic_calc\n\n estimator = model.estimator(num_end_to_skip=NUM_END_TO_SKIP,\n num_between_to_skip=NUM_BETWEEN_TO_SKIP)\n estimator.fit(indep, dep)\n\n loglikelihood = estimator.loglikelihood\n num_params = estimator.num_params\n\n bic = bic_calc_to_use.bic(num_params=num_params,\n loglikelihood=loglikelihood,\n num_data=num_data)\n aic = stats_util.aic(num_params=num_params,\n loglikelihood=loglikelihood)\n aicc = stats_util.aicc(num_params=num_params,\n loglikelihood=loglikelihood,\n num_data=num_data)\n\n bics.append(bic)\n aics.append(aic)\n aiccs.append(aicc)\n ic_df = pd.DataFrame({\"BIC\": bics, \"AIC\": aics}, index=models)\n if add_aicc:\n ic_df[\"AICC\"] = aiccs\n\n wts_df = ic_df.apply(stats_util.bma_weights, axis=0)\n wts_cols = [x + \" Model Wt\" for x in wts_df.columns]\n wts_df.columns = wts_cols\n both = pd.concat([ic_df, wts_df], join=\"outer\", axis=1)\n return both", "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n bestBicScore = float(\"+inf\")\n bestModel = None\n log_n_samples = np.log(sum(self.lengths))\n \n for n_components in range(self.min_n_components,self.max_n_components+1): \n logL = float(\"-inf\")\n bicScore = float(\"+inf\")\n hmm_model = None\n logging.info('BIC: Training word =%s with number of components=%d', self.this_word, n_components)\n \n try :\n hmm_model = GaussianHMM(n_components=n_components, covariance_type=\"diag\", \n n_iter=1000, random_state=self.random_state,verbose=False).fit(self.X, self.lengths)\n logL = hmm_model.score(self.X, self.lengths)\n # Bayesian information criteria: BIC = -2 * logL + p * logN\n # p is number of Free Parameters in the Model\n parameters = n_components * n_components + 2 * len(self.X[0]) * n_components - 1\n bicScore = -2 * logL + parameters * log_n_samples\n if bicScore < bestBicScore:\n 
logging.debug('BIC: found lower bic score=%f for word =%s with components=%d', bicScore, self.this_word, n_components)\n bestBicScore = bicScore\n bestModel = hmm_model\n \n except RuntimeWarning as rw:\n logging.warning('BIC: RuntimeWarning : %s', rw)\n except ValueError as ve:\n logging.warning('BIC: ValueError : %s', ve) \n \n if bestModel == None:\n return None\n \n logging.info('BIC: returning : best model with BIC score=%f for word=%s with number of components=%d', bestBicScore, self.this_word, bestModel.n_components) \n return bestModel", "def bayesian_information_criterion(ecov, p, m, Ntotal):\r\n\r\n BIC = (2 * (np.log(linalg.det(ecov))) +\r\n ((2 * (p ** 2) * m * np.log(Ntotal)) / (Ntotal)))\r\n\r\n return BIC", "def bic(self, model, n_components):\n log_l = model.score(self.X, self.lengths)\n # p = m^2 +2mf-1\n p = n_components ** 2 + 2 * n_components * model.n_features - 1\n # BIC = -2 * logL + p * logN\n return -2 * log_l + p * np.log(len(self.X))", "def bic(self):\n return np.log(self.sample_size) * self.number_of_parameters() - 2*self.ll[-1]", "def compute_Bayes(BIC, BIC_wo, BIC_white):\n from scipy.misc import logsumexp\n lnprob = -0.5*BIC - np.logaddexp(-0.5*BIC, -0.5*BIC_wo)\n # BIC of H1 - BIC H0\n # larger value favours H1\n logBayes = 0.5 * (-1.0*BIC + BIC_wo)\n #lnprob = np.log(1./3.) - 0.5*BIC - logsumexp([BIC, BIC_wo, BIC_white])\n #print(np.log(1./3.), - 0.5*BIC, - logsumexp([BIC, BIC_wo, BIC_white]))\n logprob = logBayes - logsumexp([logBayes, 1.])\n #print(\"2lnK: \", 2.0*logBayes)\n lnprob_w = -0.5 * BIC - logsumexp([-0.5*BIC, -0.5*BIC_wo, -0.5*BIC_white])\n lnprob_wo = -0.5 * BIC_wo - logsumexp([-0.5*BIC, -0.5*BIC_wo, -0.5*BIC_white])\n lnprob_white = -0.5 * BIC_white - logsumexp([-0.5*BIC, -0.5*BIC_wo, -0.5*BIC_white])\n #print(0.5 * (BIC_wo - BIC))\n #prob = np.exp(-0.5*BIC) / (np.exp(-0.5*BIC) + np.exp(-0.5*BIC_wo))\n return np.exp(lnprob_w), np.exp(lnprob_wo), np.exp(lnprob_white)", "def BIC(model, logL, nPeople):\n\t\tph, pvh = model\n\t\tnClusters, nQuestions, nAnswers = pvh.shape\n\n\t\tnParams = (nClusters - 1) + nClusters * nQuestions *(nAnswers - 1)\n\t\tbic = -2. * logL + nParams * np.log(nPeople)\n\t\treturn bic", "def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * np.log(n_samples) - 2.0 * log_likelihood", "def bic(self, x):\n x = self.check_size(x)\n n = x.shape[0]\n\n # Free parameters for covariance, means and mixture components\n free_params = self.n_features * self.n_components + self.n_features + self.n_components - 1\n\n bic = -2. 
* self.__score(x, as_average=False).mean() * n + free_params * tf.math.log(n)\n\n return bic", "def _bif2bayesian(pathname, verbose=3):\n if verbose>=3: print('[bnlearn] >Loading bif file <%s>' %(pathname))\n\n bifmodel=readwrite.BIF.BIFReader(path=pathname)\n\n try:\n model = BayesianModel(bifmodel.variable_edges)\n model.name = bifmodel.network_name\n model.add_nodes_from(bifmodel.variable_names)\n\n tabular_cpds = []\n for var in sorted(bifmodel.variable_cpds.keys()):\n values = bifmodel.variable_cpds[var]\n cpd = TabularCPD(var, len(bifmodel.variable_states[var]), values,\n evidence=bifmodel.variable_parents[var],\n evidence_card=[len(bifmodel.variable_states[evidence_var])\n for evidence_var in bifmodel.variable_parents[var]])\n tabular_cpds.append(cpd)\n\n model.add_cpds(*tabular_cpds)\n# for node, properties in bifmodel.variable_properties.items():\n# for prop in properties:\n# prop_name, prop_value = map(lambda t: t.strip(), prop.split('='))\n# model.node[node][prop_name] = prop_value\n\n return model\n\n except AttributeError:\n raise AttributeError('[bnlearn] >First get states of variables, edges, parents and network names')", "def _bif2bayesian(pathname, verbose=3):\n if verbose>=3: print('[bnlearn] >Loading bif file <%s>' %(pathname))\n\n bifmodel=readwrite.BIF.BIFReader(path=pathname)\n\n try:\n model = BayesianModel(bifmodel.variable_edges)\n model.name = bifmodel.network_name\n model.add_nodes_from(bifmodel.variable_names)\n\n tabular_cpds = []\n for var in sorted(bifmodel.variable_cpds.keys()):\n values = bifmodel.variable_cpds[var]\n cpd = TabularCPD(var, len(bifmodel.variable_states[var]), values,\n evidence=bifmodel.variable_parents[var],\n evidence_card=[len(bifmodel.variable_states[evidence_var])\n for evidence_var in bifmodel.variable_parents[var]])\n tabular_cpds.append(cpd)\n\n model.add_cpds(*tabular_cpds)\n# for node, properties in bifmodel.variable_properties.items():\n# for prop in properties:\n# prop_name, prop_value = map(lambda t: t.strip(), prop.split('='))\n# model.node[node][prop_name] = prop_value\n\n return model\n\n except AttributeError:\n raise AttributeError('[bnlearn] >First get states of variables, edges, parents and network names')", "def BIC(y,yhat,k):\r\n y = np.asarray(y)\r\n yhat = np.asarray(yhat)\r\n \r\n N = yhat.size\r\n y = y.reshape(N,)\r\n yhat = yhat.reshape(N,)\r\n residuals = y-yhat\r\n sse = np.sum(residuals**2) #sum of squared errors\r\n N = len(y)\r\n Lhat = sse/(N-k)\r\n return(N*np.log(Lhat) + k*np.log(N))", "def summary(self, mode='BIC', b=0.0):\n if self.summary_object is None:\n if mode is None:\n mode = self.BFmode\n summ = dict()\n summ['logbayesfactor'] = self.get_log_Bayes_factor(mode)\n summ['evidence'] = \\\n {'mc': self.CModel.get_log_marginal_likelihood(mode), \n 'md': self.DModel.get_log_marginal_likelihood(mode)}\n summ['pmp'] = self.get_posterior_model_probabilities(mode)\n \n if self.ndim == 1 and self.design != 'DiD':\n # compute effect size \n \n es = self.get_effect_size(summ, b)\n for k, v in es.items():\n summ[k] = v\n \n elif self.ndim == 2 and self.design != 'DiD': \n warnings.warn('Computing 2D effect size with Monte Carlo may take a while.')\n \n m = len(b)\n es = {i: self.get_effect_size(summ, b[i]) for i in range(m)}\n \n for k, v in es[0].items():\n summ[k] = [es[i][k] for i in range(m)]\n \n else:\n warnings.warn('Effect size analysis for D = {:d} not implemented.'.format(self.ndim))\n self.summary_object = summ\n return self.summary_object", "def test_ic():\n # Set up the inputs\n profile = 
get_profile()\n T0 = 273.15 + 15.\n z0 = 1500.\n P = profile.get_values(z0, ['pressure'])\n composition = ['methane', 'ethane', 'propane', 'oxygen']\n bub = dbm.FluidParticle(composition)\n yk = np.array([0.85, 0.07, 0.08, 0.0])\n de = 0.005\n K = 1.\n K_T = 1.\n fdis = 1.e-4\n t_hyd = 0.\n lag_time = True\n\n # Get the initial conditions\n (bub_obj, y0) = single_bubble_model.sbm_ic(profile, bub,\n np.array([0., 0., z0]), de, yk, T0, K, K_T, fdis, t_hyd,\n lag_time)\n\n # Check the initial condition values\n assert y0[0] == 0.\n assert y0[1] == 0.\n assert y0[2] == z0\n assert y0[-1] == T0 * np.sum(y0[3:-1]) * seawater.cp() * 0.5\n assert_approx_equal(bub.diameter(y0[3:-1], T0, P), de, significant=6)\n\n # Check the bub_obj parameters\n for i in range(len(composition)):\n assert bub_obj.composition[i] == composition[i]\n assert bub_obj.T0 == T0\n assert bub_obj.cp == seawater.cp() * 0.5\n assert bub_obj.K == K\n assert bub_obj.K_T == K_T\n assert bub_obj.fdis == fdis\n assert bub_obj.t_hyd == t_hyd\n for i in range(len(composition)-1):\n assert bub_obj.diss_indices[i] == True\n assert bub_obj.diss_indices[-1] == False", "def compute_bic(kmeans,X):\n # assign centers and labels\n centers = [kmeans.cluster_centers_]\n labels = kmeans.labels_\n #number of clusters\n m = kmeans.n_clusters\n # size of the clusters\n n = np.bincount(labels)\n #size of data set\n N, d = X.shape\n\n #compute variance for all clusters beforehand\n cl_var = [(1.0 / (n[i] - m)) * sum(distance.cdist(X[np.where(labels == i)], [centers[0][i]], 'euclidean')**2) for i in range(m)]\n\n const_term = 0.5 * m * np.log10(N)\n\n BIC = np.sum([n[i] * np.log10(n[i]) -\n n[i] * np.log10(N) -\n ((n[i] * d) / 2) * np.log10(2*np.pi) -\n (n[i] / 2) * np.log10(cl_var[i]) -\n ((n[i] - m) / 2) for i in range(m)]) - const_term\n\n return(BIC)", "def BIC(y_true, y_pred, n_features):\n ll = log_likelihood(y_true, y_pred)\n n_samples = y_true.size\n BIC = np.log(n_samples) * n_features - 2 * ll\n return BIC", "def calculate_AIC(self): \n hmm_ll_calculator = LikelihoodInfEngineHMM(\n dbn=self.model.dbn, hidden_node_index=0, check_dbn=False)\n ll_full = hmm_ll_calculator.calc_ll(self.seq_list, self.mismask_list)\n return 2 * ll_full - 2 * self._get_parameter_count()", "def bic(self):\n if hasattr(self, '_bic'):\n return self._bic\n else:\n self._bic = self.k * np.log(self.data['n'][self.data['c'] == 0].sum()) + 2 * self.neg_ll()\n return self._bic", "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n best_bic_score = float('inf')\n best_model = self.base_model(self.n_constant)\n\n # num_states: for n between self.min_n_components and self.max_n_components\n for num_states in range(self.min_n_components, self.max_n_components + 1):\n model = self.base_model(num_states)\n\n # logL: log(the likelihood of the fitted model)\n try:\n logL = model.score(self.X, self.lengths)\n except Exception as e:\n continue\n\n # N: the number of data points (= sample size)\n N = sum(self.lengths)\n\n # p: the number of free parameters\n # http://hmmlearn.readthedocs.io/en/latest/api.html\n # Attributes of GaussianHMM\n # transmat_: (array, shape (n_components, n_components)) Matrix of transition probabilities between states.\n # since they add up to 1.0, the last row can be calculated from others,\n # so it is n_components * (n_components - 1).\n # startprob_: (array, shape (n_components, )) Initial state occupation distribution.\n # since they add up to 1.0, it is (n_components - 1).\n # means_: (array, shape (n_components, 
n_features)) Mean parameters for each state.\n # covars_: (array) Covariance parameters for each state. (n_components, n_features) if “diag”\n # p = #transmat_ + #startprob_ + #means_ + #covars_\n # = n_components * (n_components - 1) + n_components - 1 + n_components * n_features + n_components * n_features\n p = num_states ** 2 + 2 * num_states * model.n_features - 1\n\n # BIC = -2 * logL + p * logN\n bic_score = -2 * logL + p * np.log(N)\n\n if bic_score < best_bic_score:\n best_bic_score, best_model = bic_score, model\n \n return best_model", "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n # DONE implement model selection based on BIC scores\n\n # Bayesian information criteria: BIC = −2 * log L + p * log N,\n # where\n # • L is the likelihood of the fitted model,\n # • p is the number of parameters, and\n # • N is the number of data points.\n # The term −2 log L decreases with increasing model complexity\n # (more parameters), whereas the penalties 2p or p log N increase with\n # increasing complexity. The BIC applies a larger penalty\n # when N > e 2 = 7.4.\n # Model selection: The lower the BIC value the better the model\n\n select_bic = float(\"inf\")\n select_model = None\n for n_components in range(self.min_n_components, self.max_n_components + 1):\n try:\n model = self.base_model(n_components)\n logL = model.score(self.X, self.lengths)\n # https://discussions.udacity.com/t/verifing-bic-calculation/246165/5\n # https://discussions.udacity.com/t/number-of-parameters-bic-calculation/233235/17\n p = n_components**2 + 2*n_components * model.n_features - 1\n logN = math.log(sum(self.lengths))\n bic = - 2 * logL + p * logN\n if bic < select_bic:\n select_bic = bic\n select_model = model\n except:\n continue\n return select_model", "def bic(self, X):\n raise NotImplementedError", "def B(self) -> int:\n return self.params.B", "def get_effect_size(self, summ, b, nmc=5000):\n m0b, v0b = self.DModel.models[0].predict(np.array([b])) \n m1b, v1b = self.DModel.models[1].predict(np.array([b]))\n \n d_mean_D = np.squeeze(m1b - m0b) # TODO: why was this swapped around?\n d_var_D = np.squeeze(v0b + v1b)\n d_std_D = np.sqrt(d_var_D)\n \n if d_mean_D < 0:\n pval = 1 - stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n else:\n pval = stats.norm.cdf(x=0, loc=d_mean_D, scale=d_std_D)\n \n xmin, xmax = (np.min([d_mean_D - 4*d_std_D, -0.1*d_std_D]), \n np.max([d_mean_D + 4*d_std_D, 0.1*d_std_D]))\n \n n = 300\n xrange = np.linspace(xmin, xmax, n)\n y = stats.norm.pdf(xrange, d_mean_D, d_std_D) \n \n samples = np.zeros((nmc))\n nspike = int(np.round(summ['pmp']['pmc']*nmc))\n samples[nspike:] = np.random.normal(loc=d_mean_D, \n scale=np.sqrt(d_var_D), \n size=(nmc-nspike))\n \n if not np.isscalar(b):\n d_bma = None\n else:\n \n if nspike==nmc:\n # BMA dominated by continuous model\n # Put all mass at xrange closest to b\n d_bma = np.zeros((n))\n xdelta = xrange[1] - xrange[0]\n ix = np.argmin((xrange-b)**2)\n d_bma[ix] = 1.0 / xdelta\n elif nspike==0:\n # BMA dominated by discontinuous model\n d_bma = y\n else:\n # BMA is a mixture\n kde_fit = stats.gaussian_kde(samples, \n bw_method='silverman')\n d_bma = kde_fit(xrange)\n \n return {'es_BMA': d_bma,\n 'es_Disc': y,\n 'es_disc_stats': (d_mean_D, d_std_D),\n 'pval': pval,\n 'es_range': xrange,\n 'f(b)': (m0b, m1b),\n 'es_transform': lambda z: z*d_std_D + d_mean_D}", "def BIC_likelihood_model_test(image_matrix, comp_means):\n m, n = image_matrix.shape\n best_bic = float('inf')\n best_lk = float('-inf')\n 
n_comp_min_bic = n_comp_max_likelihood = 0\n for i in range(len(comp_means)):\n k = comp_means[i].shape[0]\n mu, sigma, pi, res = train_model(image_matrix, k, default_convergence)\n lk = likelihood(image_matrix, pi, mu, sigma, k)\n bic = np.log10(m) * (n * n + n + 1) * k - 2 * lk\n if lk > best_lk:\n best_lk = lk\n n_comp_max_likelihood = k\n if bic < best_bic:\n best_bic = bic\n n_comp_min_bic = k\n return n_comp_min_bic, n_comp_max_likelihood", "def constraint_B_k_invis(self):\n ms = self.ms\n width_contr = 0.0\n\n # Make sure scalar mass doesn't fall outside of kinematic bounds\n if np.any([s[0] <= ms**2 <= s[1] for s in B_k_invis_obs.s_bounds]):\n widths_s = self.partial_widths()\n width_s = widths_s[\"total\"]\n width_s_sm = width_s - widths_s[\"x x\"] # Gamma_{S->SM}\n\n # Magnitude of S' 3-momentum\n ps = np.sqrt(\n (mB - mk - ms) * (mB + mk - ms) * (mB - mk + ms) * (mB + mk + ms)\n ) / (2.0 * mB)\n # Probability that S decays outside the detector\n pr_invis = np.exp(-B_k_invis_obs.r_max * cm_to_inv_MeV * width_s * ms / ps)\n\n # Compute the total contribution to the invisible decay width\n width_contr = (\n self.width_B_k_s() * (widths_s[\"x x\"] + pr_invis * width_s_sm) / width_s\n )\n\n return B_k_invis_obs.width_bound - width_contr", "def banana(dim=2, b=.03, step='Metropolis', iters=5000):\n assert dim >= 2, 'banana must be dimension >= 2'\n C_1 = pl.ones(dim)\n C_1[0] = 100.\n X = mc.Uninformative('X', value=pl.zeros(dim))\n\n def banana_like(X, tau, b):\n phi_X = pl.copy(X)\n phi_X *= 30. # rescale X to match scale of other models\n phi_X[1] = phi_X[1] + b*phi_X[0]**2 - 100*b\n\n return mc.normal_like(phi_X, 0., tau)\n\n @mc.potential\n def banana(X=X, tau=C_1**-1, b=b):\n return banana_like(X, tau, b)\n\n mod = setup_and_sample(vars(), step, iters)\n im = pl.imread('banana.png')\n x = pl.arange(-1, 1, .01)\n y = pl.arange(-1, 1, .01)\n z = [[banana_like(pl.array([xi, yi]), C_1[[0,1]]**-1, b) for xi in x] for yi in y]\n def plot_distribution():\n pl.imshow(im, extent=[-1,1,-1,1], aspect='auto', interpolation='bicubic')\n pl.contour(x, y, z, [-1000, -10, -6], cmap=pl.cm.Greys, alpha=.5)\n mod.plot_distribution = plot_distribution\n\n return mod", "def _compute_B_statistics(self, K, W, a):\r\n if not self.noise_model.log_concave:\r\n #print \"Under 1e-10: {}\".format(np.sum(W < 1e-6))\r\n W[W < 1e-10] = 1e-10 # FIXME-HACK: This is a hack since GPy can't handle negative variances which can occur\r\n # If the likelihood is non-log-concave. 
We wan't to say that there is a negative variance\r\n # To cause the posterior to become less certain than the prior and likelihood,\r\n # This is a property only held by non-log-concave likelihoods\r\n\r\n\r\n #W is diagonal so its sqrt is just the sqrt of the diagonal elements\r\n W_12 = np.sqrt(W)\r\n B = np.eye(self.N) + W_12*K*W_12.T\r\n L = jitchol(B)\r\n\r\n W12BiW12a = W_12*dpotrs(L, np.asfortranarray(W_12*a), lower=1)[0]\r\n ln_B_det = 2*np.sum(np.log(np.diag(L)))\r\n return W12BiW12a, ln_B_det", "def informative_states(\n model,\n information_threshold = 0.5\n):\n informative_states = set()\n B = model.B\n bits = 0.0\n for i in xrange( model.N ):\n #print B[ i,: ]\n ic = math.log( 4, 2 )\n for x in xrange( 4 ):\n if B[ i, x ] > 0:\n ic += B[ i, x ] * math.log( B[ i, x ], 2 )\n #print i, ic\n if ic > information_threshold: informative_states.add( i )\n bits += ic\n return informative_states, bits", "def compute_bic(kmeans,X):\n # assign centers and labels\n centers = [kmeans.cluster_centers_]\n labels = kmeans.labels_\n #number of clusters\n m = kmeans.n_clusters\n # size of the clusters\n n = np.bincount(labels)\n #size of data set\n N, d = X.shape\n\n #compute variance for all clusters beforehand\n cl_var = (1.0 / (N - m) / d) * sum([sum(distance.cdist(X[np.where(labels == i)], [centers[0][i]], \n 'euclidean')**2) for i in range(m)])\n\n const_term = 0.5 * m * np.log(N) * (d+1)\n\n BIC = np.sum([n[i] * np.log(n[i]) -\n n[i] * np.log(N) -\n ((n[i] * d) / 2) * np.log(2*np.pi*cl_var) -\n ((n[i] - 1) * d/ 2) for i in range(m)]) - const_term\n BIC_clusters =[n[i] * np.log(n[i]) -\n n[i] * np.log(N) -\n ((n[i] * d) / 2) * np.log(2*np.pi*cl_var) -\n ((n[i] - 1) * d/ 2) for i in range(m)]\n return(BIC , BIC_clusters)" ]
[ "0.6738231", "0.6541316", "0.6489599", "0.63965064", "0.6255589", "0.61559856", "0.6112755", "0.6090768", "0.59615993", "0.59547305", "0.59203905", "0.59203905", "0.58714235", "0.5793341", "0.5740092", "0.57190186", "0.56444794", "0.5625056", "0.5620049", "0.5608862", "0.5584382", "0.55442196", "0.5500297", "0.54983133", "0.5497681", "0.5494651", "0.5478432", "0.5457966", "0.5429984", "0.5402545" ]
0.68793404
0
Create sequence and mismask (mask that identifies values as hidden, observed or missing) from a training set.
def _create_sequence_and_mismask(self, training_set, missing_residues): seq_list = [] mismask_list = [] if not self.show_warnings: warning_list = warnings.filters[:] warnings.filterwarnings('ignore', category=TorusDBNWarning) training_set_count = len(training_set) for filename in training_set: self.info('Reading data from training file %s...' % (filename)) try: sequences, mismasks = create_sequence_from_file( filename, missing_residues, not self.show_warnings) seq_list += sequences mismask_list += mismasks except TorusDBNException as error: warnings.warn( "%s The file was not included in the training set." % error, TorusDBNWarning ) training_set_count -= 1 self.info('\n%d files included in the training set.' % (training_set_count)) if not self.show_warnings: warnings.filters = warning_list return seq_list, mismask_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_masked_targets(self, item_ids: tf.Tensor, training: bool = False) -> MaskingInfo:\n\n labels = tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype)\n non_padded_mask = tf.cast(item_ids != self.padding_idx, labels.dtype)\n rows_ids = tf.range(labels.shape[0], dtype=tf.int64)\n # During training, masks labels to be predicted according to a probability, ensuring that\n # each session has at least one label to predict\n if training:\n # Selects a percentage of items to be masked (selected as labels)\n probability_matrix = tf.cast(\n backend.random_bernoulli(array_ops.shape(labels), p=self.mlm_probability),\n labels.dtype,\n )\n\n mask_labels = probability_matrix * non_padded_mask\n labels = tf.where(\n tf.cast(mask_labels, tf.bool),\n item_ids,\n tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype),\n )\n\n # Set at least one item in the sequence to mask, so that the network\n # can learn something with this session\n one_random_index_by_session = tf.random.categorical(\n tf.math.log(tf.cast(non_padded_mask, tf.float32)), num_samples=1\n )\n indices = tf.concat([tf.expand_dims(rows_ids, 1), one_random_index_by_session], axis=1)\n labels = tf.tensor_scatter_nd_update(\n labels, indices=indices, updates=tf.gather_nd(item_ids, indices)\n )\n mask_labels = tf.cast(labels != self.padding_idx, labels.dtype)\n\n # If a sequence has only masked labels, unmask one of the labels\n sequences_with_only_labels = tf.reduce_sum(mask_labels, axis=1) == tf.reduce_sum(\n non_padded_mask, axis=1\n )\n sampled_labels_to_unmask = tf.random.categorical(\n tf.math.log(tf.cast(mask_labels, tf.float32)), num_samples=1\n )\n\n labels_to_unmask = tf.boolean_mask(sampled_labels_to_unmask, sequences_with_only_labels)\n rows_to_unmask = tf.boolean_mask(rows_ids, sequences_with_only_labels)\n indices = tf.concat([tf.expand_dims(rows_to_unmask, 1), labels_to_unmask], axis=1)\n num_updates, _ = indices.shape.as_list()\n labels = tf.tensor_scatter_nd_update(\n labels, indices, tf.cast(tf.fill(num_updates, self.padding_idx), labels.dtype)\n )\n mask_labels = labels != self.padding_idx\n\n else:\n if self.eval_on_last_item_seq_only:\n last_item_sessions = tf.reduce_sum(non_padded_mask, axis=1) - 1\n\n indices = tf.concat(\n [\n tf.expand_dims(rows_ids, 1),\n tf.cast(tf.expand_dims(last_item_sessions, 1), tf.int64),\n ],\n axis=1,\n )\n labels = tf.tensor_scatter_nd_update(\n labels, indices=indices, updates=tf.gather_nd(item_ids, indices)\n )\n mask_labels = labels != self.padding_idx\n else:\n masking_info = self.predict_all(item_ids)\n mask_labels, labels = masking_info.schema, masking_info.targets\n\n return MaskingInfo(mask_labels, labels)", "def masking(X_train, X_test, y_train, y_test):\n # create mask to exclude NaN-values from train data\n mask_train = np.zeros(X_train.shape[0], dtype=np.bool)\n\n for i, subfeat in enumerate(X_train):\n if True in np.isnan(subfeat):\n mask_train[i] = True\n else:\n mask_train[i] = False\n\n # create mask to exclude NaN-values from test data\n mask_test = np.zeros(X_test.shape[0], dtype=np.bool)\n\n for i, subfeat in enumerate(X_test):\n if True in np.isnan(subfeat):\n mask_test[i] = True\n else:\n mask_test[i] = False\n\n # masking\n X_train = X_train[~mask_train]\n y_train = y_train[~mask_train]\n\n X_test = X_test[~mask_test]\n y_test = y_test[~mask_test]\n\n y_train = y_train.astype(\"int64\")\n y_test = y_test.astype(\"int64\")\n\n # exclude classes that are not included in both, test and train data\n difflist1 = 
list(set(np.unique(y_train)) - set(np.unique(y_test)))\n\n for i in difflist1:\n mask_train = y_train == i\n X_train = X_train[~mask_train]\n y_train = y_train[~mask_train]\n\n difflist2 = list(set(np.unique(y_test)) - set(np.unique(y_train)))\n\n for i in difflist2:\n mask_test = y_test == i\n X_test = X_test[~mask_test]\n y_test = y_test[~mask_test]\n\n return(X_train, X_test, y_train, y_test)", "def mask_out(self, x, lengths):\n params = self.params\n slen, bs = x.size()\n\n # define target words to predict\n if params.sample_alpha == 0:\n pred_mask = np.random.rand(slen, bs) <= params.word_pred\n pred_mask = torch.from_numpy(pred_mask.astype(np.uint8))\n else:\n x_prob = params.mask_scores[x.flatten()]\n n_tgt = math.ceil(params.word_pred * slen * bs)\n tgt_ids = np.random.choice(len(x_prob), n_tgt, replace=False, p=x_prob / x_prob.sum())\n pred_mask = torch.zeros(slen * bs, dtype=torch.uint8)\n pred_mask[tgt_ids] = 1\n pred_mask = pred_mask.view(slen, bs)\n\n # do not predict padding\n pred_mask[x == params.pad_index] = 0\n pred_mask[0] = 0 # TODO: remove\n\n # mask a number of words == 0 [8] (faster with fp16)\n if params.fp16:\n pred_mask = pred_mask.view(-1)\n n1 = pred_mask.sum().item()\n n2 = max(n1 % 8, 8 * (n1 // 8))\n if n2 != n1:\n pred_mask[torch.nonzero(pred_mask).view(-1)[:n1 - n2]] = 0\n pred_mask = pred_mask.view(slen, bs)\n # assert pred_mask.sum().item() % 8 == 0\n\n # generate possible targets / update x input\n pred_mask = pred_mask.bool()\n _x_real = x[pred_mask]\n if len(_x_real) == 0:\n pred_mask[0, 0] = 1\n _x_real = x[pred_mask]\n _x_rand = _x_real.clone().random_(params.n_words)\n _x_mask = _x_real.clone().fill_(params.mask_index)\n probs = torch.multinomial(params.pred_probs, len(_x_real), replacement=True)\n _x = _x_mask * (probs == 0).long() + _x_real * (probs == 1).long() + _x_rand * (probs == 2).long()\n x = x.masked_scatter(pred_mask, _x)\n\n assert 0 <= x.min() <= x.max() < params.n_words\n assert x.size() == (slen, bs)\n assert pred_mask.size() == (slen, bs)\n\n return x, _x_real, pred_mask", "def mask_tokens(self, sequence):\n n_tokens = len(sequence)\n n_masked_tokens = int(self.masking_proportion*n_tokens/100)\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n while len(set(indexes))!=n_masked_tokens:\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n sequence = np.array(sequence)\n sequence[indexes] = 4\n return list(sequence)", "def segment(u_train, l_train, u_test):\n ### Exercise 6.2\n\n # Encode input and gather stats for model\n encoder = WordEncoder()\n encoder.fit(u_train)\n X_train = encoder.transform(u_train, flat=False)\n X_test = encoder.transform(u_test, flat=False)\n input_N = len(u_train)\n maxLen = encoder.max_wordlen\n third_dim = np.shape(X_train)[2]\n\n # Pad output to match input\n paddded_l_train = []\n for label in l_train:\n new_label = np.zeros(maxLen, dtype=int)\n for j, k in enumerate(label):\n new_label[j] = k\n paddded_l_train.append(new_label)\n l_train_array = np.array(paddded_l_train).reshape(input_N, maxLen, 1)\n\n # Build model\n model = Sequential()\n # I could not get the masking layer to work. Tried editing the X_train and Y arrays in numerous ways\n # But couldn't figure out why it wouldn't accept it. 
Any advice here would be much appreciated!\n # model.add(Embedding(input_dim=maxLen, output_dim=(maxLen, third_dim), mask_zero=True))\n model.add(LSTM(100, input_shape=(maxLen, third_dim), activation=\"relu\", return_sequences=True))\n model.add(Dropout(0.2))\n model.add(TimeDistributed(Dense(1, activation='sigmoid')))\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)\n model.summary()\n model.fit(X_train, l_train_array, epochs=50, callbacks=[es])\n\n predictions = model.predict(X_test)\n # Convert label predictions to segments\n segments = label_to_segments(u_test, predictions)\n\n return segments", "def create_masked_EHR_predictions(input_seq, masked_lm_prob,max_predictions_per_seq, vocab, rng):\n #print('original_inp_seq',input_seq)\n #orig_seq=input_seq ## added for inp_seq update issue LR 4/25\n #cand_indexes = []\n #for (i, token) in enumerate(tokens):\n # if token == \"[CLS]\" or token == \"[SEP]\":\n # continue\n # cand_indexes.append(i)\n\n #cand_indexes=list(range(len(input_seq)+1))[1:] ## I can use that for the position but not for the mask index \n #will use the same but exclude the +1 so I don't mask the fist and last code\n \n cand_indexes=list(range(len(input_seq)))### LR 4/29 remove[1:]\n rng.shuffle(cand_indexes)\n output_tokens = input_seq[:] ### added slicing to inhibit original list update\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(input_seq) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index in cand_indexes:\n if len(masked_lms) >= num_to_predict: ### LR 4/29 remove >=\n break\n if index in covered_indexes:\n continue\n covered_indexes.add(index)\n\n #masked_token = None #### need to make sure what I did below is correct\n masked_token=0 ### comment for now LR 4/25\n \n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n #masked_token = \"[MASK]\"\n masked_token=0\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n #masked_token = tokens[index]\n masked_token=input_seq[index] ### LR 4/29 added +1\n # 10% of the time, replace with random word\n else:\n #masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n masked_token=rng.randint(1,max(vocab.values()))\n \n output_tokens[index] = masked_token ### LR 4/29 added +1\n\n masked_lms.append(MaskedLmInstance(index=index, label=input_seq[index])) ### Currently keeping the original code but I need to optimize that later from here till end of function\n\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n #print (input_seq,orig_seq,output_tokens, masked_lm_positions, masked_lm_labels)\n return (output_tokens, masked_lm_positions, masked_lm_labels)", "def build_attention_mask(input_ids): \n attention_masks = [] \n\n # 1 for input and 0 for pad\n for seq in input_ids: \n attention_masks.append([float(i>0) for i in seq])\n\n return attention_masks", "def random_masking(self, sequence: tf.Tensor, noise: tf.Tensor | None = None):\n batch_size, seq_length, dim = shape_list(sequence)\n len_keep = int(seq_length * (1 - self.config.mask_ratio))\n\n if noise is None:\n noise = tf.random.uniform(shape=(batch_size, seq_length), minval=0.0, maxval=1.0) # noise in [0, 1)\n\n # sort noise for each sample\n ids_shuffle = tf.argsort(noise, axis=1) # ascend: small is keep, large is 
remove\n ids_restore = tf.argsort(ids_shuffle, axis=1)\n\n # keep the first subset\n ids_keep = ids_shuffle[:, :len_keep]\n sequence_unmasked = tf.gather(\n sequence,\n axis=1,\n batch_dims=1,\n indices=ids_keep,\n )\n\n # generate the binary mask: 0 is keep, 1 is remove\n # this hack is needed because TF's EagerTensors don't support\n # assignment\n mask_keep = tf.zeros((batch_size, len_keep))\n mask_remove = tf.ones((batch_size, seq_length - len_keep))\n mask = tf.concat([mask_keep, mask_remove], axis=-1)\n\n # unshuffle to get the binary mask\n mask = tf.gather(mask, axis=1, batch_dims=1, indices=ids_restore)\n\n return sequence_unmasked, mask, ids_restore", "def make_smiley_training_set(num_points=0, delta=0.05):\n log.out.info(\"Generating happy data.\")\n # Select coordinates to do an XOR like operation on\n coords = []\n bools = []\n x_min = 0.0\n x_max = 1.0\n y_min = 0.0\n y_max = 1.0\n for i in range(num_points):\n # Add num_points randomly\n coord_point = np.random.random(2)\n coord_point[0] = coord_point[0] * (x_max - x_min) + x_min\n coord_point[1] = coord_point[1] * (y_max - y_min) + y_min\n coords.append(coord_point)\n\n # Assign an xor boolean value to the coordinates\n for coord_point in coords:\n x = coord_point[0]\n y = coord_point[1]\n if (abs(x - 0.65) < delta) & (abs(y - 0.65) < (0.05+delta)):\n bools.append(True)\n elif (abs(x - 0.35) < delta) & (abs(y - 0.65) < (0.05+delta)):\n bools.append(True)\n elif ((x > 0.2) & (x < 0.8) &\n (abs(y - ((1.5 * (x - 0.5))**2 + 0.25)) < delta)):\n bools.append(True)\n else:\n bools.append(False)\n\n # Build training vectors\n train_in = None\n train_out = None\n for i, coord in enumerate(coords):\n # Need to initialize the arrays\n if i == 0:\n train_in = np.array([coord])\n train_out = np.array([[bools[i]]])\n else:\n train_in = np.append(train_in, np.array([coord]), axis=0)\n train_out = np.append(train_out, np.array([[bools[i]]]), axis=1)\n\n train_out = train_out.T\n return train_in, train_out", "def gen_history_mask(x):\n batch_size, seq_len, _ = x.size()\n return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len, seq_len).repeat(batch_size, 1, 1)", "def create_padding_mask(seq):\r\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\r\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\r", "def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo:\n raise NotImplementedError", "def infer_mask(seq, eos_ix, time_major=False, dtype=tf.float32):\n axis = 0 if time_major else 1\n lengths = infer_length(seq, eos_ix, time_major=time_major)\n mask = tf.sequence_mask(lengths, maxlen=tf.shape(seq)[axis], dtype=dtype)\n if time_major: mask = tf.transpose(mask)\n return mask", "def create_dummy_data():\n data = loadmat(join(project_root, './data/lungs.mat'))['seq']\n nx, ny, nt = data.shape\n ny_red = 8\n sl = ny//ny_red\n data_t = np.transpose(data, (2, 0, 1))\n\n data_t[:, :, :sl*4]\n train_slice = data_t[:, :, :sl*4]\n validate_slice = data_t[:, :, ny//2:ny//2+ny//4]\n test_slice = data_t[:, :, ny//2+ny//4]\n\n # Synthesize data by extracting patches\n train = np.array([data_t[..., i:i+sl] for i in np.random.randint(0, sl*3, 20)])\n validate = np.array([data_t[..., i:i+sl] for i in (sl*4, sl*5)])\n test = np.array([data_t[..., i:i+sl] for i in (sl*6, sl*7)])\n\n return train, validate, test", "def mask_test_train(data, split): \n # create a copy of the full data for reduction\n training_set = data.copy()\n\n # find index of values which are not empty\n nonzero_inds = 
training_set.nonzero()\n\n # create list of index pairs\n nonzero_pairs = list(zip(nonzero_inds[0], nonzero_inds[1]))\n\n # calculate the number of samples to be removed in training set\n num_samples = int(np.ceil(split*len(nonzero_pairs)))\n\n # get random samples\n samples = random.sample(nonzero_pairs, num_samples)\n\n # remove selected samples in training set\n user_inds = [index[0] for index in samples]\n item_inds = [index[1] for index in samples]\n training_set[user_inds, item_inds] = 0 \n\n return training_set, list(set(user_inds)), np.array(samples)", "def process_hidden_layers(self, x, training):\n restricted_to_final_seq = False\n for layer_ix, layer in enumerate(self.hidden_layers):\n if type(layer) == Dense:\n if self.return_final_seq_only and not restricted_to_final_seq:\n x = x[:, -1, :]\n restricted_to_final_seq = True\n x = layer(x)\n else:\n x = layer(x)\n if self.batch_norm:\n x = self.batch_norm_layers[layer_ix](x, training=False)\n if self.dropout != 0.0 and training: x = self.dropout_layer(x)\n return x, restricted_to_final_seq", "def _get_train_val_test_masks(total_size, y_true, val_fraction, test_fraction, seed):\n # Split into a train, val and test set\n # Store indexes of the nodes belong to train, val and test set\n indexes = range(total_size)\n indexes_train, indexes_test = train_test_split(\n indexes, test_size=test_fraction, stratify=y_true, random_state=seed)\n indexes_train, indexes_val = train_test_split(indexes_train, test_size=val_fraction, stratify=y_true[indexes_train],\n random_state=seed)\n # Init masks\n train_idxs = np.zeros(total_size, dtype=np.bool)\n val_idxs = np.zeros(total_size, dtype=bool)\n test_idxs = np.zeros(total_size, dtype=np.bool)\n\n # Update masks using corresponding indexes\n train_idxs[indexes_train] = True\n val_idxs[indexes_val] = True\n test_idxs[indexes_test] = True\n\n return torch.from_numpy(train_idxs), torch.from_numpy(val_idxs), torch.from_numpy(test_idxs)", "def separate_train_valid(positives, validation_split):\n val_set = []\n shuffled_positives = shuffle_annotations(positives)\n upper = int(round(len(shuffled_positives)*validation_split))\n subset = shuffled_positives[0:upper]\n for each in subset:\n val_set.append(each)\n shuffled_positives.remove(each)\n return val_set, shuffled_positives", "def sequence_mask(X, valid_len, value=0):\n maxlen = X.size(1)\n mask = torch.arange((maxlen), dtype=torch.float32,\n device=X.device)[None, :] < valid_len[:, None]\n X[~mask] = value\n return X", "def sequence_mask(X, valid_len, value=0):\n maxlen = X.size(1)\n mask = torch.arange((maxlen), dtype=torch.float32,\n device=X.device)[None, :] < valid_len[:, None]\n X[~mask] = value\n return X", "def reset_hidden(hidden, mask):\n if len(mask) != 0:\n hidden[:, mask, :] = 0\n \n return hidden", "def forward(self, x):\n features = self.encoder(x)\n decoder_output = self.drop(self.decoder(*features))\n\n masks = self.segmentation_head(decoder_output)\n\n return masks", "def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):\n cand_indices = []\n ignore = set([\"[CLS]\", \"[SEP]\", \"TIME\",\"DATE\",\"DOCTOR\"])\n for (i, token) in enumerate(tokens):\n #do not mask non alphanumeric and special tokens\n if token in ignore or not token.isalpha():\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an original word. 
When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (len(cand_indices) >= 1 and token.startswith(\"##\")):\n cand_indices[-1].append(i)\n else:\n cand_indices.append([i])\n\n num_to_mask = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n shuffle(cand_indices)\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indices:\n if len(masked_lms) >= num_to_mask:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_mask:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = choice(vocab_list)\n masked_lms.append(MaskedLmInstance(\n index=index, label=tokens[index]))\n tokens[index] = masked_token\n\n assert len(masked_lms) <= num_to_mask\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n mask_indices = [p.index for p in masked_lms]\n masked_token_labels = [p.label for p in masked_lms]\n\n return tokens, mask_indices, masked_token_labels", "def split_mono_data_by_mask(self, test_idx):\n test,train = (),()\n for inp in self.inputs:\n test += (inp[ test_idx.flatten(),...] ,)\n train += (inp[~test_idx.flatten(),...] 
,)\n return train, test", "def attention_mask(x):\n mask = torch.zeros(len(x), len(x[0]))\n for i in range(len(x)):\n try:\n index = np.where(x[i]==1)[0][0]\n mask[i][index:] = -np.inf\n except:\n pass\n return mask", "def predict_all(self, item_ids: tf.Tensor) -> MaskingInfo:\n # TODO : Add option to predict N-last items\n # shift sequence of item-ids\n labels = item_ids[:, 1:]\n # As after shifting the sequence length will be subtracted by one, adding a masked item in\n # the sequence to return to the initial sequence.\n # This is important for ReformerModel(), for example\n labels = tf.concat(\n [\n labels,\n tf.zeros((labels.shape[0], 1), dtype=labels.dtype),\n ],\n axis=-1,\n )\n # apply mask on input where target is on padding index\n mask_labels = labels != self.padding_idx\n\n return MaskingInfo(mask_labels, labels)", "def idseqs_to_mask(idseqs: List[List[int]],\n n_seqlen: Optional[int] = None,\n n_vocab_sz: Optional[int] = None,\n ignore: Optional[List[int]] = [],\n dtype: Optional[torch.dtype] = torch.bool,\n dense: Optional[bool] = False\n ) -> torch.sparse.FloatTensor:\n if n_seqlen is None:\n n_seqlen = max([len(seq) for seq in idseqs])\n\n # create a list of IDs\n if n_vocab_sz is None:\n ids = set(itertools.chain(*idseqs))\n else:\n ids = set(range(0, n_vocab_sz))\n\n # remove IDs that we ignore\n ids = ids.difference(set(ignore))\n n_features = len(ids)\n\n # convert to list to lookup with .index() method\n ids = list(ids)\n\n # loop over each ID sequence\n masks = []\n for seq in idseqs:\n # extract index pairs of the sparse matrix\n featidx = []\n seqidx = []\n for step, elem in enumerate(seq[:n_seqlen]):\n try:\n featidx.append(ids.index(elem))\n seqidx.append(step)\n except Exception:\n pass\n # convert to COO matrix\n tmp = torch.sparse.FloatTensor(\n indices=torch.LongTensor([seqidx, featidx]),\n values=torch.FloatTensor([1.0 for _ in range(len(seqidx))]),\n size=torch.Size([n_seqlen, n_features])\n ).coalesce()\n # save it\n masks.append(tmp)\n\n # stack into one 3D tensor <batch_sz, n_seqlen, vocab_sz>\n masks = torch.stack(masks).coalesce()\n\n # convert to dense matrix if requested\n if dense:\n masks = masks.to_dense().type(dtype)\n\n # done\n return masks", "def __get_masks(x_shape, y, n_train=None):\n # type: (Tuple[int], np.ndarray, int) -> (np.ndarray, np.ndarray)\n n_train = n_train if n_train is not None else const.n_train\n\n if n_train <= 0 or n_train > x_shape[0]:\n return np.full(shape=x_shape, fill_value=True, dtype=bool), np.full(shape=y.shape, fill_value=True, dtype=bool)\n\n all_indexes = defaultdict(list) # type: Dict[int, List[int]]\n for i in range(len(y)):\n curr = int(y[i])\n all_indexes[curr].append(i)\n\n ratios = defaultdict() # type: Dict[int, float]\n\n for i, j in all_indexes.items():\n ratios[i] = (len(j) * 1. / len(all_indexes[0]))\n\n # Ratios split the whole dataset to ratios given class and first class.\n # Part scales these ratios up, so that, 'part' corresponds to size of first class.\n part = n_train * 1. / sum(ratios.values())\n if part == 0: # n_train is 0.\n part = len(y) * 1. / sum(ratios.values())\n\n # Masks of what to keep.\n indexes_x = np.full(shape=x_shape, fill_value=False, dtype=bool)\n indexes_y = np.full(shape=y.shape, fill_value=False, dtype=bool)\n\n for i in all_indexes.keys():\n chosen_idxs = random.sample(all_indexes[i], int(part * ratios[i]))\n indexes_y[chosen_idxs] = True\n indexes_x[chosen_idxs, ...] 
= True\n\n return indexes_x, indexes_y", "def add_val_to_train(mask_train, mask_val, seed_val, p=0.5):\n print(\"Adding some validation data to training\")\n rnd_val = np.random.RandomState(seed_val)\n chs = rnd_val.choice([True, False], size=np.sum(mask_val), p=[p, 1.0 - p])\n mask_val_new = np.array(mask_val)\n mask_train_new = np.array(mask_train)\n mask_val_new[mask_val_new] = chs\n mask_train_new[mask_val] = ~chs\n return mask_train_new, mask_val_new", "def preprocess_train(im, boxes, classes, inst_masks, mask, input_size, min_size=2,\n use_augment=False, training_scale=[0.3, 0.5, 0.7, 1.0]):\n ori_im = np.copy(im)\n target_h, target_w = input_size\n\n # ---------- old data_augmentation ----------\n if use_augment:\n if np.random.choice([0, 1]) != 0:\n scale = np.random.choice(training_scale) # adding more small objects\n im, inst_masks, mask, boxes, classes = random_scale(im, inst_masks, mask, boxes, classes, scale=scale)\n min_obj_cover = np.random.choice([0.8, 0.9, 1.0])\n # truncted examples may lead to multiple-detections..\n im, inst_masks, mask, boxes, classes = random_aspect_ratio(im, inst_masks, mask, boxes, classes,\n min_aspect_ratio=0.5, max_aspect_ratio=2.0,\n min_obj_cover=min_obj_cover)\n #\n # # r = np.random.randint(0, 3)\n # if np.random.rand() < 0.75:\n # im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n # else:\n # im, inst_masks, mask, boxes, classes = center_crop2fixed_pad(im, inst_masks, mask, boxes, classes, target_w, target_h,\n # min_size=min_size)\n\n # ---------- old data_augmentation ----------\n\n # ---------- none data_augmentation ----------\n im, inst_masks, mask, boxes, classes = fixed_scale(im, inst_masks, mask, boxes, classes, target_h, target_w)\n im, inst_masks, mask, boxes, classes = random_flip(im, inst_masks, mask, boxes, classes)\n # ---------- none data_augmentation ----------\n\n # ---------- old data_augmentation ----------\n im = distort_color(im)\n # ---------- old data_augmentation ----------\n\n im = imcv2_recolor(im)\n\n # add this because zeros numpy array will cause errors in torch Dataloader\n inst_masks = np.zeros([1, target_h, target_w], dtype=inst_masks.dtype) if inst_masks.size == 0 else inst_masks\n\n boxes = np.asarray(boxes, dtype=np.float32)\n return im, boxes, classes, inst_masks, mask, ori_im" ]
[ "0.62820804", "0.60834235", "0.5759483", "0.57154673", "0.56921595", "0.5655835", "0.56276035", "0.5595453", "0.55943155", "0.55873364", "0.55382186", "0.5523225", "0.54959726", "0.5485287", "0.54780704", "0.54613227", "0.5454266", "0.5439734", "0.5433462", "0.5433462", "0.54235536", "0.54170704", "0.54149246", "0.5398984", "0.5383645", "0.5345843", "0.5327265", "0.5321766", "0.5305637", "0.52979016" ]
0.6283801
0