query (string, 9–9.05k) | document (string, 10–222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4–10) | document_rank (string, 2 classes)
---|---|---|---|---|---|---
Given a TEXTFSM CDP neighbor, checks the type of device and runs it through the corresponding parser function. | def parse(n):
    capabilities = n['capabilities']
    # IP phones: the platform string or the capabilities field advertises "Phone"
    if 'IP Phone' in n['platform'] or 'Phone' in capabilities:
        phone_parse(n)
    # Routers (advertising source-route bridging) and switches
    elif ('Router' in capabilities and 'Source-Route-Bridge' in capabilities) or \
            'Switch' in capabilities:
        router_sw_parse(n)
    # Wireless access points advertise "Trans-Bridge"
    elif 'Trans-Bridge' in capabilities:
        wap_parse(n)
    # Anything else falls through to the generic parser
    else:
        other_parse(n) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_device_from_datagram(\n device_callback: Callable[[SwitcherBase], Any], datagram: bytes\n) -> None:\n parser = DatagramParser(datagram)\n if not parser.is_switcher_originator():\n logger.debug(\"received datagram from an unknown source\")\n else:\n device_type: DeviceType = parser.get_device_type()\n if device_type == DeviceType.BREEZE:\n device_state = parser.get_thermostat_state()\n else:\n device_state = parser.get_device_state()\n if device_state == DeviceState.ON:\n power_consumption = parser.get_power_consumption()\n electric_current = watts_to_amps(power_consumption)\n else:\n power_consumption = 0\n electric_current = 0.0\n\n if device_type and device_type.category == DeviceCategory.WATER_HEATER:\n logger.debug(\"discovered a water heater switcher device\")\n device_callback(\n SwitcherWaterHeater(\n device_type,\n device_state,\n parser.get_device_id(),\n parser.get_ip_type1(),\n parser.get_mac(),\n parser.get_name(),\n power_consumption,\n electric_current,\n (\n parser.get_remaining()\n if device_state == DeviceState.ON\n else \"00:00:00\"\n ),\n parser.get_auto_shutdown(),\n )\n )\n\n elif device_type and device_type.category == DeviceCategory.POWER_PLUG:\n logger.debug(\"discovered a power plug switcher device\")\n device_callback(\n SwitcherPowerPlug(\n device_type,\n device_state,\n parser.get_device_id(),\n parser.get_ip_type1(),\n parser.get_mac(),\n parser.get_name(),\n power_consumption,\n electric_current,\n )\n )\n\n elif device_type and device_type.category == DeviceCategory.SHUTTER:\n logger.debug(\"discovered a Runner switch switcher device\")\n device_callback(\n SwitcherShutter(\n device_type,\n DeviceState.ON,\n parser.get_device_id(),\n parser.get_ip_type2(),\n parser.get_mac(),\n parser.get_name(),\n parser.get_shutter_position(),\n parser.get_shutter_direction(),\n )\n )\n\n elif device_type and device_type.category == DeviceCategory.THERMOSTAT:\n logger.debug(\"discovered a Breeze switcher device\")\n device_callback(\n SwitcherThermostat(\n device_type,\n device_state,\n parser.get_device_id(),\n parser.get_ip_type2(),\n parser.get_mac(),\n parser.get_name(),\n parser.get_thermostat_mode(),\n parser.get_thermostat_temp(),\n parser.get_thermostat_target_temp(),\n parser.get_thermostat_fan_level(),\n parser.get_thermostat_swing(),\n parser.get_thermostat_remote_id(),\n )\n )\n else:\n warn(\"discovered an unknown switcher device\")",
"def phone_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n l_intf = neighbor['local_port']\n intf = re.findall(r'.{2}', l_intf)[0] + re.findall(r'\\d.+', l_intf)[0]\n macreg = re.findall(r'.{4}', hostname.replace('SEP', ''))\n mac_address = f'{macreg[0]}.{macreg[1]}.{macreg[2]}'.lower()\n voice_vlan = 'None'\n software_version = neighbor[version_s].replace('.loads', '')\n platform = neighbor['platform']\n for switchport in switchports:\n if switchport['interface'] == intf:\n for mac_addr in mac_addrs:\n if mac_addr['vlan'] == switchport['voice_vlan']:\n voice_vlan = mac_addr['vlan']\n break\n break\n if platform.__contains__('Cisco IP Phone'):\n platform = neighbor['platform'].replace('Cisco IP Phone ', '')\n else:\n platform = neighbor['platform']\n phone = {\n 'hostname': hostname,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': l_intf\n },\n 'ip_address': mgmt_ip,\n 'mac_addr': mac_address,\n 'voice_vlan': voice_vlan,\n 'software_version': software_version,\n 'model': platform\n }\n self.phones.append(phone)",
"def processpacket(p):\n\n\tglobal SynSentToTCPService\n\tglobal SynAckSentToTCPClient\n\tglobal LiveTCPService\n\tglobal LiveTCPClient\n\tglobal LiveUDPService\n\tglobal LiveUDPClient\n\tglobal NmapServerDescription\n\tglobal ManualServerDescription\n\tglobal ClientDescription\n\tglobal MacAddr\n\tglobal OSDescription\n\tglobal ServiceFPs\n\tglobal SipPhoneMatch\n\tglobal Devel\n\tglobal IsRouter\n\tglobal DNSRecord\n\tglobal HostIPs\n\n\tif (type(p) == Dot3) and (type(p['LLC']) == LLC):\n\t\tUnhandledPacket(p)\n\t\t#Spanning Tree Protocol\n\t\t#Debug(\"802.3\")\n\t\t#p.show()\n\t\t#print type(p['LLC'])\n\telif (p['Ethernet'] == None):\n\t\tDebug(\"non-ethernet packet\")\t\t#Need more details on how to handle.\n\t\tUnhandledPacket(p)\n\t\t#p.show()\n\t\t#print type(p)\n\t\t#quit()\n\telif p['Ethernet'].type == 0x0806:\t\t#ARP\n\t\t#pull arp data from here instead of tcp/udp packets, as these are all local\n\t\tif (p['ARP'].op == 1):\t\t\t#1 is request (\"who-has\")\n\t\t\tpass\n\t\tif (p['ARP'].op == 2):\t\t\t#2 is reply (\"is-at\")\n\t\t\tif (p['ARP.psrc'] != None) and (p['ARP.hwsrc'] != None):\n\t\t\t\tIPAddr=p['ARP.psrc']\n\t\t\t\tMyMac=p['ARP.hwsrc'].upper()\n\t\t\t\tif (not MacAddr.has_key(IPAddr)) or (MacAddr[IPAddr] != MyMac):\n\t\t\t\t\tReportId(\"MA\", IPAddr, 'Ethernet', MyMac, '')\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x0800:\t\t#IP\n\t\tsIP=str(p['IP'].src)\n\t\tdIP=str(p['IP'].dst)\n\t\t#Best to get these from arps instead; if we get them from here, we get router macs for foreign addresses.\n\t\t#if not MacAddr.has_key(sIP):\n\t\t#\tReportId(\"MA\", sIP, \"Ethernet\", p['Ethernet'].src, '')\n\t\t#if not MacAddr.has_key(dIP):\n\t\t#\tReportId(\"MA\", dIP, \"Ethernet\", p['Ethernet'].dst, '')\n\n\t\tif p['IP'].proto == 1:\t\t\t#ICMP\n\t\t\tType = p['ICMP'].type\n\t\t\tCode = p['ICMP'].code\n\n\t\t\tif (Type == 0):\t\t\t\t\t\t#Echo reply\n\t\t\t\tif (not(OSDescription.has_key(sIP))):\n\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", 'icmp echo reply')\n\t\t\telif (Type == 3) and (type(p[IPerror]) == IPerror):\t#Unreachable, check that we have an actual embedded packet\n\t\t\t\t#if (type(p[IPerror]) != IPerror):\n\t\t\t\t#\tp.show()\n\t\t\t\t#\tprint type(p[IPerror])\n\t\t\t\t#\tquit()\n\t\t\t\tOrigdIP = p[IPerror].dst\n\t\t\t\tif (Code == 0):\t\t\t\t\t#Net unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"NetUn\", \"router\", \"\")\n\t\t\t\telif (Code == 1):\t\t\t\t#Host unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"HostUn\", \"router\", \"\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 17):\t#Port unreachable and embedded protocol = 17, UDP, as it should be\n\t\t\t\t\tDNSServerLoc = p[IPerror].src + \",UDP_53\"\n\t\t\t\t\tif (p[UDPerror].sport == 53) and (ManualServerDescription.has_key(DNSServerLoc)) and (ManualServerDescription[DNSServerLoc] == \"dns/server\"):\n\t\t\t\t\t\t#If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect)\n\t\t\t\t\t\t#Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways.\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\t#If 
orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed\n\t\t\t\t\t\tOrigDPort = str(p[UDPerror].dport)\n\t\t\t\t\t\tOrigDstService = OrigdIP + \",UDP_\" + OrigDPort\n\t\t\t\t\t\tif ((not LiveUDPService.has_key(OrigDstService)) or (LiveUDPService[OrigDstService] == True)):\n\t\t\t\t\t\t\tLiveUDPService[OrigDstService] = False\n\t\t\t\t\t\t\tReportId(\"US\", OrigdIP, \"UDP_\" + OrigDPort, \"closed\", \"port unreachable\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 6) and (p[TCPerror].dport == 113):\t#Port unreachable and embedded protocol = 6, TCP, which it shouldn't. May be the same firewall providing the TCP FR's\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 6):\t\t\t\t#Net unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unknown')\n\t\t\t\telif (Code == 7):\t\t\t\t#Host unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unknown')\n\t\t\t\telif (Code == 9):\t\t\t\t#Network Administratively Prohibited\n\t\t\t\t\tpass\t\t\t\t\t#Can't tell much from this type of traffic. Possibly list as firewall?\n\t\t\t\telif (Code == 10):\t\t\t\t#Host Administratively Prohibited\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 11):\t\t\t\t#Network unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 12):\t\t\t\t#Host unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 13):\t\t\t\t#Communication Administratively prohibited\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (Type == 8):\t\t\t\t\t#ping\n\t\t\t\t#FIXME - check payload for ping sender type, perhaps\n\t\t\t\tpass\n\t\t\telif (Type == 11):\t\t\t\t\t#Time exceeded\n\t\t\t\tif (Code == 0):\t\t\t\t\t#TTL exceeded\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\t#FIXME - put original target IP as column 5?\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"TTLEx\", \"router\", \"\")\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 2:\t\t#IGMP\n\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 6:\t\t#TCP\n\t\t\tsport=str(p['TCP'].sport)\n\t\t\tdport=str(p['TCP'].dport)\n\t\t\t#print p['IP'].src + \":\" + sport + \" -> \", p['IP'].dst + \":\" + dport,\n\t\t\tif (p['TCP'].flags & 0x17) == 0x12:\t#SYN/ACK (RST and FIN off)\n\t\t\t\tCliService = dIP + \",TCP_\" + sport\n\t\t\t\tif not SynAckSentToTCPClient.has_key(CliService):\n\t\t\t\t\tSynAckSentToTCPClient[CliService] = True\n\n\t\t\t\t#If we've seen a syn sent to this port and have either not seen any SA/R, or we've seen a R in the past:\n\t\t\t\t#The last test is for a service that was previously closed and is now open; report each transition once.\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == False)) ):\n\t\t\t\t\tLiveTCPService[Service] = True\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", '')\n\t\t\telif (p['TCP'].flags & 0x17) == 0x02:\t#SYN (ACK, RST, and FIN off)\n\t\t\t\tService = dIP + \",TCP_\" + dport\n\t\t\t\tif not SynSentToTCPService.has_key(Service):\n\t\t\t\t\tSynSentToTCPService[Service] = True\n\t\t\t\t#Debug(\"trying to fingerprint \" + sIP)\n\t\t\t\ttry:\n\t\t\t\t\tp0fdata = p0f(p)\n\t\t\t\t\t#FIXME - reasonably common occurence, don't whine, just fix it.\n\t\t\t\t\t#if (len(p0fdata) >1):\n\t\t\t\t\t#\tDebug(\"More than one OS fingerprint for \" + sIP + 
\", using the first.\")\n\t\t\t\t\tif (len(p0fdata) >=1):\n\t\t\t\t\t\tPDescription = p0fdata[0][0] + \" \" + p0fdata[0][1] + \" (\" + str(int(p0fdata[0][2]) + 1)\t#FIXME - Grabbing just the first candidate, may need to compare correlation values; provided?\n\t\t\t\t\t\tif (p0fdata[0][2] == 0):\n\t\t\t\t\t\t\tPDescription = PDescription + \" hop away)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPDescription = PDescription + \" hops away)\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t#[N][2] param appears to be distance away in hops (but add 1 to this to get real hop count?)\n\t\t\t\t\t\tPDescription = PDescription.replace(',', ';')\t\t#Commas are delimiters in output\n\t\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\t\texcept:\n\t\t\t\t\tPDescription = 'p0f failure'\n\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\tDebug(\"P0f failure in \" + sIP + \":\" + sport + \" -> \" + dIP + \":\" + dport)\n\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\telif (p['TCP'].flags & 0x07) == 0x01:\t#FIN (SYN/RST off)\n\t\t\t\tCliService = sIP + \",TCP_\" + dport\n\t\t\t\tif ( (SynAckSentToTCPClient.has_key(CliService)) and ((not LiveTCPClient.has_key(CliService)) or (LiveTCPClient[CliService] == False)) ):\n\t\t\t\t\tLiveTCPClient[CliService] = True\n\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", '')\n\t\t\telif (p['TCP'].flags & 0x07) == 0x04:\t#RST (SYN and FIN off)\n\t\t\t\t#FIXME - handle rst going in the other direction?\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == True)) ):\n\t\t\t\t\tLiveTCPService[Service] = False\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"closed\", '')\n\t\t\telif ((p['TCP'].flags & 0x3F) == 0x15) and (sport == \"113\"):\t#FIN, RST, ACK (SYN, PSH, URG off)\n\t\t\t\t#This may be a firewall or some other device stepping in for 113 with a FIN/RST.\n\t\t\t\tpass\n\t\t\telif (p['TCP'].flags & 0x17) == 0x10:\t#ACK (RST, SYN, and FIN off)\n\t\t\t\t#FIXME - check for UnhandledPacket placement in ACK\n\t\t\t\tFromPort = sIP + \",TCP_\" + sport\n\t\t\t\tToPort = dIP + \",TCP_\" + dport\n\t\t\t\tPayload = str(p['Raw.load'])\t\t\t#For some reason this doesn't handle p['Raw'].load\n\t\t\t\tif ( (LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True) and (LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\n\t\t\t\t\tprint \"Logic failure: both \" + FromPort + \" and \" + ToPort + \" are listed as live services.\"\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\telif ((LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True)):\t#If the \"From\" side is a known TCP server:\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort) ):\t\t#Check nmap fingerprint strings for this server port\n\t\t\t\t\t\tif (ServiceFPs.has_key(int(sport))):\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs[int(sport)]:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\t#Debugging:\n\t\t\t\t\t\t\t\t\t#FIXME - removeme once understood:\n\t\t\t\t\t\t\t\t\t#File \"/home/wstearns/med/programming/python/passer/passer.py\", line 504, in processpacket\n\t\t\t\t\t\t\t\t\t#OutputDescription = OutputDescription.replace('$' + 
str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\t#TypeError: expected a character buffer object\n\t\t\t\t\t\t\t\t\tif (OneTuple[1] == None):\n\t\t\t\t\t\t\t\t\t\tDebug(\"Null description for \" + OneTuple[0])\n\t\t\t\t\t\t\t\t\t\t#quit()\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\t#Example: Replace \"$1\" with MatchObj.group(1)\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), str(MatchObj.group(Index)))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t#Exit for loop, no need to check any more fingerprints now that we've found a match\n\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort)):\t\t#If the above loop didn't find a server description\n\t\t\t\t\t\tif (ServiceFPs.has_key('all')):\t\t\t\t#Now recheck against regexes not associated with a specific port (port 'all').\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs['all']:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif (not ManualServerDescription.has_key(FromPort) ):\n\t\t\t\t\t\tif (sport == \"22\") and (Payload != None) and (Payload.find('SSH-') > -1):\n\t\t\t\t\t\t\tif ( (Payload.find('SSH-1.99-OpenSSH_') > -1) or (Payload.find('SSH-2.0-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/openssh\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/openssh\"\n\t\t\t\t\t\t\telif (Payload.find('SSH-1.5-') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/generic\"\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' ESMTP Sendmail ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/sendmail\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/sendmail\"\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' - Welcome to our SMTP server ESMTP') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t#Check for port 80 and 
search for \"Server: \" once\n\t\t\t\t\t\telif (sport == \"80\") and (Payload != None) and (Payload.find('Server: ') > -1):\n\t\t\t\t\t\t\tif (Payload.find('Server: Apache') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/apache\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/apache\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Embedded HTTP Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/embedded\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/embedded\"\n\t\t\t\t\t\t\telif (Payload.find('Server: gws') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/gws\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/gws\"\n\t\t\t\t\t\t\telif (Payload.find('Server: KFWebServer') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/kfwebserver\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/kfwebserver\"\n\t\t\t\t\t\t\telif (Payload.find('Server: micro_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/micro-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/micro-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Microsoft-IIS') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/iis\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/iis\"\n\t\t\t\t\t\t\telif (Payload.find('Server: lighttpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/lighttpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/lighttpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: MIIxpc') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mirrorimage\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mirrorimage\"\n\t\t\t\t\t\t\telif (Payload.find('Server: mini_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mini-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mini-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nc -l -p 80') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nc\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nc\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nginx/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nginx\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nginx\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Nucleus') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nucleus\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nucleus\"\n\t\t\t\t\t\t\telif (Payload.find('Server: RomPager') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/rompager\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/rompager\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/server\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/server\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Sun-ONE-Web-Server/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, 
\"TCP_\" + sport, \"listening\", \"http/sun-one\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/sun-one\"\n\t\t\t\t\t\t\telif (Payload.find('Server: TrustRank Frontend') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/trustrank\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/trustrank\"\n\t\t\t\t\t\t\telif (Payload.find('Server: YTS/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/yahoo\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/yahoo\"\n\t\t\t\t\t\t\telif (Payload.find('HTTP/1.0 404 Not Found') > -1) or (Payload.find('HTTP/1.1 200 OK') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/generic\"\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"110\") and (Payload != None) and (Payload.find('POP3 Server Ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"pop3/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"pop3/generic\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find('* OK dovecot ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/dovecot\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/dovecot\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find(' IMAP4rev1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"783\") and (Payload != None) and (Payload.find('SPAMD/1.1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"spamd/spamd\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"spamd/spamd\"\n\t\t\t\t\t\telif ( (sport == \"3128\") or (sport == \"80\") ) and (Payload != None) and (Payload.find('Via: ') > -1) and (Payload.find(' (squid/') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"proxy/squid\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"proxy/squid\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\telif ((LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\t\t#If the \"To\" side is a known TCP server:\n\t\t\t\t\tClientKey = sIP + \",TCP_\" + dport\t#Note: CLIENT ip and SERVER port\n\t\t\t\t\tif (not ClientDescription.has_key(ClientKey)):\n\t\t\t\t\t\tif (dport == \"22\") and (Payload != None) and ( (Payload.find('SSH-2.0-OpenSSH_') > -1) or (Payload.find('SSH-1.5-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"ssh/openssh\")\n\t\t\t\t\t\t#As cute as it is to catch this, it miscatches any relay that's carrying a pine-generated mail.\n\t\t\t\t\t\t#elif (dport == \"25\") and (Payload != None) and (Payload.find('Message-ID: <Pine.') > -1):\n\t\t\t\t\t\t#\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", 
\"smtp/pine\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: libwww-perl/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/libwww-perl\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Lynx') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/lynx\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Mozilla') > -1) and (Payload.find(' Firefox/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/firefox\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Wget/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/wget\")\n\t\t\t\t\t\telif (dport == \"143\") and (Payload != None) and (Payload.find('A0001 CAPABILITY') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"imap/generic\")\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\t\t\telif (dport == \"783\") and (Payload != None) and (Payload.find('PROCESS SPAMC') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"spamd/spamc\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\telse:\t#Neither port pair is known as a server\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t#Following is debugging at best; it should only show up early on as the sniffer listens to conversations for which it didn't hear the SYN/ACK\n\t\t\t\t\t#print \"note: neither \" + FromPort + \" nor \" + ToPort + \" is listed as a live service.\"\n\t\t\telse:\t#Other TCP flag combinations here\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 17 and (type(p['UDP']) == UDP):\t\t#UDP. We have to check the object type as well as we do get (corrupted? truncated?) 
packets with type 17 that aren't udp: AttributeError: 'NoneType' object has no attribute 'sport'\n\t\t\t#FIXME - possibly run udp packets through ServiceFPs as well?\n\t\t\tsport=str(p['UDP'].sport)\n\t\t\tdport=str(p['UDP'].dport)\n\t\t\tSrcService = sIP + \",UDP_\" + sport\n\t\t\tDstService = dIP + \",UDP_\" + dport\n\t\t\tSrcClient = sIP + \",UDP_\" + dport\n\t\t\tPayload = p['Raw.load']\n\n\t\t\t#Multicast DNS: http://files.multicastdns.org/draft-cheshire-dnsext-multicastdns.txt\n\t\t\t#- usually sent to 224.0.0.251 (or FF02::FB) (link-local multicast).\n\t\t\t#\t- if \".local.\" in query, these MUST be the target IPs\n\t\t\t#\t- non-local queries may be sent to these or normal dns servers\n\t\t\t#\t- rdns queries for \"254.169.in-addr.arpa.\" MUST be sent to 224.0.0.251\n\t\t\t#\t- rdns queries for \"8.e.f.ip6.arpa.\", \"9.e.f.ip6.arpa.\",\"a.e.f.ip6.arpa.\", and \"b.e.f.ip6.arpa.\" MUST be sent to the IPv6 mDNS link-local multicast address FF02::FB.\n\t\t\t#- sent to udp port 5353\n\t\t\t#- generic clients may use \"single-dns-object.local.\", such as \"sparrow.local.\"\n\t\t\t#- responses have IP TTL = 255 to check that packet originated on-lan\n\n\t\t\t#Multicast DNS, placed next to normal dns, out of numerical order\n\t\t\tif (dport == \"5353\") and ( (p['IP'].ttl == 1) or (p['IP'].ttl == 255) ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcClient)) or (LiveUDPService[SrcClient] == False)):\n\t\t\t\t\tLiveUDPService[SrcClient] = True\n\t\t\t\t\tif (dIP == \"224.0.0.251\"):\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/broadcastclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/client\")\n\n\t\t\t\t\t#Extract dns answers like with 53; change elif to if and add 5353 to ports on next if?\n\t\t\t\t\t#At the moment, no; scapy does not appear to parse 5353 as dns.\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tUnhandledPacket(p)\n\t\t\t#FIXME - add check for \"if isinstance(p['DNS'], whatevertype):\there and at all p[] accesses.\n\t\t\telif (sport == \"53\") and (isinstance(p['DNS'], DNS)) and (p['DNS'].qr == 1):\t\t#qr == 1 is a response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t#FIXME - Also report the TLD from one of the query answers to show what it's willing to answer for?\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"dns/server\")\n\t\t\t\t#Now we extract dns answers. 
First, check that there's no dns error:\n\t\t\t\tif (p['DNS'].rcode == 0):\t\t\t#No error\n\t\t\t\t\tDNSBlocks = [ ]\n\t\t\t\t\tCNAMERecs = [ ]\t\t\t\t#We hold onto all cnames until we've processed all PTR's and A's here\n\t\t\t\t\tif (p['DNS'].ancount > 0):\t\t#If we have at least one answer from the answer block, process it\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].an)\n\t\t\t\t\tif (p['DNS'].arcount > 0):\t\t#Likewise for the \"additional\" block\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].ar)\n\t\t\t\t\tfor OneAn in DNSBlocks:\n\t\t\t\t\t\t#Thanks to Philippe Biondi for showing me how to extract additional records.\n\t\t\t\t\t\t#Debug(\"Start dns extract\" + str(p['DNS'].ancount))\n\t\t\t\t\t\t#OneAn = p[DNS].an\n\t\t\t\t\t\t#while OneAn is not NoPayload:\t\t#This doesn't seem to stop at the end of the list; incorrect syntax.\n\t\t\t\t\t\twhile isinstance(OneAn,DNSRR):\t\t#Somewhat equivalent:\twhile not isinstance(an, NoPayload):\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t#print \"Type: \" + str(type(OneAn))\t\t#All of type scapy.DNSRR\n\t\t\t\t\t\t\tif (OneAn.rclass == 1) and (OneAn.type == 1):\t\t#\"IN\" class and \"A\" type answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\t#Check new hostname to see if it's in the list.\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",A\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",A\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"A\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 2):\t\t\t#\"IN\" class and \"NS\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Perhaps later\n\t\t\t\t\t\t\t\t#Like cnames, this is object -> nameserver hostname, so these would need to be queued like cnames until we're done with A's and PTR's.\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 5):\t\t\t#\"IN\" class and \"CNAME\" answer\n\t\t\t\t\t\t\t\tCNAMERecs.append(OneAn)\t\t\t\t\t#Remember the record; we'll process these after the PTR's and A's\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 6):\t\t\t#\"IN\" class and \"SOA\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Not immediately useful, perhaps later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 12):\t\t#\"IN\" class and \"PTR\" type answer\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For input of '182.111.59.66.in-addr.arpa.' 
:\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rrname.replace(\".in-addr.arpa.\", \"\")\t\t# '182.111.59.66'\n\t\t\t\t\t\t\t\tDNSIPAddr = DNSIPAddr.split('.')\t\t\t\t# ['182', '111', '59', '66']\n\t\t\t\t\t\t\t\tDNSIPAddr.reverse()\t\t\t\t\t\t# ['66', '59', '111', '182']\n\t\t\t\t\t\t\t\tDNSIPAddr = string.join(DNSIPAddr, '.')\t\t\t\t# '66.59.111.182'\n\t\t\t\t\t\t\t\t#Check that we end up with a legal IPv4 address before continuing; we're getting garbage.\n\t\t\t\t\t\t\t\tif (re.search('^[1-9][0-9\\.]*[0-9]$', DNSIPAddr) == None):\n\t\t\t\t\t\t\t\t\tDebug(\"Odd PTR rrname: \" + OneAn.rrname)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tDNSHostname = OneAn.rdata.lower()\n\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",PTR\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",PTR\"])):\n\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"PTR\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 15):\t\t#\"IN\" class and \"MX\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Possibly later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 28):\t\t#\"IN\" class and \"AAAA\" answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata.upper()\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",AAAA\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",AAAA\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"AAAA\", DNSHostname, \"\")\n\n\t\t\t\t\t\t\t#Move to the next DNS object in the \"an\" block\n\t\t\t\t\t\t\tOneAn = OneAn.payload\n\t\t\t\t\tfor OneCNAME in CNAMERecs:\t\t#Now that we have all A/PTR's, go back and turn cname records into pseudo-A's\n\t\t\t\t\t\tif isinstance(OneCNAME,DNSRR):\n\t\t\t\t\t\t\tAlias = OneCNAME.rrname.lower()\n\t\t\t\t\t\t\tExisting = OneCNAME.rdata.lower()\n\t\t\t\t\t\t\tif isFQDN(Alias) and isFQDN(Existing):\n\t\t\t\t\t\t\t\tif HostIPs.has_key(Existing):\n\t\t\t\t\t\t\t\t\tfor OneIP in HostIPs[Existing]:\t\t\t\t#Loop through each of the IPs for the canonical name, and\n\t\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(OneIP + \",CNAME\")) or (not(Alias in DNSRecord[OneIP + \",CNAME\"])):\n\t\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", OneIP, \"CNAME\", Alias, \"\")\t#report them as kind-of A records for the Alias.\n\t\t\t\t\t\t\t\t#If we don't have a A/PTR record for \"Existing\", just ignore it. 
Hopefully we'll get the Existing A/PTR in the next few answers, and will re-ask for the CNAME later, at which point we'll get a full cname record.\n\t\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t#\tDebug(\"CNAME \" + Alias + \" -> \" + Existing + \" requested, but no IP's for the latter, skipping.\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tDebug(\"One of \" + Alias + \" and \" + Existing + \" isn't an FQDN, skipping cname processing.\")\n\t\t\t\telif (p['DNS'].rcode == 1):\t\t\t#FormErr: server responding to an improperly formatted request\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 2):\t\t\t#ServFail: domain exists, root nameservers list authoritative name servers, but authNS's won't answer queries\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 3):\t\t\t#NXDOMAIN: root nameservers don't have any listing (domain doesn't exist or is on hold)\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 5):\t\t\t#Query refused\n\t\t\t\t\tpass\n\t\t\t\telse:\t#rcode indicates an error\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"53\") and (type(p['DNS']) == DNS) and (p['DNS'].qr == 0):\t#dns query\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"dns/client\")\n\t\t\telif (sport == \"67\") and (dport == \"68\"):\t\t#Bootp/dhcp server talking to client\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"bootpordhcp/server\")\n\t\t\telif (sport == \"68\") and (dport == \"67\"):\t\t#Bootp/dhcp client talking to server\n\t\t\t\tif (sIP != \"0.0.0.0\"):\t\t\t\t#If the client is simply renewing an IP, remember it.\n\t\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"bootpordhcp/client\")\n\t\t\t\t#else:\t\t\t\t\t\t#If you want to record which macs are asking for addresses, do it here.\n\t\t\t\t#\tpass\n\t\t\telif (sport == \"123\") and (dport == \"123\") and (p['NTP'].stratum != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/generic\")\n\t\t\telif (dport == \"123\") and ( (dIP == \"216.115.23.75\") or (dIP == \"216.115.23.76\") or (dIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ntp/vonageclient\")\n\t\t\telif (sport == \"123\") and ( (sIP == \"216.115.23.75\") or (sIP == \"216.115.23.76\") or (sIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/vonageserver\")\n\t\t\telif (dport == \"137\"):\t\t\t#netbios-ns\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (p['Ethernet'].dst.upper() == \"FF:FF:FF:FF:FF:FF\"):\t\t\t#broadcast\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/broadcastclient\")\n\t\t\t\t\telif 
(Payload != None) and (Payload.find('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') > -1):\t#wildcard\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/wildcardclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/unicastclient\")\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"500\") and (dport == \"500\") and (p['ISAKMP'].init_cookie != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"isakmp/generic\")\n\t\t\telif (dport == \"512\"):\t\t\t#BIFF\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('@') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"biff/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (dport == \"1026\") or (dport == \"1027\") or (dport == \"1028\") ):\t#winpopup spam client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and ( (Payload.find('Download Registry Update from:') > -1) or (Payload.find('CRITICAL ERROR MESSAGE! - REGISTRY DAMAGED AND CORRUPTED.') > -1) or (Payload.find('Your system registry is corrupted and needs to be cleaned immediately.') > -1) or (Payload.find('CRITICAL SYSTEM ERRORS') > -1) ):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"winpopup/spamclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"1434\"):\t\t#Probable mssql attack\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Qh.dll') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mssql/clientattack\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"1900\") and (dport == \"1900\") and (dIP == \"239.255.255.250\"):\t\t#SSDP\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('NOTIFY') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ssdp/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"3865\") and (dIP == \"255.255.255.255\"):\t\t#XPL, http://wiki.xplproject.org.uk/index.php/Main_Page\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"xpl/client\")\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (dIP == \"216.115.30.28\") or (dIP == \"69.59.227.77\") or (dIP == \"69.59.232.33\") or (dIP == \"69.59.240.84\") ):\t\t#Vonage SIP client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061 SIP/2.0') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tSipMatch = SipPhoneMatch.search(Payload)\n\t\t\t\t\t\tif (SipMatch != None) and (len(SipMatch.groups()) >= 
1):\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client, phone number: \" + SipMatch.group(1))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (sIP == \"216.115.30.28\") or (sIP == \"69.59.227.77\") or (sIP == \"69.59.232.33\") or (sIP == \"69.59.240.84\") ):\t#Vonage SIP server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061>') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"sip/vonage_server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"6515\") and (dport == \"6514\") and (dIP == \"255.255.255.255\"):\t\t#mcafee ASaP broadcast, looking for a proxy out. http://www.myasap.de/intl/EN/content/virusscan_asap/faq_new.asp\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('<rumor version=') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"asap/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"9052\") or (sport == \"9053\") or (sport == \"9054\") ) and ( (sIP == \"205.188.146.72\") or (sIP == \"205.188.157.241\") or (sIP == \"205.188.157.242\") or (sIP == \"205.188.157.243\") or (sIP == \"205.188.157.244\") or (sIP == \"64.12.51.145\") or (sIP == \"64.12.51.148\") or (sIP == \"149.174.54.131\") ):\t#Possibly AOL dns response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('dns-01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"aoldns/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27005\") and ( (dport == \"27016\") or (dport == \"27017\") ):\t\t\t\t#Halflife client live game\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27013\") and (dIP == \"207.173.177.12\"):\t\t\t\t#variable payload, so can't (Payload != None) and (Payload.find('Steam.exe') > -1)\t\t\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (sport == \"27013\") and (sIP == \"207.173.177.12\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (sport == \"27016\") or (sport == \"27017\") ) and (dport == \"27005\"):\t\t\t\t#halflife server live game\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"27015\") or (dport == 
\"27016\") or (dport == \"27025\") or (dport == \"27026\") ):\t\t#Variable payload, so can't: (Payload != None) and (Payload.find('basic') > -1)\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27017\") and ( (dIP == \"69.28.148.250\") or (dIP == \"69.28.156.250\") or (dIP == \"72.165.61.161\") or (dIP == \"72.165.61.185\") or (dIP == \"72.165.61.186\") or (dIP == \"72.165.61.188\") or (dIP == \"68.142.64.164\") or (dIP == \"68.142.64.165\") or (dIP == \"68.142.64.166\") ):\t#Steamfriends client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"steamfriends/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27017\") and ( (sIP == \"69.28.148.250\") or (sIP == \"69.28.156.250\") or (sIP == \"72.165.61.161\") or (sIP == \"72.165.61.185\") or (sIP == \"72.165.61.186\") or (sIP == \"72.165.61.188\") or (sIP == \"68.142.64.164\") or (sIP == \"68.142.64.165\") or (sIP == \"68.142.64.166\") ):\t#Steamfriends server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"steamfriends/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"21020\") or (sport == \"21250\") or (sport == \"27016\") or (sport == \"27017\") or (sport == \"27018\") or (sport == \"27030\") or (sport == \"27035\") or (sport == \"27040\") or (sport == \"28015\") ):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Team Fortress') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27019\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"1265\") or (dport == \"20100\") or (dport == \"21550\") or (dport == \"27000\") or (dport == \"27017\") or (dport == \"27018\") or (dport == \"27019\") or (dport == \"27022\") or (dport == \"27030\") or (dport == \"27035\") or (dport == \"27050\") or (dport == \"27078\") or (dport == \"27080\") or (dport == \"28015\") or (dport == \"28100\") or (dport == \"45081\") ):\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Source Engine Query') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"24441\"):\t\t\t#Pyzor\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif 
(Payload != None) and (Payload.find('User:') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"pyzor/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t#FIXME - interesting issue; the ttl<5 test will catch traceroutes coming into us, but not ones we're creating to go out. Hmmm.\n\t\t\telif ( (dport >= \"33434\") and (dport <= \"33524\") ) and (p['IP'].ttl <= 5):\t#udptraceroute client\n\t\t\t\tif ((not LiveUDPClient.has_key(sIP + \"UDP_33434\")) or (LiveUDPClient[sIP + \"UDP_33434\"] == False)):\n\t\t\t\t\tLiveUDPClient[sIP + \"UDP_33434\"] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_33434\", \"open\", \"udptraceroute/client\")\n\t\t\telif (dport == \"40348\"):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('HLS') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (p['IP'].frag > 0):\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"207.46.51.74\") or (sIP == \"65.55.251.10\"):\t\t\t\t#Bigfish.com - dns?\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"61.215.106.146\"):\t\t\t\t#junk\n\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tDebug(\"Other IP protocol (\" + str(p['IP'].src) + \"->\" + str(p['IP'].dst) + \"): \" + str(p['IP'].proto))\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x86DD:\t\t#IPv6\n\t\tUnhandledPacket(p)\n\telse:\n\t\tprint \"Unregistered ethernet type:\", p['Ethernet'].type\n\t\tUnhandledPacket(p)",
"def parse_packet(packet, traffic_type, pkt_type, exp_dst, step):\n packet_count = 0\n if(traffic_type == \"encap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['IP']['src'] == DST_IP) and\n (packet[i]['Ethernet']['IP']['dst'] == H2_IP) and\n (packet[i]['Ethernet']['IP']['UDP']['dport'] ==\n str(UDP_DPORT)) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['vni'] == VNI_HEX) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['src'] == MAC_A) and\n (packet[i]['Ethernet']['IP']['UDP']['VXLAN']\n ['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect encapsulation'\n print(\"Correct encapsulation\")\n\n elif(traffic_type == \"decap\"):\n if(pkt_type == \"stp\"):\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == STP_DEST_MAC) and\n (packet[i]['Ethernet']['type'] == '0x8870')):\n packet_count += 1\n else:\n for i in packet:\n if ((packet[i]['Ethernet']['src'] == MAC_B) and\n (packet[i]['Ethernet']['dst'] == exp_dst)):\n packet_count += 1\n\n assert (packet_count == PKT_COUNT), 'Incorrect decapsulation'\n print(\"Correct decapsulation\")",
"def parse_device(self) -> None:\n if self.device or self.skip_device_detection:\n return\n\n for Parser in self.DEVICE_PARSERS:\n parser = Parser(\n self.user_agent,\n self.ua_hash,\n self.ua_spaceless,\n self.VERSION_TRUNCATION,\n ).parse()\n if parser.ua_data:\n self.device = parser\n self.all_details['device'] = parser.ua_data\n if self.all_details['device'] != 'desktop' and DESKTOP_FRAGMENT.search(\n self.user_agent) is not None:\n self.all_details['device']['device'] = 'desktop'\n return",
"def parse_mochad_line(self, line):\n # bail out unless it's an incoming RFSEC message\n if line[15:23] == 'Rx RFSEC':\n\n # decode receive RFSEC message. format is either:\n # 09/22 15:39:07 Rx RFSEC Addr: 21:26:80 Func: Contact_alert_min_DS10A\n # ~ or ~\n # 09/22 15:39:07 Rx RFSEC Addr: 0x80 Func: Motion_alert_SP554A\n line_list = line.split(' ')\n addr = line_list[5]\n func = line_list[7]\n\n func_dict = self.decode_func(func)\n\n return addr, {'func': func_dict}, 'security'\n\n# elif line[15:23] == 'Tx RFSEC':\n\n # decode send RFSEC message. format is either:\n # 09/22 15:39:07 Tx RFSEC Addr: 21:26:80 Func: Contact_alert_min_DS10A\n # ~ or ~\n # 09/22 15:39:07 Tx RFSEC Addr: 0x80 Func: Motion_alert_SP554A\n# line_list = line.split(' ')\n# addr = line_list[5]\n# func = line_list[7]\n#\n# func_dict = self.decode_func(func)\n#\n# return addr, {'func': func_dict}, 'trigger'\n\n elif line[15:20] == 'Rx RF':\n\n # decode receive RF message. format is:\n # 02/13 23:54:28 Rx RF HouseUnit: B1 Func: On\n line_list = line.split(' ')\n house_code = line_list[5];\n house_func = line_list[7]\n\n return house_code, {'func': house_func}, 'radio'\n\n elif line[15:20] == 'Rx PL':\n \n # decode receive PL message. format is:\n # 02/13 23:54:28 Rx PL HouseUnit: A1\n # 02/13 23:54:28 Rx PL House: A Func: On\n line_list = line.split(' ')\n if line[21:27] == 'HouseU':\n house_code = line_list[5]\n with open ('/root/.house_code', 'wb') as f:\n pickle.dump(house_code, f)\n else:\n house_func = line_list[7]\n with open ('/root/.house_code', 'rb') as f:\n house_code = pickle.load(f)\n return house_code, {'func': house_func}, 'powerline'\n \n elif line[15:20] == 'Tx PL':\n \n # decode send RF/PL message. format is:\n # 02/13 23:54:28 Tx PL HouseUnit: A1\n # 02/13 23:54:28 Tx PL House: A Func: On\n line_list = line.split(' ')\n if line[21:27] == 'HouseU':\n house_code = line_list[5]\n with open ('/root/.house_code', 'wb') as f:\n pickle.dump(house_code, f)\n else:\n house_func = line_list[7]\n with open ('/root/.house_code', 'rb') as f:\n house_code = pickle.load(f)\n return house_code, {'func': house_func}, 'button'\n \n return '', ''",
"def parse_network(self):\n # Get the first symbol from Scanner\n self.symbol = self.scanner.get_symbol()\n\n # Main structure\n self.devicelist()\n self.connectlist()\n self.monitorlist()\n\n if not (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.END_ID):\n # Error: 'END' keyword required at end of file\n self.error(self.NO_END, [])\n\n if self.error_count == 0:\n return True\n else:\n # Reset all classes for GUI\n self.names = Names()\n self.devices = Devices(self.names)\n self.network = Network(self.names, self.devices)\n self.monitors = Monitors(self.names, self.devices, self.network)\n\n return False",
"def other_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n software_version = neighbor[version_s]\n if software_version.__contains__(','):\n for software in software_version.split(','):\n if software.__contains__('Version'):\n software_version = software.split('Version')[1].split('REL')[0]\n if software_version.__contains__(':'):\n software_version = software_version.replace(': ', '')\n else:\n software_version = software_version.replace(' ', '')\n break\n elif software_version.__contains__('Version'):\n found_1 = False\n for x in software_version.split(' '):\n if x.__contains__('Version'):\n found_1 = True\n continue\n if found_1:\n software_version = x\n break\n elif software_version.__contains__('version'):\n found_1 = False\n for x in software_version.split(' '):\n if x.__contains__('version'):\n found_1 = True\n continue\n if found_1:\n software_version = x\n break\n platform = neighbor['platform']\n if platform.__contains__('cisco '):\n platform = neighbor['platform'].replace('cisco ', '')\n elif platform.__contains__('Cisco '):\n platform = neighbor['platform'].replace('Cisco ', '')\n else:\n platform = neighbor['platform']\n other = {\n 'hostname': hostname,\n 'ip_address': mgmt_ip,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': neighbor['local_port'],\n 'local_intf': neighbor['remote_port']\n },\n 'software_version': software_version,\n 'model': platform\n }\n self.others.append(other)",
"def callback(self, pkt):\n if ARP in pkt:\n self.parse_ip(pkt.sprintf(\"%ARP.psrc%\"))\n if TCP in pkt or UDP in pkt:\n self.parse_ip(pkt.sprintf(\"%IP.src%\"))\n self.parse_ip(pkt.sprintf(\"%IP.dst%\"))",
"def retrieve_data(self, device):\n CISCO_USER_MODE_LOGIN_INFO['device_type'] = 'cisco_ios'\n CISCO_USER_MODE_LOGIN_INFO['ip'] = device\n # add try catch\n device = ConnectHandler(**CISCO_USER_MODE_LOGIN_INFO)\n device.find_prompt()\n lldp_connections = device.send_command('show cdp neighbors')\n ram_usage = device.send_command('show processes memory | include Processor')\n cpu_usage = device.send_command('show processes cpu sorted | include CPU')\n errors = device.send_command('show interfaces | include CRC|Fast|Serial|Gig')\n unsed_port = device.send_command('show interfaces | include line protocol is down')\n return lldp_connections, ram_usage, cpu_usage, errors, unsed_port",
"def router_sw_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if hostname.__contains__('('):\n hostname = hostname.split('(')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n software_version = neighbor[version_s]\n platform = neighbor['platform']\n for software in software_version.split(','):\n if software.__contains__('Version'):\n software_version = software.split('Version')[1].split('REL')[0]\n if software_version.__contains__(':'):\n software_version = software_version.replace(': ', '')\n else:\n software_version = software_version.replace(' ', '')\n break\n if platform.__contains__('cisco '):\n platform = neighbor['platform'].replace('cisco ', '')\n elif platform.__contains__('Cisco '):\n platform = neighbor['platform'].replace('Cisco ', '')\n else:\n platform = neighbor['platform']\n router_sw = {\n 'hostname': hostname,\n 'ip_address': mgmt_ip,\n 'remote_intf': neighbor['local_port'],\n 'local_intf': neighbor['remote_port'],\n 'software_version': software_version,\n 'model': platform\n }\n self.routers_switches.append(router_sw)",
"def test_killer_parser():\n sentence = \"Salut GrandPy ! Est-ce que tu connais l'adresse d'OpenClassrooms ?\"\n address = \"7 Cité Paradis, 75010 Paris\"\n kp = KillerParser()\n test_sentence = kp.sentence_parser(sentence)\n assert test_sentence == \"openclassrooms\"\n test_address = kp.address_parser(address)\n assert test_address == \"cité paradis paris\"",
"def test_clfter_gtpc_pf(self):\n self.gtp_test(\n type='clfter', port='pf', tunnel_pkt='gtpc', inner_L3=None)",
"def never_used_if():\r\n net_connect = ConnectHandler(**devices)\r\n output = net_connect.send_command(\"Sh int | inc Gig|Last input\")\r\n\r\n \"\"\" divide string in lines \"\"\"\r\n output_ = output.split(\"GigabitEthernet\")\r\n\r\n \"\"\" define function to extract word after string \"\"\"\r\n def after(value, a):\r\n # Find and validate first part.\r\n pos_a = value.rfind(a)\r\n if pos_a == -1:\r\n return \"\"\r\n # Returns chars after the found string.\r\n adjusted_pos_a = pos_a + len(a)\r\n if adjusted_pos_a >= len(value):\r\n return \"\"\r\n return value[adjusted_pos_a:]\r\n\r\n\r\n \"\"\" loop over the lines to extract info \"\"\"\r\n dict_upper = {}\r\n for line in output_:\r\n line = line.replace(\"\\n\", \"\")\r\n dict_lower = {}\r\n dict_lower[\"Status\"] = (\r\n after(line, f\"{line.split(' ')[0]} is \").split(\" \")[0].replace(\",\", \"\")\r\n )\r\n dict_lower[\"Line protocol\"] = (\r\n after(line, \"line protocol is \").split(\" \")[0].replace(\",\", \"\")\r\n )\r\n dict_lower[\"Last input\"] = after(line, \"Last input \").split(\" \")[0].replace(\",\", \"\")\r\n dict_lower[\"Last output\"] = (\r\n after(line, f\"Last input {dict_lower['Last input']}, output \")\r\n .split(\" \")[0]\r\n .replace(\",\", \"\")\r\n )\r\n dict_lower[\"Output hang\"] = (\r\n after(line, \"output hang \").split(\" \")[0].replace(\",\", \"\")\r\n )\r\n dict_upper[f\"GigabitEthernet{line.split(' ')[0]}\"] = dict_lower\r\n\r\n\r\n \"\"\" find the gigabitinterfaces that never been used \"\"\"\r\n never_used_interfaces = []\r\n for i in dict_upper.keys():\r\n if dict_upper[i][\"Last input\"] == \"never\":\r\n never_used_interfaces.append(i)\r\n \r\n return never_used_interfaces",
"def retrieve_data(self, device):\n CISCO_USER_MODE_LOGIN_INFO['device_type'] = 'cisco_ios'\n CISCO_USER_MODE_LOGIN_INFO['ip'] = device\n # add try catch\n device = ConnectHandler(**CISCO_USER_MODE_LOGIN_INFO)\n device.find_prompt()\n lldp_connections = device.send_command('show cdp neighbors')\n ram_usage = device.send_command('show processes memory | include Processor')\n cpu_usage = device.send_command('show processes cpu sorted | include CPU')\n errors = device.send_command('show interfaces | include CRC|Fast|Serial|Gig')\n unsed_port = device.send_command('show interfaces | include line protocol is down')\n device.disconnect()\n return lldp_connections, ram_usage, cpu_usage, errors, unsed_port",
"def CASE2( self, main ):\n import json\n from tests.CHOTestMonkey.dependencies.elements.NetworkElement import Device, Link\n\n main.log.report( \"Collect and Store topology details from ONOS\" )\n main.log.report( \"____________________________________________________________________\" )\n main.case( \"Collect and Store Topology Details from ONOS\" )\n topoResult = main.TRUE\n topologyOutput = main.Cluster.active( 0 ).CLI.topology()\n topologyResult = main.Cluster.active( 0 ).CLI.getTopology( topologyOutput )\n ONOSDeviceNum = int( topologyResult[ 'devices' ] )\n ONOSLinkNum = int( topologyResult[ 'links' ] )\n mininetSwitchNum = len( main.mininetSwitches )\n mininetLinkNum = ( len( main.mininetLinks ) - len( main.mininetHosts ) ) * 2\n if mininetSwitchNum == ONOSDeviceNum and mininetLinkNum == ONOSLinkNum:\n main.step( \"Collect and store device data\" )\n stepResult = main.TRUE\n dpidToName = {}\n for key, value in main.mininetSwitches.items():\n dpidToName[ 'of:' + str( value[ 'dpid' ] ) ] = key\n devicesRaw = main.Cluster.active( 0 ).CLI.devices()\n devices = json.loads( devicesRaw )\n deviceInitIndex = 0\n for device in devices:\n name = dpidToName[ device[ 'id' ] ]\n newDevice = Device( deviceInitIndex, name, device[ 'id' ] )\n print newDevice\n main.devices.append( newDevice )\n deviceInitIndex += 1\n utilities.assert_equals( expect=main.TRUE,\n actual=stepResult,\n onpass=\"Successfully collected and stored device data\",\n onfail=\"Failed to collect and store device data\" )\n\n main.step( \"Collect and store link data\" )\n stepResult = main.TRUE\n linksRaw = main.Cluster.active( 0 ).CLI.links()\n links = json.loads( linksRaw )\n linkInitIndex = 0\n for link in links:\n for device in main.devices:\n if device.dpid == link[ 'src' ][ 'device' ]:\n deviceA = device\n elif device.dpid == link[ 'dst' ][ 'device' ]:\n deviceB = device\n assert deviceA is not None and deviceB is not None\n newLink = Link( linkInitIndex, deviceA, link[ 'src' ][ 'port' ], deviceB, link[ 'dst' ][ 'port' ] )\n print newLink\n main.links.append( newLink )\n linkInitIndex += 1\n # Set backward links and outgoing links of devices\n for linkA in main.links:\n linkA.deviceA.outgoingLinks.append( linkA )\n if linkA.backwardLink is not None:\n continue\n for linkB in main.links:\n if linkB.backwardLink is not None:\n continue\n if linkA.deviceA == linkB.deviceB and\\\n linkA.deviceB == linkB.deviceA and\\\n linkA.portA == linkB.portB and\\\n linkA.portB == linkB.portA:\n linkA.setBackwardLink( linkB )\n linkB.setBackwardLink( linkA )\n utilities.assert_equals( expect=main.TRUE,\n actual=stepResult,\n onpass=\"Successfully collected and stored link data\",\n onfail=\"Failed to collect and store link data\" )\n else:\n main.log.info( \"Devices (expected): %s, Links (expected): %s\" % ( mininetSwitchNum, mininetLinkNum ) )\n main.log.info( \"Devices (actual): %s, Links (actual): %s\" % ( ONOSDeviceNum, ONOSLinkNum ) )\n topoResult = main.FALSE\n\n caseResult = topoResult\n utilities.assert_equals( expect=main.TRUE,\n actual=caseResult,\n onpass=\"Saving ONOS topology data test PASS\",\n onfail=\"Saving ONOS topology data test FAIL\" )\n\n if not caseResult:\n main.log.info( \"Topology does not match, exiting test...\" )\n main.cleanAndExit()",
"def _check_deviceline(self):\n # Check if device name is valid\n if self._check_name(self.symbol):\n self.device_name = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '=' is used\n if self._is_equal(self.symbol):\n # Get next symbol\n self.symbol = self.scanner.get_symbol()\n # Check if name has been assigned to a valid device type\n if self._check_validdevice(self.symbol):\n self.device_kind = self.symbol\n self.symbol = self.scanner.get_symbol()\n if self._is_semicolon(self.symbol):\n # No device property\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create device if no previous errors\n device_error = self.devices.make_device(\n self.device_name.id,\n self._device_type_returner(\n self.device_kind))\n # Send the returned error ID for error reporting\n self._display_semantic_error(device_error)\n self.symbol = self.scanner.get_symbol()\n elif self._is_comma(self.symbol):\n # Device property set\n self.symbol = self.scanner.get_symbol()\n self.device_param, \\\n self.device_paramvalue \\\n = self._check_paramindevice()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create device if no previous errors\n if self._device_type_returner(\n self.device_kind) == \\\n self.devices.SIGGEN:\n # Use symbol attribute 'value' to get parameter\n # value, since the symbol's 'id' attribute\n # would not capture a leading '0' in the signal\n # generator's signal string\n device_error = self.devices.make_device(\n self.device_name.id,\n self._device_type_returner(\n self.device_kind),\n self.device_paramvalue.value)\n else:\n # For other device types\n device_error = self.devices.make_device(\n self.device_name.id,\n self._device_type_returner(\n self.device_kind),\n self.device_paramvalue.id)\n # Send the returned error ID for error reporting\n self._display_semantic_error(device_error)\n self._check_semicolon_else_skip(self.symbol)\n self.symbol = self.scanner.get_symbol()\n else:\n # Neither semicolon nor comma\n self._display_syntax_error(\"semicoloncomma\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # The device type is not valid\n self._display_syntax_error(\"devicetype\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # No '='\n self._display_syntax_error(\"equal\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # The device name is not valid\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n\n return None",
"def devicelist(self):\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.DEVICES_ID):\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.LEFT_CURLY):\n self.symbol = self.scanner.get_symbol()\n self.device()\n while (self.symbol.type == self.scanner.NAME):\n self.device()\n # Check right curly bracket ends device block\n if (self.symbol.type == self.scanner.RIGHT_CURLY):\n self.symbol = self.scanner.get_symbol()\n else:\n if (self.symbol.type == self.scanner.KEYWORD and\n self.symbol.id == self.scanner.CONNECT_ID):\n # Error Type: missing '}'\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END'\n self.error(self.MISSING_RIGHT_CURLY,\n [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Bad name terminated devices incorrectly\n # Error type: Invalid name\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END'\n self.error(self.DEVICE_NAME, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Left curly needed after 'DEVICE'\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END' KEYWORD\n self.error(self.NO_CURLY_DEVICE, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: 'DEVICE' keyword required\n # Stopping Symbols: 'CONNECT', 'MONITOR' or 'END' KEYWORD\n self.error(self.NEED_DEVICE_KEYWORD, [self.scanner.KEYWORD],\n [self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])",
"def wap_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n software_version = neighbor[version_s]\n platform = neighbor['platform']\n for software in software_version.split(','):\n if software.__contains__('Version'):\n software_version = software.split('Version')[1]\n if software_version.__contains__(':'):\n software_version = software_version.replace(': ', '')\n else:\n software_version = software_version.replace(' ', '')\n break\n if platform.__contains__('cisco '):\n platform = neighbor['platform'].replace('cisco ', '')\n elif platform.__contains__('Cisco '):\n platform = neighbor['platform'].replace('Cisco ', '')\n else:\n platform = neighbor['platform']\n ap = {\n 'hostname': hostname,\n 'ip_address': mgmt_ip,\n 'model': platform,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': neighbor['local_port']\n },\n 'software_version': software_version\n }\n self.waps.append(ap)",
"def parse(vera, s):\n ha = SwitchAction()\n ha.device = vera.get_device_by_id(s[\"device\"])\n ha.value = s[\"arguments\"][0][\"value\"]\n return ha",
"def handle(text, mic, profile):\n\n text = text.lower()\n ## Try to work out which OS to wake up\n match = re.search(r\"(?P<action>\\w+) (?P<target>\\w+)\", text)\n if match:\n ## tempted to just use \"os\" here but it is a package name, meh\n target = match.group(\"target\")\n action = match.group(\"action\")\n os_config = profile[\"activator\"]\n if target not in os_config:\n if target != \"check\" and target != \"tunnel\":\n #target not recognised\n mic.say(\"I'm sorry. Target operating system %s is not recognised.\" % target)\n return # break\n if action == \"activate\":\n try:\n if target == \"check\":\n ser = serial.Serial(\"/dev/ttyUSB0\", 38400, timeout=2)\n write(ser, \"check\")\n mic.say(\"Activation checking!\")\n elif target == \"tunnel\":\n ser = serial.Serial(\"/dev/ttyUSB0\", 38400, timeout=2)\n write(ser, \"tunnel\")\n mic.say(\"Activating tunnel\")\n rnd_suffix = str(randint(1000,9999))\n subprocess.Popen([\"node\", \"/usr/local/bin/lt\", \"--port\", \"80\", \"--subdomain\", \"famanson%s\" % rnd_suffix, \"&\"])\n mic.say(\"Your suffix is %s\" % rnd_suffix)\n else:\n mic.say(\"Activating %s.\" % target)\n mac = os_config[target][\"mac\"]\n dest = None\n if \"host\" in os_config[target]:\n dest = os_config[target][\"host\"]\n wol.send_magic_packet(mac, dest=dest)\n\n # Now sleep for 20 seconds to wait for grub to show up\n time.sleep(20)\n ser = serial.Serial(\"/dev/ttyUSB0\", 38400, timeout=2)\n\n # Send the activate command\n write(ser, target)\n ack1 = read(ser)\n if not ack1 or ACK1 not in ack1:\n print ack1\n mic.say(\"Acknowledge signal 1 was not received\")\n raise ValueError\n # Got ack2\n mic.say(\"Activation completed!\")\n except:\n traceback.print_exc()\n mic.say(\"Error found. Activation failed!\")\n finally:\n if ser:\n print \"Closing Serial connection\"\n ser.close()\n\n elif action == \"close\":\n mic.say(\"Closing %s.\" % target)\n if target == \"windows\":\n return\n else:\n host = os_config[target][\"host\"]\n subprocess.Popen([\"ssh\", \"pi@%s\" % host, \"sudo\", \"poweroff\"])\n else:\n mic.say(\"I'm sorry I did not catch your last command. Please try again.\")",
"def _check_monitorline(self):\n # Check if device name is valid\n if self._check_name(self.symbol):\n self.monitor_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if ';' is used\n if self._is_semicolon(self.symbol):\n # End of line reached, exit function\n self.symbol = self.scanner.get_symbol()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n monitor_error = self.monitors.make_monitor(\n self.monitor_device.id, None)\n self._display_semantic_error(monitor_error)\n elif self._is_period(self.symbol):\n # DType output\n self.symbol = self.scanner.get_symbol()\n if self._check_validdtypeoutput(self.symbol):\n self.monitor_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n if self._is_semicolon(self.symbol):\n # End of line reached, exit function\n self.symbol = self.scanner.get_symbol()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n monitor_error = self.monitors.make_monitor(\n self.monitor_device.id,\n self.monitor_port.id)\n self._display_semantic_error(monitor_error)\n else:\n # Semicolon error\n self._display_syntax_error(\"semicolon\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n self._display_syntax_error(\"doutput\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # Semicolon error\n self._display_syntax_error(\"semicolon\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n\n return None",
"def _packet_in(self, ev):\n\n dp = ev.msg.datapath\n ofp = dp.ofproto\n parser = dp.ofproto_parser\n match = ev.msg.match\n\n ##SNDCP packet with multiple fragments recieved - print warning, send ICMP fragmentation needed\n ##TODO: Not WOrking correctly\n ## File \"/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py\", line 746, in __getitem__\n ## return dict(self._fields2)[key]\n ## KeyError: 'udp_dst'\n\n # if (match['eth_type'] == 0x0800 and match['ip_proto'] == inet.IPPROTO_UDP\n # and match['udp_dst'] == VGSN_PORT and match['sndcp_first_segment'] == 1\n # and match['sndcp_more_segments'] == 1):\n # _icmp_send(dp,match['in_port'],match['ipv4_dst'],match['ipv4_src'],match['eth_dst'],match['eth_src'],icmp_type=3,icmp_code=4)\n # LOG.warning('WARNING: Device with IP: '+match['ipv4_src']+' sent fragmented sndcp packet')\n # return\n\n ##ARP request recieved - send 'I'm here' response\n if match['eth_type'] == 0x0806 and match['arp_op'] == 1:\n LOG.debug(\"ARP request accepted\")\n _arp_send(dp=dp, port_out=match['in_port'], arp_code=2, eth_dst=match['eth_src'], eth_target=match['arp_sha'],\n ip_target=match['arp_spa'], ip_sender=match['arp_tpa'])\n LOG.debug('Reply to '+match['arp_spa'] +': Host '+match['arp_tpa']+' is at forwarder '+str(dp.id) + \" with ethX source MAC address\")\n return\n\n ##ARP response with target_ip==DISCOVERY_ARP_IP recieved - we found APN\n #\n # FIXED: All ARP responses are replied, regardless of the target IP\n #\n # TODO : At this point only ARPs belonging to the APNs networks subnet should\n # be answered\n if match['eth_type'] == 0x0806 and match['arp_op'] == 2:\n LOG.debug('TUNNEL MNGR: ARP response with target APN discovery IP recieved at controller, processing for APN extraction')\n pkt = packet.Packet(array.array('B', ev.msg.data))\n arp_pkt=pkt.get_protocol(arp.arp)\n apn_ip = arp_pkt.src_ip\n apn_mac= arp_pkt.src_mac\n port = match['in_port']\n\n ##Search for apn in APN_POOL to add mac addr. and update topology\n for sApn in APN_POOL:\n if sApn.ip_addr == apn_ip:\n LOG.debug('Recieved ARP response was from ' + sApn.name + ' APN')\n sApn.eth_addr = apn_mac\n sApn.port = port\n sApn.dpid = dp.id\n # Links towards APNs will not be measured\n topo.add_link(dp.id,str(sApn.name),port)\n topo.add_link(str(sApn.name),dp.id,0)\n topo.reload_topology()\n LOG.debug('TUNNEL MNGR: APN '+str(sApn.name)+' found at forwarder: '+str(dp.id)+', port: '+str(port) + ' by ARP search')\n\n ##Add special rules to edge forwarder\n self.on_edge_inet_dp_join(dp, port, sApn)\n\n # FIX: We do not handle bss as a special APN\n # For greater extensibility, BSS/UTRAN/LAN APNs (exit/enter) points\n # will be handled in a generic manner\n #\n ##Create MAC-tunnels between APN and all BSSs\n #for bss in BSS_POOL:\n # self.add_tunnel(bss,apn)\n #break\n\n ### WMNC: In this case, we are not making tunnels between\n # two types of ingress/egress point, but actually same type\n\n for dApn in APN_POOL:\n # we are cycling through all possible APNs, looking for different APN tupples\n # with filled HW addresses (already found by APN search)\n if sApn != dApn and dApn.eth_addr != None:\n LOG.debug('TUNNEL MNGR: Different APNs with filled HW address found, lets find out if there is tunnel between them')\n\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('TUNNEL MNGR: No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next APN discovered.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n\n\n return\n\n ##ICMP echo with dst_ip==DISCOVERY_IP_DST recieved - new link between forwarders is up\n if match['eth_type'] == 0x0800 and match['ipv4_dst'] == DISCOVERY_IP_DST and match['ip_proto'] == 1:\n #LOG.debug('TOPO MNGR: ICMP echo recieved at controller, processing for link extraction or latency measurement')\n\n pkt = packet.Packet(array.array('B', ev.msg.data))\n\n ##Discovery pings carry information about sending datapath in payload of icmp packet\n ##these information are in Dictionary format, we parse the out with _icmp_parse_payload() method\n body = _icmp_parse_payload(pkt)\n neighbourDPID=body['dpid']\n neighbourPort=body['port_out']\n\n ## measurement\n ## currentClock moved way up to improve precision\n receivedClock=float(body['clock'])\n currentClock = time.clock()\n latency = currentClock - receivedClock\n\n currentDate = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n ##Update latency or add new edges to topology.\n if topo.DynamicGraph.has_edge(dp.id, neighbourDPID) and topo.DynamicGraph.has_edge(neighbourDPID, dp.id):\n topo.StaticGraph[neighbourDPID][dp.id]['pdv'] = topo.StaticGraph[neighbourDPID][dp.id]['lat'] - latency\n topo.StaticGraph[neighbourDPID][dp.id]['lat'] = latency\n topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n #topo.StaticGraph[neighbourDPID][dp.id]['upt'] = currentDate\n loss = self.loss_update(neighbourDPID, dp.id, currentDate)\n #LOG.debug('TOPO MNGR: Updating latency ' + str(latency) + ' and date ' + str(currentDate) + ' LOSS: ' + str(loss))\n topo.reload_topology()\n else:\n ## latency not correct for both directions when adding links\n ## update occurs on receive of next measurement packet from oposite direction\n topo.add_link(dp.id, neighbourDPID, ev.msg.match['in_port'], latency, currentDate)\n topo.add_link(neighbourDPID, dp.id, neighbourPort , latency, currentDate)\n LOG.debug('TOPO MNGR: Topology changed: New link between forwarder ID '+str(dp.id)+ ' via port ' + str(ev.msg.match['in_port'])\n +' and forwarder ID '+str(neighbourDPID)+ ' via port ' + str(neighbourPort) + ' was discovered.')\n\n topo.reload_topology()\n ## retry to create tunnels\n ## find better paths between APNs\n for sApn in APN_POOL:\n for dApn in APN_POOL:\n if sApn != dApn:\n LOG.debug('TOPO MNGR: Topology changed: trying to re-build inactive tunnel between:' + sApn.name + ' and ' + dApn.name)\n paths = False\n try:\n paths = nx.all_simple_paths(topo.DynamicGraph, source=sApn.name, target=dApn.name)\n except:\n LOG.debug('No path between: ' + sApn.name + ' and ' + dApn.name + '. 
Retry when next fwd connects.')\n\n LOG.debug('TUNNEL MNGR: These are the paths between them (possible tunnels):')\n if paths:\n for path in paths:\n LOG.debug('TUNNEL MNGR: Calling add_plainMacTunnel for ' + sApn.name + ' and ' + dApn.name + ' with path: ' + str(path))\n self.add_plainMacTunnel(sApn, dApn, path)\n else:\n LOG.debug('TUNNEL MNGR: PATHS == 0 ????????????????')\n return\n\n # flow of last resort (process for routing)\n if match['eth_type'] == 0x0800:\n # LOG.debug('*****************Flow of last resort matched(plain IP), process for routing********'\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'] + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp'])))\n ## Not very proud of myself, but it will do the trick\n ## Turbo lumberjack routing logic\n ## TODO: Implement a longest prefix match routing\n\n candidates = []\n\n for source, destination, ip_dscp in routesList:\n if ((source == match['ipv4_dst'] and destination == match['ipv4_src']) or (source == match['ipv4_src'] and destination == match['ipv4_dst'])) and ip_dscp == match['ip_dscp']:\n # LOG.debug('ROUTING: route source: ' + str(source) + 'destination: ' + str(destination)\n # + ' match[ipv4_dst]: ' + str(match['ipv4_dst'])\n # + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(ip_dscp)\n # + ' already exists, aborting addition of new route')\n return\n\n for tunnel in TUNNELS:\n if (tunnel.sApn.ip_addr == match['ipv4_dst'] and tunnel.dApn.ip_addr == match['ipv4_src']) or (tunnel.sApn.ip_addr == match['ipv4_src'] and tunnel.dApn.ip_addr == match['ipv4_dst']):\n LOG.debug('ROUTING: Tunnel candidate found in list of tunnels. Adding tunnel path: ' + str(tunnel.po_edges) + ' to candidates.')\n candidates.append(tunnel)\n\n trafficClass = self.TC_selection(match['ip_dscp'])\n\n if len(candidates) == 0:\n LOG.debug('ROUTING: match[ipv4_dst]: ' + str(match['ipv4_dst'])\n + ' match[ipv4_src]: ' + str(match['ipv4_src']) + ' DSCP: ' + str(match['ip_dscp']))\n LOG.debug('ROUTING: ERROR, NO feasible tunnels for such route.')\n return\n\n LOG.debug('Looking for tunnels: DST_IP: ' + match['ipv4_dst'] + ' SRC_IP: ' + match['ipv4_src'] + ' DSCP: ' + str(match['ip_dscp']) + '(traffic class: ' + str(trafficClass) + ')' + ' Incoming from FWD: ' + str(dp.id))\n tunnel = self.tunnel_selection(trafficClass, candidates)\n LOG.debug('TE MNGR: Selected tunnel Path out: ' + str(tunnel.path_out_str) + ' meter_id: ' + str(tunnel.meter_id))\n\n dscp = match['ip_dscp']\n\n ## meter_id\n ## 2,4,6,8,10 = 500kbps, 1,3,5,7,9 = 1000kbps ...\n ## 0 = 100Gbps\n meter_id = tunnel.meter_id\n\n #\n # FIXME: incomplete set of rules installed on LAN Access forwarders\n # TODO : Philosophy of table IDs should be clarified, as now it total mess!!!\n # TODO : this should be done only once, from that moment, all user plane packets\n # should travelse only forwarder and should not be sent to controller\n\n\n\n #WAY OUT\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.dApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_src=tunnel.tid_in), parser.OFPActionSetField(eth_dst=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id=INGRESS_TABLE)\n dp.send_msg(req)\n\n LOG.debug('ROUTING: Installing flow 
ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.dApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.dApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_out))\n\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_out)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.dApn.eth_addr), parser.OFPActionOutput(tunnel.path_out[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY OUT to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.dApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_out)+ ' dApn ETH addr: ' + str(tunnel.dApn.eth_addr))\n\n #WAY IN\n dp = dpset.get(tunnel.dApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_type=0x0800, ipv4_dst=tunnel.sApn.ip_addr, ip_dscp=dscp)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.tid_in), parser.OFPActionSetField(eth_src=tunnel.tid_out)]\n inst = [parser.OFPInstructionGotoTable(MAC_TUNNEL_TABLE), parser.OFPInstructionMeter(meter_id), parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=100, match=match, instructions=inst, table_id = INGRESS_TABLE)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(INGRESS_TABLE) + ' DP ID: ' + str(tunnel.sApn.dpid) + ' Tunel dApn IP addr: ' + str(tunnel.sApn.ip_addr) + ' Tunnel ID: ' + str(tunnel.tid_in))\n\n\n dp = dpset.get(tunnel.sApn.dpid)\n parser = dp.ofproto_parser\n ofp = dp.ofproto\n match = parser.OFPMatch (eth_dst=tunnel.tid_in)\n actions = [parser.OFPActionSetField(eth_dst=tunnel.sApn.eth_addr), parser.OFPActionOutput(tunnel.path_in[-1].port_out)]\n inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]\n req = parser.OFPFlowMod(datapath=dp, priority=300, match=match, instructions=inst, table_id=ACCESS_ADAPTATION_TABLE_OUT)\n dp.send_msg(req)\n LOG.debug('ROUTING: Installing flow ON WAY IN to forwarderID: ' + str(dp.id) + ',Table: ' + str(ACCESS_ADAPTATION_TABLE_OUT) + ' DP ID: ' + str(tunnel.sApn.dpid)+ ' Tunel ID: ' + str(tunnel.tid_in)+ ' sApn ETH addr: ' + str(tunnel.sApn.eth_addr))\n\n\n LOG.debug('ROUTING: Rules on access edge forwarders installed')\n LOG.debug('ROUTING: Adding route: DST_IP: ' + tunnel.dApn.ip_addr + ' SRC_IP: ' + tunnel.sApn.ip_addr + ' dscp: ' + str(dscp) + ' path out str: ' + tunnel.path_out_str )\n routesList.append( ( tunnel.sApn.ip_addr, tunnel.dApn.ip_addr, dscp) )\n\n parser = dp.ofproto_parser\n\n for dpid in LAN_TYPE_FORWARDERS:\n ## DUNNO why this rule with low priority still hits traffic which is also matched by rules with IP address matches\n ## Here I delete the rule, it is added on FWD when it connects to controoller\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dpid) + ' is a LAN edge forwarder, deleting rules')\n dp = dpset.get(dpid)\n priority = 2\n match = parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,\n table_id=0, actions=actions,\n match=match, priority=priority)\n\n LOG.debug('TOPO MNGR: Forwarder: ' + str(dp.id) + ' is a LAN edge forwarder, installing rules again :)')\n match = 
parser.OFPMatch(eth_type=0x0800)\n actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)]\n self.add_flow(dp, 2, match, actions)",
"def parse_network(self):\n self._check_fixed_start()\n self._check_fixed_others(self.scanner.DEVICES_ID)\n self._check_devicelist()\n self._check_fixed_others(self.scanner.DEVICES_ID)\n self._check_fixed_start()\n self._check_fixed_others(self.scanner.CONNECTIONS_ID)\n self._check_connectionlist()\n self._check_fixed_others(self.scanner.CONNECTIONS_ID)\n # Move on to checking monitors\n self.duplicate_error_checker = 1\n self._check_whole_network()\n self._check_fixed_start()\n self._check_fixed_others(self.scanner.MONITORS_ID)\n self._check_monitorlist()\n self._check_fixed_others(self.scanner.MONITORS_ID)\n self.symbol = self.scanner.get_symbol()\n\n if self.symbol.type == self.scanner.EOF:\n print(\"Finished parsing!\")\n print(\"No of errors:\" +\n str(len(self.syntax_errors_list) +\n len(self.semantic_errors_list)))\n else:\n print(\"There shouldn't be anything here.\")\n\n if len(\n self.syntax_errors_list) == 0 and len(\n self.semantic_errors_list) == 0:\n # No errors in definition file\n return True\n else:\n # Either semantic or syntax error(s) in file\n return False",
"def script_main(session):\n # Get script object that owns this session, so we can check settings, get textfsm templates, etc\n script = session.script\n\n # Start session with device, i.e. modify term parameters for better interaction (assuming already connected)\n session.start_cisco_session()\n\n # Validate device is running a supported OS\n session.validate_os([\"AireOS\"])\n\n # Get additional information we'll need\n get_mobility_group(session, to_cvs=True)\n\n # Return terminal parameters back to the original state.\n session.end_cisco_session()",
"def do_Device (self, line):",
"def _parse(x, cliargs=CliArg(), heap=HeapGate()):\n country = re.findall(r'/images/flags/(..)\\.png', x)\n if not country:\n return 0\n country = country[0]\n ip = re.findall(r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', x)\n if not ip:\n return 0\n ip = ip[0]\n users = re.findall(r'Total.*?(\\d{1,3}.*?)users', x)\n if not users:\n return 0\n users = int(users[0].replace(',',''))\n mbps = re.findall(r'(\\d{1,3}\\.\\d{1,2})\\sMbps', x)\n if not mbps:\n return 0\n mbps = float(mbps[0])\n ms = re.findall(r'(\\d{1,10})\\sms', x)\n if not ms:\n return 0\n ms = int(ms[0])\n vpn = re.findall(r'(do_openvpn[^\\'\" >]+)', x)\n if not vpn:\n return 0\n vpn = cliargs._site+vpn[0]\n node = OpenNode(string=x,country=country,ip=ip,total=users,mbps=mbps,ms=ms,vpn=vpn)\n \"check if vpn fits wanted cli arguments\"\n if cliargs._parse_cliargs(node, heap):\n heap.insert_node(node)\n return 1\n return 0",
"def _parse(self, line):\n comd, value = cmd.parse(line, CLIENT_PREFIX)\n if comd == 'reg':\n self.peername = value\n log.msg('PeerName of %s is %s' %(self.peer, self.peername))\n else:\n return False\n return True",
"def _check_validconnectioninput(self):\n # Check if name is valid\n if self._check_name(self.symbol):\n second_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '.' is used:\n if self._is_period(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if device input begins with 'I'\n if self.names.get_name_string(self.symbol.id)[0] == \"I\":\n # Check if input number is a positive number\n try:\n inputno = int(\n self.names.get_name_string(\n self.symbol.id)[\n 1:])\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n except BaseException:\n # Input number is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n # OR if DType input\n elif self._check_validdtypeinput(self.symbol):\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n else:\n # Input is not valid\n self._display_syntax_error(\"input\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '.'\n self._display_syntax_error(\"period\")\n self._semicolon_skipper()\n return None, None\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n return None, None",
"def _parse(self, line):\n comd, value = cmd.parse(line, CLIENT_PREFIX)\n if comd == 'reg':\n self.peername = value\n elif comd == 'eof':\n self._reset()\n msg = cmd.clientcmd(comd, value)\n msg = cmd.addFirst(msg, self.peername)\n self.sendString(msg)\n elif comd == 'fail':\n self._reset()\n else:\n return False\n return True"
]
| [
"0.5448138",
"0.5367822",
"0.51965165",
"0.51411146",
"0.5129462",
"0.4950195",
"0.4934969",
"0.49349013",
"0.47787055",
"0.47489983",
"0.4734217",
"0.4636806",
"0.46320385",
"0.46221396",
"0.46014342",
"0.4576387",
"0.4569176",
"0.45680135",
"0.45521274",
"0.45414808",
"0.45299628",
"0.4527965",
"0.45175996",
"0.45174432",
"0.45102718",
"0.44949344",
"0.44692677",
"0.44653818",
"0.44608137",
"0.4460805"
]
| 0.55118585 | 0 |
Parses CUCM export of phones with fields 'Description', 'Device Name', and 'Directory Number 1' | def cucm_export_parse(file):
    phones = {}
    while True:
        try:
            with open(file) as phonelist_csv:
                for line in phonelist_csv:
                    if not line.__contains__('Description,Device Name,Directory Number 1'):
                        info = line.split(',')
                        device_name = info[1]
                        description = info[0]
                        directory_number = info[2]
                        phones[device_name.upper()] = {
                            'description': description,
                            'directory_number': directory_number
                        }
            return phones
        except FileNotFoundError:
            raise NoPhoneReportFound('No phone report file found at provided location.') | {
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} | [
"def parse_phone(parsed_data):\n result = []\n known_values = []\n\n contacts = {'registrant_contact': [], 'administrative_contact': [], 'technical_contact': [],\n 'domain_registrar' :[]}\n if 'registrant_contact' in parsed_data:\n contacts['registrant_contact'].append(parsed_data['registrant_contact'])\n if 'administrative_contact' in parsed_data:\n contacts['administrative_contact'].append(parsed_data['administrative_contact'])\n if 'technical_contact' in parsed_data:\n contacts['technical_contact'].append(parsed_data['technical_contact'])\n if 'domain_registrar' in parsed_data:\n contacts['domain_registrar'].append(parsed_data['domain_registrar'])\n # parsing phone number from contact block\n\n for contact, info in contacts.items():\n if info is not None:\n d = {'type': 4, 'data': '', 'properties': {}, 'special_properties': {}, 'ref': {}}\n # properties dictionary\n owener = {'type': 11, 'owner': ''}\n location = {'type': 11, 'location': ''}\n properties_list = []\n special_properties_list = []\n d.update({'ref': {'task': 'whois', 'whois_for': '', 'whois_from': ''}})\n if 'domain_name' in parsed_data and len(parsed_data['domain_name']) > 0:\n d['ref']['whois_for'] = parsed_data['domain_name']\n if 'whois_server' in parsed_data:\n d['ref']['whois_from'] = parsed_data['whois_server']\n\n for name in info:\n if \"phone_number\" in name:\n if name['phone_number'] in known_values:\n break\n for feature in name.keys():\n if feature == \"phone_number\":\n d['data'] = name['phone_number']\n known_values.append(name['phone_number'])\n if feature == \"full_name\":\n owener['owner'] = name['full_name']\n\n if feature ==\"registrar_name\":\n owener['owner'] = name['registrar_name']\n if feature == \"city_name\":\n location['location'] = name['city_name']\n # prevent from create result if phone number of contact is not available\n if d['data'] == '':\n continue\n properties_list.append(location)\n properties_list.append(owener)\n special_properties_list.append({'phone_type': '', 'type': 0})\n special_properties_list.append({'country_code': '', 'type': 0})\n special_properties_list.append({'operator': '', 'type': 0})\n special_properties_list.append({'is_valid': '', 'type': 0})\n d['special_properties'] = special_properties_list\n d['properties'] = properties_list\n result.append(d)\n return result",
"def parse_device_info(self, info_string):\n device = {}\n block_list = [\"[\\x1b[0;\", \"removed\"]\n if not any(keyword in info_string for keyword in block_list):\n try:\n device_position = info_string.index(\"Device\")\n except ValueError:\n pass\n else:\n if device_position > -1:\n attribute_list = info_string[device_position:].split(\" \", 2)\n device = {\n \"mac_address\": attribute_list[1],\n \"name\": attribute_list[2],\n }\n return device",
"def parse_device_info(self, info_string):\n device = {}\n block_list = [\"[\\x1b[0;\", \"removed\"]\n string_valid = not any(keyword in info_string for keyword in block_list)\n\n if string_valid:\n try:\n device_position = info_string.index(\"Device\")\n except ValueError:\n pass\n else:\n if device_position > -1:\n attribute_list = info_string[device_position:].split(\" \", 2)\n device = {\n \"mac_address\": attribute_list[1],\n \"name\": attribute_list[2]\n }\n\n return device",
"def gather_metric(self):\n device_dict = {}\n # Delete first and last line of output of adb.\n output = self._shell.run(self.COMMAND).stdout\n\n # Example Line, Device Serial Num TAB Phone Status\n # 00bd977c7f504caf\toffline\n if output:\n for line in output.split('\\n'):\n spl_line = line.split('\\t')\n # spl_line[0] is serial, [1] is status. See example line.\n device_dict[spl_line[0]] = spl_line[1]\n\n return {self.DEVICES: device_dict}",
"def get_devices_information():\n global nipper_xml\n devices = {}\n\n for device in nipper_xml.findall('./information/devices/device'):\n if DEBUG:\n print \"\\t\" + note + \"Name: %s\" % device.get('name')\n print \"\\t\" + note + \"Type: %s\" % device.get('type')\n print \"\\t\" + note + \"OS: %s\" % device.get('os')\n print \"\\t\" + note + \"OS Version: %s\" % device.get('osversion')\n devices[device.attrib.get('name')] = {'name': device.get('name'),\n 'type': device.get('type'),\n 'os': device.get('os'),\n 'osversion': device.get('osversion')}\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices",
"def _parseMediaInfo(self):\n\t\t# the program path to MediaInfo should be set otherwise\n\t\tenv = {'path': env_mediainfo_dir}\n\t\t# the command for MediaInfo is a fixed command\n\t\tcom = [com_mediainfo, '-f', self.name]\n\t\t# invoke the external program\n\t\tproc = externalProcess(com, env)\n\t\t# read the programs output line by line and parse the output to a dictionary, obtaining all information\n\t\tinfo = {}\n\t\tstate = 'start'\n\t\tstream = 0\n\t\tfor line in proc.execute():\n\t\t\tlist = line.split(\":\")\n\t\t\t# recognize the sections ('General','Video','Audio','Text')\n\t\t\tif len(list) == 1 and list[0] != '':\n\t\t\t\tstate = str(list[0].lstrip().rstrip())\n\t\t\t\t# print \"New state: \", state\n\t\t\telif len(list) >= 2 and list[0] != '' and list[1] != '':\n\t\t\t\t# recognize several stream identifier\n\t\t\t\tif str(list[0].lstrip().rstrip()) == 'Stream identifier':\n\t\t\t\t\tstream = int(str(list[1].lstrip().rstrip()))\n\t\t\t\t\tcontinue\n\t\t\t\t# save the information to the dictionary\n\t\t\t\tkey = state + \"_\" + str(stream) + \"_\" + str(list[0].lstrip().rstrip())\n\t\t\t\twhile key in info.keys():\n\t\t\t\t\tkey += \"_\"\n\t\t\t\tinfo[key] = str(list[1].lstrip().rstrip())\n\t\treturn info",
"def get_devices_summary():\n\n # This function was created to replace get_devices_information\n # because it wasn't detecting virtual systems in Palo Alto Virtual Systems\n global nipper_xml\n devices = {}\n headings = []\n\n # Add the table headings to a list\n for h in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/headings/heading\"):\n if h not in headings:\n headings.append(h.text)\n\n for device in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/tablebody/tablerow\"):\n values = []\n for i in device.findall('./tablecell/item'):\n if i not in values:\n values.append(i.text)\n if DEBUG:\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Name')], values[headings.index('Name')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Device')], values[headings.index('Device')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[0])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[1])\n devices[values[headings.index('Name')]] = {'name': values[headings.index('Name')],\n 'type': values[headings.index('Device')],\n 'os': values[headings.index('OS')].split(' ')[0],\n 'osversion': values[headings.index('OS')].split(' ')[1]\n }\n\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices",
"def parse_devices_spt(self, devices=None):\n\n if not devices:\n self._logger.warning(\"The devices list is empty, so no devices parsed!\")\n return\n try:\n for entry in devices['SCSI Devices']['Device List']:\n device_type = entry['Peripheral Device Type Description']\n if self._include_enclosures:\n if not device_type.startswith('Direct') and \\\n not device_type.startswith('Host Managed') and \\\n not device_type.startswith('Enclosure'):\n continue\n else:\n if not device_type.startswith('Direct') and \\\n not device_type.startswith('Host Managed'):\n continue\n\n # Parse remaining information.\n if device_type.startswith('Direct') or device_type.startswith('Host Managed'):\n device_type = 'disk'\n if self.product_name and not self.product_name in entry['Product Identification'].strip():\n continue;\n if self.vendor_name and not self.vendor_name in entry['Vendor Identification'].strip():\n continue;\n if self.serial_number and not self.serial_number in entry['Product Serial Number'].strip():\n continue;\n if self.target_port and not self.target_port in entry['Device Target Port']:\n continue;\n elif device_type.startswith('Enclosure'):\n device_type = 'enclosure'\n\n device = dict()\n device['Device Type'] = device_type\n\n device['Device Type Description'] = entry['Peripheral Device Type Description']\n device['Product Identification'] = entry['Product Identification'].strip()\n device['Vendor Identification'] = entry['Vendor Identification'].strip()\n device['Revision Level'] = entry['Firmware Revision Level'].strip()\n\n if entry.get('Full Firmware Version') is not None:\n fwver = entry['Full Firmware Version']\n if not fwver.startswith('<not available>'):\n device['Firmware Version'] = fwver\n\n serial = entry['Product Serial Number']\n device['Serial Number'] = serial.strip()\n\n # Note: Not currently displayed. (WWN == LUN Device Identification)\n wwn = entry['Device World Wide Name']\n if wwn.startswith('<not available>'):\n wwn = \"\"\n device['Device World Wide Name'] = wwn\n\n sas_address = entry['Device Target Port']\n if not sas_address.startswith('<not available>'):\n device['SAS Address'] = sas_address\n self._sas_addresses += 1\n else:\n device['SAS Address'] = \"\"\n\n # Note: There's probably a better Pythonic way to do this?\n device['Linux Device Name'] = \"\"\n device['SCSI Device Name'] = \"\"\n device['DMMP Device Name'] = \"\"\n\n # Parse the device paths.\n for path_type in entry['Path Types']:\n if path_type.get('Linux Device'):\n # Handle multiple Linux device paths. (these are \"sd\" devices)\n if device.get('Linux Device Name') and path_type.get('SCSI Nexus'):\n new_device = copy.deepcopy(device)\n self._devices.append(new_device)\n # Fall through to update this device entry.\n # Initialize information for this (or next) device.\n device['Linux Device Name'] = path_type['Linux Device']\n device['Linux SCSI Nexus'] = path_type['SCSI Nexus']\n if path_type.get('SCSI Device'):\n device['SCSI Device Name'] = path_type['SCSI Device']\n if path_type.get('Device Target Port'):\n device['SAS Address'] = path_type['Device Target Port']\n\n elif path_type.get('SCSI Device'):\n # Handle multiple SCSI device paths. 
(now, \"sg\" devices only)\n if device.get('SCSI Device Name') and path_type.get('SCSI Nexus'):\n new_device = copy.deepcopy(device)\n self._devices.append(new_device)\n # Fall through to update this device entry.\n # Initialize information for this (or next) device.\n device['SCSI Device Name'] = path_type['SCSI Device']\n device['SCSI Nexus'] = path_type['SCSI Nexus']\n if path_type.get('Device Target Port'):\n device['SAS Address'] = path_type['Device Target Port']\n\n elif path_type.get('DMMP Device') is not None:\n # Initialize information for this device. (limited)\n device['DMMP Device Name'] = path_type['DMMP Device']\n\n # Hack: We don't find a SCSI device if there's no serial number or device ID (WWN).\n # This is observed on Linux VM's, so not common, but we still wish to handle this!\n if not len(device['SCSI Device Name']):\n # Funky DM-MP names are skipped! (we deal with sd and/or sg devices only)\n # /dev/mapper/centos_cos--lab--vm01-root\n if not len(device['Linux Device Name']):\n continue\n\n self._devices.append(device)\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI devices: {0}\".format(exc))\n raise exc",
"def find_device_info(xcresult_path):\n parsed = xcresulttool_json('get', '--path', xcresult_path)\n actions = parsed['actions']['_values']\n action = actions[-1]\n\n result = action['runDestination']['targetDeviceRecord']['modelUTI']['_value']\n return result",
"def extract_device_information(self, host_dict):\n self.host_list = []\n if self.args.hostname is None:\n try:\n hosts_val = self.main_file[\"hosts\"]\n except KeyError as ex:\n self.logger.error(\n colorama.Fore.RED\n + \"\\nERROR occurred !! Hostname not given properly %s\" % str(ex),\n extra=self.log_detail,\n )\n # raise Exception(ex)\n except Exception as ex:\n self.logger.error(\n colorama.Fore.RED + \"\\nERROR occurred !! %s\" % str(ex),\n extra=self.log_detail,\n )\n # raise Exception(ex)\n else:\n # when group of devices are given, searching for include keyword in\n # hosts in main.yaml file\n self.get_hosts_list(hosts_val, host_dict)\n else:\n # login credentials are given from command line\n host_dict[\"0\"] = {\n \"device\": self.args.hostname,\n \"username\": self.args.login,\n \"passwd\": self.args.passwd,\n }\n self.host_list.append(self.args.hostname)",
"def get_mobile_info(self):\n # 1. select brand\n self.select_brand()\n # 2. select os\n self.select_os()\n # 3. device_id\n self.gen_device_id()\n # 4. lat lon\n self.gen_lat_lon()\n # 5. mac\n self.gen_mac()",
"def parse_device(device):\n\n # Elements are either direct children or nested under a DeviceDetails element\n details = device.find(\"DeviceDetails\")\n if details is None:\n details = device\n\n # Convert tags to property names\n attributes = {inflection.underscore(x.tag):x.text for x in details}\n # Convert last_contact to datetime\n if 'last_contact' in attributes:\n attributes['last_contact'] = _date_from_hex(attributes['last_contact'])\n\n components = [parse_component(comp) for comp in device.findall(\"Components/Component\")]\n return Device(**attributes, components=components)",
"def parse_device(self) -> None:\n if self.device or self.skip_device_detection:\n return\n\n for Parser in self.DEVICE_PARSERS:\n parser = Parser(\n self.user_agent,\n self.ua_hash,\n self.ua_spaceless,\n self.VERSION_TRUNCATION,\n ).parse()\n if parser.ua_data:\n self.device = parser\n self.all_details['device'] = parser.ua_data\n if self.all_details['device'] != 'desktop' and DESKTOP_FRAGMENT.search(\n self.user_agent) is not None:\n self.all_details['device']['device'] = 'desktop'\n return",
"def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._config[CONF_SERIAL])},\n \"name\": self._config[CONF_NAME],\n \"manufacturer\": \"Bosch\",\n }",
"def phone_parse(neighbor):\n mgmt_ip = neighbor[mgmt_ip_s]\n hostname = neighbor[hostname_s].split('.')[0]\n if nxos:\n sysname = neighbor['sysname']\n if sysname != '':\n hostname = sysname\n if mgmt_ip == '':\n mgmt_ip = neighbor['interface_ip']\n l_intf = neighbor['local_port']\n intf = re.findall(r'.{2}', l_intf)[0] + re.findall(r'\\d.+', l_intf)[0]\n macreg = re.findall(r'.{4}', hostname.replace('SEP', ''))\n mac_address = f'{macreg[0]}.{macreg[1]}.{macreg[2]}'.lower()\n voice_vlan = 'None'\n software_version = neighbor[version_s].replace('.loads', '')\n platform = neighbor['platform']\n for switchport in switchports:\n if switchport['interface'] == intf:\n for mac_addr in mac_addrs:\n if mac_addr['vlan'] == switchport['voice_vlan']:\n voice_vlan = mac_addr['vlan']\n break\n break\n if platform.__contains__('Cisco IP Phone'):\n platform = neighbor['platform'].replace('Cisco IP Phone ', '')\n else:\n platform = neighbor['platform']\n phone = {\n 'hostname': hostname,\n 'neighbor': {\n 'hostname': session.hostname,\n 'ip_address': session.ip_address,\n 'remote_intf': l_intf\n },\n 'ip_address': mgmt_ip,\n 'mac_addr': mac_address,\n 'voice_vlan': voice_vlan,\n 'software_version': software_version,\n 'model': platform\n }\n self.phones.append(phone)",
"def devices():\n\n ret = {}\n\n p = subprocess.Popen([\"lsusb\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out = p.stdout.read()\n err = p.stderr.read()\n\n if err:\n raise salt.exceptions.CommandExecutionError(\"Failed to run lsusb: {}\".format(err))\n\n # Ensure pattern is compiled\n global pattern\n if not pattern:\n log.info(\"Compiling regex pattern {}\".format(LSUSB_OUTPUT_REGEX))\n pattern = re.compile(LSUSB_OUTPUT_REGEX)\n\n # Parse output\n devices = []\n for dev_line in out.split(\"\\n\"):\n if dev_line == \"\":\n # empty line, skip\n continue\n\n match = pattern.match(dev_line)\n if not match:\n log.warning(\"Couldn't match line {}\".format(dev_line))\n continue\n\n devices.append({\n \"bus\": match.group(\"bus\"),\n \"device\": match.group(\"device\"),\n \"vendor\": match.group(\"vendor\"),\n \"product\": match.group(\"product\"),\n \"name\": match.group(\"name\"),\n })\n\n ret[\"values\"] = devices\n return ret",
"def expand_phone_details(self, phone_details):\n summary = {}\n result = {}\n for item in phone_details:\n key = item['data']['systemName'] + ': ' + item['data']['systemVersion']\n summary[key] = summary.get(key, 0) + 1\n result[item['_id']] = key\n return summary, result",
"def get_device_info(platform_path: str):\n device_name = os.path.basename(platform_path)\n try:\n platform_file = next(\n glob.iglob(os.path.join(glob.escape(platform_path), 'hw', f'*.[xd]sa')))\n except StopIteration as e:\n raise ValueError('cannot find platform file for %s' % device_name) from e\n with zipfile.ZipFile(platform_file) as platform:\n # platform_file must end with .xsa or .dsa, thus [:-4]\n with platform.open(os.path.basename(platform_file)[:-4] +\n '.hpfm') as metadata:\n platform_info = ET.parse(metadata).find('./xd:component/xd:platformInfo',\n XILINX_XML_NS)\n if platform_info is None:\n raise ValueError('cannot parse platform')\n clock_period = platform_info.find(\n \"./xd:systemClocks/xd:clock/[@xd:id='0']\", XILINX_XML_NS)\n if clock_period is None:\n raise ValueError('cannot find clock period in platform')\n part_num = platform_info.find('xd:deviceInfo', XILINX_XML_NS)\n if part_num is None:\n raise ValueError('cannot find part number in platform')\n return {\n 'clock_period':\n clock_period.attrib['{{{xd}}}period'.format(**XILINX_XML_NS)],\n 'part_num':\n part_num.attrib['{{{xd}}}name'.format(**XILINX_XML_NS)]\n }",
"def show_device_information_long(self):\n\n for device in self._devices:\n print(\"\")\n if device['Device Type'].startswith(\"enclosu\"):\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n else:\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('Linux Device Name'):\n print(\"{0:>32}: {1}\".format(\"Linux Device Name\", device['Linux Device Name']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('Drive Capacity'):\n print(\"{0:>32}: {1}\".format(\"Drive Capacity\", device['Drive Capacity']))\n if device.get('Block Length'):\n print(\"{0:>32}: {1}\".format(\"Block Length\", device['Block Length']))\n if device.get('Power On Hours'):\n print(\"{0:>32}: {1}\".format(\"Power On Hours\", device['Power On Hours']))\n if device.get('Current Temperature'):\n print(\"{0:>32}: {1}\".format(\"Current Temperature\", device['Current Temperature']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n if device.get('Enclosure Device'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Device\", device['Enclosure Device']))\n if device.get('Enclosure Slot'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Slot\", device['Enclosure Slot']))\n if device.get('Slot Description'):\n print(\"{0:>32}: {1}\".format(\"Slot Desciption\", device['Slot Description']))\n\n if len(self._devices):\n print(\"\")",
"def parse_dmidecode(output):\n \n dict1 = parseSplit(output,delimit = \":\",add_underscore = 1)\n \n manufacturer = dict1['manufacturer']\n model = dict1['product_name']\n return manufacturer,model\n pass",
"def get_device_details(device):\n ret = device.wait_for_output(\"SetupQRCode\")\n if ret is None or len(ret) < 2:\n return None\n\n qr_code = re.sub(\n r\"[\\[\\]]\", \"\", ret[-1].partition(\"SetupQRCode:\")[2]).strip()\n try:\n device_details = dict(SetupPayload().ParseQrCode(\n \"VP:vendorpayload%{}\".format(qr_code)).attributes)\n except exceptions.ChipStackError as ex:\n log.error(ex.msg)\n return None\n\n return device_details",
"def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self.config_entry.entry_id)},\n \"name\": NAME,\n \"model\": VERSION,\n \"manufacturer\": NAME,\n }",
"def test_lsusb_cdc_mbim(self):\n self.assertEqual(jc.parsers.lsusb.parse(self.generic_lsusb_cdc_mbim, quiet=True), self.generic_lsusb_cdc_mbim_json)",
"def parse_data(self, fileinput):\n with open(fileinput, 'rb') as fh:\n for line in fh:\n try:\n name, address, phone = line.strip().split(\",\")\n self.data.append(Document(name, address, phone))\n except Exception, ex:\n raise SerialException(\": Failed to parse input line %s: %s\" % (line, ex))\n return",
"def getfundamentals(self, results):\n self.log(\"Retrieving fundamental phone information\")\n self.log(\"Phone serial number\")\n results['uniqueserial']=sha.new(self.get_esn()).hexdigest()\n results['groups']=self.get_groups()\n results['ringtone-index']=self.get_ringtone_index()\n results['wallpaper-index']=self.get_wallpaper_index()\n return results",
"def MakeDeviceDescriptionsDict():\r\n\r\n DeviceDescriptionsDict = {}\r\n\r\n #TEST = 1\r\n TEST = 0\r\n\r\n rootdir = os.getcwd()\r\n\r\n # special fiddle for when being run from the Renpy launcher\r\n if string.find(rootdir, \"renpy-6.99.14.3-sdk\") > -1:\r\n os.chdir(\"E:\\Projects\\NEW RENPY GAME\\game\")\r\n rootdir = os.getcwd()\r\n\r\n if TEST == 1:\r\n badgesdir = os.path.join(rootdir, \"game\", \"images\", \"badges\")\r\n else:\r\n badgesdir = os.path.join(rootdir, \"images\", \"badges\")\r\n\r\n devicesdir = os.path.join(badgesdir, \"Devices\")\r\n\r\n if os.path.isfile(os.path.join(devicesdir, \"_descriptions.csv\")):\r\n #print \"FOUND _descriptions.csv in\", devicesdir\r\n descriptions = open(os.path.join(devicesdir, \"_descriptions.csv\")).readlines()\r\n for d in descriptions:\r\n if string.find(d, \",\") > -1:\r\n fn, sing, plural = string.split(d, \",\")\r\n DeviceDescriptionsDict[fn] = [string.strip(sing), string.strip(plural)]\r\n return DeviceDescriptionsDict",
"async def device_info(request):\n textx = await request.get_reply_message()\n codename = request.pattern_match.group(1)\n if codename:\n pass\n elif textx:\n codename = textx.text\n else:\n await edit_or_reply(request, \"`Usage: .device <codename> / <model>`\")\n return\n data = json.loads(\n get(\n \"https://raw.githubusercontent.com/androidtrackers/\"\n \"certified-android-devices/master/by_device.json\"\n ).text\n )\n results = data.get(codename)\n if results:\n reply = f\"**Search results for {codename}**:\\n\\n\"\n for item in results:\n reply += (\n f\"**Brand**: `{item['brand']}`\\n\"\n f\"**Name**: `{item['name']}`\\n\"\n f\"**Model**: `{item['model']}`\\n\\n\"\n )\n else:\n reply = f\"`Couldn't find info about {codename}!`\\n\"\n await edit_or_reply(request, reply)",
"def COM(fp):\n length = unpack('>H', fp.read(2))[0]\n comment = unpack('{}s'.format(length - 2), fp.read(length - 2))[0]\n\n info = {\n 'Lc' : length,\n 'Cm' : comment\n }\n\n return info",
"def parse_device_list(device_root):\n devices = [parse_device(device) for device in device_root.findall(\"Device\")]\n return devices",
"def device_info(self) -> Dict[str, Any]:\n return {\n 'name': 'Boiler Module',\n 'manufacturer': 'Eneco',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'boiler_module'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id),\n }"
]
| [
"0.5697088",
"0.53372353",
"0.5237132",
"0.52135444",
"0.5204209",
"0.5193696",
"0.5180402",
"0.51639044",
"0.5127449",
"0.5107768",
"0.5095245",
"0.5094951",
"0.50657725",
"0.5043901",
"0.50421095",
"0.49992374",
"0.4995149",
"0.49897113",
"0.49766734",
"0.49737394",
"0.49620453",
"0.49617663",
"0.49570015",
"0.49516833",
"0.49349612",
"0.49301383",
"0.49131998",
"0.49036744",
"0.4900493",
"0.48977387"
]
| 0.7282722 | 0 |
Return the next index for a read client. This function implements the default behavior for picking the next read client in a master/slave setup. Override this function if you want a specific behavior. | def get_next_client_index(self, write=True):
if write or len(self._server) == 1:
return 0
return random.randint(1, len(self._server) - 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __next_index():\n return redis_store.incr(String.__name__.lower() + '-index')",
"def get_client(self, write=True):\r\n index = self.get_next_client_index(write=write)\r\n\r\n if self._clients[index] is None:\r\n self._clients[index] = self.connect(index)\r\n\r\n return self._clients[index]",
"def _get_next_correlation_index(self, index: int) -> int:\n pass",
"def getClusterVmNextId(self):\n data = self.connect('get','cluster/nextid',None)\n return data",
"def GetNext(self):\n if self.ids:\n return self.ids.pop()\n self.next_idx += 1\n return self.next_idx",
"def get_client_index(self, client=None):\n if type(client) is Client:\n for i, c in enumerate(self.client_list):\n if client.client_id == c.client_id:\n return i\n\n return False",
"def _next_index(self):\n # Cache a string of random numbers to speed things up\n if not self.rnd_pool_:\n self.rnd_pool_ = self.rnd.randint(0, self.input_size - 1, self.batch_size * 10).tolist()\n\n return self.rnd_pool_.pop()",
"def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]",
"def next(self) -> int:\n self.idx += 1\n return self.m[self.idx]",
"def getNextIndex(self, indexName):\n self._db._c.execute(\"SELECT {} FROM {}\".format(indexName,\n self.tableName))\n IDs = self._db._c.fetchall()\n\n if len(IDs) == 0:\n return '1'\n\n # Generate a number one larger than the largest current ID.\n newID = 0\n for ID in IDs:\n newID = max(newID, int(ID[0]))\n newID += 1\n\n # Return that.\n return str(newID)",
"def read_index(self, index):\n current = self.head\n if index == 0:\n return current.data\n elif index >= self.size() :\n return None\n else:\n position = 0\n while position < index:\n current = current.next_node\n position += 1\n return current.data",
"def index_client(indexd_client):\n return indexd_client",
"def getNextIndex (self, gameState, currIndex):\n nextIndex = currIndex + 1\n if (nextIndex >= gameState.getNumAgents()):\n nextIndex = 0\n return nextIndex",
"def next_index(state):\n node = state\n for key in (\"layers\", \"index\"):\n node = node.get(key, {})\n indices = [key for key in node.keys()]\n if len(indices) == 0:\n return 0\n else:\n return max(indices) + 1",
"def get_next_index(self, current_index_string):\n # current index is a string, so cast to int\n current_index = int(current_index_string)\n\n return current_index+1",
"def next(self) -> int:\n value = self.inorder[self.index]\n self.index = self.index + 1\n return value",
"def get_index(self):\n\t\treturn call_sdk_function('PrlBootDev_GetIndex', self.handle)",
"def next(self) -> int:\n self.index += 1\n return self.nodes_sorted[self.index]",
"def get(self, index: int) -> int:\n if index + 1 >self.cnt:\n return -1\n\n tmp = self.dummy\n for i in range(index+1):\n tmp = tmp.next\n return tmp.val",
"def get_next_command( self, ):\n self.ix_command += 1\n if self.ix_command >= len( self.command_list ):\n ret = None\n else:\n ret = self.command_list[ self.ix_command ]\n# print( f\"command = { self.ix_command} {ret} \", flush = True )\n return ret",
"def get_next(self) -> int:\n cur_next = self._bin_iter.get_next()\n\n return self._intvs.get_next(cur_next, self.even)",
"def get_next(self):\n try:\n return self.the_input[self.index]\n except IndexError:\n return None",
"def next(self) -> int:\n self.pointer += 1\n return self.ordered_nodes[self.pointer-1]",
"def get_next_id(self):\n con = self.c._connect()\n last_id = self.c.get_last_id(con.cursor())\n con.close()\n return last_id + 1",
"def get_next_id():\n global _lock, _counter\n with _lock:\n if _counter == 65535:\n _counter = 1\n else:\n _counter += 1\n\n return str(_counter)",
"def _get_next_offset(self):\n return self.__offset",
"def get_resource_index(self):\n result = -1\n max_sleep_time = self.time_window\n with self._lock:\n while result == -1:\n for i in range(0, self.num_keys):\n curr_sleep_time = max((self.timers[i][0] + self.time_window) - time.time(), 0)\n\n max_sleep_time = min(max_sleep_time, curr_sleep_time)\n\n if self.timers[i][1] >= self.window_limit and self.timers[i][0] + self.time_window < time.time():\n self.timers[i][0] = 0\n self.timers[i][1] = 0\n\n if self.timers[i][1] < self.window_limit:\n result = i\n break\n\n if result == -1: # case when all streams are rate limited\n # logging.warning('sleeping for %d seconds.' % max_sleep_time)\n # time.sleep(max_sleep_time)\n return -1 * max_sleep_time\n\n if self.timers[result][0] == 0:\n self.timers[result][0] = time.time()\n\n self.timers[result][1] += 1\n\n return result",
"def get_sequence_index(self):\n\t\treturn call_sdk_function('PrlBootDev_GetSequenceIndex', self.handle)",
"def current_index(self):\n job = self.client.query(\"SELECT MAX(ID) FROM {}.{};\".format(self.database_name, self.table_name))\n for row in job.result():\n if row[0] == None:\n return 1\n current_index = row[0] + 1\n return current_index",
"def get_conn(self):\n nn_connections = self.get_connections(self.webhdfs_conn_id)\n for nn in nn_connections:\n try:\n logging.debug('Trying namenode {}'.format(nn.host))\n connection_str = 'http://{nn.host}:{nn.port}'.format(nn=nn)\n if _kerberos_security_mode:\n client = KerberosClient(connection_str)\n else:\n proxy_user = self.proxy_user or nn.login\n client = InsecureClient(connection_str, user=proxy_user)\n client.status('/')\n logging.debug('Using namenode {} for hook'.format(nn.host))\n return client\n except HdfsError as e:\n logging.debug(\"Read operation on namenode {nn.host} failed with\"\n \" error: {e.message}\".format(**locals()))\n nn_hosts = [c.host for c in nn_connections]\n no_nn_error = \"Read operations failed on the namenodes below:\\n{}\".format(\"\\n\".join(nn_hosts))\n raise Exception(no_nn_error)"
]
| [
"0.6373826",
"0.60432035",
"0.57846725",
"0.57057947",
"0.56468946",
"0.5631014",
"0.5614183",
"0.56074053",
"0.56074053",
"0.5567467",
"0.5533405",
"0.5528173",
"0.5506794",
"0.55002165",
"0.5483921",
"0.54825854",
"0.5479725",
"0.54147923",
"0.5399875",
"0.53776383",
"0.5344388",
"0.53341645",
"0.5309027",
"0.52849424",
"0.5281459",
"0.5268795",
"0.5251142",
"0.525106",
"0.5240141",
"0.5236951"
]
| 0.72630644 | 0 |
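The query/document pair above describes selecting a client index: writes always go to index 0 (the master), reads to a random replica. A minimal standalone sketch of that idea follows; the class name and the server list are made up for illustration and are not part of the original backend.

import random

class IllustrativeClientSelector:
    """Toy selector: index 0 is the master, the rest are read replicas."""

    def __init__(self, servers):
        self._servers = list(servers)

    def get_next_client_index(self, write=True):
        # Writes (or a single-server setup) always go to the master at index 0.
        if write or len(self._servers) == 1:
            return 0
        # Reads are spread randomly across the replicas (indices 1..n-1).
        return random.randint(1, len(self._servers) - 1)

selector = IllustrativeClientSelector(["master:6379:0", "replica1:6379:0", "replica2:6379:0"])
print(selector.get_next_client_index(write=True))   # always 0
print(selector.get_next_client_index(write=False))  # 1 or 2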
Method that parses a connection string. | def parse_connection_string(self, constring):
try:
host, port, db = constring.split(":")
port = port if host == "unix" else int(port)
db = int(db)
return host, port, db
except (ValueError, TypeError):
raise ImproperlyConfigured("Incorrect format '%s'" % (constring)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_connection_string(self, connection_string):\n self.host = '127.0.0.1'\n self.port = 3306\n self.db = None\n self.user = None\n self.pwd = None\n for part in connection_string.split(';'):\n part = part.strip()\n if part != '':\n k, v = part.split('=')\n k = k.lower()\n if k == 'server':\n self.host = v.strip()\n elif k == 'port':\n self.port = int(v.strip())\n elif k == 'database':\n self.db = v.strip()\n elif k == 'uid':\n self.user = v.strip()\n elif k == 'pwd':\n self.pwd = v.strip()",
"def parseConnection(str_in):\n m = re.match(r\"(.+):(in|out)\", str_in)\n return {'name': m.group(1), 'type': m.group(2)}",
"def get_connection_string_elements(connection_string):\n connectionstring = namedtuple(\"ConnectionString\", [\"url\", \"username\", \"password\"])\n\n d = dict(item.split(\"=\") for item in connection_string.split(\";\"))\n\n cs = connectionstring(d[\"url\"], d[\"username\"], d[\"password\"])\n \n return cs",
"def parse_connection(conn_obj):\n return dict(\n user=conn_obj.login,\n password=conn_obj.password,\n host=conn_obj.host,\n dbname=conn_obj.schema,\n port=conn_obj.port,\n )",
"def _parse_connection_uri(uri):\n settings = {\"schema\": \"\"}\n\n match = _URI_SCHEME_RE.match(uri)\n scheme, uri = match.groups() if match else (\"mysqlx\", uri)\n\n if scheme not in (\"mysqlx\", \"mysqlx+srv\"):\n raise InterfaceError(f\"Scheme '{scheme}' is not valid\")\n\n if scheme == \"mysqlx+srv\":\n settings[\"dns-srv\"] = True\n\n userinfo, tmp = uri.partition(\"@\")[::2]\n host, query_str = tmp.partition(\"?\")[::2]\n\n pos = host.rfind(\"/\")\n if host[pos:].find(\")\") == -1 and pos > 0:\n host, settings[\"schema\"] = host.rsplit(\"/\", 1)\n host = host.strip(\"()\")\n\n if not host or not userinfo or \":\" not in userinfo:\n raise InterfaceError(f\"Malformed URI '{uri}'\")\n user, password = userinfo.split(\":\", 1)\n settings[\"user\"], settings[\"password\"] = unquote(user), unquote(password)\n\n if host.startswith((\"/\", \"..\", \".\")):\n settings[\"socket\"] = unquote(host)\n elif host.startswith(\"\\\\.\"):\n raise InterfaceError(\"Windows Pipe is not supported\")\n else:\n settings.update(_parse_address_list(host))\n\n invalid_options = (\"user\", \"password\", \"dns-srv\")\n for key, val in parse_qsl(query_str, True):\n opt = key.replace(\"_\", \"-\").lower()\n if opt in invalid_options:\n raise InterfaceError(f\"Invalid option: '{key}'\")\n if opt in _SSL_OPTS:\n settings[opt] = unquote(val.strip(\"()\"))\n else:\n val_str = val.lower()\n if val_str in (\"1\", \"true\"):\n settings[opt] = True\n elif val_str in (\"0\", \"false\"):\n settings[opt] = False\n else:\n settings[opt] = val_str\n return settings",
"def _parse(self, string):\n modern_scheme = r\"\"\"\nssh://\n(?:\n (?P<user>[^@]+)\n@)? # user is anything but @, then the @ separator\n(?P<host>[^:/]+) # host is anything but : and /\n(:(?P<port>\\d+))? # optional port\n(/(?P<remote_dir>.*))? # optional remote directory\n\"\"\"\n match = re.match(modern_scheme, string, re.VERBOSE)\n if match:\n self._handle_match(match)\n else:\n old_scheme = \"\"\"\n(?P<user>[^@]+) # user is anything but @, and optional\n@ # mandatory @ separator\n(?P<host>[^:/]+) # host is anything but : and /\n(\n (:|/)? # directory separator is either : or /\n (?P<remote_dir>.*))? # remote directory is optional\n \"\"\"\n match = re.match(old_scheme, string, re.VERBOSE)\n if match:\n self._handle_match(match)\n else:\n raise URLParseError(\"\"\" \\\nCould not parse %s as a valid url.\nSupported schemes are\n\n user@host:directory\n\n ssh://user@host:port/directory\n\"\"\" % self.as_string)",
"def from_string(spec):\n assert isinstance(spec, str)\n\n # Try match\n match = Connection.REGEX.fullmatch(spec)\n assert match is not None, spec\n\n # Extract data\n return Connection(\n driver=match.group(\"driver\"),\n port=match.group(\"port\"),\n pin=int(match.group(\"pin\")),\n interconnect=match.group(\"interconnect\"),\n )",
"def from_connection_string(cls, conn_str, eventhub=None, **kwargs):\n address, policy, key, entity = _parse_conn_str(conn_str)\n entity = eventhub or entity\n address = _build_uri(address, entity)\n return cls(address, username=policy, password=key, **kwargs)",
"def parseString(self, s):\n pass",
"def parse(cls, s):\n raise NotImplementedError",
"def connect(con_str):\r\n try:\r\n connection = psycopg2.connect(**con_str)\r\n return connection\r\n except Exception as conn_err:\r\n print(conn_err)\r\n print('Unable to connect to database. Aborting')",
"def from_connection_string(cls, conn_str, event_hub_path=None, **kwargs):\n is_iot_conn_str = conn_str.lstrip().lower().startswith(\"hostname\")\n if not is_iot_conn_str:\n address, policy, key, entity = _parse_conn_str(conn_str)\n entity = event_hub_path or entity\n left_slash_pos = address.find(\"//\")\n if left_slash_pos != -1:\n host = address[left_slash_pos + 2:]\n else:\n host = address\n return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs)\n else:\n return cls._from_iothub_connection_string(conn_str, **kwargs)",
"def parse_url(url):\n # Expected URL format string (for error messages)\n # http://www.iana.org/assignments/uri-schemes/prov/redis\n expected = ('<schema>://(:password)@<host>:<port>/(db) (exclude db number '\n 'for cluster mode)')\n\n # Make sure we can parse the key bits of the URL\n try:\n schema = re.search('^(.*)://', url).group(1)\n host = re.search('://(:.*@)*(.*):', url).group(2)\n port = re.search('://(:.*@)*.*:(.*)/', url).group(2)\n except Exception:\n raise argparse.ArgumentTypeError(f'URL format: {expected}')\n\n # Toggle SSL if we have a secure schema\n ssl = (schema == 'rediss')\n\n # Parse the database number from the connection string\n db = re.search(r':.*/(\\d+$)', url)\n if db is None:\n Logger().info(f'Using cluster mode for {host}')\n else:\n db = db.group(1)\n\n # Parse the password from the connection string\n password = re.search('://:(.*)@', url)\n if password is None:\n Logger().info(f'No password set for {host}')\n else:\n password = password.group(1)\n\n return {'ssl': ssl,\n 'password': password,\n 'host': host,\n 'port': port,\n 'db': db}",
"def parse_db_url(db_url):\n u = urlparse(db_url)\n db = {}\n db[\"database\"] = u.path[1:]\n db[\"user\"] = u.username\n db[\"password\"] = u.password\n db[\"host\"] = u.hostname\n db[\"port\"] = u.port\n return db",
"def get_connection(conf):\n config = configparser.ConfigParser()\n with open(conf, 'r') as configfile:\n config.read_file(configfile)\n connection = config['Database']['Connection_String']\n\n return connection",
"def parse_configuration_string(config_string):\n try:\n parser = parser_bnf()\n result = parser.parseString(config_string, parseAll=True)\n except (ParseException, ParseSyntaxException) as e:\n print(\"ERROR: {m}\".format(m=str(e)))\n sys.exit(1)\n return result",
"def parseString(self, s):\n return self.parser.parseString(s)",
"def subverParseClient(s):\n return s[1:].split(\":\")[0]",
"def parse(s):\n return s",
"def parse_string(self, in_str):\n match = MAIN_REGEX.search(in_str)\n if not match:\n err_str = \"Unable to parse string: %s\" % in_str\n raise ValueError(err_str)\n self.parse_completed(match.group(1))\n self.parse_priority(match.group(2))\n if match.group(3) and match.group(4):\n self.parse_completion_date(match.group(3))\n self.parse_creation_date(match.group(4))\n else:\n self.parse_creation_date(match.group(3))\n self.parse_description(match.group(5))",
"def func_PARSE(self):\n self.parsed_url = parse.urlparse(\"http://{0}:{1}{2}\".format(args.HTTP_HOST, args.HTTP_PORT, self.path).lower())\n self.parsed_param = parse.parse_qs(self.parsed_url[4])",
"def parse_string(self, data):\n pass",
"def fromstring(cls, string: str) -> 'Config':\n parser: configparser.ConfigParser = configparser.ConfigParser()\n parser.read_dict(dict(wpwatcher=Config.DEFAULT_CONFIG))\n parser.read_string(string)\n return cls.fromparser(parser)",
"def parse(self, uriString):\n \n uriString = uriString.strip()\n \n # empty URI\n if uriString == \"\" or uriString.lower() == \"null\":\n return URI.EMPTY()\n \n # (known) scheme only, nothing else\n if uriString.lower() in self.schemePortMappings:\n return URI(uriString.lower(), \"\", None, None, None)\n elif uriString[-1:] == \":\" and uriString[:-1].lower() in self.schemePortMappings:\n return URI(uriString[:-1].lower(), \"\", None, None, None)\n elif uriString[-3:] == \"://\" and uriString[:-3].lower() in self.schemePortMappings:\n return URI(uriString[:-3].lower(), \"\", None, None, None)\n \n res = urlparse.urlparse(uriString, allow_fragments=True)\n scheme = res.scheme.lower()\n netloc = res.netloc\n path = res.path\n query = res.query\n \n # data scheme: use data as host\n if scheme in defaults.schemesWithNoDoubleSlash:\n return URI(scheme, path, None, None, None)\n \n # urlparse behaves strangely when no scheme is present, so add http and try again\n if scheme == \"\" and netloc == \"\": # example: \"www.seclab.nu\"\n res = urlparse.urlparse(\"http://\" + uriString, allow_fragments=True)\n scheme = None\n netloc = res.netloc\n path = res.path\n query = res.query\n elif scheme != \"\" and netloc == \"\": # example: www.seclab.nu:80/path\n res = urlparse.urlparse(\"http://\" + uriString, allow_fragments=True)\n scheme = None\n netloc = res.netloc\n path = res.path\n query = res.query\n \n # split netloc part into host/port and remove user:pwd part\n netlocMatch = self._getRE().match(netloc)\n if netlocMatch is None:\n return URI.INVALID() # probably using an unsupported symbol\n host = netlocMatch.group(\"host\").lower()\n port = netlocMatch.group(\"port\") # number or None\n if port is not None and self._convertPortToInt:\n port = int(port)\n \n # optionally add scheme if missing\n if self.addScheme and scheme is None:\n if port is not None and port in self.portSchemeMappings:\n scheme = self.portSchemeMappings[port]\n else:\n scheme = self.defaultScheme\n \n # optionally add port if missing\n if self.addPort and port is None:\n if scheme is not None and scheme in self.schemePortMappings:\n port = self.schemePortMappings[scheme]\n else:\n port = self.defaultPort\n \n if self.decodeEscapedCharacters:\n # urllib.unquote cannot handle unicode strings\n path = urllib.unquote(path.encode(\"ascii\")).decode(\"utf8\")\n if path == \"\":\n path = None\n \n if query == \"\":\n query = None\n \n return URI(scheme, host, port, path, query)",
"def read_dsn(\n section: str, dsn: str = \"\",\n):\n check_types([(\"dsn\", dsn, [str],), (\"section\", section, [str],)])\n confparser = ConfigParser()\n confparser.optionxform = str\n if not dsn:\n dsn = os.environ[\"ODBCINI\"]\n confparser.read(dsn)\n if confparser.has_section(section):\n options = confparser.items(section)\n conn_info = {\"port\": 5433, \"user\": \"dbadmin\"}\n for elem in options:\n if elem[0].lower() in (\"servername\", \"server\"):\n conn_info[\"host\"] = elem[1]\n elif elem[0].lower() == \"uid\":\n conn_info[\"user\"] = elem[1]\n elif elem[0].lower() == \"port\":\n try:\n conn_info[\"port\"] = int(elem[1])\n except:\n conn_info[\"port\"] = elem[1]\n elif elem[0].lower() == \"pwd\":\n conn_info[\"password\"] = elem[1]\n elif elem[0].lower() == \"kerberosservicename\":\n conn_info[\"kerberos_service_name\"] = elem[1]\n elif elem[0].lower() == \"kerberoshostname\":\n conn_info[\"kerberos_host_name\"] = elem[1]\n elif \"vp_test_\" in elem[0].lower():\n conn_info[elem[0].lower()[8:]] = elem[1]\n else:\n conn_info[elem[0].lower()] = elem[1]\n return conn_info\n else:\n raise NameError(\"The DSN Section '{}' doesn't exist.\".format(section))",
"def __init__(\n self,\n connect_string,\n prefix=\"chattymarkov\",\n separator=\"\\x01\",\n stop_word=\"\\x02\",\n ):\n self.db = database.build_database_connection(connect_string)\n self.separator = separator\n self.stop_word = stop_word\n self.prefix = prefix",
"def __init__(\n self,\n connect_string,\n prefix=\"chattymarkov\",\n separator=\"\\x01\",\n stop_word=\"\\x02\",\n ):\n self.db = database.build_database_connection(connect_string, True)\n self.separator = separator\n self.stop_word = stop_word\n self.prefix = prefix",
"def readopts(self):\n parser = OptionParser()\n parser.add_option(\"--dbname\", action=\"store\", type=\"string\", dest=\"dbname\", default=None)\n\n parser.add_option(\"--user\",\n action=\"store\",\n type=\"string\",\n dest=\"user\",\n default=None)\n\n parser.add_option(\"--password\",\n action=\"store\",\n type=\"string\",\n dest=\"password\",\n default=None)\n\n parser.add_option(\"--host\",\n action=\"store\",\n type=\"string\",\n dest=\"host\",\n default=None)\n\n parser.add_option(\"--port\",\n action=\"store\",\n type=\"string\",\n dest=\"port\",\n default=None)\n\n (options, args) = parser.parse_args()\n\n if options.dbname is None:\n print \"dbname is mandatory\"\n exit(1)\n\n conf = \"dbname=%s\" % options.dbname\n for parm in ['user', 'password', 'host', 'port']:\n if options.__dict__[parm] is not None:\n conf = \"%s %s=%s\" % (conf, parm, options.__dict__[parm])\n return conf",
"def parse(self, string):\r\n # Tidy up our line\r\n string = self._check_line_is_good(string)\r\n \r\n # Break up into origin, token and body\r\n high_level_parts = string.split(None, 2)\r\n origin = parse_numeric(high_level_parts[0], self._maxclientnum)\r\n command = high_level_parts[1]\r\n if not command.isupper() and not command.isdigit():\r\n raise ProtocolError('Command not in uppercase', string)\r\n if len(high_level_parts) > 2:\r\n params = self._parse_params(high_level_parts[2])\r\n else:\r\n params = []\r\n \r\n # If this is an invalid command, pass it upwards\r\n try:\r\n self._pass_to_handler(origin, command, params)\r\n except ParseError, error:\r\n raise ParseError(error.value, string)",
"def from_connection_string(\n cls, conn_str, # type: str\n share=None, # type: Optional[Union[str, ShareProperties]]\n file_path=None, # type: Optional[str]\n snapshot=None, # type: Optional[Union[str, Dict[str, Any]]]\n credential=None, # type: Optional[Any]\n **kwargs # type: Any\n ):\n # type: (...) -> FileClient\n account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')\n if 'secondary_hostname' not in kwargs:\n kwargs['secondary_hostname'] = secondary\n return cls(\n account_url, share=share, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs)"
]
| [
"0.8401574",
"0.6616161",
"0.6538914",
"0.6453745",
"0.6397597",
"0.6344282",
"0.6208351",
"0.6030974",
"0.59864354",
"0.57071424",
"0.56045955",
"0.5569482",
"0.5531569",
"0.5472697",
"0.5466513",
"0.5460773",
"0.54534364",
"0.5451783",
"0.5448955",
"0.5406482",
"0.5380559",
"0.5358296",
"0.53455776",
"0.5316474",
"0.53103757",
"0.52889407",
"0.5286538",
"0.52306753",
"0.52228063",
"0.5207004"
]
| 0.7921391 | 1 |
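To make the 'host:port:db' convention in the record above concrete, here is a small standalone parser. It is a sketch rather than the backend's own method: the unix-socket special case mirrors the code shown, but the error type is a plain ValueError instead of Django's ImproperlyConfigured.

def parse_connection_string(constring):
    """Parse 'host:port:db' (or 'unix:/path/to/sock:db') into a tuple."""
    try:
        host, port, db = constring.split(":")
        # For unix sockets the middle field is a filesystem path, not a number.
        port = port if host == "unix" else int(port)
        return host, port, int(db)
    except (ValueError, TypeError):
        raise ValueError("Incorrect format '%s'" % constring)

print(parse_connection_string("localhost:6379:1"))        # ('localhost', 6379, 1)
print(parse_connection_string("unix:/tmp/redis.sock:0"))  # ('unix', '/tmp/redis.sock', 0)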
Given a connection index, returns a new raw redis client/connection instance. The index is used in master/slave setups and indicates which connection string should be used. In normal setups, the index is 0. | def connect(self, index=0):
host, port, db = self.parse_connection_string(self._server[index])
return self.connection_factory.connect(host, port, db) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect(self, index=0, write=True):\r\n master_name, sentinel_hosts, db = self.parse_connection_string(self._connection_string)\r\n\r\n sentinel_timeout = self._options.get('SENTINEL_TIMEOUT', 1)\r\n sentinel = Sentinel(sentinel_hosts, socket_timeout=sentinel_timeout)\r\n\r\n if write:\r\n host, port = sentinel.discover_master(master_name)\r\n else:\r\n host, port = random.choice([sentinel.discover_master(master_name)] + sentinel.discover_slaves(master_name))\r\n\r\n return self.connection_factory.connect(host, port, db)",
"def get_client(conn):\n # No database indicates a cluster connection\n if not conn.get('db', None):\n conn.pop('db', None)\n return connect_redis_cluster(conn)\n\n # Otherwise it's a regular redis connection\n return connect_redis(conn)",
"def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))",
"def conn(self):\n if self._sentinel:\n return self._sentinel.master_for(self._sentinel_name)\n if not self._conn:\n self._conn = self.__redis_mod.StrictRedis(\n host=self._host, port=self._port, **self._conn_kwargs\n )\n return self._conn",
"def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)",
"def create_connection():\n # REDIS_URL is defined in .env and loaded into the environment by Honcho\n redis_url = os.getenv('REDIS_URL')\n # If it's not defined, use the Redis default\n if not redis_url:\n redis_url = 'redis://localhost:6379'\n urlparse.uses_netloc.append('redis')\n url = urlparse.urlparse(redis_url)\n return redis.StrictRedis(\n host=url.hostname,\n port=url.port,\n db=0,\n password=url.password\n )",
"def get_rethink_connection(config):\n\n\trethink_conn = r.connect(\n\t\thost=config.get(\"RETHINKDB\", \"RETHINK_HOST\"),\n\t\tport=config.get(\"RETHINKDB\", \"RETHINK_PORT\"),\n\t\tdb=config.get(\"RETHINKDB\", \"RETHINK_DB\"),\n\t\tuser=config.get(\"RETHINKDB\", \"RETHINK_USER\"),\n\t\tpassword=config.get(\"RETHINKDB\", \"RETHINK_PASSWORD\"),\n\t\ttimeout=int(config.get(\"RETHINKDB\", \"RETHINK_TIMEOUT\")),\n\t)\n\treturn rethink_conn",
"def get_scan_by_index(self, index):\n scan_number = int(index) + 1\n try:\n return self._scan_cache[scan_number]\n except KeyError:\n package = ThermoRawScanPtr(scan_number)\n scan = Scan(package, self)\n self._scan_cache[scan_number] = scan\n return scan",
"def get(self, conn_alias: str) -> \"BaseDBAsyncClient\":\n storage: Dict[str, \"BaseDBAsyncClient\"] = self._get_storage()\n try:\n return storage[conn_alias]\n except KeyError:\n connection: BaseDBAsyncClient = self._create_connection(conn_alias)\n storage[conn_alias] = connection\n return connection",
"def get_scan_by_index(self, index):\n if not self._use_index:\n raise TypeError(\"This method requires the index. Please pass `use_index=True` during initialization\")\n index_keys = tuple(self.index)\n id_bytes = index_keys[index]\n id_str = id_bytes.decode(\"utf-8\")\n return self.get_scan_by_id(id_str)",
"def _create_cluster_from_index(self, index):\n return Cluster(index=index)",
"def index_client(indexd_client):\n return indexd_client",
"def make_connection(self):\n if self._created_connections[self._pattern_idx] >= self.max_connections_per_pattern:\n raise ConnectionError(\"Too many connections\")\n self._created_connections[self._pattern_idx] += 1\n conn = self.connection_class(**self.patterns[self._pattern_idx])\n conn._pattern_idx = self._pattern_idx\n return conn",
"def get_rethink_connection_ex(config):\n\n\trethink_conn = r.connect(\n\t\thost=config.get(\"RETHINKDB\", \"RETHINK_HOST\"),\n\t\tport=config.get(\"RETHINKDB\", \"RETHINK_PORT\"),\n\t\tdb=config.get(\"RETHINKDB\", \"RETHINK_DB\"),\n\t\tuser=config.get(\"RETHINKDB\", \"RETHINK_USER\"),\n\t\tpassword=config.get(\"RETHINKDB\", \"RETHINK_PASSWORD\"),\n\t\ttimeout=int(config.get(\"RETHINKDB\", \"RETHINK_TIMEOUT\")),\n\t)\n\treturn rethink_conn",
"def from_index(cls, index):\n return cls(name=index.name or None,\n fields=index.fields)",
"def __getitem__(self, ind):\n if not isinstance(ind, int):\n raise TypeError\n\n return self.circuits[ind]\n\n # TODO: all encoding classes will need this method.\n # TODO: make a BaseEncoding that implements this",
"def get(self, instance, database):\n key = instance + '/' + database\n\n if not key in self.queues:\n queue = Queue(self.poolsize)\n self.queues[key] = queue\n\n queue = self.queues[key]\n\n if queue.empty():\n cnxn = cnxn_ctor(instance, database)\n else:\n cnxn = queue.get()\n # Make sure the connection is still good.\n cnxn.ping()\n cnxn.commit()\n\n return cnxn",
"def get_client(self, write=True):\r\n index = self.get_next_client_index(write=write)\r\n\r\n if self._clients[index] is None:\r\n self._clients[index] = self.connect(index)\r\n\r\n return self._clients[index]",
"def create_redis_connection(app=None):\n\n if app:\n app.logger.info('Instantiated new redis connection.')\n\n redis_connection = redis.StrictRedis(\n host=\"localhost\",\n port=6379,\n db=0\n )\n\n if not redis_connection.exists('last_queue_idx'):\n redis_connection.set('last_queue_idx', 0)\n\n return redis_connection",
"def connect_redis(conn):\n # Don't pass empty password to the client\n if not conn.get('password', None):\n conn.pop('password', None)\n\n return redis.StrictRedis(**conn)",
"def create_connection(self, db_name=None, raw=False):\n connected = False\n max_tries = 10\n\n # if db_name is not defined, let it be empty string\n if db_name is None:\n db_name = \"\"\n\n # Reconnect until max_tries exceeded\n while not connected and max_tries > 0:\n try:\n # create engine from db settings\n engine = self.get_engine(db_name)\n\n # Create connection for query\n connection = engine.connect() if raw == False else engine.raw_connection()\n\n connected = True\n\n return engine, connection\n except Exception as e:\n print(\"Database Connection Error: {}\".format(e))\n print(\"Network is unreachable. Retrying to connect to database in 10 seconds...\")\n time.sleep(10)\n max_tries -= 1",
"def get_db_client(self, connection_name: str) -> BaseDBAsyncClient:\n return self._db_client_map[connection_name]",
"def create_connection(self, alias='async', client_class=AsyncElasticsearch, **kwargs):\n kwargs.setdefault('serializer', serializer)\n conn = self._conns[alias] = client_class(**kwargs)\n return conn",
"def __getitem__(self, name):\n return self.connection(name)",
"def _conn_redis(self) -> Redis:\n return Redis(host=self._REDIS_DB_HOST, port=self._REDIS_DB_PORT, db=0,decode_responses=True)",
"def get(self, conn_id: str) -> Connection:\n return Connection.from_dict(self.query(f'{CONNECTION_URL}/{conn_id}'))",
"def init_index(self, index_name):\n return Index(self, index_name)",
"def get_client(connection_string, options_string):\n if not isinstance(connection_string, (str, dict)):\n raise InterfaceError(\"connection_data must be a string or dict\")\n\n settings_dict = _get_connection_settings(connection_string)\n\n if not isinstance(options_string, (str, dict)):\n raise InterfaceError(\"connection_options must be a string or dict\")\n\n if isinstance(options_string, str):\n try:\n options_dict = json.loads(options_string)\n except JSONDecodeError as err:\n raise InterfaceError(\n \"'pooling' options must be given in the form of a document or dict\"\n ) from err\n else:\n options_dict = {}\n for key, value in options_string.items():\n options_dict[key.replace(\"-\", \"_\")] = value\n\n if not isinstance(options_dict, dict):\n raise InterfaceError(\n \"'pooling' options must be given in the form of a document or dict\"\n )\n pooling_options_dict = {}\n if \"pooling\" in options_dict:\n pooling_options = options_dict.pop(\"pooling\")\n if not isinstance(pooling_options, (dict)):\n raise InterfaceError(\n \"'pooling' options must be given in the form document or dict\"\n )\n # Fill default pooling settings\n pooling_options_dict[\"enabled\"] = pooling_options.pop(\"enabled\", True)\n pooling_options_dict[\"max_size\"] = pooling_options.pop(\"max_size\", 25)\n pooling_options_dict[\"max_idle_time\"] = pooling_options.pop(\"max_idle_time\", 0)\n pooling_options_dict[\"queue_timeout\"] = pooling_options.pop(\"queue_timeout\", 0)\n\n # No other options besides pooling are supported\n if len(pooling_options) > 0:\n raise InterfaceError(f\"Unrecognized pooling options: {pooling_options}\")\n # No other options besides pooling are supported\n if len(options_dict) > 0:\n raise InterfaceError(\n f\"Unrecognized connection options: {options_dict.keys()}\"\n )\n\n return Client(settings_dict, pooling_options_dict)",
"def connect_to_redis():\n return Redis(host=redis_host, port=redis_port, db=0)",
"def conn_redis(host, port, db=0):\r\n r = redis.Redis(host=host, port=port, db=db)\r\n return r"
]
| [
"0.5969906",
"0.5762544",
"0.5700567",
"0.5463157",
"0.5462497",
"0.5395308",
"0.5355108",
"0.53345335",
"0.5311246",
"0.53111696",
"0.53071743",
"0.5279201",
"0.52436805",
"0.5213795",
"0.52018327",
"0.5171319",
"0.5145463",
"0.5115737",
"0.51155186",
"0.51154757",
"0.51045585",
"0.50934756",
"0.50931317",
"0.50895315",
"0.5078248",
"0.50781643",
"0.50780755",
"0.50657886",
"0.50590855",
"0.504029"
]
| 0.64365077 | 0 |
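The connect() record above hands the parsed host/port/db to a connection factory. As a hedged sketch of the same flow using the third-party redis-py package (assumed installed; the factory is replaced by a direct redis.Redis call, and the server list here is hypothetical):

import redis  # redis-py, assumed available

def connect(servers, index=0):
    """Build a raw client for the connection string at the given index."""
    host, port, db = servers[index].split(":")
    # Index 0 is normally the master; higher indices point at replicas.
    return redis.Redis(host=host, port=int(port), db=int(db))

# Constructing the client issues no command, so nothing actually connects here.
client = connect(["127.0.0.1:6379:0", "127.0.0.1:6380:0"], index=0)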
Adds delta to the cache version for the supplied key. Returns the new version. | def incr_version(self, key, delta=1, version=None, client=None):
if client is None:
client = self.get_client(write=True)
if version is None:
version = self._backend.version
old_key = self.make_key(key, version)
value = self.get(old_key, version=version, client=client)
try:
ttl = client.ttl(old_key)
except ConnectionError:
raise ConnectionInterrupted(connection=client)
if value is None:
raise ValueError("Key '%s' not found" % key)
if isinstance(key, CacheKey):
new_key = self.make_key(key.original_key(), version=version + delta)
else:
new_key = self.make_key(key, version=version + delta)
self.set(new_key, value, timeout=ttl, client=client)
self.delete(old_key, client=client)
return version + delta | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def incr(self, key, delta=1):\n try:\n key = self.prepare_key(key)\n return super(CacheClass, self).incr(key, delta)\n except Exception as err:\n return self.warn_or_error(err, delta)",
"def incr(self, key, delta=1, version=None, client=None):\r\n return self._incr(key=key, delta=delta, version=version, client=client)",
"def incr(self, key, delta=1):\n\t\treturn self._incrdecr(\"incr\", key, delta)",
"def incr(self, key, delta=1):\r\n if delta < 0:\r\n return self._incrdecr(\"decr\", key, -delta)\r\n else:\r\n return self._incrdecr(\"incr\", key, delta)",
"def add(name, key, delta):\n num_shards = CounterShardConfig.get_num_shards(name)\n def txn():\n index = random.randint(0, num_shards - 1)\n shard_key = '%s-%s-%s' % (name, key, str(index))\n counter = CounterShard.get_by_key_name(shard_key)\n if counter is None:\n counter = CounterShard(key_name=shard_key, name=name, reference_key=key)\n counter.count += delta\n counter.put()\n db.run_in_transaction(txn)\n \n cache_key = make_key('counter', name, key)\n cached = memcache.get(cache_key)\n if cached != None:\n memcache.set(cache_key, cached + delta)",
"def inc(self, key, delta=1):\n if self.has(key):\n _filter = {'_id': key}\n document = {'$inc': {'value': delta}}\n try:\n self.collection.update(_filter, document)\n except PyMongoError:\n return None\n else:\n self.add(key, delta)\n return self.get(key)",
"def incr(self, key, delta=1, callback=None):\n self._incrdecr(\"incr\", key, delta, callback=callback)",
"def add(self, key, value):\n newest = self._Item(key, value) # make new item instance\n walk = self._data.last() # walk backward looking for smaller key\n while walk is not None and newest < walk.element():\n walk = self._data.before(walk)\n if walk is None:\n self._data.add_first(newest) # new key is smallest\n else:\n self._data.add_after(walk, newest) # newest goes after walk",
"def add(self, key, value):\r\n newest = Item(key, value) # make new item instance\r\n walk = self.data.last() # walk backward looking for smaller key\r\n while walk is not None and newest < walk.element():\r\n walk = self.data.before(walk)\r\n if walk is None:\r\n self.data.add_first(newest) # new key is smallest\r\n else:\r\n self.data.add_after(walk, newest) # newest goes after walk\r",
"def decr(self, key, delta=1, version=None, client=None):\r\n return self._incr(key=key, delta=-delta, version=version,\r\n client=client)",
"def update(self, cache_key):\r\n self._write_sha(cache_key)",
"def addVertex(self, key):\n if key not in self.vertList:\n self.numVertices += 1\n vtx = Vertex(key)\n self.verList[key] = vtx\n return vtx",
"def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, client=None):\r\n return self.set(key, value, timeout, client=client, nx=True)",
"def add(self, key):\n addition_idx = self._reduce(self._hash(key))\n\n if self.table[addition_idx] != \"_\":\n # collision\n new_idx = self._resolve_collision(addition_idx)\n if new_idx == addition_idx:\n # table is full; do not insert\n print(\"Did not add key: hash table is full!\")\n else:\n # found a new\n self.table[new_idx] = key\n else:\n # no collision; place value at index\n self.table[addition_idx] = key",
"def add(self, key, value):\n if not self.checkpoints:\n # If no checkpoints exist we don't need to track history.\n return\n elif key in self.latest:\n # If the key is already in the latest checkpoint we should not\n # overwrite it.\n return\n self.latest[key] = value",
"def decr(self, key, delta=1):\n try:\n key = self.prepare_key(key)\n return super(CacheClass, self).decr(key, delta)\n except Exception as err:\n return self.warn_or_error(err, delta)",
"def add(self, key, val, expiry_time=0, min_compress_len=0):\n\t\treturn self._set(\"add\", key, val, expiry_time, min_compress_len)",
"def add(self, key, value):\n new = self._Item(key, value)\n\n if self.is_empty():\n self._data.append(new)\n else:\n for i, item in enumerate(self._data):\n if new <= item:\n self._data.insert(i, new)\n break\n if i == len(self) - 1:\n self._data.append(new)\n break",
"def update(library_key):\n versions = Library.uncached_versions_for_key(library_key)\n version_cache = VersionCache.get_or_insert('versions', parent=library_key)\n needs_index_update = False\n if version_cache.versions != versions:\n old_default = versiontag.default_version(version_cache.versions)\n new_default = versiontag.default_version(versions)\n needs_index_update = old_default != new_default\n version_cache.versions = versions\n version_cache.put()\n return needs_index_update",
"def __add_new_entry(self, key, value):\n if not self.__has_frequency_one():\n frequency_one = LFUNode(1, self.capacity)\n frequency_one.add_node_after(self.head)\n self.size += 1\n self.key_node_map[key] = self.head.next.frequency_cache.put_key_value_internally(key, value)\n self.key_to_frequency_node[key] = self.head.next\n return self.key_node_map[key]",
"def dec(self, key, delta=1):\n return self.inc(key, -delta)",
"def inc(self, key):\n if key in self.key_dict:\n self.increase(key)\n return\n self.key_dict[key] = key_node = KeyNode(key, 1)\n value_node = self.value_dict.get(1)\n if value_node is None:\n self.value_dict[1] = value_node = ValueNode(1, None, self.head)\n if self.head:\n self.head.prev = value_node\n self.head = value_node\n if self.last is None:\n self.last = value_node\n self.insert_key_node(key_node)",
"def update_versioned_target(self, vt):\n self._cache_manager.update(vt.cache_key)",
"def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]",
"def _newKey(self, key):\n self._testKeySubNsAdd()\n self._getKeyList().append(key)",
"def update_time(cls, key):\n key.put()",
"def get_version(self, extra=None):\n\n if extra:\n key = self._get_extra_key(extra)\n else:\n key = self.key\n\n v = self._get_cache(key).get(key)\n if v == None:\n v = self._increment_version(extra=extra)\n\n return \"%s.%s\" % (key, v)",
"def add(self, key, value):\n # If the node is empty, simply insert the key-value pair.\n if not self.keys:\n self.keys.append(key)\n self.values.append([value])\n return None\n\n for i, item in enumerate(self.keys):\n # If new key matches existing key, add to list of values.\n if key == item:\n self.values[i].append(value)\n break\n\n # If new key is smaller than existing key, insert new key to the left of existing key.\n elif key < item:\n self.keys = self.keys[:i] + [key] + self.keys[i:]\n self.values = self.values[:i] + [[value]] + self.values[i:]\n break\n\n # If new key is larger than all existing keys, insert new key to the right of all\n # existing keys.\n elif i + 1 == len(self.keys):\n self.keys.append(key)\n self.values.append([value])",
"def add(self, key, value):\n\t\tself.__add_key_to_bt(key)[3] = self.__add_key_value_to_ll(key, value)",
"def add(self, key, val):\n self.obtain(key).append(val)"
]
| [
"0.6767568",
"0.6661848",
"0.6260883",
"0.6248231",
"0.6220057",
"0.5948065",
"0.5826451",
"0.56962454",
"0.55747366",
"0.54451704",
"0.53051615",
"0.52766025",
"0.5254437",
"0.52447313",
"0.5226823",
"0.5207911",
"0.52070314",
"0.52013683",
"0.5162349",
"0.5153979",
"0.51407474",
"0.5127734",
"0.5118347",
"0.51148695",
"0.51142",
"0.5072264",
"0.5059571",
"0.5056774",
"0.505534",
"0.50549495"
]
| 0.73972917 | 0 |
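The incr_version record above moves a value from one versioned key to the next and deletes the old entry. The dictionary-backed sketch below is purely illustrative: the make_key helper and the in-memory store are invented for the example, and TTL handling is omitted.

store = {}

def make_key(key, version):
    # Hypothetical key-versioning scheme: ':<version>:<key>'.
    return ":%s:%s" % (version, key)

def incr_version(key, delta=1, version=1):
    old_key = make_key(key, version)
    if old_key not in store:
        raise ValueError("Key '%s' not found" % key)
    new_key = make_key(key, version + delta)
    # Copy the value under the new version, then drop the old entry.
    store[new_key] = store.pop(old_key)
    return version + delta

store[make_key("greeting", 1)] = "hello"
print(incr_version("greeting"))  # 2
print(store)                     # {':2:greeting': 'hello'}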
Unpickles the given value. | def unpickle(value):
try:
value = int(value)
except (ValueError, TypeError):
value = smart_bytes(value)
value = pickle.loads(value)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unserialize(val):\n return pickle.loads(val)",
"def _decode_value(self, value):\n return pickle.loads(value.value) if value else value",
"def from_value(value):\n return pickle.dumps(value)",
"def base64unpickle(self, value):\n if value:\n return pickle.loads(self.base64decode(value))",
"def unpickle(path):\n with open(path, \"rb\") as f:\n return pickle.load(f, encoding='latin1')",
"def loads(value):\n return unpackb(value)",
"def do_unpickle(self, arg):\n try:\n from pickling import Pickling\n Pickling('output.pickle', arg).unpickle_it()\n print('The pickled file has been un-pickled')\n except FileNotFoundError as e:\n print(e)\n except():\n print(\"Error!!\")",
"def unpickle(self, filename):\n try:\n return pickle.load(open(filename, 'rb'))\n except IOError:\n print(\"error!! not available!\")\n return None",
"def unpickle_file(filename):\r\n\r\n with open(filename, 'rb') as f:\r\n return pickle.load(f)",
"def _unpickle(filename):\n\n # Create full path for the file.\n file_path = _get_file_path(filename)\n\n print(\"Loading data: \" + file_path)\n\n with open(file_path, mode='rb') as file:\n # In Python 3.X it is important to set the encoding,\n # otherwise an exception is raised here.\n data = pickle.load(file, encoding='bytes')\n\n return data",
"def to_value(pickled):\n return pickle.loads(pickled)",
"def unpickle_file(filename):\n\n with open(filename, 'rb') as f:\n return pickle.load(f)",
"def unpickle_data(tdata):\n try:\n if isinstance(tdata, bytes): #Quick check if tdata is already bytes\n data = pickle.loads(tdata)\n else:\n data = tdata\n except:\n data = False\n return data",
"def test__pickle_unpickle(self):\n pass",
"def unpackb(value):\n return load(io.BytesIO(value))",
"def loads(data):\n return cPickle.loads(data)",
"def unpickle(file):\n if os.path.isfile(file):\n return pickle.load(open(file, \"rb\"))\n else:\n return None",
"def pickle(self, value):\r\n\r\n if isinstance(value, bool) or not isinstance(value, integer_types):\r\n return pickle.dumps(value, self._pickle_version)\r\n\r\n return value",
"def from_db_value(self, value, *args):\n if value is not None:\n try:\n value = dbsafe_decode(value, self.compress)\n except Exception:\n # If the value is a definite pickle; and an error is raised in\n # de-pickling it should be allowed to propogate.\n if isinstance(value, PickledObject):\n raise\n else:\n if isinstance(value, _ObjectWrapper):\n return value._obj\n return value",
"def restoreData(filename='laue.dat'):\r\n import cPickle\r\n with open(filename, 'rb') as fp:\r\n return cPickle.load(fp)",
"def unpickle_object(file):\n filehandler = open(file, 'rb')\n Object = pickle.load(filehandler)\n filehandler.close()\n return Object",
"def load(self):\n result = bolt.PickleDict.load(self)\n if not result and self.oldPath.exists():\n ins = None\n try:\n ins = self.oldPath.open('r')\n self.data.update(compat.uncpickle(ins))\n ins.close()\n result = 1\n except EOFError:\n if ins: ins.close()\n #--Done\n return result",
"def unpickle_binary(file: Union[str, Path]) -> object:\n\n with open(str(file), 'rb') as f:\n return pickle.load(f)",
"def decompress_pickle(file):\n data = bz2.BZ2File(file, 'rb')\n data = cPickle.load(data)\n return data",
"def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)",
"def msgpack_unpackb(value):\n return msgpack.unpackb(value, object_hook=pseud_decode, encoding='utf-8')",
"def unpickle_data(file_name):\n infile = open(file_name, \"rb\")\n try:\n data = pickle.load(infile)\n except:\n data = {}\n infile.close()\n\n return data",
"def value(self) -> Any:\n return pickle.loads(self.pickled_value)",
"def dump_object(self, value):\n return pickle.dumps(value)",
"def unpickle(file):\r\n\timport cPickle\r\n\tfo = open(file, 'rb')\r\n\tdict = cPickle.load(fo)\r\n\tfo.close()\r\n\treturn dict"
]
| [
"0.7045012",
"0.7031028",
"0.6841384",
"0.65678066",
"0.6526041",
"0.64857894",
"0.6477095",
"0.61220485",
"0.6111226",
"0.6080982",
"0.6064356",
"0.60208184",
"0.6005177",
"0.5897015",
"0.58008486",
"0.5781338",
"0.57552445",
"0.57470506",
"0.5712168",
"0.56796086",
"0.5661771",
"0.5649209",
"0.56452316",
"0.56239325",
"0.55641997",
"0.551686",
"0.55157757",
"0.5498715",
"0.5454385",
"0.54522973"
]
| 0.7501486 | 0 |
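The unpickle record above first tries to read the cached bytes as an integer and only then falls back to pickle. A minimal standard-library round trip of that pattern (Django's smart_bytes conversion is left out, so plain bytes are assumed as input):

import pickle

def unpickle(value):
    """Integers are stored as plain digits; everything else is pickled bytes."""
    try:
        return int(value)
    except (ValueError, TypeError):
        return pickle.loads(value)

print(unpickle(b"42"))                   # 42 (no unpickling needed)
print(unpickle(pickle.dumps({"a": 1})))  # {'a': 1}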
Pickle the given value. | def pickle(self, value):
if isinstance(value, bool) or not isinstance(value, integer_types):
return pickle.dumps(value, self._pickle_version)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_value(value):\n return pickle.dumps(value)",
"def dump_object(self, value):\n return pickle.dumps(value)",
"def _encode_value(self, value):\n return pickle.dumps(value)",
"def unserialize(val):\n return pickle.loads(val)",
"def value(self) -> Any:\n return pickle.loads(self.pickled_value)",
"def serialize(value, **kwargs):\n return value",
"def unpickle(value):\r\n try:\r\n value = int(value)\r\n except (ValueError, TypeError):\r\n value = smart_bytes(value)\r\n value = pickle.loads(value)\r\n return value",
"def objToPickle(self, x):\n try:\n xp = pickle.dumps(x)\n pickle.loads(xp)\n except:\n return\n return xp",
"def serialize(self, value):\n return value",
"def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)",
"def _decode_value(self, value):\n return pickle.loads(value.value) if value else value",
"def to_value(pickled):\n return pickle.loads(pickled)",
"def pickle(obj):\n return pickletools.optimize(cPickle.dumps(obj))",
"def serialize_value(self, value):\n\n return value",
"def dump_object(self, value):\n t = type(value)\n if t is int or t is long:\n return str(value)\n return '!' + pickle.dumps(value)",
"def serialize_to_python(cls, value):\n raise NotImplementedError",
"def to_pickle(self, path=None):\n if path:\n with open(path, \"wb\") as f:\n dill.dump(self, f)\n return None\n return dill.dumps(self)",
"def read_or_new_pickle(filename, value, *args, **kwargs):\n data = None\n filename = \"{}.pkl\".format(filename)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n if os.path.isfile(filename):\n # If file had been created, but is empty return None since another process\n # could be writing to it.\n if os.path.getsize(filename) > 0:\n with open(filename, \"rb\") as f:\n try:\n data = pickle.load(f)\n except Exception as e:\n print(e)\n raise e\n else:\n # open(filename, \"ab\").close()\n if callable(value):\n data = value(*args, **kwargs)\n else:\n data = value\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n return data",
"def pickle(self,data,filename):\n pickle.dump(data, open(filename, 'wb'))",
"def dump(value, filename, compress=0, cache_size=100):\r\n if compress is True:\r\n # By default, if compress is enabled, we want to be using 3 by\r\n # default\r\n compress = 3\r\n if not isinstance(filename, _basestring):\r\n # People keep inverting arguments, and the resulting error is\r\n # incomprehensible\r\n raise ValueError(\r\n 'Second argument should be a filename, %s (type %s) was given'\r\n % (filename, type(filename))\r\n )\r\n try:\r\n pickler = NumpyPickler(filename, compress=compress,\r\n cache_size=cache_size)\r\n pickler.dump(value)\r\n pickler.close()\r\n finally:\r\n if 'pickler' in locals() and hasattr(pickler, 'file'):\r\n pickler.file.flush()\r\n pickler.file.close()\r\n return pickler._filenames",
"def dump(self, value, filename):\n\n assert isinstance(filename, str)\n joblib.dump(value=value, filename=filename)",
"def serialize(self, value) -> bytes:\n pass",
"def serialize(self, value: VALUE) -> bytes:\n raise NotImplementedError",
"def _to_serialize(value):\n return value.serialize() if value is not None else None",
"def dump(self, value, filename):\n\n super().dump(value=value, filename=filename)",
"def serialize(obj):\n return pickle.dumps(obj)",
"def pickable(self, value=None):\n if value is None:\n return self.GetPickable()\n self.SetPickable(value)\n return self",
"def get(self, key):\n if key in self.cache:\n return self.cache[key]\n valueat,valuelen = self.keys[key]\n valuedump = self.file.readp(valueat, valuelen)\n value = pickle.loads(valuedump)\n self.cache[key] = value\n return value",
"def to_pickle(self, path: Union[str, Path]) -> None:\n with open(path, 'wb') as handle:\n pickle.dump(self, handle)",
"def pickle(self, path: str):\n path = pathlib.Path(path)\n with path.open(mode='wb') as f:\n pickle.dump(self, f, 2)"
]
| [
"0.77703",
"0.7441614",
"0.6958083",
"0.6798523",
"0.6667502",
"0.6574332",
"0.6568599",
"0.6552237",
"0.65478945",
"0.6544326",
"0.65061575",
"0.6471692",
"0.64400285",
"0.6392385",
"0.6310394",
"0.6301115",
"0.6247126",
"0.6234851",
"0.619252",
"0.61067325",
"0.6043083",
"0.60272354",
"0.60013056",
"0.5978754",
"0.59704447",
"0.5895127",
"0.58849114",
"0.5839582",
"0.5793561",
"0.5781172"
]
| 0.75480545 | 1 |
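The pickle record above is the write-side counterpart of the previous one: booleans and non-integers are serialized, while plain ints pass through untouched so the server can increment them in place. A standard-library sketch of that asymmetry (the function name and protocol choice are just for the example):

import pickle

def maybe_pickle(value, protocol=pickle.HIGHEST_PROTOCOL):
    """Pickle everything except plain ints (bools are pickled, as in the record)."""
    if isinstance(value, bool) or not isinstance(value, int):
        return pickle.dumps(value, protocol)
    return value

print(maybe_pickle(7))           # 7, stored as-is so it stays incrementable
print(type(maybe_pickle(True)))  # <class 'bytes'>
print(type(maybe_pickle("hi")))  # <class 'bytes'>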
Given connection parameters, return a new or cached connection pool for them. Reimplement this method if you want different connection-pool instance caching behavior. | def get_or_create_connection_pool(self, params):
key = frozenset((k, repr(v)) for (k, v) in params.items())
if key not in self._pools:
self._pools[key] = self.get_connection_pool(params)
return self._pools[key] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_connection_pool(self, params):\r\n cp_params = dict(params)\r\n cp_params.update(self.pool_cls_kwargs)\r\n return self.pool_cls(**cp_params)",
"def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))",
"def build_connection_pool(conn_details: dict):\n\n # Expecting url to be like jdbc:postgresql://host:port/db\n conn_details.update(\n DB.url_regex.match(conn_details[\"url\"]).groupdict()\n )\n return SimpleConnectionPool(\n minconn=1,\n maxconn=20,\n user=conn_details[\"user\"],\n password=conn_details[\"password\"],\n host=conn_details[\"host\"],\n port=conn_details[\"port\"],\n database=conn_details[\"db\"])",
"def _get_pool(self, *args, **kwargs):\n\n pool_name = '_pool_%s' % getattr(self, 'alias', 'common')\n\n if not hasattr (self.__class__, pool_name):\n lock = thread.allocate_lock()\n lock.acquire()\n\n try:\n pool = cx_Oracle.SessionPool(\n user=self.user,\n password=self.password,\n dsn=self.tns,\n min=CX_POOL_SESSION_MIN,\n max=CX_POOL_SESSION_MAX,\n increment=CX_POOL_SESSION_INCREMENT,\n connectiontype=cx_Oracle.Connection,\n threaded=CX_POOL_THREADED,\n getmode=cx_Oracle.SPOOL_ATTRVAL_NOWAIT,\n homogeneous=True)\n except Exception as err:\n pool = None\n\n if pool:\n pool.timeout = CX_POOL_CONNECT_TIMEOUT\n setattr(self.__class__, pool_name, pool)\n else:\n msg = \"\"\" ### Database login failed or database not found ### \"\"\"\n raise self.Database_Error, ('%s') %(msg)\n\n lock.release()\n\n return getattr(self.__class__, pool_name)",
"def redis_conn_pool(self) -> ConnectionPool:\n if self._redis_conn_pool is None:\n if self._config[\"graph_redis_pool_block\"]:\n pool_class: Callable = BlockingConnectionPool\n else:\n pool_class = ConnectionPool\n\n if self._config[\"graph_redis_pool_gevent_queue\"]:\n redis_conn_pool = pool_class().from_url(\n self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n timeout=self._config[\"graph_redis_pool_timeout\"],\n queue_class=gevent.queue.LifoQueue,\n )\n\n else:\n redis_conn_pool = pool_class().from_url(\n self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n timeout=self._config[\"graph_redis_pool_timeout\"],\n )\n\n self._redis_conn_pool = redis_conn_pool\n\n self._logger.debug(\n \"[%s]: Initialized Redis connection pool: %s\",\n self.__name__,\n self._redis_conn_pool,\n )\n\n return self._redis_conn_pool",
"def __get_connection():\n # 根据配置文件创建连接池\n if not Mysql.__mysql_pool:\n Mysql.__mysql_pool = PooledDB(\n creator=MySQLdb,\n use_unicode=False,\n cursorclass=DictCursor,\n db=sqlconf.MysqlConfig['db'],\n host=sqlconf.MysqlConfig['host'],\n port=sqlconf.MysqlConfig['port'],\n user=sqlconf.MysqlConfig['user'],\n passwd=sqlconf.MysqlConfig['passwd'],\n charset=sqlconf.MysqlConfig['charset'],\n mincached=sqlconf.MysqlConfig['mincached'],\n maxcached=sqlconf.MysqlConfig['maxcached'],\n maxconnections=sqlconf.MysqlConfig['maxconnections'])\n # 返回连接池中连接对象\n return Mysql.__mysql_pool.connection()",
"def get_pool(self):\n try:\n return self._pool\n except AttributeError:\n db_url = getattr(settings, self.name)\n self._pool = PostgresConnectionPool.for_url(db_url)\n return self._pool",
"def make_connection(self):\n if self._created_connections() >= self.max_connections:\n raise ConnPoolException(\"Too many connections.\")\n connection = self.connection_class(**self.connection_kwargs)\n connection.connect()\n return connection",
"def get_connection(self, settings):\n\n def set_mysqlx_wait_timeout(cnx):\n ver = cnx.sql(_SELECT_VERSION_QUERY).execute().fetch_all()[0][0]\n # mysqlx_wait_timeout is only available on MySQL 8\n if tuple(int(n) for n in ver.split(\"-\")[0].split(\".\")) > (\n 8,\n 0,\n 10,\n ):\n cnx.sql(f\"set mysqlx_wait_timeout = {pool.max_idle_time}\").execute()\n\n pools = self._get_pools(settings)\n cur_priority = settings.get(\"cur_priority\", None)\n error_list = []\n self._check_unavailable_pools(settings)\n cur_priority = self._get_next_priority(pools, cur_priority)\n if cur_priority is None:\n raise PoolError(\n \"Unable to connect to any of the target hosts. No pool is available\"\n )\n settings[\"cur_priority\"] = cur_priority\n pool = self._get_next_pool(pools, cur_priority)\n lock = threading.RLock()\n while pool is not None:\n try:\n # Check connections aviability in this pool\n if pool.qsize() > 0:\n # We have connections in pool, try to return a working one\n with lock:\n try:\n cnx = pool.get(block=True, timeout=pool.queue_timeout)\n except queue.Empty:\n raise PoolError(\n \"Failed getting connection; pool exhausted\"\n ) from None\n try:\n if cnx.is_server_disconnected():\n pool.remove_connections()\n # Only reset the connection by re-authentification\n # if the connection was unable to keep open by the\n # server\n if not cnx.keep_open:\n cnx.reset()\n set_mysqlx_wait_timeout(cnx)\n except (RuntimeError, OSError, InterfaceError):\n # Unable to reset connection, close and remove\n try:\n cnx.close_connection()\n except (RuntimeError, OSError, InterfaceError):\n pass\n finally:\n pool.remove_connection(cnx)\n # By WL#13222 all idle sessions that connect to the\n # same endpoint should be removed from the pool.\n while pool.qsize() > 0:\n try:\n cnx = pool.get(\n block=True, timeout=pool.queue_timeout\n )\n except queue.Empty:\n pass\n else:\n try:\n cnx.close_connection()\n except (RuntimeError, OSError, InterfaceError):\n pass\n finally:\n pool.remove_connection(cnx)\n # Connection was closed by the server, create new\n try:\n cnx = PooledConnection(pool)\n pool.track_connection(cnx)\n cnx.connect()\n set_mysqlx_wait_timeout(cnx)\n except (RuntimeError, OSError, InterfaceError):\n pass\n finally:\n # Server must be down, take down idle\n # connections from this pool\n while pool.qsize() > 0:\n try:\n cnx = pool.get(\n block=True,\n timeout=pool.queue_timeout,\n )\n cnx.close_connection()\n pool.remove_connection(cnx)\n except (RuntimeError, OSError, InterfaceError):\n pass\n return cnx\n elif pool.open_connections < pool.pool_max_size:\n # No connections in pool, but we can open a new one\n cnx = PooledConnection(pool)\n pool.track_connection(cnx)\n cnx.connect()\n set_mysqlx_wait_timeout(cnx)\n return cnx\n else:\n # Pool is exaust so the client needs to wait\n with lock:\n try:\n cnx = pool.get(block=True, timeout=pool.queue_timeout)\n cnx.reset()\n set_mysqlx_wait_timeout(cnx)\n return cnx\n except queue.Empty:\n raise PoolError(\"pool max size has been reached\") from None\n except (InterfaceError, TimeoutError, PoolError) as err:\n error_list.append(f\"pool: {pool} error: {err}\")\n if isinstance(err, PoolError):\n # Pool can be exhaust now but can be ready again in no time,\n # e.g a connection is returned to the pool.\n pool.set_unavailable(2)\n else:\n self.set_pool_unavailable(pool, err)\n\n self._check_unavailable_pools(settings)\n # Try next pool with the same priority\n pool = self._get_next_pool(pools, cur_priority)\n\n if pool is None:\n cur_priority = 
self._get_next_priority(pools, cur_priority)\n settings[\"cur_priority\"] = cur_priority\n pool = self._get_next_pool(pools, cur_priority)\n if pool is None:\n msg = \"\\n \".join(error_list)\n raise PoolError(\n \"Unable to connect to any of the target hosts: \"\n f\"[\\n {msg}\\n]\"\n ) from err\n continue\n\n raise PoolError(\"Unable to connect to any of the target hosts\")",
"def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]",
"def _get_cached_connection(connection_hash):\n global _CONNECTIONS\n\n # If we've not initialized the _CONNECTIONS global yet, do so\n if not _CONNECTIONS:\n _CONNECTIONS = dict()\n\n # If our connection_hash is in our global connection pool return it\n if connection_hash in _CONNECTIONS:\n\n # Increment our use counter\n _CONNECTIONS[connection_hash]['connections'] += 1\n\n # Return the active connection\n return _CONNECTIONS[connection_hash]['connection']\n\n # Return None, we don't have it in the pool\n return None",
"def make_pool(self) -> pool.SimpleConnectionPool:\n\n return pool.SimpleConnectionPool(\n minconn=1, maxconn=self.pool_size, **self._kwargs\n )",
"def __init__(self, **kwargs):\n creator = kwargs.pop(\"creator\", None)\n if not creator:\n import MySQLdb\n creator = MySQLdb\n mincached = kwargs.pop(\"mincached\", 2)\n maxcached = kwargs.pop(\"maxcached\", 10)\n maxshared = kwargs.pop(\"maxshared\", 10)\n maxconnections = kwargs.pop(\"maxconnections\", 20)\n blocking = kwargs.pop(\"blocking\", 0)\n reset = kwargs.pop(\"reset\", True)\n maxusage = kwargs.pop(\"maxusage\", 0)\n setsession = kwargs.pop(\"setsession\", [\"set autocommit = 0\"])\n ping = kwargs.pop(\"ping\", 1)\n\n self._pool = PooledDB(creator=creator, mincached=mincached, maxcached=maxcached,\n maxshared=maxshared, maxconnections=maxconnections,\n blocking=blocking, maxusage=maxusage,reset=reset,\n setsession=setsession, ping=ping, **kwargs)",
"def _threadsafe_get_connection(self):\n with self._lock:\n next_con = self._nextConnection\n con = PooledDBConnection(self, self._connections[next_con])\n next_con += 1\n if next_con >= len(self._connections):\n next_con = 0\n self._nextConnection = next_con\n return con",
"def create_pool(self, cnx_settings):\n connections_settings = self._get_connections_settings(cnx_settings)\n\n # Subscribe client if it does not exists\n if cnx_settings.get(\"client_id\", \"No id\") not in self.__pools:\n self.__pools[cnx_settings.get(\"client_id\", \"No id\")] = []\n\n # Create a pool for each router\n for router_name, settings in connections_settings:\n if self._pool_exists(cnx_settings.get(\"client_id\", \"No id\"), router_name):\n continue\n pool = self.__pools.get(cnx_settings.get(\"client_id\", \"No id\"), [])\n pool.append(ConnectionPool(router_name, **settings))",
"def get_new_connection(self, conn_params):\r\n self.__connection_string = conn_params.get('connection_string', '')\r\n conn = Database.connect(**conn_params)\r\n return conn",
"def get_pool_conn(conn_details: dict):\n pool_ = DB.build_connection_pool(conn_details)\n conn_pool = pool_.getconn()\n try:\n yield conn_pool\n conn_pool.commit()\n finally:\n pool_.putconn(conn_pool)",
"def _unthreadsafe_get_connection(self):\n return PooledDBConnection(self, self._queue.get())",
"def _init_pool(self, cfg: dict):\n pool = PyMysqlPoolBase(**cfg)\n return pool",
"def connection(self, name=None):\n if not name:\n name = threading.currentThread().getName()\n if name in self:\n return self[name]\n self[name] = self.database.connection()\n return self[name]",
"def _get_cached_db_connection(name='ace'):\n if name is None:\n name = 'ace'\n\n config_section = 'database_{}'.format(name)\n\n if config_section not in saq.CONFIG:\n raise ValueError(\"invalid database {}\".format(name))\n\n try:\n db_identifier = _get_cached_db_identifier(name)\n with _global_db_cache_lock:\n logging.debug(\"aquiring existing cached database connection {}\".format(db_identifier))\n db = _global_db_cache[db_identifier]\n\n try:\n db.rollback()\n #logging.debug(\"acquired cached database connection to {}\".format(name))\n return db\n\n except Exception as e:\n logging.info(\"possibly lost cached connection to database {}: {} ({})\".format(name, e, type(e)))\n try:\n db.close()\n except Exception as e:\n logging.error(\"unable to close cached database connection to {}: {}\".format(name, e))\n\n try:\n with _global_db_cache_lock:\n del _global_db_cache[db_identifier]\n except Exception as e:\n logging.error(\"unable to delete cached db {}: {}\".format(db_identifier, e))\n\n #return _get_db_connection(name)\n\n except KeyError:\n pass\n\n try:\n logging.info(\"opening new cached database connection to {}\".format(name))\n\n with _global_db_cache_lock:\n _global_db_cache[db_identifier] = _get_db_connection(name)\n\n logging.debug(\"opened cached database connection {}\".format(db_identifier))\n return _global_db_cache[db_identifier]\n\n except Exception as e:\n logging.error(\"unable to connect to database {}: {}\".format(name, e))\n report_exception()\n raise e",
"def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)",
"def get_connection(self, command, args=()):\n # TODO: find a better way to determine if connection is free\n # and not havily used.\n command = command.upper().strip()\n is_pubsub = command in _PUBSUB_COMMANDS\n if is_pubsub and self._pubsub_conn:\n if not self._pubsub_conn.closed:\n return self._pubsub_conn, self._pubsub_conn.address\n self._pubsub_conn = None\n for i in range(self.freesize):\n conn = self._pool[0]\n self._pool.rotate(1)\n if conn.closed: # or conn._waiters: (eg: busy connection)\n continue\n if conn.in_pubsub:\n continue\n if is_pubsub:\n self._pubsub_conn = conn\n self._pool.remove(conn)\n self._used.add(conn)\n return conn, conn.address\n return None, self._address # figure out",
"def _get_connection(self) -> Connection:\n # TODO(101) is there a problem with having just one db connection?\n # Will this cause bugs with failed commits?\n curr_thread = threading.get_ident()\n if curr_thread not in self.conn or self.conn[curr_thread] is None:\n try:\n conn = sqlite3.connect(self.db_path)\n conn.row_factory = StringIDRow\n self.conn[curr_thread] = conn\n except sqlite3.Error as e:\n raise MephistoDBException(e)\n return self.conn[curr_thread]",
"def get_connection(self):\n\t\tfrom pymongo import MongoClient\n\n\t\tif self._connection is None:\n\t\t\tself._connection = MongoClient(host=self.url, max_pool_size=10)\n\n\t\treturn self._connection",
"def connection(self, connection=None):\n if connection is None:\n return self.engine.acquire()\n return ConnectionProxy(connection=connection)",
"def get_conn(self, *args, **kwargs):\n connections = self.__connections_for('get_conn', args=args, kwargs=kwargs)\n\n if len(connections) == 1:\n return connections[0]\n else:\n return connections",
"async def acquire(self, command=None, args=()):\n if self.closed:\n raise PoolClosedError(\"Pool is closed\")\n async with self._cond:\n if self.closed:\n raise PoolClosedError(\"Pool is closed\")\n while True:\n await self._fill_free(override_min=True)\n if self.freesize:\n conn = self._pool.popleft()\n assert not conn.closed, conn\n assert conn not in self._used, (conn, self._used)\n self._used.add(conn)\n return conn\n else:\n await self._cond.wait()",
"def open_connection(self) -> Iterator[psycopg2.connect]:\n\n conn = self._pool.getconn()\n\n if self.pre_ping:\n for n in range(self.max_reconnects):\n if not self.ping(conn):\n if n > 0:\n time.sleep(self._back_off_time(n - 1))\n self._pool = self.restart_pool()\n conn = self._pool.getconn()\n else:\n break\n try:\n yield conn\n finally:\n self._pool.putconn(conn)",
"def _get_connection(self) -> sqlite3.Connection:\n curr_thread = threading.get_ident()\n if curr_thread not in self.conn or self.conn[curr_thread] is None:\n conn = sqlite3.connect(self.db_path, check_same_thread=False)\n conn.row_factory = sqlite3.Row\n self.conn[curr_thread] = conn\n return self.conn[curr_thread]"
]
| [
"0.7633331",
"0.7093984",
"0.704607",
"0.70299256",
"0.7016528",
"0.6718907",
"0.66587025",
"0.66581637",
"0.66004694",
"0.6494415",
"0.6447045",
"0.641799",
"0.63243353",
"0.6310433",
"0.63036764",
"0.6295449",
"0.6271186",
"0.6250118",
"0.61557645",
"0.61037254",
"0.6043522",
"0.5957001",
"0.59560215",
"0.5945207",
"0.5841227",
"0.5817161",
"0.5796477",
"0.57820255",
"0.5775795",
"0.57651746"
]
| 0.7563619 | 1 |
Given connection parameters, return a new connection pool for them. Override this method if you want custom behavior when creating the connection pool. | def get_connection_pool(self, params):
cp_params = dict(params)
cp_params.update(self.pool_cls_kwargs)
return self.pool_cls(**cp_params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_or_create_connection_pool(self, params):\r\n key = frozenset((k, repr(v)) for (k, v) in params.items())\r\n if key not in self._pools:\r\n self._pools[key] = self.get_connection_pool(params)\r\n return self._pools[key]",
"def build_connection_pool(conn_details: dict):\n\n # Expecting url to be like jdbc:postgresql://host:port/db\n conn_details.update(\n DB.url_regex.match(conn_details[\"url\"]).groupdict()\n )\n return SimpleConnectionPool(\n minconn=1,\n maxconn=20,\n user=conn_details[\"user\"],\n password=conn_details[\"password\"],\n host=conn_details[\"host\"],\n port=conn_details[\"port\"],\n database=conn_details[\"db\"])",
"def make_connection(self):\n if self._created_connections() >= self.max_connections:\n raise ConnPoolException(\"Too many connections.\")\n connection = self.connection_class(**self.connection_kwargs)\n connection.connect()\n return connection",
"def _get_pool(self, *args, **kwargs):\n\n pool_name = '_pool_%s' % getattr(self, 'alias', 'common')\n\n if not hasattr (self.__class__, pool_name):\n lock = thread.allocate_lock()\n lock.acquire()\n\n try:\n pool = cx_Oracle.SessionPool(\n user=self.user,\n password=self.password,\n dsn=self.tns,\n min=CX_POOL_SESSION_MIN,\n max=CX_POOL_SESSION_MAX,\n increment=CX_POOL_SESSION_INCREMENT,\n connectiontype=cx_Oracle.Connection,\n threaded=CX_POOL_THREADED,\n getmode=cx_Oracle.SPOOL_ATTRVAL_NOWAIT,\n homogeneous=True)\n except Exception as err:\n pool = None\n\n if pool:\n pool.timeout = CX_POOL_CONNECT_TIMEOUT\n setattr(self.__class__, pool_name, pool)\n else:\n msg = \"\"\" ### Database login failed or database not found ### \"\"\"\n raise self.Database_Error, ('%s') %(msg)\n\n lock.release()\n\n return getattr(self.__class__, pool_name)",
"def make_pool(self) -> pool.SimpleConnectionPool:\n\n return pool.SimpleConnectionPool(\n minconn=1, maxconn=self.pool_size, **self._kwargs\n )",
"def get_new_connection(self, conn_params):\r\n self.__connection_string = conn_params.get('connection_string', '')\r\n conn = Database.connect(**conn_params)\r\n return conn",
"def create_pool(self, cnx_settings):\n connections_settings = self._get_connections_settings(cnx_settings)\n\n # Subscribe client if it does not exists\n if cnx_settings.get(\"client_id\", \"No id\") not in self.__pools:\n self.__pools[cnx_settings.get(\"client_id\", \"No id\")] = []\n\n # Create a pool for each router\n for router_name, settings in connections_settings:\n if self._pool_exists(cnx_settings.get(\"client_id\", \"No id\"), router_name):\n continue\n pool = self.__pools.get(cnx_settings.get(\"client_id\", \"No id\"), [])\n pool.append(ConnectionPool(router_name, **settings))",
"def redis_conn_pool(self) -> ConnectionPool:\n if self._redis_conn_pool is None:\n if self._config[\"graph_redis_pool_block\"]:\n pool_class: Callable = BlockingConnectionPool\n else:\n pool_class = ConnectionPool\n\n if self._config[\"graph_redis_pool_gevent_queue\"]:\n redis_conn_pool = pool_class().from_url(\n self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n timeout=self._config[\"graph_redis_pool_timeout\"],\n queue_class=gevent.queue.LifoQueue,\n )\n\n else:\n redis_conn_pool = pool_class().from_url(\n self._config[\"graph_redis_url\"],\n decode_components=True,\n max_connections=self._config[\"graph_redis_pool_max_connections\"],\n timeout=self._config[\"graph_redis_pool_timeout\"],\n )\n\n self._redis_conn_pool = redis_conn_pool\n\n self._logger.debug(\n \"[%s]: Initialized Redis connection pool: %s\",\n self.__name__,\n self._redis_conn_pool,\n )\n\n return self._redis_conn_pool",
"def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))",
"def get_connection(self, settings):\n\n def set_mysqlx_wait_timeout(cnx):\n ver = cnx.sql(_SELECT_VERSION_QUERY).execute().fetch_all()[0][0]\n # mysqlx_wait_timeout is only available on MySQL 8\n if tuple(int(n) for n in ver.split(\"-\")[0].split(\".\")) > (\n 8,\n 0,\n 10,\n ):\n cnx.sql(f\"set mysqlx_wait_timeout = {pool.max_idle_time}\").execute()\n\n pools = self._get_pools(settings)\n cur_priority = settings.get(\"cur_priority\", None)\n error_list = []\n self._check_unavailable_pools(settings)\n cur_priority = self._get_next_priority(pools, cur_priority)\n if cur_priority is None:\n raise PoolError(\n \"Unable to connect to any of the target hosts. No pool is available\"\n )\n settings[\"cur_priority\"] = cur_priority\n pool = self._get_next_pool(pools, cur_priority)\n lock = threading.RLock()\n while pool is not None:\n try:\n # Check connections aviability in this pool\n if pool.qsize() > 0:\n # We have connections in pool, try to return a working one\n with lock:\n try:\n cnx = pool.get(block=True, timeout=pool.queue_timeout)\n except queue.Empty:\n raise PoolError(\n \"Failed getting connection; pool exhausted\"\n ) from None\n try:\n if cnx.is_server_disconnected():\n pool.remove_connections()\n # Only reset the connection by re-authentification\n # if the connection was unable to keep open by the\n # server\n if not cnx.keep_open:\n cnx.reset()\n set_mysqlx_wait_timeout(cnx)\n except (RuntimeError, OSError, InterfaceError):\n # Unable to reset connection, close and remove\n try:\n cnx.close_connection()\n except (RuntimeError, OSError, InterfaceError):\n pass\n finally:\n pool.remove_connection(cnx)\n # By WL#13222 all idle sessions that connect to the\n # same endpoint should be removed from the pool.\n while pool.qsize() > 0:\n try:\n cnx = pool.get(\n block=True, timeout=pool.queue_timeout\n )\n except queue.Empty:\n pass\n else:\n try:\n cnx.close_connection()\n except (RuntimeError, OSError, InterfaceError):\n pass\n finally:\n pool.remove_connection(cnx)\n # Connection was closed by the server, create new\n try:\n cnx = PooledConnection(pool)\n pool.track_connection(cnx)\n cnx.connect()\n set_mysqlx_wait_timeout(cnx)\n except (RuntimeError, OSError, InterfaceError):\n pass\n finally:\n # Server must be down, take down idle\n # connections from this pool\n while pool.qsize() > 0:\n try:\n cnx = pool.get(\n block=True,\n timeout=pool.queue_timeout,\n )\n cnx.close_connection()\n pool.remove_connection(cnx)\n except (RuntimeError, OSError, InterfaceError):\n pass\n return cnx\n elif pool.open_connections < pool.pool_max_size:\n # No connections in pool, but we can open a new one\n cnx = PooledConnection(pool)\n pool.track_connection(cnx)\n cnx.connect()\n set_mysqlx_wait_timeout(cnx)\n return cnx\n else:\n # Pool is exaust so the client needs to wait\n with lock:\n try:\n cnx = pool.get(block=True, timeout=pool.queue_timeout)\n cnx.reset()\n set_mysqlx_wait_timeout(cnx)\n return cnx\n except queue.Empty:\n raise PoolError(\"pool max size has been reached\") from None\n except (InterfaceError, TimeoutError, PoolError) as err:\n error_list.append(f\"pool: {pool} error: {err}\")\n if isinstance(err, PoolError):\n # Pool can be exhaust now but can be ready again in no time,\n # e.g a connection is returned to the pool.\n pool.set_unavailable(2)\n else:\n self.set_pool_unavailable(pool, err)\n\n self._check_unavailable_pools(settings)\n # Try next pool with the same priority\n pool = self._get_next_pool(pools, cur_priority)\n\n if pool is None:\n cur_priority = 
self._get_next_priority(pools, cur_priority)\n settings[\"cur_priority\"] = cur_priority\n pool = self._get_next_pool(pools, cur_priority)\n if pool is None:\n msg = \"\\n \".join(error_list)\n raise PoolError(\n \"Unable to connect to any of the target hosts: \"\n f\"[\\n {msg}\\n]\"\n ) from err\n continue\n\n raise PoolError(\"Unable to connect to any of the target hosts\")",
"def __get_connection():\n # 根据配置文件创建连接池\n if not Mysql.__mysql_pool:\n Mysql.__mysql_pool = PooledDB(\n creator=MySQLdb,\n use_unicode=False,\n cursorclass=DictCursor,\n db=sqlconf.MysqlConfig['db'],\n host=sqlconf.MysqlConfig['host'],\n port=sqlconf.MysqlConfig['port'],\n user=sqlconf.MysqlConfig['user'],\n passwd=sqlconf.MysqlConfig['passwd'],\n charset=sqlconf.MysqlConfig['charset'],\n mincached=sqlconf.MysqlConfig['mincached'],\n maxcached=sqlconf.MysqlConfig['maxcached'],\n maxconnections=sqlconf.MysqlConfig['maxconnections'])\n # 返回连接池中连接对象\n return Mysql.__mysql_pool.connection()",
"def get_pool(self):\n try:\n return self._pool\n except AttributeError:\n db_url = getattr(settings, self.name)\n self._pool = PostgresConnectionPool.for_url(db_url)\n return self._pool",
"def _init_pool(self, cfg: dict):\n pool = PyMysqlPoolBase(**cfg)\n return pool",
"def get_pool_conn(conn_details: dict):\n pool_ = DB.build_connection_pool(conn_details)\n conn_pool = pool_.getconn()\n try:\n yield conn_pool\n conn_pool.commit()\n finally:\n pool_.putconn(conn_pool)",
"async def create_pool(address, *, db=None, password=None, ssl=None,\n encoding=None, minsize=1, maxsize=10,\n parser=None, loop=None, create_connection_timeout=None,\n pool_cls=None, connection_cls=None):\n # FIXME: rewrite docstring\n if pool_cls:\n assert issubclass(pool_cls, AbcPool),\\\n \"pool_class does not meet the AbcPool contract\"\n cls = pool_cls\n else:\n cls = ConnectionsPool\n if isinstance(address, str):\n address, options = parse_url(address)\n db = options.setdefault('db', db)\n password = options.setdefault('password', password)\n encoding = options.setdefault('encoding', encoding)\n create_connection_timeout = options.setdefault(\n 'timeout', create_connection_timeout)\n if 'ssl' in options:\n assert options['ssl'] or (not options['ssl'] and not ssl), (\n \"Conflicting ssl options are set\", options['ssl'], ssl)\n ssl = ssl or options['ssl']\n # TODO: minsize/maxsize\n\n pool = cls(address, db, password, encoding,\n minsize=minsize, maxsize=maxsize,\n ssl=ssl, parser=parser,\n create_connection_timeout=create_connection_timeout,\n connection_cls=connection_cls,\n loop=loop)\n try:\n await pool._fill_free(override_min=False)\n except Exception:\n pool.close()\n await pool.wait_closed()\n raise\n return pool",
"def __init__(self, **kwargs):\n creator = kwargs.pop(\"creator\", None)\n if not creator:\n import MySQLdb\n creator = MySQLdb\n mincached = kwargs.pop(\"mincached\", 2)\n maxcached = kwargs.pop(\"maxcached\", 10)\n maxshared = kwargs.pop(\"maxshared\", 10)\n maxconnections = kwargs.pop(\"maxconnections\", 20)\n blocking = kwargs.pop(\"blocking\", 0)\n reset = kwargs.pop(\"reset\", True)\n maxusage = kwargs.pop(\"maxusage\", 0)\n setsession = kwargs.pop(\"setsession\", [\"set autocommit = 0\"])\n ping = kwargs.pop(\"ping\", 1)\n\n self._pool = PooledDB(creator=creator, mincached=mincached, maxcached=maxcached,\n maxshared=maxshared, maxconnections=maxconnections,\n blocking=blocking, maxusage=maxusage,reset=reset,\n setsession=setsession, ping=ping, **kwargs)",
"def get_pool(name):\n if name not in _CONNECTIONS:\n add_pool(name)\n return _CONNECTIONS[name]",
"def connect(self, *args, **kw):\n\n return self.get_pool(*args, **kw).connect()",
"def __connect(host):\n global pg_pool\n pg_config['host'] = host\n pg_pool = SimpleConnectionPool(1, 1, **pg_config)",
"def create_connection(config_file):\n\n config = configparser.ConfigParser()\n config.read(config_file)\n\n try:\n connection = psycopg2.connect(\"\"\"\n host={}\n dbname={}\n user={}\n password={}\n port={}\n \"\"\".format(*config['CLUSTER'].values()))\n\n except psycopg2.Error as error:\n print(\"Error: Could not make connection to the Postgres database.\")\n print(error)\n\n try:\n cursor = connection.cursor()\n\n except psycopg2.Error as error:\n print(\"Error: Could not get cursor.\")\n print(error)\n\n return(cursor, connection)",
"def pool_create_from_dict(self, parameters: dict):\n pool_name = parameters[KnownParameters.SITE_NAME.value]\n parameters[KnownParameters.POOL_NAME.value] = pool_name\n for pool in self.get_app_pool_list():\n if pool.name.lower() == pool_name.lower():\n return\n return self.pool_create(pool_name)",
"def createConnection(self):\r\n conn_string = \"host='{}' dbname='{}' user='{}' password='{}' port={}\".format(\r\n self.host, self.database, self.user, self.password, self.port)\r\n return psycopg2.connect(conn_string)",
"def patch_http_connection_pool(**constructor_kwargs):\n class MyHTTPConnectionPool(connectionpool.HTTPConnectionPool):\n def __init__(self, *args, **kwargs):\n kwargs.update(constructor_kwargs)\n super(MyHTTPConnectionPool, self).__init__(*args, **kwargs)\n poolmanager.pool_classes_by_scheme['http'] = MyHTTPConnectionPool",
"def _threadsafe_get_connection(self):\n with self._lock:\n next_con = self._nextConnection\n con = PooledDBConnection(self, self._connections[next_con])\n next_con += 1\n if next_con >= len(self._connections):\n next_con = 0\n self._nextConnection = next_con\n return con",
"def get_connection():\n con = psycopg2.connect(**DB_CONFIG)\n return con",
"def add_connection(self, cnx=None):\n if not self.cnx_config:\n raise PoolError(\"Connection configuration not available\")\n\n if self.full():\n raise PoolError(\"Failed adding connection; queue is full\")\n\n if not cnx:\n cnx = PooledConnection(self)\n # mysqlx_wait_timeout is only available on MySQL 8\n ver = cnx.sql(_SELECT_VERSION_QUERY).execute().fetch_all()[0][0]\n if tuple(int(n) for n in ver.split(\"-\")[0].split(\".\")) > (\n 8,\n 0,\n 10,\n ):\n cnx.sql(f\"set mysqlx_wait_timeout = {self.max_idle_time}\").execute()\n self._connections_openned.append(cnx)\n else:\n if not isinstance(cnx, PooledConnection):\n raise PoolError(\"Connection instance not subclass of PooledSession\")\n if cnx.is_server_disconnected():\n self.remove_connections()\n cnx.close()\n\n self.queue_connection(cnx)",
"def __init__(self, dbapi, maxconnections, *args, **kwargs):\n try:\n threadsafety = dbapi.threadsafety\n except Exception:\n threadsafety = None\n if threadsafety == 0:\n raise NotSupportedError(\n \"Database module does not support any level of threading.\")\n if threadsafety == 1:\n # If there is no connection level safety, build\n # the pool using the synchronized queue class\n # that implements all the required locking semantics.\n from queue import Queue\n self._queue = Queue(maxconnections) # create the queue\n self.connection = self._unthreadsafe_get_connection\n self.addConnection = self._unthreadsafe_add_connection\n self.returnConnection = self._unthreadsafe_return_connection\n elif threadsafety in (2, 3):\n # If there is connection level safety, implement the\n # pool with an ordinary list used as a circular buffer.\n # We only need a minimum of locking in this case.\n from threading import Lock\n self._lock = Lock() # create a lock object to be used later\n self._nextConnection = 0 # index of the next connection to be used\n self._connections = [] # the list of connections\n self.connection = self._threadsafe_get_connection\n self.addConnection = self._threadsafe_add_connection\n self.returnConnection = self._threadsafe_return_connection\n else:\n raise NotSupportedError(\n \"Database module threading support cannot be determined.\")\n # Establish all database connections (it would be better to\n # only establish a part of them now, and the rest on demand).\n for _i in range(maxconnections):\n self.addConnection(dbapi.connect(*args, **kwargs))",
"def make_connection(self):\n if self._created_connections[self._pattern_idx] >= self.max_connections_per_pattern:\n raise ConnectionError(\"Too many connections\")\n self._created_connections[self._pattern_idx] += 1\n conn = self.connection_class(**self.patterns[self._pattern_idx])\n conn._pattern_idx = self._pattern_idx\n return conn",
"def _unthreadsafe_get_connection(self):\n return PooledDBConnection(self, self._queue.get())",
"def add_pool(name, **kwargs):\n _CONNECTIONS[name] = redis.StrictRedis(**kwargs)"
]
| [
"0.7737848",
"0.7455023",
"0.7187452",
"0.70865107",
"0.6992813",
"0.69039977",
"0.684276",
"0.672731",
"0.66503847",
"0.6621734",
"0.659866",
"0.6564659",
"0.6443048",
"0.6405981",
"0.63895524",
"0.6246814",
"0.62264544",
"0.615156",
"0.6043402",
"0.6006722",
"0.6000867",
"0.5923694",
"0.59014684",
"0.5875974",
"0.5861692",
"0.5861689",
"0.5859878",
"0.5845379",
"0.5845245",
"0.5810586"
]
| 0.79732764 | 0 |
Tests calculating confusion matrix per subpopulation. Tests the confusion_matrix_per_subgroup function. | def test_confusion_matrix_per_subgroup():
mx1 = np.array([[2, 1, 0], [0, 0, 0], [0, 0, 0]])
mx2 = np.array([[2, 0, 0], [0, 0, 0], [0, 2, 1]])
mx3 = np.array([[2, 0, 1], [0, 2, 0], [1, 0, 1]])
with pytest.warns(UserWarning) as w:
pcmxs, bin_names = fumt.confusion_matrix_per_subgroup(
DATASET, GROUND_TRUTH, PREDICTIONS, 1)
assert len(w) == 1
assert str(w[0].message) == MISSING_LABEL_WARNING
assert len(pcmxs) == 3
assert np.array_equal(pcmxs[0], mx1)
assert np.array_equal(pcmxs[1], mx2)
assert np.array_equal(pcmxs[2], mx3)
assert bin_names == ["('3',)", "('5',)", "('7',)"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ",
"def test(self, test_data_path):\n test_data = read_datafile(test_data_path)\n test_data = self.preprocessor.preprocess(test_data)\n\n data = '__label__' + test_data['claim'].astype(str) + test_data['check_worthiness'].astype(str) + ' ' + \\\n test_data['tweet_text']\n\n output = self.run(data)\n\n df = pd.DataFrame()\n df[\"predicted\"] = output.split()\n df[\"labeled\"] = [d.split()[0] for d in data]\n\n cm = confusion_matrix(df[\"labeled\"], df[\"predicted\"], labels=['__label__11','__label__10','__label__00'])\n\n ax= plt.subplot()\n sns.heatmap(cm, annot=True, ax = ax); #annot=True to annotate cells\n\n ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels'); \n ax.set_title('Confusion Matrix'); \n ax.xaxis.set_ticklabels(['__label__11','__label__10','__label__00']); ax.yaxis.set_ticklabels(['__label__11','__label__10','__label__00']);\n\n plt.show()\n\n return np.sum(cm.diagonal()) / np.sum(cm)",
"def get_confmatrix(self,y_pred,y_test):",
"def confusionMatrix(testDataPredictions, testDataOriginal):\n matrix = {\"predicted >50K correctly as >50K\": 0, \"predicted >50K incorrectly as <=50K\": 0,\n \"predicted <=50K correctly as <=50K\": 0, \"predicted <=50K incorrectly as >50K\": 0}\n\n for instance in range(len(testDataPredictions)):\n prediction = testDataPredictions[instance]\n original = testDataOriginal[14].iloc[instance]\n\n #calculating total number of TP,TN,FP and FN\n\n if prediction == 1.0 and original == 1.0:\n matrix[\"predicted >50K correctly as >50K\"] += 1.00\n elif prediction == 0.0 and original == 1.0:\n matrix[\"predicted >50K incorrectly as <=50K\"] += 1.00\n elif prediction == 0.0 and original == 0.0:\n matrix[\"predicted <=50K correctly as <=50K\"] += 1.00\n elif prediction == 1.0 and original == 0.0:\n matrix[\"predicted <=50K incorrectly as >50K\"] += 1.00\n\n #Making the confusion matrix look readable on console printing\n print('----------------')\n print('CONFUSION MATRIX')\n print( 'TP: ', matrix[\"predicted >50K correctly as >50K\"], '||', 'FP: ', matrix[\"predicted >50K incorrectly as <=50K\"])\n print('----------------')\n print('FN: ', matrix[\"predicted <=50K incorrectly as >50K\"], '||', 'TN: ', matrix[\"predicted <=50K correctly as <=50K\"])\n\n # definition of sensitivity, precision and specificity formulas\n sensitivity = matrix[\"predicted >50K correctly as >50K\"] / (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted <=50K incorrectly as >50K\"])\n\n precision = matrix[\"predicted >50K correctly as >50K\"]/ (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n specificity = matrix[\"predicted <=50K correctly as <=50K\"] / (\n matrix[\"predicted <=50K correctly as <=50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n print('Precision: ' + str(precision*100) + '%')\n print('Sensitivity: '+ str(sensitivity*100)+ '%')\n print('Specificity: '+ str(specificity*100) +'%')\n\n return matrix, precision, sensitivity, specificity",
"def test_confusion_matrix(self):\n perf = self.get_file(\"classification_metrics.csv\")\n schema = [(\"value\", int), (\"predicted\", int)]\n # [true_positive, false_negative, false_positive, true_negative]\n actual_result = [64, 15, 23, 96]\n\n frame = self.context.frame.import_csv(perf, schema=schema)\n\n cm = frame.binary_classification_metrics('value', 'predicted', 1, 1)\n\n conf_matrix = cm.confusion_matrix.values\n cumulative_matrix_list = [conf_matrix[0][0],\n conf_matrix[0][1],\n conf_matrix[1][0],\n conf_matrix[1][1]]\n self.assertEqual(actual_result, cumulative_matrix_list)",
"def test(y_hat, test_y):\n # if s.ndim == 2:\n # y_hat = np.argmax(s, axis=1)\n num_class = np.unique(test_y).size\n confusion_mat = np.zeros((num_class, num_class))\n\n for i in range(num_class):\n class_i_idx = test_y == i\n num_class_i = np.sum(class_i_idx)\n y_hat_i = y_hat[class_i_idx]\n for j in range(num_class):\n confusion_mat[i, j] = 1.0 * np.sum(y_hat_i == j) / num_class_i\n\n np.set_printoptions(precision=2)\n print('Confusion matrix:')\n print(confusion_mat)\n print('Diagonal values:')\n print(confusion_mat.flatten()[0::(num_class + 1)])",
"def confusion_matrix(expected, predicted):\n\n retval = numpy.zeros((10,10), dtype=float)\n\n for k in range(10):\n pred_k = predicted[expected==k] # predictions that are supposed to be 'k'\n retval[:,k] = numpy.array([len(pred_k[pred_k==p]) for p in range(10)])\n retval[:,k] /= len(pred_k)\n\n return retval",
"def confusion(model, x_test, y_test, show=True, mType=\"\", n_cases=2):\n\n def convert_y_if_needed(y_vals, y_lbl):\n print(\"{} raw shape: {}\".format(y_vals.shape, y_lbl))\n if (len(y_vals.shape) == 2):\n y_vals = argmax(y_vals, axis=1)\n print(\" - converted to: {}\".format(y_vals.shape))\n return y_vals\n\n y_pred = model.predict(x_test)\n y_pred = convert_y_if_needed(y_pred, \"Y Pred\")\n y_test = convert_y_if_needed(y_test, \"Y Test\")\n\n cases = range(n_cases)\n cmNN = confusion_matrix(y_test, y_pred)\n\n acc = sklearn.metrics.accuracy_score(y_test, y_pred) # (TP + TN) / Total\n if n_cases==2:\n rec = sklearn.metrics.recall_score(y_test, y_pred) # TP / (TP + FN)\n pre = sklearn.metrics.precision_score(y_test, y_pred) # TP / (TP + FP)\n\n accLbl = \"Proportion of classifications that are correct = (TP + TN) / Total\"\n recLbl = \"Proportion of relevant cases that were selected = TP / (TP + FN)\"\n preLbl = \"Proportion of selected cases that are relevant = TP / (TP + FP)\"\n print()\n print(\"Confusion Matrix for \" + mType)\n print(\"Accuracy = {:.2%} = {}\".format(acc, accLbl))\n if n_cases == 2:\n print(\"Recall = {:.2%} = {}\".format(rec, recLbl))\n print(\"Precision = {:.2%} = {}\".format(pre, preLbl))\n print()\n if show: myPlot.plot_confusion_matrix(cmNN,cases, show=show, title=mType+\" Confusion Matrix\")\n stats = {\"Accuracy\":acc, \"Precision\":pre, \"Recall\":rec, \"matrix\":cmNN}\n return stats",
"def test(self, songs, genres):\n logging.info('Starting testing.')\n num_matches = 0\n confusion_matrix = ConfusionMatrix(genres)\n for song, actual_genre in zip(songs, genres):\n predicted_genre = self.classify(song)\n logging.info('Actual genre: {}, predicted genre: {}'.format(actual_genre, predicted_genre))\n confusion_matrix.add_genres(actual_genre, predicted_genre)\n if actual_genre == predicted_genre:\n num_matches += 1\n return num_matches, confusion_matrix",
"def test_confusion_matrix_per_subgroup_indexed():\n incorrect_shape_error_gt = ('The ground_truth parameter should be a '\n '1-dimensional numpy array.')\n incorrect_shape_error_p = ('The predictions parameter should be a '\n '1-dimensional numpy array.')\n\n flat = np.array([1, 2])\n square = np.array([[1, 2], [3, 4]])\n with pytest.raises(IncorrectShapeError) as exin:\n fumt.confusion_matrix_per_subgroup_indexed([[0]], square, square)\n assert str(exin.value) == incorrect_shape_error_gt\n\n with pytest.raises(IncorrectShapeError) as exin:\n fumt.confusion_matrix_per_subgroup_indexed([[0]], flat, square)\n assert str(exin.value) == incorrect_shape_error_p\n\n mx1 = np.array([[2, 1, 0], [0, 0, 0], [0, 0, 0]])\n mx2 = np.array([[2, 0, 0], [0, 0, 0], [0, 2, 1]])\n mx3 = np.array([[2, 0, 1], [0, 2, 0], [1, 0, 1]])\n\n with pytest.warns(UserWarning) as w:\n pcmxs_1 = fumt.confusion_matrix_per_subgroup_indexed(\n _INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS, labels=[0, 1, 2])\n pcmxs_2 = fumt.confusion_matrix_per_subgroup_indexed(\n _INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS)\n assert len(w) == 2\n wmsg = ('Some of the given labels are not present in either of the input '\n 'arrays: {2}.')\n assert str(w[0].message) == wmsg\n assert str(w[1].message) == wmsg\n assert len(pcmxs_1) == 3\n assert len(pcmxs_2) == 3\n assert np.array_equal(pcmxs_1[0], mx1)\n assert np.array_equal(pcmxs_2[0], mx1)\n assert np.array_equal(pcmxs_1[1], mx2)\n assert np.array_equal(pcmxs_2[1], mx2)\n assert np.array_equal(pcmxs_1[2], mx3)\n assert np.array_equal(pcmxs_2[2], mx3)",
"def test_model(model, trainset, testset):\n model.eval()\n \n predictions = []\n actuals = []\n \n for data in testset:\n # data will have batch of features and labels\n X = data[0:4]\n y = data[4:]\n \n pred = np.round(model(X).detach().numpy())\n actual = y.detach().numpy()\n # print(f'pred: {pred}')\n # print(f'actual: {actual}')\n predictions.append(pred)\n actuals.append(actual)\n \n print(accuracy_score(y_true=actuals, y_pred=predictions))\n \n \n # Confusion Matrix\n \n confusion_matrix = np.zeros((3, 3))\n for i,j in zip(predictions, actuals):\n confusion_matrix[i, j] += 1\n print(\"Confusion matrix:\\n\", confusion_matrix)",
"def run_classification_experiment ( feature_matrix, target_array, colmap ):\n np.random.seed ( 7062020 ) # Due date\n\n # Split off validation set and cross-validation set\n X_validation = feature_matrix [ : feature_matrix.shape [ 0 ] // 10 ]\n X_cross_validation = feature_matrix [ feature_matrix.shape [ 0 ] // 10 : ]\n y_validation = target_array [ : feature_matrix.shape [ 0 ] // 10 ]\n y_cross_validation = target_array [ feature_matrix.shape [ 0 ] // 10 : ]\n\n experiment_results = {}\n experiment_num = 1\n\n # Use 5-Fold stratified CV\n kfold_strat = KFoldStratifiedCV ( number_of_folds = 5, shuffle = True )\n\n for train, test in kfold_strat.split ( feature_matrix = X_cross_validation, target_array = y_cross_validation ):\n logger.info ( f\"Experiment Number: { experiment_num }\" )\n\n # Get training set\n X_train = X_cross_validation [ train, : ]\n y_train = y_cross_validation [ train ]\n\n # Fit the tree\n d_tree = DecisionTreeClassifier ( evaluate_function = entropy, map_column_node_type = colmap )\n d_tree.fit ( X_train, y_train )\n\n # Prune the tree\n pruned_tree = PostPruner (\n d_tree,\n X_validation = X_validation,\n y_validation = y_validation,\n evaluate_function = accuracy,\n ).prune_tree()\n\n # Get post-pruned predictions\n pruned_preds = pruned_tree.predict ( X_cross_validation [ test, : ] )\n\n # Save the results\n experiment_results [ experiment_num ] = {\n \"actuals\": y_cross_validation [ test ],\n \"preds\": pruned_preds,\n \"model\": pruned_tree,\n }\n experiment_num += 1\n\n return experiment_results\n # End run_classification_experiment",
"def confusionMetric( self, classTest, classPred):\n # accuracy of the model - in one number\n accuracy = average_precision_score( classTest, classPred )\n # confusion matrix 2x2 matric\n matConf = confusion_matrix(classTest, classPred)\n # cohen Kappa is applicable for unbalanced data\n matCohenKappa = cohen_kappa_score(classTest, classPred)\n # classification report\n strClassificationReport = classification_report(classTest, classPred)\n \n return accuracy, matConf, matCohenKappa, strClassificationReport",
"def doContPopEvaluation(self, is_train):\n if is_train:\n my_type = \"TRAINING\"\n else:\n my_type = \"TESTING\"\n no_match = 0 #How often does the population fail to have a classifier that matches an instance in the data.\n cons.env.resetDataRef(is_train) #Go to first instance in data set\n accuracy_estimate_sum = 0\n\n if is_train:\n instances = cons.env.format_data.numb_train_instances\n else:\n instances = cons.env.format_data.numb_test_instances\n #----------------------------------------------------------------------------------------------\n for _ in range(instances):\n if is_train:\n state_action = cons.env.getTrainInstance()\n else:\n state_action = cons.env.getTestInstance()\n #-----------------------------------------------------------------------------\n self.population.makeEvalMatchSet(state_action[0])\n prediction = Prediction(self.population)\n selected_action = prediction.getDecision()\n #-----------------------------------------------------------------------------\n if selected_action == None:\n no_match += 1\n else: #Instances which failed to be covered are excluded from the initial accuracy calculation\n prediction_err = math.fabs(float(selected_action) - float(state_action[1]))\n action_range = cons.env.format_data.action_list[1] - cons.env.format_data.action_list[0]\n accuracy_estimate_sum += 1.0 - (prediction_err / float(action_range))\n\n self.population.clearSets()\n #----------------------------------------------------------------------------------------------\n #Accuracy Estimate\n if instances == no_match:\n accuracy_estimate = 0\n else:\n accuracy_estimate = accuracy_estimate_sum / float(instances - no_match)\n\n #Adjustment for uncovered instances - to avoid positive or negative bias we incorporate the probability of guessing a phenotype by chance (e.g. 50% if two phenotypes)\n covered_instances = 1.0 - (float(no_match)/float(instances))\n adjusted_accuracy_estimate = accuracy_estimate_sum / float(instances) #no_matchs are treated as incorrect predictions (can see no other fair way to do this)\n\n print(\"-----------------------------------------------\")\n print(str(my_type)+\" Accuracy Results:-------------\")\n print(\"Instance Coverage = \"+ str(covered_instances*100.0)+ '%')\n print(\"Estimated Prediction Accuracy (Ignore uncovered) = \" + str(accuracy_estimate))\n print(\"Estimated Prediction Accuracy (Penalty uncovered) = \" + str(adjusted_accuracy_estimate))\n #Balanced and Standard Accuracies will only be the same when there are equal instances representative of each phenotype AND there is 100% covering.\n result_list = [adjusted_accuracy_estimate, covered_instances]\n return result_list",
"def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts",
"def cmh_test(*args):\n import pandas, math\n from statsmodels.stats.contingency_tables import StratifiedTable as cmh\n\n # set up data logging\n ignored = {}\n\n # get contingency tables for pops with case and control data\n tables = create_tables(*args)\n\n # fill in a dataframe with cmh test results, one locus at a time\n results = pandas.DataFrame(columns=['locus', 'odds_ratio', 'p-value',\n 'lower_confidence', 'upper_confidence', 'num_pops'])\n for locus,table in tables.items():\n if len(table) == 0:\n # if none of the populations for a locus provide a contingency table (due to missing data)\n # ... then continue to the next locus.\n ignored[locus] = 'there were no populations that provided contingency tables'\n continue\n # cmh results for stratified contingency tables (called \"table\" = an array of tables)\n cmh_res = cmh(table)\n res = cmh_res.test_null_odds(True) # statistic and p-value\n odds_ratio = cmh_res.oddsratio_pooled # odds ratio\n conf = cmh_res.oddsratio_pooled_confint() # lower and upper confidence\n locus_results = locus, odds_ratio, res.pvalue, *conf, len(table)\n\n # look for fixed states across all tables\n\n if sum([math.isnan(x) for x in conf]) > 0:\n # if the upper and lower estimat of the confidence interval are NA, ignore\n # this can happen when all of the tables returned for a specific locus are fixed\n # ... for either the REF or ALT. This happens rarely for loci with low MAF, where\n # ... the populations that have variable case or control, do not have a frequency\n # ... estimated for the other treatment (case or control) and therefore don't\n # ... make it into the list of stratified tables and the remaining tables\n # ... (populations) are all fixed for the REF or ALT - again, this happens for\n # ... some low MAF loci and may happen if input file has few pops to stratify.\n\n # log reason\n ignored[locus] = 'the upper and lower confidence interval for the odds ratio was NA'\n ignored[locus] = ignored[locus] + '\\t' + '\\t'.join(map(str, locus_results[1:]))\n\n continue\n\n results.loc[len(results.index), :] = locus_results\n\n return results, ignored",
"def main():\n\n # confusion matrix model ensemble\n df = pd.read_csv('pred_test_ensemble.csv')\n print('Real test accuracy:', accuracy_score(df.labels.values, df.class_preds.values))\n conf_matrix = confusion_matrix(df.labels.values, df.class_preds.values, labels=[0, 1, 2, 3])\n\n dct = {'': [0, 90, 180, 270]}\n for i in range(4):\n dct[str(i*90)] = conf_matrix[:, i]\n \n conf_matrix = pd.DataFrame(dct)\n print(conf_matrix)\n conf_matrix.to_csv('confusion_matrix_ensemble.csv', index=False)\n\n\n\n # # Statistical gama\n # df = pd.read_csv('pred_test.csv')\n # print('Statistical... ')\n # statistical = gama_statistic(df)\n # statistical.to_csv('gama_statistic.csv', index=False)\n # print(statistical)",
"def confusion_metrics(experiment, plot_matrix = False):\n\n # y_train = experiment['y_train']\n # y_train_prediction = experiment['y_train_prediction']\n y_test = experiment['y_test']\n y_test_prediction = experiment['y_test_prediction']\n\n # precision1, recall1, fbeta1, support1 = precision_recall_fscore_support(y_train, y_train_prediction)\n precision2, recall2, fbeta2, support2 = precision_recall_fscore_support(y_test, y_test_prediction)\n\n # accuracy1 = accuracy_score(y_train, y_train_prediction)\n accuracy2 = accuracy_score(y_test, y_test_prediction)\n\n # TPR and TNR (TPR should equal to recall)\n # TPR1 = np.mean(y_train[y_train_prediction == 1])\n # NPR1 = 1. - np.mean(y_train[y_train_prediction == 0])\n TPR2 = np.mean(y_test[y_test_prediction == 1])\n NPR2 = 1. - np.mean(y_test[y_test_prediction == 0])\n\n # experiment['SCORES_train'] = (precision1, recall1, fbeta1, support1, accuracy1, TPR1, NPR1)\n experiment['SCORES_test'] = (precision2, recall2, fbeta2, support2, accuracy2, TPR2, NPR2)\n\n print('')\n print('+++++++++++++++++++++++++++++++++++++++++++++++++++++')\n print('Testing Results:')\n print(' Class 0')\n print(' Precision = {:,.2f}%'.format(precision2[0] * 100))\n print(' Recall = {:,.2f}%'.format(recall2[0] * 100))\n print(' F1-Score = {:,.2f}%'.format(fbeta2[0] * 100))\n print(' Support = {:,.0f}'.format(support2[0]))\n print(' Class 1')\n print(' Precision = {:,.2f}%'.format(precision2[1] * 100))\n print(' Recall = {:,.2f}%'.format(recall2[1] * 100))\n print(' F1-Score = {:,.2f}%'.format(fbeta2[1] * 100))\n print(' Support = {:,.0f}'.format(support2[1]))\n print('True positive rate = {:,.2f}'.format(TPR2 * 100))\n print('True negative rate = {:,.2f}'.format(NPR2 * 100))\n print('Accuracy = {:,.2f}%'.format(accuracy2 * 100))\n print('+++++++++++++++++++++++++++++++++++++++++++++++++++++')\n print('')\n\n if plot_matrix:\n cnf_matrix = confusion_matrix(y_test, y_test_prediction)\n plot_confusion_matrix(cnf_matrix, ['$H_0$', '$H_1$'])\n\n return experiment",
"def custom_confusion_matrix(predictions, targets):\n tp, fp, fn, tn = [], [], [], []\n\n for pred, targ in zip(predictions, targets):\n for shift_pred, shift_targ in zip(pred, targ):\n if shift_pred == 1 and shift_targ == 1: # True positive\n tp.append(1)\n elif shift_pred == 1 and shift_targ == 0: # False positive\n fp.append(1)\n elif shift_pred == 0 and shift_targ == 1: # False negative\n fn.append(1)\n elif shift_pred == 0 and shift_targ == 0: # True negative:\n tn.append(1)\n\n tp_count = len(tp)\n fp_count = len(fp)\n fn_count = len(fn)\n tn_count = len(tn)\n\n conf_matrix = np.array([\n [tp_count, fp_count],\n [fn_count, tn_count]\n ])\n\n return conf_matrix",
"def n_test(self):\n return self.factors[1].shape[0]",
"def get_confusion_matrix(y_true, y_pred):\r\n\r\n ## 3 classes\r\n TP1, TP2, TP3, FP1, FP2, FP3, TN1, TN2, TN3, FN1, FN2, FN3 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 0 and y_pred[i] == 0:\r\n TN1 += 1\r\n elif y_true[i] == 0 and y_pred[i] != 0:\r\n FP1 += 1\r\n elif y_true[i] != 0 and y_pred[i] == 0:\r\n FN1 += 1\r\n elif y_true[i] != 0 and y_pred[i] != 0:\r\n TP1 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 1 and y_pred[i] == 1:\r\n TN2 += 1\r\n elif y_true[i] == 1 and y_pred[i] != 1:\r\n FP2 += 1\r\n elif y_true[i] != 1 and y_pred[i] == 1:\r\n FN2 += 1\r\n elif y_true[i] != 1 and y_pred[i] != 1:\r\n TP2 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 2 and y_pred[i] == 2:\r\n TN3 += 1\r\n elif y_true[i] == 2 and y_pred[i] != 2:\r\n FP3 += 1\r\n elif y_true[i] != 2 and y_pred[i] == 2:\r\n FN3 += 1\r\n elif y_true[i] != 2 and y_pred[i] != 2:\r\n TP3 += 1\r\n\r\n conf_matrix1 = [\r\n [TP1, FP1],\r\n [FN1, TN1]\r\n ]\r\n conf_matrix2 = [\r\n [TP2, FP2],\r\n [FN2, TN2]\r\n ]\r\n conf_matrix3 = [\r\n [TP3, FP3],\r\n [FN3, TN3]\r\n ]\r\n\r\n return conf_matrix1, conf_matrix2, conf_matrix3",
"def confusion_matrix(documentV, targetV, testV):\n DocSize = len(documentV)\n TgtSize = len(targetV)\n TstSize = len(testV)\n \n TruePositiveV = targetV.intersection(testV)\n\n TP = len(TruePositiveV)\n FP = TstSize - TP\n FN = TgtSize - TP\n TN = DocSize - TgtSize - FP\n\n return TP, FP, FN, TN",
"def test(ndigit, elambda, showSamples, showConfusion):\n Data, Label = getData()\n trainX, trainY, testX, testY = splitData(Data, Label, ndigit)\n trainX_mean = np.mean(trainX, axis=0)\n trainX_new = trainX - trainX_mean\n eigenvectors = getEigenVectors(trainX_new, elambda)\n trainX_eigen = trainX_new.dot(eigenvectors)\n testX_new = testX - trainX_mean\n testX_eigen = testX_new.dot(eigenvectors)\n testO = []\n if showSamples:\n correct_samples = []\n correct_samples_nearest = []\n correct_samples_eigen = []\n correct_samples_nearest_eigen = []\n correct_samples_labels = []\n correct_samples_predictions = []\n wrong_samples = []\n wrong_samples_nearest = []\n wrong_samples_eigen = []\n wrong_samples_nearest_eigen = []\n wrong_samples_labels = []\n wrong_samples_predictions = []\n if showConfusion:\n conf = np.zeros((ndigit, ndigit))\n for i in xrange(testX_eigen.shape[0]):\n t = testX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n p = int(trainY[j])\n y = int(testY[i])\n if showConfusion:\n conf[p, y] += 1\n if showSamples:\n if p == y:\n if len(correct_samples) < y + 1:\n correct_samples.append(testX[i])\n correct_samples_nearest.append(trainX[j])\n correct_samples_eigen.append(testX_eigen[i])\n correct_samples_nearest_eigen.append(trainX_eigen[j])\n correct_samples_labels.append(y)\n correct_samples_predictions.append(p)\n else:\n if len(wrong_samples) < y + 1:\n wrong_samples.append(testX[i])\n wrong_samples_nearest.append(trainX[j])\n wrong_samples_eigen.append(testX_eigen[i])\n wrong_samples_nearest_eigen.append(trainX_eigen[j])\n wrong_samples_labels.append(y)\n wrong_samples_predictions.append(p)\n testO.append(p)\n testO = np.array(testO)\n train0 = []\n for i in xrange(trainX_eigen.shape[0]):\n t = trainX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n min_class = trainY[j]\n train0.append(min_class)\n train0 = np.array(train0)\n print \"for digits = %d lambda = %.2f train = %.6f test = %.6f \" % (\n ndigit, elambda, (train0 == trainY).mean(), (testO == testY).mean())\n if showConfusion:\n print conf\n if showSamples:\n displaySamples(correct_samples_labels, correct_samples_predictions,\n correct_samples, correct_samples_nearest,\n correct_samples_eigen, correct_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Correct')\n displaySamples(wrong_samples_labels, wrong_samples_predictions,\n wrong_samples, wrong_samples_nearest,\n wrong_samples_eigen, wrong_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Wrong')",
"def calculate_confusion_tables(self):\n self.confusion_tables = np.zeros((self.number_of_classes, 2, 2))\n self.normalized_confusion_tables = np.zeros((self.number_of_classes, 2, 2))\n \n for cls in range(self.number_of_classes):\n # Normal confusion table\n self.confusion_tables[cls, 0, 0] = self.TP[cls] # TP\n self.confusion_tables[cls, 0, 1] = self.FP[cls] # FP\n self.confusion_tables[cls, 1, 0] = self.FN[cls] # FN\n self.confusion_tables[cls, 1, 1] = self.TN[cls] # TN\n \n # Weighted confusion table\n table_weights = self.confusion_tables[cls].sum(axis=0).reshape(1, 2)\n self.normalized_confusion_tables[cls] = (self.confusion_tables[cls]/table_weights).round(self.digits_count_fp)\n \n # Convert the data type into int\n self.confusion_tables = self.confusion_tables.astype(int)",
"def getConfusionMatrix(pred, real):\n # print pd.crosstab(pred, real) \n \n total = float(real.shape[0])\n \n tp = 0 # true positive\n tn = 0 # true negitive\n fp = 0 # false positive\n fn = 0 # false negitive\n for predicted, actual in zip(pred, real):\n if predicted == actual:\n if predicted == 1:\n tp += 1\n else:\n tn += 1\n else:\n if predicted == 1:\n fp += 1\n else:\n fn += 1\n \n\n print \"(tp, tn, fp, fn):\" , tp, tn, fp, fn\n print \"accuracy is :\", (tp+tn)/total",
"def get_confusion_matrix(self):\n return confusion_matrix(self.test_y, self.predict())",
"def confusion_matrix(self,predictions,labels):\n TP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == True))\n FP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == False))\n FN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == True))\n TN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == False))\n\n return np.array([[TP,FP],[FN,TN]])",
"def confusion(self,xs,ys):\n n = self.param['numClasses']\n assert n > 1, \"Confusion matrices can only be obtained for classification data.\" \n preds = self.predicts(xs)\n conf = [[0] * n for i in range(n)]\n for (y,p) in zip(ys,preds):\n conf[y][p] += 1\n return conf",
"def overall_classification_matrix(self, interpreter):\n return sum([ r.classification_matrix(interpreter) for r in self.results ])",
"def overall_confusion_matrix(self, interpreter):\n return numpy.array([ r.confusion_matrix(interpreter) for r in self.results ])"
]
| [
"0.6618988",
"0.66012615",
"0.65771365",
"0.64746296",
"0.6443359",
"0.6311446",
"0.62488693",
"0.6220614",
"0.6143405",
"0.6113498",
"0.60880667",
"0.60832113",
"0.6033341",
"0.6021256",
"0.6018865",
"0.59884655",
"0.59654135",
"0.59608114",
"0.59072",
"0.5879316",
"0.5829568",
"0.5806428",
"0.57547516",
"0.5753191",
"0.57480145",
"0.57449293",
"0.573333",
"0.57255787",
"0.57229304",
"0.57124263"
]
| 0.68114936 | 0 |
Tests calculating confusion matrix per index-based subpopulation. Tests the confusion_matrix_per_subgroup_indexed function. | def test_confusion_matrix_per_subgroup_indexed():
incorrect_shape_error_gt = ('The ground_truth parameter should be a '
'1-dimensional numpy array.')
incorrect_shape_error_p = ('The predictions parameter should be a '
'1-dimensional numpy array.')
flat = np.array([1, 2])
square = np.array([[1, 2], [3, 4]])
with pytest.raises(IncorrectShapeError) as exin:
fumt.confusion_matrix_per_subgroup_indexed([[0]], square, square)
assert str(exin.value) == incorrect_shape_error_gt
with pytest.raises(IncorrectShapeError) as exin:
fumt.confusion_matrix_per_subgroup_indexed([[0]], flat, square)
assert str(exin.value) == incorrect_shape_error_p
mx1 = np.array([[2, 1, 0], [0, 0, 0], [0, 0, 0]])
mx2 = np.array([[2, 0, 0], [0, 0, 0], [0, 2, 1]])
mx3 = np.array([[2, 0, 1], [0, 2, 0], [1, 0, 1]])
with pytest.warns(UserWarning) as w:
pcmxs_1 = fumt.confusion_matrix_per_subgroup_indexed(
_INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS, labels=[0, 1, 2])
pcmxs_2 = fumt.confusion_matrix_per_subgroup_indexed(
_INDICES_PER_BIN, GROUND_TRUTH, PREDICTIONS)
assert len(w) == 2
wmsg = ('Some of the given labels are not present in either of the input '
'arrays: {2}.')
assert str(w[0].message) == wmsg
assert str(w[1].message) == wmsg
assert len(pcmxs_1) == 3
assert len(pcmxs_2) == 3
assert np.array_equal(pcmxs_1[0], mx1)
assert np.array_equal(pcmxs_2[0], mx1)
assert np.array_equal(pcmxs_1[1], mx2)
assert np.array_equal(pcmxs_2[1], mx2)
assert np.array_equal(pcmxs_1[2], mx3)
assert np.array_equal(pcmxs_2[2], mx3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_confusion_matrix_per_subgroup():\n\n mx1 = np.array([[2, 1, 0], [0, 0, 0], [0, 0, 0]])\n mx2 = np.array([[2, 0, 0], [0, 0, 0], [0, 2, 1]])\n mx3 = np.array([[2, 0, 1], [0, 2, 0], [1, 0, 1]])\n\n with pytest.warns(UserWarning) as w:\n pcmxs, bin_names = fumt.confusion_matrix_per_subgroup(\n DATASET, GROUND_TRUTH, PREDICTIONS, 1)\n assert len(w) == 1\n assert str(w[0].message) == MISSING_LABEL_WARNING\n\n assert len(pcmxs) == 3\n assert np.array_equal(pcmxs[0], mx1)\n assert np.array_equal(pcmxs[1], mx2)\n assert np.array_equal(pcmxs[2], mx3)\n assert bin_names == [\"('3',)\", \"('5',)\", \"('7',)\"]",
"def confusionMatrix(testDataPredictions, testDataOriginal):\n matrix = {\"predicted >50K correctly as >50K\": 0, \"predicted >50K incorrectly as <=50K\": 0,\n \"predicted <=50K correctly as <=50K\": 0, \"predicted <=50K incorrectly as >50K\": 0}\n\n for instance in range(len(testDataPredictions)):\n prediction = testDataPredictions[instance]\n original = testDataOriginal[14].iloc[instance]\n\n #calculating total number of TP,TN,FP and FN\n\n if prediction == 1.0 and original == 1.0:\n matrix[\"predicted >50K correctly as >50K\"] += 1.00\n elif prediction == 0.0 and original == 1.0:\n matrix[\"predicted >50K incorrectly as <=50K\"] += 1.00\n elif prediction == 0.0 and original == 0.0:\n matrix[\"predicted <=50K correctly as <=50K\"] += 1.00\n elif prediction == 1.0 and original == 0.0:\n matrix[\"predicted <=50K incorrectly as >50K\"] += 1.00\n\n #Making the confusion matrix look readable on console printing\n print('----------------')\n print('CONFUSION MATRIX')\n print( 'TP: ', matrix[\"predicted >50K correctly as >50K\"], '||', 'FP: ', matrix[\"predicted >50K incorrectly as <=50K\"])\n print('----------------')\n print('FN: ', matrix[\"predicted <=50K incorrectly as >50K\"], '||', 'TN: ', matrix[\"predicted <=50K correctly as <=50K\"])\n\n # definition of sensitivity, precision and specificity formulas\n sensitivity = matrix[\"predicted >50K correctly as >50K\"] / (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted <=50K incorrectly as >50K\"])\n\n precision = matrix[\"predicted >50K correctly as >50K\"]/ (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n specificity = matrix[\"predicted <=50K correctly as <=50K\"] / (\n matrix[\"predicted <=50K correctly as <=50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n print('Precision: ' + str(precision*100) + '%')\n print('Sensitivity: '+ str(sensitivity*100)+ '%')\n print('Specificity: '+ str(specificity*100) +'%')\n\n return matrix, precision, sensitivity, specificity",
"def get_confmatrix(self,y_pred,y_test):",
"def test_confusion_matrix(self):\n perf = self.get_file(\"classification_metrics.csv\")\n schema = [(\"value\", int), (\"predicted\", int)]\n # [true_positive, false_negative, false_positive, true_negative]\n actual_result = [64, 15, 23, 96]\n\n frame = self.context.frame.import_csv(perf, schema=schema)\n\n cm = frame.binary_classification_metrics('value', 'predicted', 1, 1)\n\n conf_matrix = cm.confusion_matrix.values\n cumulative_matrix_list = [conf_matrix[0][0],\n conf_matrix[0][1],\n conf_matrix[1][0],\n conf_matrix[1][1]]\n self.assertEqual(actual_result, cumulative_matrix_list)",
"def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ",
"def test(self, test_data_path):\n test_data = read_datafile(test_data_path)\n test_data = self.preprocessor.preprocess(test_data)\n\n data = '__label__' + test_data['claim'].astype(str) + test_data['check_worthiness'].astype(str) + ' ' + \\\n test_data['tweet_text']\n\n output = self.run(data)\n\n df = pd.DataFrame()\n df[\"predicted\"] = output.split()\n df[\"labeled\"] = [d.split()[0] for d in data]\n\n cm = confusion_matrix(df[\"labeled\"], df[\"predicted\"], labels=['__label__11','__label__10','__label__00'])\n\n ax= plt.subplot()\n sns.heatmap(cm, annot=True, ax = ax); #annot=True to annotate cells\n\n ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels'); \n ax.set_title('Confusion Matrix'); \n ax.xaxis.set_ticklabels(['__label__11','__label__10','__label__00']); ax.yaxis.set_ticklabels(['__label__11','__label__10','__label__00']);\n\n plt.show()\n\n return np.sum(cm.diagonal()) / np.sum(cm)",
"def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts",
"def confusion_matrix(expected, predicted):\n\n retval = numpy.zeros((10,10), dtype=float)\n\n for k in range(10):\n pred_k = predicted[expected==k] # predictions that are supposed to be 'k'\n retval[:,k] = numpy.array([len(pred_k[pred_k==p]) for p in range(10)])\n retval[:,k] /= len(pred_k)\n\n return retval",
"def main():\n\n # confusion matrix model ensemble\n df = pd.read_csv('pred_test_ensemble.csv')\n print('Real test accuracy:', accuracy_score(df.labels.values, df.class_preds.values))\n conf_matrix = confusion_matrix(df.labels.values, df.class_preds.values, labels=[0, 1, 2, 3])\n\n dct = {'': [0, 90, 180, 270]}\n for i in range(4):\n dct[str(i*90)] = conf_matrix[:, i]\n \n conf_matrix = pd.DataFrame(dct)\n print(conf_matrix)\n conf_matrix.to_csv('confusion_matrix_ensemble.csv', index=False)\n\n\n\n # # Statistical gama\n # df = pd.read_csv('pred_test.csv')\n # print('Statistical... ')\n # statistical = gama_statistic(df)\n # statistical.to_csv('gama_statistic.csv', index=False)\n # print(statistical)",
"def confusion_matrix(documentV, targetV, testV):\n DocSize = len(documentV)\n TgtSize = len(targetV)\n TstSize = len(testV)\n \n TruePositiveV = targetV.intersection(testV)\n\n TP = len(TruePositiveV)\n FP = TstSize - TP\n FN = TgtSize - TP\n TN = DocSize - TgtSize - FP\n\n return TP, FP, FN, TN",
"def run_classification_experiment ( feature_matrix, target_array, colmap ):\n np.random.seed ( 7062020 ) # Due date\n\n # Split off validation set and cross-validation set\n X_validation = feature_matrix [ : feature_matrix.shape [ 0 ] // 10 ]\n X_cross_validation = feature_matrix [ feature_matrix.shape [ 0 ] // 10 : ]\n y_validation = target_array [ : feature_matrix.shape [ 0 ] // 10 ]\n y_cross_validation = target_array [ feature_matrix.shape [ 0 ] // 10 : ]\n\n experiment_results = {}\n experiment_num = 1\n\n # Use 5-Fold stratified CV\n kfold_strat = KFoldStratifiedCV ( number_of_folds = 5, shuffle = True )\n\n for train, test in kfold_strat.split ( feature_matrix = X_cross_validation, target_array = y_cross_validation ):\n logger.info ( f\"Experiment Number: { experiment_num }\" )\n\n # Get training set\n X_train = X_cross_validation [ train, : ]\n y_train = y_cross_validation [ train ]\n\n # Fit the tree\n d_tree = DecisionTreeClassifier ( evaluate_function = entropy, map_column_node_type = colmap )\n d_tree.fit ( X_train, y_train )\n\n # Prune the tree\n pruned_tree = PostPruner (\n d_tree,\n X_validation = X_validation,\n y_validation = y_validation,\n evaluate_function = accuracy,\n ).prune_tree()\n\n # Get post-pruned predictions\n pruned_preds = pruned_tree.predict ( X_cross_validation [ test, : ] )\n\n # Save the results\n experiment_results [ experiment_num ] = {\n \"actuals\": y_cross_validation [ test ],\n \"preds\": pruned_preds,\n \"model\": pruned_tree,\n }\n experiment_num += 1\n\n return experiment_results\n # End run_classification_experiment",
"def test(self, songs, genres):\n logging.info('Starting testing.')\n num_matches = 0\n confusion_matrix = ConfusionMatrix(genres)\n for song, actual_genre in zip(songs, genres):\n predicted_genre = self.classify(song)\n logging.info('Actual genre: {}, predicted genre: {}'.format(actual_genre, predicted_genre))\n confusion_matrix.add_genres(actual_genre, predicted_genre)\n if actual_genre == predicted_genre:\n num_matches += 1\n return num_matches, confusion_matrix",
"def confusion(model, x_test, y_test, show=True, mType=\"\", n_cases=2):\n\n def convert_y_if_needed(y_vals, y_lbl):\n print(\"{} raw shape: {}\".format(y_vals.shape, y_lbl))\n if (len(y_vals.shape) == 2):\n y_vals = argmax(y_vals, axis=1)\n print(\" - converted to: {}\".format(y_vals.shape))\n return y_vals\n\n y_pred = model.predict(x_test)\n y_pred = convert_y_if_needed(y_pred, \"Y Pred\")\n y_test = convert_y_if_needed(y_test, \"Y Test\")\n\n cases = range(n_cases)\n cmNN = confusion_matrix(y_test, y_pred)\n\n acc = sklearn.metrics.accuracy_score(y_test, y_pred) # (TP + TN) / Total\n if n_cases==2:\n rec = sklearn.metrics.recall_score(y_test, y_pred) # TP / (TP + FN)\n pre = sklearn.metrics.precision_score(y_test, y_pred) # TP / (TP + FP)\n\n accLbl = \"Proportion of classifications that are correct = (TP + TN) / Total\"\n recLbl = \"Proportion of relevant cases that were selected = TP / (TP + FN)\"\n preLbl = \"Proportion of selected cases that are relevant = TP / (TP + FP)\"\n print()\n print(\"Confusion Matrix for \" + mType)\n print(\"Accuracy = {:.2%} = {}\".format(acc, accLbl))\n if n_cases == 2:\n print(\"Recall = {:.2%} = {}\".format(rec, recLbl))\n print(\"Precision = {:.2%} = {}\".format(pre, preLbl))\n print()\n if show: myPlot.plot_confusion_matrix(cmNN,cases, show=show, title=mType+\" Confusion Matrix\")\n stats = {\"Accuracy\":acc, \"Precision\":pre, \"Recall\":rec, \"matrix\":cmNN}\n return stats",
"def test(indices_to_visit = None):\n ##0 Chicago\n ##1 New York City\n ##2 Los Angeles\n ##3 Minneapolis\n ##4 Denver\n ##5 Dallas\n ##6 Seattle\n ##7 Boston\n ##8 San Francisco\n ##9 St. Louis\n ##10 Houston\n ##11 Phoenix\n ##12 Salt Lake City\n ##13 Miami\n ##14 Atlanta\n ##15 Kansas City\n home_index = 15 # Kansas city\n # 15x15 matrix with main diagonal consisting of 0s and to which data is mirrored along\n # (values are derived from external resource and multiplied by 1000 for higher accuracy)\n matrix = np.array([[0.0, 1148413.3550047704, 2813453.6297408855, 572861.4368351421, 1483440.7452179305, 1296355.2188721865, 2801269.1215845253, 1370943.3069385102, 2996683.256068982, 422589.4697157836, 1515737.0196676727, 2343639.7107855356, 2031500.319603397, 1913900.3015914203, 946854.1020487415, 665894.0336505901],\n [1148413.3550047704, 0.0, 3949451.153672887, 1642119.4792808082, 2628946.6435325537, 2212019.1209020815, 3882177.952930788, 306997.0343229422, 4144977.810718553, 1408454.3261387087, 2286054.8575902223, 3455343.3108375454, 3179102.5335818897, 1754834.3710577146, 1202616.154562711, 1766599.1336905772],\n [2813453.6297408855, 3949451.153672887, 0.0, 2455296.3791196346, 1339227.410707824, 1998182.1420783552, 1545364.434045008, 4184394.186016967, 559978.4273194656, 2560790.9591738936, 2212581.51715849, 575975.8749662543, 933602.6426595236, 3767490.41517038, 3120118.850020503, 2186473.1552241463],\n [572861.4368351421, 1642119.4792808082, 2455296.3791196346, 0.0, 1127312.7583590776, 1390159.7734006236, 2249169.1308160927, 1811513.5290266906, 2554165.8167895717, 750916.7305340832, 1701189.1538312144, 2062079.2399570548, 1590460.9488364782, 2434801.332310659, 1462408.5353501518, 662752.1291133759],\n [1483440.7452179305, 2628946.6435325537, 1339227.410707824, 1127312.7583590776, 0.0, 1067257.7993323756, 1646308.7967673023, 2852307.4164419994, 1530510.2790658756, 1283707.511393525, 1414308.8805983758, 943721.1931707633, 598728.757362067, 2779561.192116527, 1952618.0544916363, 899656.1020173575],\n [1296355.2188721865, 2212019.1209020815, 1998182.1420783552, 1390159.7734006236, 1067257.7993323756, 0.0, 2709804.112590561, 2500314.4507069485, 2390841.4329337194, 882457.80942383, 361482.7025425731, 1427995.4150203674, 1610768.421819668, 1788903.6065106322, 1161480.3557326929, 730446.8613086065],\n [2801269.1215845253, 3882177.952930788, 1545364.434045008, 2249169.1308160927, 1646308.7967673023, 2709804.112590561, 0.0, 4018059.834330202, 1093104.7332788548, 2778905.575804111, 3046648.362755992, 1794989.6453295103, 1129464.5539648102, 4404737.747850686, 3516794.375197078, 2427457.036285458],\n [1370943.3069385102, 306997.0343229422, 4184394.186016967, 1811513.5290266906, 2852307.4164419994, 2500314.4507069485, 4018059.834330202, 0.0, 4350710.853063807, 1673216.4080939887, 2586942.3262796295, 3706392.097841614, 3382851.415271485, 2022974.6418062754, 1509585.60107986, 2015770.1390589625],\n [2996683.256068982, 4144977.810718553, 559978.4273194656, 2554165.8167895717, 1530510.2790658756, 2390841.4329337194, 1093104.7332788548, 4350710.853063807, 0.0, 2812916.3098878833, 2650547.941880299, 1053620.7288649315, 967859.8344376946, 4179636.203479384, 3448359.745690545, 2428862.4239271535],\n [422589.4697157836, 1408454.3261387087, 2560790.9591738936, 750916.7305340832, 1283707.511393525, 882457.80942383, 2778905.575804111, 1673216.4080939887, 2812916.3098878833, 0.0, 1093601.4408876144, 2050115.5214378452, 1872971.1741522516, 1708236.6189296674, 752855.8488125347, 384122.2000072272],\n 
[1515737.0196676727, 2286054.8575902223, 2212581.51715849, 1701189.1538312144, 1414308.8805983758, 361482.7025425731, 3046648.362755992, 2586942.3262796295, 2650547.941880299, 1093601.4408876144, 0.0, 1636770.4499809493, 1932616.2801687205, 1559260.024532222, 1130480.278513877, 1039856.4844335921],\n [2343639.7107855356, 3455343.3108375454, 575975.8749662543, 2062079.2399570548, 943721.1931707633, 1427995.4150203674, 1794989.6453295103, 3706392.097841614, 1053620.7288649315, 2050115.5214378452, 1636770.4499809493, 0.0, 812548.5062332726, 3191662.5092484164, 2564665.4531581327, 1690942.142157212],\n [2031500.319603397, 3179102.5335818897, 933602.6426595236, 1590460.9488364782, 598728.757362067, 1610768.421819668, 1129464.5539648102, 3382851.415271485, 967859.8344376946, 1872971.1741522516, 1932616.2801687205, 812548.5062332726, 0.0, 3364908.7076308434, 2551338.215149899, 1490589.7393085626],\n [1913900.3015914203, 1754834.3710577146, 3767490.41517038, 2434801.332310659, 2779561.192116527, 1788903.6065106322, 4404737.747850686, 2022974.6418062754, 4179636.203479384, 1708236.6189296674, 1559260.024532222, 3191662.5092484164, 3364908.7076308434, 0.0, 973244.7750437199, 2000112.4162614697],\n [946854.1020487415, 1202616.154562711, 3120118.850020503, 1462408.5353501518, 1952618.0544916363, 1161480.3557326929, 3516794.375197078, 1509585.60107986, 3448359.745690545, 752855.8488125347, 1130480.278513877, 2564665.4531581327, 2551338.215149899, 973244.7750437199, 0.0, 1089830.6426635552],\n [665894.0336505901, 1766599.1336905772, 2186473.1552241463, 662752.1291133759, 899656.1020173575, 730446.8613086065, 2427457.036285458, 2015770.1390589625, 2428862.4239271535, 384122.2000072272, 1039856.4844335921, 1690942.142157212, 1490589.7393085626, 2000112.4162614697, 1089830.6426635552, 0.0]])\n\n solver = FacilityOrderSolver(matrix, home_index)\n \n return solver.solve(indices_to_visit)",
"def test(y_hat, test_y):\n # if s.ndim == 2:\n # y_hat = np.argmax(s, axis=1)\n num_class = np.unique(test_y).size\n confusion_mat = np.zeros((num_class, num_class))\n\n for i in range(num_class):\n class_i_idx = test_y == i\n num_class_i = np.sum(class_i_idx)\n y_hat_i = y_hat[class_i_idx]\n for j in range(num_class):\n confusion_mat[i, j] = 1.0 * np.sum(y_hat_i == j) / num_class_i\n\n np.set_printoptions(precision=2)\n print('Confusion matrix:')\n print(confusion_mat)\n print('Diagonal values:')\n print(confusion_mat.flatten()[0::(num_class + 1)])",
"def doContPopEvaluation(self, is_train):\n if is_train:\n my_type = \"TRAINING\"\n else:\n my_type = \"TESTING\"\n no_match = 0 #How often does the population fail to have a classifier that matches an instance in the data.\n cons.env.resetDataRef(is_train) #Go to first instance in data set\n accuracy_estimate_sum = 0\n\n if is_train:\n instances = cons.env.format_data.numb_train_instances\n else:\n instances = cons.env.format_data.numb_test_instances\n #----------------------------------------------------------------------------------------------\n for _ in range(instances):\n if is_train:\n state_action = cons.env.getTrainInstance()\n else:\n state_action = cons.env.getTestInstance()\n #-----------------------------------------------------------------------------\n self.population.makeEvalMatchSet(state_action[0])\n prediction = Prediction(self.population)\n selected_action = prediction.getDecision()\n #-----------------------------------------------------------------------------\n if selected_action == None:\n no_match += 1\n else: #Instances which failed to be covered are excluded from the initial accuracy calculation\n prediction_err = math.fabs(float(selected_action) - float(state_action[1]))\n action_range = cons.env.format_data.action_list[1] - cons.env.format_data.action_list[0]\n accuracy_estimate_sum += 1.0 - (prediction_err / float(action_range))\n\n self.population.clearSets()\n #----------------------------------------------------------------------------------------------\n #Accuracy Estimate\n if instances == no_match:\n accuracy_estimate = 0\n else:\n accuracy_estimate = accuracy_estimate_sum / float(instances - no_match)\n\n #Adjustment for uncovered instances - to avoid positive or negative bias we incorporate the probability of guessing a phenotype by chance (e.g. 50% if two phenotypes)\n covered_instances = 1.0 - (float(no_match)/float(instances))\n adjusted_accuracy_estimate = accuracy_estimate_sum / float(instances) #no_matchs are treated as incorrect predictions (can see no other fair way to do this)\n\n print(\"-----------------------------------------------\")\n print(str(my_type)+\" Accuracy Results:-------------\")\n print(\"Instance Coverage = \"+ str(covered_instances*100.0)+ '%')\n print(\"Estimated Prediction Accuracy (Ignore uncovered) = \" + str(accuracy_estimate))\n print(\"Estimated Prediction Accuracy (Penalty uncovered) = \" + str(adjusted_accuracy_estimate))\n #Balanced and Standard Accuracies will only be the same when there are equal instances representative of each phenotype AND there is 100% covering.\n result_list = [adjusted_accuracy_estimate, covered_instances]\n return result_list",
"def get_confusion_matrix(true_label, predictions, num_index):\n class_matrix = np.zeros(shape=(num_index, num_index))\n false_group = [[] for _ in range(num_index)]\n for idx, true, pred in zip(range(len(predictions)),true_label, predictions):\n class_matrix[true][pred] += 1\n if true != pred:\n false_group[true].append(idx)\n return class_matrix, false_group",
"def test(ndigit, elambda, showSamples, showConfusion):\n Data, Label = getData()\n trainX, trainY, testX, testY = splitData(Data, Label, ndigit)\n trainX_mean = np.mean(trainX, axis=0)\n trainX_new = trainX - trainX_mean\n eigenvectors = getEigenVectors(trainX_new, elambda)\n trainX_eigen = trainX_new.dot(eigenvectors)\n testX_new = testX - trainX_mean\n testX_eigen = testX_new.dot(eigenvectors)\n testO = []\n if showSamples:\n correct_samples = []\n correct_samples_nearest = []\n correct_samples_eigen = []\n correct_samples_nearest_eigen = []\n correct_samples_labels = []\n correct_samples_predictions = []\n wrong_samples = []\n wrong_samples_nearest = []\n wrong_samples_eigen = []\n wrong_samples_nearest_eigen = []\n wrong_samples_labels = []\n wrong_samples_predictions = []\n if showConfusion:\n conf = np.zeros((ndigit, ndigit))\n for i in xrange(testX_eigen.shape[0]):\n t = testX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n p = int(trainY[j])\n y = int(testY[i])\n if showConfusion:\n conf[p, y] += 1\n if showSamples:\n if p == y:\n if len(correct_samples) < y + 1:\n correct_samples.append(testX[i])\n correct_samples_nearest.append(trainX[j])\n correct_samples_eigen.append(testX_eigen[i])\n correct_samples_nearest_eigen.append(trainX_eigen[j])\n correct_samples_labels.append(y)\n correct_samples_predictions.append(p)\n else:\n if len(wrong_samples) < y + 1:\n wrong_samples.append(testX[i])\n wrong_samples_nearest.append(trainX[j])\n wrong_samples_eigen.append(testX_eigen[i])\n wrong_samples_nearest_eigen.append(trainX_eigen[j])\n wrong_samples_labels.append(y)\n wrong_samples_predictions.append(p)\n testO.append(p)\n testO = np.array(testO)\n train0 = []\n for i in xrange(trainX_eigen.shape[0]):\n t = trainX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n min_class = trainY[j]\n train0.append(min_class)\n train0 = np.array(train0)\n print \"for digits = %d lambda = %.2f train = %.6f test = %.6f \" % (\n ndigit, elambda, (train0 == trainY).mean(), (testO == testY).mean())\n if showConfusion:\n print conf\n if showSamples:\n displaySamples(correct_samples_labels, correct_samples_predictions,\n correct_samples, correct_samples_nearest,\n correct_samples_eigen, correct_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Correct')\n displaySamples(wrong_samples_labels, wrong_samples_predictions,\n wrong_samples, wrong_samples_nearest,\n wrong_samples_eigen, wrong_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Wrong')",
"def n_test(self):\n return self.factors[1].shape[0]",
"def index_subset(subset, numTestClasses=7, randomSeed=123 ):\n print(\"datasets.py is ingesting the DF dataset from data/SETA.npz\")\n npzfile = np.load(\"data/SETA.npz\", allow_pickle=True)\n data = npzfile[\"data\"]\n labels = npzfile[\"labels\"]\n npzfile.close()\n distinctLabelCount= len(set(labels))\n \n\n #print(\"distinctLabelCount\", distinctLabelCount)\n \n #random.seed(randomSeed)\n testClasses = random.sample(range(0,distinctLabelCount),numTestClasses)\n\n # Test the overrides \n \n # WORST \n #testClasses = np.array([18.0,16.0,14.0,11.0,10.0,9.0,7.0]).tolist()\n # BEST\n #testClasses = np.array([5.0,6.0,8.0,12.0,13.0,14.0,19.0]).tolist()\n \n testClasses = [np.float64(i) for i in testClasses]\n\n mask = np.isin(labels,testClasses)\n \n \n X_test = data[np.where(mask)]\n X_train = data[np.where(~mask)]\n y_test = labels[np.where(mask)]\n y_train = labels[np.where(~mask)]\n \n if subset==\"background\":\n \n print(\"SETA data\", data.shape)\n print(\"labels\", labels.shape)\n print(testClasses)\n \n print(\"Current working directory is \", os.getcwd())\n X = X_train\n y = y_train\n print(\"SETA background data shape\", X.shape)\n elif subset==\"evaluation\":\n X = X_test\n y = y_test\n print(\"SETA evaluation data shape\", X.shape)\n images = []\n \n #print(\"y shape\", y.shape)\n \n \n for i in range(y.shape[0]):\n images.append({\n 'subset': subset,\n 'class_name': y[i],\n 'data': X[i].reshape(1,X[1].shape[0]) # This is the shape needed for Matching Networks\n #'data': X[i] #.reshape(X[1].shape[0]) # This is the shape needed for MAML\n })\n \n return images",
"def cmh_test(*args):\n import pandas, math\n from statsmodels.stats.contingency_tables import StratifiedTable as cmh\n\n # set up data logging\n ignored = {}\n\n # get contingency tables for pops with case and control data\n tables = create_tables(*args)\n\n # fill in a dataframe with cmh test results, one locus at a time\n results = pandas.DataFrame(columns=['locus', 'odds_ratio', 'p-value',\n 'lower_confidence', 'upper_confidence', 'num_pops'])\n for locus,table in tables.items():\n if len(table) == 0:\n # if none of the populations for a locus provide a contingency table (due to missing data)\n # ... then continue to the next locus.\n ignored[locus] = 'there were no populations that provided contingency tables'\n continue\n # cmh results for stratified contingency tables (called \"table\" = an array of tables)\n cmh_res = cmh(table)\n res = cmh_res.test_null_odds(True) # statistic and p-value\n odds_ratio = cmh_res.oddsratio_pooled # odds ratio\n conf = cmh_res.oddsratio_pooled_confint() # lower and upper confidence\n locus_results = locus, odds_ratio, res.pvalue, *conf, len(table)\n\n # look for fixed states across all tables\n\n if sum([math.isnan(x) for x in conf]) > 0:\n # if the upper and lower estimat of the confidence interval are NA, ignore\n # this can happen when all of the tables returned for a specific locus are fixed\n # ... for either the REF or ALT. This happens rarely for loci with low MAF, where\n # ... the populations that have variable case or control, do not have a frequency\n # ... estimated for the other treatment (case or control) and therefore don't\n # ... make it into the list of stratified tables and the remaining tables\n # ... (populations) are all fixed for the REF or ALT - again, this happens for\n # ... some low MAF loci and may happen if input file has few pops to stratify.\n\n # log reason\n ignored[locus] = 'the upper and lower confidence interval for the odds ratio was NA'\n ignored[locus] = ignored[locus] + '\\t' + '\\t'.join(map(str, locus_results[1:]))\n\n continue\n\n results.loc[len(results.index), :] = locus_results\n\n return results, ignored",
"def test_case_7():\n N = 50\n\n x = 0\n y = 0\n strength = 10000\n population = [[0 for i in range(N)] for j in range(N)]\n final_population = simulate_infection(population, x, y, strength)\n\n assert sum([len([i for i in row if i == -1]) for row in final_population]) == N * N",
"def test_Contingency_table_values(observation, forecast, category_edges):\n\n def logical(ds, edges):\n \"\"\"Convert input xarray DataArray or Dataset to integers corresponding\n to which bin the data falls in.\n In first bin -> 0; in second bin -> 1; in third bin -> 2; etc\n \"\"\"\n ds_out = 0 * ds.copy()\n for i in range(1, len(edges) - 1):\n ds_out += i * ((ds > edges[i]) & (ds <= edges[i + 1]))\n return ds_out\n\n cont_table = Contingency(\n observation, forecast, category_edges, category_edges, dim=\"time\"\n )\n for lon in forecast.lon:\n for lat in forecast.lat:\n observation_1d = logical(observation.sel(lon=lon, lat=lat), category_edges)\n forecast_1d = logical(forecast.sel(lon=lon, lat=lat), category_edges)\n sklearn_cont_table_1d = confusion_matrix(\n observation_1d, forecast_1d, labels=range(len(category_edges) - 1)\n )\n xs_cont_table_1d = (\n cont_table.table.sel(lon=lon, lat=lat)\n .transpose(\"observations_category\", \"forecasts_category\")\n .values\n )\n npt.assert_allclose(sklearn_cont_table_1d, xs_cont_table_1d)",
"def confusionMetric( self, classTest, classPred):\n # accuracy of the model - in one number\n accuracy = average_precision_score( classTest, classPred )\n # confusion matrix 2x2 matric\n matConf = confusion_matrix(classTest, classPred)\n # cohen Kappa is applicable for unbalanced data\n matCohenKappa = cohen_kappa_score(classTest, classPred)\n # classification report\n strClassificationReport = classification_report(classTest, classPred)\n \n return accuracy, matConf, matCohenKappa, strClassificationReport",
"def test_model(model, trainset, testset):\n model.eval()\n \n predictions = []\n actuals = []\n \n for data in testset:\n # data will have batch of features and labels\n X = data[0:4]\n y = data[4:]\n \n pred = np.round(model(X).detach().numpy())\n actual = y.detach().numpy()\n # print(f'pred: {pred}')\n # print(f'actual: {actual}')\n predictions.append(pred)\n actuals.append(actual)\n \n print(accuracy_score(y_true=actuals, y_pred=predictions))\n \n \n # Confusion Matrix\n \n confusion_matrix = np.zeros((3, 3))\n for i,j in zip(predictions, actuals):\n confusion_matrix[i, j] += 1\n print(\"Confusion matrix:\\n\", confusion_matrix)",
"def testMultiClass_MatrixData(self):\n cont_features = [\n tf.contrib.layers.real_valued_column('feature', dimension=4)]\n\n classifier = tf.contrib.learn.DNNClassifier(\n n_classes=3,\n feature_columns=cont_features,\n hidden_units=[3, 3],\n config=tf.contrib.learn.RunConfig(tf_random_seed=1))\n\n classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)\n self.assertTrue('centered_bias_weight' in classifier.get_variable_names())\n scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.8)\n self.assertLess(scores['loss'], 0.3)",
"def test_discrimination_indices(self):\n\n n_items = [10, 20, 30]\n n_factors = [2, 3, 4, 5]\n\n for item in n_items:\n for factor in n_factors:\n diagonal_indices, lower_indices = get_discrimination_indices(item, \n factor)\n lower_size = item * factor - factor * (factor + 1) / 2\n\n self.assertEqual(diagonal_indices[0].size, factor)\n self.assertEqual(diagonal_indices[1].size, factor)\n \n self.assertEqual(lower_indices[0].size, lower_size)\n self.assertEqual(lower_indices[1].size, lower_size)\n\n # Using lower, the rows are flipped, flip them back\n flipped = item - diagonal_indices[0] - 1\n np.testing.assert_equal(flipped, diagonal_indices[1])\n\n test = np.zeros((item, factor))\n test[lower_indices] = 1\n\n self.assertEqual(test.sum(), lower_size)\n\n for ndx1 in range(factor):\n for ndx2 in range(ndx1, factor):\n self.assertEqual(test[item - ndx1 -1, ndx2], 0)",
"def main():\n conf_matrix1 = one_vs_all()\n conf_matrix2 = all_vs_all()\n results = my_info() + '\\t\\t'\n results += np.array_str(np.diagonal(conf_matrix1)) + '\\t\\t'\n results += np.array_str(np.diagonal(conf_matrix2))\n print results + '\\t\\t'\n\n # sum = 0\n #\n # for i in range(len(conf_matrix1)):\n # sum += conf_matrix1[i][i]\n #\n # print \"One-vs-All corecct classifications: \", sum\n #\n # sum = 0\n #\n # for i in range(len(conf_matrix2)):\n # sum += conf_matrix2[i][i]\n #\n # print \"All-vs-All correct classificatinos: \", sum\n\n #print(\"onevsall\")\n #print_latex_table(conf_matrix1)\n #print(\"allvsall\")\n #print_latex_table(conf_matrix2)",
"def overall_classification_matrix(self, interpreter):\n return sum([ r.classification_matrix(interpreter) for r in self.results ])",
"def confusion_matrix(preds, labels, conf_matrix):\n for p, t in zip(preds, labels):\n conf_matrix[t, p] += 1\n return conf_matrix"
]
| [
"0.66565275",
"0.62798464",
"0.62386554",
"0.6203715",
"0.61975443",
"0.6048215",
"0.5992596",
"0.59676194",
"0.58624643",
"0.5859458",
"0.58580637",
"0.58289576",
"0.58109146",
"0.5807874",
"0.5788835",
"0.5787484",
"0.57719433",
"0.5745471",
"0.5728884",
"0.572269",
"0.5708174",
"0.5700658",
"0.56916434",
"0.5686509",
"0.56724215",
"0.5668985",
"0.5665861",
"0.5620099",
"0.56170225",
"0.5596808"
]
| 0.6355921 | 1 |
Returns the number of prizes that will be awarded for this prize. | def num_awarded(self, floor=None):
if self.award_to in ("individual_overall", "floor_overall", "dorm"):
# For overall prizes, it is only possible to award one.
return 1
elif self.award_to in ("floor_dorm", "individual_dorm"):
# For dorm prizes, this is just the number of dorms.
return Dorm.objects.count()
elif self.award_to == "individual_floor":
# This is awarded to each floor.
return Floor.objects.count()
raise Exception("Unknown award_to value '%s'" % self.award_to) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_pickets(self) -> int:\n return len(self.pickets)",
"def get_num_petals(self):\n return self._num_petals",
"def psizes(self):\n return self._cache.psizes",
"def num_votes(self):\n return sum(self.votes_per_count)",
"def size(self) -> int:\n\n return self.sizes.sum()",
"def _get_num_proposals(self):\n total_props = self._df['nprops'].sum()\n return total_props",
"def price_count(self):\n return self.price_set.count()",
"def page_counts(self):\n return 1 + (self.total_count - 1) / self.page_size",
"def qps(self) -> int:\n return pulumi.get(self, \"qps\")",
"def count(self) -> float:\n return pulumi.get(self, \"count\")",
"def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count",
"def get_num_plants(self) -> int:\r\n\r\n return len(self.plants)",
"def count(self) -> int:\n return pulumi.get(self, \"count\")",
"def number_of_players(self) -> int:\n return self.param.number_of_players",
"def get_numpins(self):\n return self.numpins",
"def num_carns(self):\n return self._num_carns",
"def __len__(self) -> int:\n return sum(target.quantity for target in self.target_sizes)",
"def get_product_count(self):\n return self.products.count()",
"def get_size(self):\n return len(self.reviews)",
"def product_count(self) -> int:\n return self._product_count",
"def get_num_cards(self):\n \n return self._hand.get_size()",
"def total_cards(self):\n amount = 0\n for palo in self._cards:\n amount = amount + len(self._cards[palo])\n\n return amount",
"def getTotalIndividualCount(self):\r\n return self._n",
"def tally(self):\n return self.count",
"def num_spikes(self):\n return self._num_spikes",
"def hives_count(self) -> int:\n return self.hives.count()",
"def carn_count(self):\n return len(self.carnivores)",
"def count(self):\n return self.get_count()",
"def n_spikes(self):\n return self.model.n_spikes",
"def number_of_seats(self):\n return self._number_of_seats"
]
| [
"0.65297043",
"0.6438432",
"0.638483",
"0.625789",
"0.61884695",
"0.6187963",
"0.6182225",
"0.6173951",
"0.6150105",
"0.612123",
"0.61027765",
"0.6080633",
"0.6056884",
"0.60247165",
"0.601722",
"0.60013944",
"0.5973453",
"0.5966288",
"0.5964273",
"0.5963277",
"0.59582824",
"0.5952626",
"0.59005475",
"0.58931494",
"0.5891539",
"0.5891466",
"0.58892995",
"0.58872265",
"0.5869705",
"0.58679724"
]
| 0.6676239 | 0 |
Adds a ticket from the user if they have one. Throws an exception if they cannot add a ticket. | def add_ticket(self, user):
profile = user.get_profile()
if profile.available_tickets() <= 0:
raise Exception("This user does not have any tickets to allocate.")
ticket = RaffleTicket(raffle_prize=self, user=user)
ticket.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def ticket_add(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n\n if user.id in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is already added.\")\n return\n\n adding_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if adding_is_admin:\n await ctx.send(\"You cannot add a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n return\n\n try:\n await channel.set_permissions(user, send_messages=True, read_messages=True)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. \"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].append(user.id)\n\n await ctx.send(f\"{user.mention} has been added to the ticket.\")",
"def create_ticket(self, user):\n return Ticket.objects.create_ticket('test', user)",
"async def adduser(ctx, user: discord.Member):\n channel = ctx.channel\n if not IsATicket(channel.id):\n await ctx.send(\n \"This is not a ticket! Users can only be added to a ticket channel\"\n )\n return\n\n await channel.set_permissions(user, read_messages=True, send_messages=True)\n await ctx.message.delete()",
"def test_credit_ticket_as_user(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.user)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_403_FORBIDDEN,\n )",
"def add(self, user: U) -> None:\n ...",
"async def add(self, ctx, member: Member):\n await self.create_ticket(member,ctx.guild)\n embed : Embed = settings.get_ticket_panel_embed()\n embed.description = 'Ticket created with success!'\n embed.set_footer(text=embed.footer.text, icon_url=self.bot.user.avatar_url)\n await ctx.message.delete()\n await ctx.send(embed=embed)",
"def Ticket(ticket):\n try:\n data = ticket_module.verify(ticket)\n name = data['slivers'][0]['name']\n if data != None:\n deliver_ticket(data)\n logger.log('api_calls: Ticket delivered for %s' % name)\n Create(database.db.get(name))\n except Exception, err:\n raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))",
"def test_add_user_to_thread(self):\n thread = self.create_thread()\n user = self.create_user()\n thread.add_user_to_thread(user)\n self.assertTrue(\n UserThread.objects.filter(thread=thread, user=user).exists())",
"def add_user(self, team, params={}, **options):\n path = \"/teams/%s/addUser\" % (team)\n return self.client.post(path, params, **options)",
"def team_user_add(token_user, team_id, user_id):\n team = Team.query.get(team_id)\n if team is None:\n abort(404, 'team not found')\n\n # check for permissions to update the team\n if not (token_user.has_permission('team.update.elevated') or\n (token_user.has_permission('team.update') and\n team.has_member(token_user))):\n abort(403, 'insufficient permissions to add user to team')\n\n # don't allow adding to 'single' teams\n if team.team_type == TeamType.query.filter_by(name='single').first():\n abort(400, 'cannot add a user to a \"single\" team')\n\n user = User.query.get(user_id)\n if user is None:\n abort(400, 'invalid user id')\n\n if team.has_member(user):\n abort(409, 'user already in team')\n\n user.teams.append(team)\n get_db().commit()\n\n return '', 201",
"async def on_raw_reaction_add(self, payload):\n emoji = str(payload.emoji)\n member = payload.member\n\n if member.bot:\n return\n\n channel = await self.bot.fetch_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n\n if emoji != settings.get_ticket_create_emoji():\n return\n \n if len(message.embeds) == 0 or message.embeds[0].title != settings.get_ticket_panel_embed().title:\n return\n \n await message.remove_reaction(emoji, member)\n await self.create_ticket(member,message.guild)",
"def ticket_created(self, ticket):",
"def AdminTicket(ticket):\n try:\n data, = xmlrpclib.loads(ticket)[0]\n name = data['slivers'][0]['name']\n if data != None:\n deliver_ticket(data)\n logger.log('api_calls: Admin Ticket delivered for %s' % name)\n Create(database.db.get(name))\n except Exception, err:\n raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))",
"def test_credit_ticket_as_admin(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_200_OK,\n )\n\n self.assertEqual(\n User.objects.get(pk=user.id).tickets,\n 1 + nb_tickets_to_add\n )",
"async def add_error(self, ctx, error):\n embed: Embed = settings.get_ticket_error_embed()\n\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n print(error)\n if isinstance(error, commands.MissingRequiredArgument):\n embed.description = f\"\\nUse **!add <user>**\"\n elif isinstance(error, commands.BadArgument):\n embed.description = f\"\\nUser not found.\"\n else:\n embed.description = f\"\\nYou don't have permissions for executing this command.\"\n\n await ctx.send(embed=embed)",
"def save(self, user):\n\n q = self.cleaned_data['queue']\n\n t = Ticket( title = self.cleaned_data['title'],\n submitter_email = self.cleaned_data['submitter_email'],\n account = self.cleaned_data['account'],\n created = datetime.now(),\n status = Ticket.OPEN_STATUS,\n queue = q,\n description = self.cleaned_data['body'],\n priority = self.cleaned_data['priority'],\n owner = self.cleaned_data['owner']\n )\n\n if HAS_TAG_SUPPORT:\n t.tags = self.cleaned_data['tags']\n\n if self.cleaned_data['assigned_to']:\n try:\n t.assigned_to = self.cleaned_data['assigned_to']\n except User.DoesNotExist:\n t.assigned_to = None\n t.save()\n\n f = FollowUp( ticket = t,\n title = _('Ticket Opened'),\n date = datetime.now(),\n public = False,\n comment = self.cleaned_data['body'],\n systemuser = user.account,\n )\n if self.cleaned_data['assigned_to']:\n f.title = _('Ticket Opened & Assigned to %(name)s') % {\n 'name': t.get_assigned_to\n }\n\n f.save()\n \n files = []\n if self.cleaned_data['attachment']:\n import mimetypes\n file = self.cleaned_data['attachment']\n filename = file.name.replace(' ', '_')\n a = Attachment(\n followup=f,\n filename=filename,\n mime_type=mimetypes.guess_type(filename)[0] or 'application/octet-stream',\n size=file.size,\n )\n a.file.save(file.name, file, save=False)\n a.save()\n \n if file.size < getattr(settings, 'MAX_EMAIL_ATTACHMENT_SIZE', 512000):\n # Only files smaller than 512kb (or as defined in \n # settings.MAX_EMAIL_ATTACHMENT_SIZE) are sent via email.\n files.append(a.file.path)\n\n context = {\n 'ticket': t,\n 'queue': q,\n 'comment': f.comment,\n }\n \n messages_sent_to = []\n\n if t.submitter_email:\n send_templated_mail(\n 'newticket_owner',\n context,\n recipients=t.submitter_email,\n sender=q.from_address,\n fail_silently=True,\n files=files,\n )\n messages_sent_to.append(t.submitter_email)\n\n #FIX USERSETTINGS\n #=======================================================================\n # if t.assigned_to and t.assigned_to != user and getattr(t.assigned_to.usersettings.settings, 'email_on_ticket_assign', False) and t.assigned_to.email and t.assigned_to.email not in messages_sent_to:\n # send_templated_mail(\n # 'assigned_to',\n # context,\n # recipients=t.assigned_to.email,\n # sender=q.from_address,\n # fail_silently=True,\n # files=files,\n # )\n # messages_sent_to.append(t.assigned_to.email)\n #=======================================================================\n\n if q.new_ticket_cc and q.new_ticket_cc not in messages_sent_to:\n send_templated_mail(\n 'newticket_cc',\n context,\n recipients=q.new_ticket_cc,\n sender=q.from_address,\n fail_silently=True,\n files=files,\n )\n messages_sent_to.append(q.new_ticket_cc)\n\n if q.updated_ticket_cc and q.updated_ticket_cc != q.new_ticket_cc and q.updated_ticket_cc not in messages_sent_to:\n send_templated_mail(\n 'newticket_cc',\n context,\n recipients=q.updated_ticket_cc,\n sender=q.from_address,\n fail_silently=True,\n files=files,\n )\n\n return t",
"def test_ticket_type_add_error_already_exists(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type add defect')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def addUser(self, user):\r\n self.users.append(user)\r\n return len(self.users)-1",
"async def ticket_remove(self, ctx, user: discord.Member):\n guild_settings = await self.config.guild(ctx.guild).all()\n is_admin = await is_admin_or_superior(self.bot, ctx.author) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in ctx.author.roles]\n )\n must_be_admin = not guild_settings[\"usercanmodify\"]\n\n if not is_admin and must_be_admin:\n await ctx.send(\"Only Administrators can add/remove other users to tickets.\")\n return\n elif not is_admin:\n author = ctx.author\n author_id = author.id\n elif is_admin:\n # Since the author isn't specified, and it's an admin, we need to guess on who\n # the author is\n inverted = {}\n for author_id, tickets in guild_settings[\"created\"].items():\n for ticket in tickets:\n inverted[ticket[\"channel\"]] = author_id\n try:\n author = ctx.guild.get_member(int(inverted[ctx.channel.id]))\n if author:\n author_id = author.id\n else:\n author_id = int(inverted[ctx.channel.id])\n except KeyError:\n author = ctx.author\n author_id = author.id\n\n index = None\n\n if not guild_settings[\"created\"][str(author_id)]:\n await ctx.send(\"You don't have any open tickets.\")\n return\n elif len(guild_settings[\"created\"][str(author_id)]) == 1:\n index = 0\n else:\n for i, ticket in enumerate(guild_settings[\"created\"][str(author_id)]):\n if ticket[\"channel\"] == ctx.channel.id:\n index = i\n break\n\n if index is None:\n await ctx.send(\n \"You have multiple tickets open. \"\n \"Please run this command in the ticket channel you wish to edit.\"\n )\n return\n\n if user.id not in guild_settings[\"created\"][str(author_id)][index][\"added\"]:\n await ctx.send(\"That user is not added.\")\n return\n\n removing_is_admin = await is_admin_or_superior(self.bot, user) or any(\n [ur.id in guild_settings[\"supportroles\"] for ur in user.roles]\n )\n\n if removing_is_admin:\n await ctx.send(\"You cannot remove a user in support or admin team.\")\n return\n\n channel = self.bot.get_channel(guild_settings[\"created\"][str(author_id)][index][\"channel\"])\n if not channel:\n await ctx.send(\"The ticket channel has been deleted.\")\n\n try:\n await channel.set_permissions(user, send_messages=False, read_messages=False)\n except discord.Forbidden:\n await ctx.send(\n \"The Manage Permissions channel for me has been removed. \"\n \"I am unable to modify this ticket.\"\n )\n return\n\n async with self.config.guild(ctx.guild).created() as created:\n created[str(author_id)][index][\"added\"].remove(user.id)\n\n await ctx.send(f\"{user.mention} has been removed from the ticket.\")",
"def add_bug(request):\n if request.method == \"POST\":\n if request.user.is_staff:\n form = StaffReportBugForm(request.POST)\n else:\n form = ReportBugForm(request.POST)\n if form.is_valid():\n ticket = form.save(commit=False)\n ticket.created_by = request.user\n ticket.ticket_type = \"Bug\"\n ticket.save()\n messages.success(\n request,\n 'Your bug has been reported successfully.')\n return redirect('bug', bugid=ticket.pk)\n\n return render(request, 'addbug.html', {'form': form})\n\n if request.user.is_authenticated:\n form = StaffReportBugForm()\n else:\n form = ReportBugForm()\n return render(request, 'addbug.html', {'form': form})",
"def create_ticket(self, ticket):\r\n ticket_url = self._zendesk_instance.create_ticket(data=ticket)\r\n return zendesk.get_id_from_url(ticket_url)",
"def add_user(self, user: User):\n raise NotImplementedError",
"def view_add_user(self, user, username, password):\r\n user.realm._checker.addUser(username, password)",
"def add_member(self, user):\n if user is self.owner:\n raise ValidationError('A trip owner cannot also be a member.')\n # check the user is not already a member\n if self.members.filter(pk=user.pk).exists():\n return\n self.members.add(user)",
"def test_credit_ticket_negative_int(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = -5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\n reverse(\n 'user-credit-tickets',\n kwargs={'pk': user.id},\n ),\n data,\n format='json',\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n )",
"async def ticket(self, ctx, ticketpanel_name: str):\n licence_id = servers.get_licence_id(ctx.guild.id)\n ticketpanel: Ticketpanel = await Ticketpanel.query.where(Ticketpanel.name == ticketpanel_name).where(Ticketpanel.licence_id == licence_id).gino.first()\n\n if not ticketpanel:\n embed: Embed = settings.get_ticket_error_embed()\n embed.description = f\"\\nTicketPanel called **{ticketpanel_name}** doesnt exist\\n\"\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n await ctx.send(embed=embed)\n return\n\n embed : Embed = settings.get_ticket_panel_embed()\n embed.description = ticketpanel.description\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n await ctx.message.delete()\n message = await ctx.send(embed=embed)\n await message.add_reaction(settings.get_ticket_create_emoji())",
"def __create_ticket(user, subject, description, topic):\n\n target = settings.SLACK_TARGET_TFED\n if topic == 'Database':\n target = settings.SLACK_TARGET_TFED_DB\n user_email = user['user']['profile'].get('email', '[email protected]')\n display_name = user['user']['profile']['real_name']\n resp = rt_api.create_ticket(topic, user_email, subject, description + \"\\n\\n- \" + display_name)\n ticket_id = resp.get('id', None)\n if ticket_id:\n ticket_info = {\n \"url\": 'https://lnl-rt.wpi.edu/rt/Ticket/Display.html?id=' + ticket_id,\n \"id\": ticket_id,\n \"subject\": subject,\n \"description\": description,\n \"status\": \"New\",\n \"assignee\": None,\n \"reporter\": user['user']['name']\n }\n ticket = views.tfed_ticket(ticket_info)\n slack_post(target, text=description, content=ticket, username='Request Tracker')\n return\n error_message = \"Whoops! It appears something went wrong while attempting to submit your request. \" \\\n \"Please wait a few minutes then try again. If the problem persists, please email \" \\\n \"us directly at [email protected].\"\n post_ephemeral(target, error_message, user['user']['id'], username=\"Request Tracker\")",
"def do_adduser(self, line):\n\t\tif isinstance(self.cl, Book):\n\t\t\tself.cl.add_contact()\n\t\telse:\n\t\t\tprint(\"To add contacts you need to open or create a book.\")",
"def set_ticket(self, ticket: str) -> None:\n LOG.info(f\"Setting ticket to {ticket}\")\n self.ticket = ticket",
"def add_user_to_team(self, for_user, to_manager):\n for_user = User.get_user_by_username(for_user)\n manager = User.get_user_by_username(to_manager)\n # @Todo test inheritance of get_user_by_username\n self.access_handler.check_add_user_to_team(for_user, manager)\n manager.add_user_to_team(for_user)"
]
| [
"0.75067663",
"0.65820646",
"0.6444056",
"0.64260346",
"0.6213124",
"0.61979747",
"0.6157786",
"0.60477424",
"0.59763026",
"0.59427506",
"0.58696246",
"0.5796449",
"0.5795518",
"0.579028",
"0.576608",
"0.5748269",
"0.56843877",
"0.56670624",
"0.5599975",
"0.5599003",
"0.55944836",
"0.5575842",
"0.5533238",
"0.55112416",
"0.55075806",
"0.5505629",
"0.5488976",
"0.5486395",
"0.548525",
"0.5457896"
]
| 0.7707899 | 0 |
Removes an allocated ticket. | def remove_ticket(self, user):
# Get the first ticket that matches the query.
ticket = RaffleTicket.objects.filter(raffle_prize=self, user=user)[0]
ticket.delete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ticket_deleted(self, ticket):\n if 'ticket' not in self.sources:\n return\n gnp = GrowlNotificationPacket(notification='ticket',\n title='Ticket #%d deleted' % ticket.id,\n description=self._ticket_repr(ticket))\n gs = GrowlSender(self.env)\n gs.notify(self._get_hosts('ticket'), gnp)",
"def ticket_deleted(self, ticket):",
"def ticket_delete(request, ticket_id):\n ticket = get_object_or_404(Ticket, code=ticket_id)\n ticket_assignment = TicketAssignment.objects.filter(ticket=ticket).first()\n\n # log action\n logger.info('[{}] ticket {} assignment'\n ' to office {}'\n ' has been deleted'\n ' by user {}'.format(timezone.now(),\n ticket,\n ticket_assignment.office,\n request.user))\n\n ticket_assignment.delete()\n\n # Send mail to ticket owner\n mail_params = {'hostname': settings.HOSTNAME,\n 'user': request.user,\n 'status': _(\"eliminato\"),\n 'ticket': ticket\n }\n m_subject = _('{} - ticket {} eliminato'.format(settings.HOSTNAME,\n ticket))\n\n send_custom_mail(subject=m_subject,\n recipient=request.user,\n body=settings.TICKET_DELETED,\n params=mail_params)\n # END Send mail to ticket owner\n\n ticket.delete()\n\n # log action\n logger.info('[{}] user {} deleted ticket {}'.format(timezone.now(),\n request.user,\n ticket))\n\n messages.add_message(request, messages.SUCCESS,\n _(\"Ticket {} eliminato correttamente\".format(ticket.code)))\n return redirect('uni_ticket:user_unassigned_ticket')",
"def delete(self, ticket_id):\n\n ticket = Ticket.get_by_id(ticket_id)\n\n if not ticket:\n return dict(status=\"fail\", message=f\"Ticket with id {ticket_id} not found\"), 404\n\n deleted_ticket = ticket.delete()\n\n if not deleted_ticket:\n return dict(status='fail', message='Internal Server Error'), 500\n\n return dict(status='success', message=\"Successfully deleted\"), 200",
"def delete_ticket(data):\n firebase_uid = data[\"session\"].split('/')[-1]\n for i in data[\"queryResult\"][\"outputContexts\"]:\n if \"ticket_params\" in i[\"name\"]:\n ticket_id = i[\"parameters\"][\"ticket_id\"]\n db = firebase.database()\n db.child(\"user_data\").child(firebase_uid).child(\"Complaints\").child(ticket_id).remove()\n response = {\n \"fulfillmentText\": \"Ticket removed.\"\n }\n return response",
"def delete(self, id):\n Ticket.query.filter_by(id=id).delete()\n db.session.commit()\n return None",
"def allocate_id(self, allocated_id):\n self._free_ids.remove(allocated_id)",
"def delete_service_ticket(sender, **kwargs):\n request = kwargs['request']\n session_key = request.session.session_key\n SessionServiceTicket.objects.filter(session_key=session_key).delete()",
"def delete_ticket(event_id, net_id):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"DELETE FROM Ticket WHERE eventID=\"+str(event_id)+\" AND netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()",
"def remove(self):\r\n\t\tself._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def _remove(updated_pending_requests):\n remove_member_from_pending_query = Query.room_request(roomname, \"\", updated_pending_requests)\n self.db.execute_query(remove_member_from_pending_query)",
"def delete(self) -> None:\n self.pop()",
"def remove_pending(self, trade):\n trade = self._format_sql(trade, self.pending_table)\n del self.pending[trade['id']]",
"def remove(self):\n with CachedCKAN(**self.ckan_kwargs) as ckan:\n ckan.delete_resource(resource_id=self.resource_id)",
"def release_ticket(self, wid, project):\n\n path = os.path.join(self.prjdir, project)\n q = WorkQueue(path)\n\n head_wi = Workitem(q.head())\n if head_wi.wfid != wid.wfid:\n self.log.info(\"OUCH ... released the wrong lock\")\n\n try:\n next_wid = Workitem(q.next())\n next_wid.result = True\n # Implementation is a bit convoluted but this just sends\n # the WI from the stack to BOSS\n self.send_to_engine(next_wid)\n except QueueEmpty:\n # That's OK, there's nothing waiting\n pass\n wid.result = True",
"def remove_card(self, slot):\n del self._starting_card[slot]"
]
| [
"0.6529886",
"0.62774307",
"0.6127702",
"0.6008233",
"0.59739345",
"0.5789608",
"0.57860637",
"0.57634157",
"0.57043296",
"0.54994345",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.53857934",
"0.5376522",
"0.5361869",
"0.532712",
"0.53019065",
"0.5295236",
"0.5290095"
]
| 0.64663655 | 1 |
Returns the number of tickets allocated to this prize. Takes an optional argument to return the number of tickets allocated by the user. | def allocated_tickets(self, user=None):
query = self.raffleticket_set.filter(raffle_prize=self)
if user:
query = query.filter(user=user)
return query.count() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_assigned(self):\n return FlicketTicket.query.filter_by(assigned=self.user).count()",
"def num_pickets(self) -> int:\n return len(self.pickets)",
"def num_attendees(self):\r\n n = sum([c.qty for c in self.contribution_set.all()])\r\n return n",
"def num_allocated_resources(self) -> int:\n return len(self.allocated_resources)",
"def number_of_employees(self) -> object:\n return self._number_of_employees",
"def user_numforms_size(*args):\n return _ida_hexrays.user_numforms_size(*args)",
"def _get_num_proposals(self):\n total_props = self._df['nprops'].sum()\n return total_props",
"def tally(self):\n return self.count",
"def num_tickets_left(self):\r\n return self._contributions_left[1]",
"def get_num_slots(self):\n # Your code here\n return self.capacity",
"def count(self) -> int:\n return pulumi.get(self, \"count\")",
"def getNumberOfAttempts(self, *args):\n return _CompuCell.Potts3D_getNumberOfAttempts(self, *args)",
"def ticket_range(self):\n response = self.http_call(\"{0}/tickets.json\".format(self.uri))\n return math.ceil(response.json()[\"count\"] / 100) + 1",
"def number_of_players(self) -> int:\n return self.param.number_of_players",
"def number_of_jobs_in_queue():\n\n # Initialize #\n user_name = get_username()\n\n process = subprocess.check_output([\"squeue\", \"-u\", user_name])\n\n return len([line for line in process.split(\"\\n\") if user_name in line])",
"def num_posts(self):\n\n return FlicketTicket.query.filter_by(started_id=self.id).count() + FlicketPost.query.filter_by(\n user_id=self.id).count()",
"def get_number_of_non_exhausted_votes(self):\n return (\n len(self._ballots) * self._number_of_votes_pr_voter\n - self._number_of_blank_votes\n )",
"def num_submitters(self) -> int:\n return self.snapshot['num_submitters']",
"def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")",
"def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")",
"def size(self) -> int:\n size = len(self['tbs_cert_list']['revoked_certificates'])\n return size",
"def get_total_number_of_buildings_for_user(request):\n buildings_count = get_buildings_for_user_count(request.user)\n\n return {'status': 'success', 'buildings_count': buildings_count}",
"def get_total_assigned(self):\n return sum(self.n_assigned_list)",
"def timesUsed(self)->int:\n return self._lic.params['usedTimes'].value",
"def number_of_crew(self):\n return self._number_of_crew",
"def get_num_plants(self) -> int:\r\n\r\n return len(self.plants)",
"def aquire_cnts(self,n):\n return self.cnts.get_n(n)",
"def number_of_launches(self):\n return self._number_of_launches",
"def associated_object_count(self):\n return self._associated_object_count",
"def num_available(self) -> int:\n return len(self)"
]
| [
"0.6878064",
"0.62725556",
"0.60077244",
"0.5967526",
"0.59303814",
"0.587535",
"0.5703093",
"0.5699997",
"0.5674289",
"0.56504005",
"0.56465405",
"0.5644256",
"0.5603991",
"0.5569141",
"0.5529344",
"0.55266505",
"0.55215555",
"0.5503645",
"0.54946893",
"0.54946893",
"0.5482507",
"0.5479051",
"0.54616296",
"0.5460948",
"0.54461086",
"0.54348475",
"0.5432827",
"0.54209423",
"0.54141957",
"0.5414169"
]
| 0.7432309 | 0 |
Compute the transport plan P in the regularization path for any given value of lambda | def compute_transport_plan(lam, lambda_list, Pi_list):
if lam <= lambda_list[0]:
Pi_inter = np.zeros(np.shape(Pi_list[-1]))
elif lam >= lambda_list[-1]:
Pi_inter = Pi_list[-1].toarray()
else:
idx = np.where(lambda_list < lam)[0][-1]
lam_k = lambda_list[idx]
lam_k1 = lambda_list[idx+1]
pi_k = Pi_list[idx]
pi_k1 = Pi_list[idx+1]
Pi_inter = pi_k + (pi_k1-pi_k)*(1/lam - 1/lam_k) / (1/lam_k1 - 1/lam_k)
Pi_inter = Pi_inter.toarray()
return Pi_inter | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_PRP(initial):\n return plan_route((initial[0],initial[1]), initial[2],\n # Goals:\n [(2,3),(3,2)],\n # Allowed locations:\n [(0,0),(0,1),(0,2),(0,3),\n (1,0),(1,1),(1,2),(1,3),\n (2,0), (2,3),\n (3,0),(3,1),(3,2),(3,3)])",
"def GoodmanKruskalLambdaR_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n part1 = 0.5 * (max(TP + FP, FN + TN) + max(TP + FN, FP + TN))\n return (TP + TN - part1) / (n - part1)\n except Exception:\n return \"None\"",
"def GoodmanKruskalLambda_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n part1 = max(TP, FP) + max(FN, TN) + max(TP, FN) + max(FP, TN)\n part2 = max(TP + FP, FN + TN) + max(TP + FN, FP + TN)\n return (0.5 * (part1 - part2)) / (n - 0.5 * part2)\n except Exception:\n return \"None\"",
"def Lambda_pq(x, lambda_, mus, cup, aenv, pienv, niter, nburnin, seed=None):\n if isinstance(mus, basestring):\n mus = mus_from_str(mus)\n if isinstance(cup, basestring):\n cup = cup_from_str(cup)\n niter = int(niter)\n p, q, epsilon, pup = x\n alpha, beta = from_api(aenv, pienv)\n mu1, mu2 = mus(epsilon)\n return zstogrowthrate(zrecursion(lambda_, mu1, p, q, alpha, beta, pup=pup, niter=niter, mu2=mu2, seed=seed), nburnin=nburnin) - cup(pup)",
"def function_parameters_shekel(lambda_1, lambda_2, p):\n d = 4\n if type(lambda_1) is not int:\n raise ValueError('lambda_1 must be an integer.')\n if type(lambda_2) is not int:\n raise ValueError('lambda_2 must be an integer.')\n if type(p) is not int:\n raise ValueError('p must be an integer.')\n C = np.array([[4, 1, 8, 6, 3, 2, 5, 8, 6, 7],\n [4, 1, 8, 6, 7, 9, 3, 1, 2, 3.6],\n [4, 1, 8, 6, 3, 2, 5, 8, 6, 7],\n [4, 1, 8, 6, 7, 9, 3, 1, 2, 3.6]])\n b = np.array([1.25, 1.45, 1.45, 1.65, 1.7, 1.8, 1.75, 1.9, 1.7, 1.8])\n A = np.zeros((p, d, d))\n rotation = np.zeros((p, d, d))\n for j in range(p):\n # rotation[j] = ortho_group.rvs(dim=d)\n rotation[j] = mt_obj.calculate_rotation_matrix(d, 1)\n diag_vals = np.zeros(d)\n diag_vals[:2] = np.array([lambda_1, lambda_2])\n diag_vals[2:] = np.random.uniform(lambda_1 + 0.1,\n lambda_2 - 0.1, (d - 2))\n A[j] = np.diag(diag_vals)\n matrix_test = np.transpose(rotation, (0, 2, 1)) @ A @ rotation\n return matrix_test, C[:, :p], b[:p]",
"def GuttmanLambdaB_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n part1 = max(TP, FP) + max(FN, TN)\n part2 = max(TP + FN, FP + TN)\n return (part1 - part2) / (n - part2)\n except Exception:\n return \"None\"",
"def GuttmanLambdaA_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n part1 = max(TP, FN) + max(FP, TN)\n part2 = max(TP + FP, FN + TN)\n return (part1 - part2) / (n - part2)\n except Exception:\n return \"None\"",
"def prob_class_1_arrival(state, lambda_1, mu, num_of_servers):\n return lambda_1 / (lambda_1 + (mu * min(state[1], num_of_servers)))",
"def lambda_method(self,t): \n return 5*math.sin(2*math.pi*1*t) # I don't see the value of 1 here but this is how lamda is defined in the exercise.",
"def generate_arrival_ivt():\n r = np.random.rand()\n return -np.log(1-r)/_lambda\n \"\"\"\n note: the code above could be replaced with the built-in process generator:\n \n return np.random.exponential(1/_lambda)\n \"\"\"",
"def phi(t, *args):\n # Unpacking data\n mu_1, pi_mu_2, distance, affine_transfo = args\n A, b = get_Ab(t)\n N = len(mu_1)\n assert len(mu_1) == len(pi_mu_2)\n # Computing value of objective function\n r = 0.\n for i in np.arange(N):\n r += distance(affine_transfo(A, b, mu_1[i]), pi_mu_2[i]) ** 2\n return r",
"def p(e, t):\n return b * e ** 2",
"def spiking_cost(t_R, t_FP):\n # t_E_R_f = -(t_R.dimshuffle('x', 0, 1) * T.log(t_FP)\n # + (1 - t_R.dimshuffle('x', 0, 1)) * T.log(1 - t_FP))\n # Try using poisson loss instead of bernoulli loss\n t_E_R_f = -t_R.dimshuffle('x', 0, 1) * T.log(t_FP) + t_FP\n\n t_E_R_f.name = 'E_R_f'\n\n return t_E_R_f",
"def plan(self):\n\t\topt_r, opt_h = self.optimizer.maximize(bounds=self.bounds) # 1-D\n\t\tplan_r, plan_h = opt_r, opt_h\n\t\t# Update optimal control in trajectories\n\t\tself.traj_r.u = plan_r # numpy robot plan\n\t\tself.traj_r.u_th = plan_r # Theano robot plan\n\t\tself.traj_h.u = plan_h # numpy human plan\n\t\tself.traj_h.u_th = plan_h # Theano human plan\n\t\treturn self.traj_r.u # return plan in shape of self.traj.u",
"def wpCostFunc(P, *args):\n\tQ, beta = args\n\t\n\t# calculate the first item: closeness to the reference\n\tf1 = 0\n\tfor i in range(len(Q)):\n\t\tf1 += ( P[i]-Q[i] )**2\n\n\t# calculate the second item: misalignment penality\n\tf2 = 0\n\tn = len(P)/2\t# number of waypoints to be optmized\n\tfor k in range(2,n):\n\t\tp_kminus1 = ( P[2*(k-1)-2], P[2*(k-1)-1] )\n\t\tp_k = ( P[2*k-2], P[2*k-1] )\n\t\tp_kplus1 = ( P[2*(k+1)-2], P[2*(k+1)-1] )\n\n\t\tvec_cur = ( p_k[0]-p_kminus1[0], p_k[1]-p_kminus1[1] )\n\t\tvec_next = ( p_kplus1[0]-p_k[0], p_kplus1[1]-p_k[1] )\n\t\t\n\t\tf2 += np.dot(vec_cur, vec_next)/lg.norm(vec_cur)/lg.norm(vec_next)\n\n\tf = f1-beta*f2\n\treturn f",
"def lambda_rad(self):\n InputFile = self('Meta','InputFile').decode(\"utf-8\")\n d_InputFile = dict([item.replace(' ','').split('=') for item in InputFile.splitlines() if '=' in item])\n if 'lambda' in d_InputFile:\n return float(d_InputFile['lambda'])\n else:\n return self.lambdaref",
"def ComputeCost(Y, W, P, my_lambda):\n l = [np.log(P[i][np.argmax(Y[i])]) for i in range(len(Y))]\n l = -np.mean(l)\n J = l\n for w in W:\n J += my_lambda * (w**2).sum()\n return J, l",
"def value(a, y, weights, lambda_):\n\t\treturn 0.5* (np.linalg.norm(a-y)**2) / (a.shape[0] * a.shape[1])\n\t\t# return unregularize + (0.5*lambda_*np.sum(np.square(weights[-1])) / (a.shape[0] * a.shape[1])) ",
"def costFunctionReg(theta, X, y, Lambda):\n # Initialize some useful values\n m = len(y) # number of training examples\n j = costFunction(theta, X, y)\n j += (Lambda/(2*m))*np.sum(theta[1:]**2)\n return j",
"def prob(Z, T):\r\n Z = closure(Z)\r\n return np.sum(np.matmul(Z, T))",
"def compute_lambda_return(self, r_traj, v_traj):\n # Initialize (NOTE only works on single traj)\n lamb_G = np.zeros(len(r_traj))\n\n # ==\n # Calculate the lambda return for the trajectory\n\n # G_{T-1} is just the final reward (only valid for full epis trajs)\n lamb_G[-1] = r_traj[-1]\n\n # Compute lambda return via DP\n for i in reversed(range(len(lamb_G) - 1)):\n lamb_G[i] = (r_traj[i]\n + ((self.gamma * (1 - self.lamb)) * v_traj[i + 1])\n + ((self.gamma * self.lamb) * lamb_G[i + 1]))\n\n return lamb_G",
"def kappaPlanck(T, wavelen, klambda):\n #if T>2000: ##sublimation temperature\n # return 1e-2 #just a very tiny number\n sigma = 5.6704 * 10**(-5) ##in cgs: ergs/(cm^2 s K)\n I = Ikappa(T, wavelen, klambda)\n return I*np.pi / (sigma * T**4)",
"def interpolation_trigram_model(list_of_words, unigram_count, bigram_count, trigram_count, N=count_token(), lambda1=None, lambda2=None, lambda3=None):\n\n # A modifier\n assert 0 < lambda3 <= 1, \"wrong value\"\n assert 0 < lambda2 <= 1, \"wrong value\"\n assert 0 < lambda1 <= 1, \"wrong value\"\n assert 0 < lambda1 + lambda2 + lambda3 <= 1, \"wrong value\"\n c_start = list_of_words.count(start_phrase)\n c_end = list_of_words.count(end_phrase)\n if c_start == 0:\n list_of_words.insert(0, start_phrase)\n list_of_words.insert(0, start_phrase)\n if c_start == 1:\n list_of_words.insert(0, start_phrase)\n if c_end == 0:\n list_of_words.append(end_phrase)\n list_of_words.append(end_phrase)\n if c_end == 1:\n list_of_words.append(end_phrase)\n uni_count = pd.read_csv(unigram_count)\n bigram_count = pd.read_csv(bigram_count)\n trigram_count = pd.read_csv(trigram_count)\n\n proba_dict = {list_of_words[i] + \" \" + list_of_words[i + 1] + \" \" + list_of_words[i + 2]:\n lambda1 * ((trigram_count[list_of_words[i] + \" \" + list_of_words[i + 1] + \" \" + list_of_words[i + 2]].values[0]) / float(bigram_count[list_of_words[i] + \" \" + list_of_words[i + 1]].values[0])) +\n lambda2 * (bigram_count[list_of_words[i] + \" \" + list_of_words[i + 1]].values[0] / float(uni_count[list_of_words[i]].values[0])) +\n lambda3 * (uni_count[list_of_words[i]].values[0] / float(N))\n if list_of_words[i] + \" \" + list_of_words[i + 1] + \" \" + list_of_words[i + 2] in trigram_count.columns.values\n else lambda2 * (bigram_count[list_of_words[i] + \" \" + list_of_words[i + 1]].values[0] / float(uni_count[list_of_words[i]].values[0])) +\n lambda3 * (uni_count[list_of_words[i]].values[0] / float(N))\n if list_of_words[i] + \" \" + list_of_words[i + 1] in bigram_count.columns.values\n else lambda3 * (uni_count[list_of_words[i]].values[0] / float(N)) for i in xrange(len(list_of_words) - 2)}\n return proba_dict",
"def _q_x(self):\n lambda_r = self.latt_par['lambda_r'].value \n return 2*np.pi*self.k/lambda_r",
"def __call__(self, t=1 / 2):\n return (t * self.vector)(self.p1)",
"def mutation(i,N_p,t,T,P,N_vars,F_min,F_const):\n\n #Adaptive scaling factor\n if N_vars >= 3:\n F=F_min*2**np.exp(1-(T/(T+1-t)))\n else:\n F = F_const\n #candidates are assigned without the i-th element\n candidates= np.delete(np.arange(N_p), np.where(np.arange(N_p)==i))\n #3 target vectors are picked out randomly for the donorvector generator\n cand_rand=np.random.choice(candidates,3,replace= False)\n X1=P[cand_rand[0],]\n X2=P[cand_rand[1],]\n X3=P[cand_rand[2],]\n \n\t#Donorvctor generator\n V= X1 + F*(X2-X3)\n return V",
"def lrCostFunction(theta,X,y, lambda_reg):\n m = np.size(y)\n grad = np.zeros(np.size((theta)))\n J_base, grad = costFunction(theta, X, y)\n \n\n reg_cost = (lambda_reg / (2.0 * m)) * np.sum(theta[1:] ** 2)\n \n reg_gradient = (lambda_reg / m) * theta\n reg_gradient[0] = 0\n cost = J_base + reg_cost\n return cost, grad + reg_gradient",
"def graph_estimate(S, lambdaL, p, maxdf, threshold=1e-4, max_iter=10000):\n nlambda = lambdaL.shape[0]\n x = np.zeros(p * maxdf * nlambda)\n col_cnz = np.zeros(p + 1).astype(int)\n row_idx = np.zeros(p * maxdf * nlambda).astype(int)\n idx_a = np.zeros(p).astype(int)\n w1 = np.zeros(p)\n\n cnz = 0\n for m in range(p):\n idx_i = np.ones(p).astype(int)\n idx_i[m] = 0\n w0 = np.zeros(p)\n size_a = 0\n\n for i in range(nlambda):\n ilambda = lambdaL[i]\n gap_ext = 1\n iter_ext = 0\n while gap_ext != 0 and iter_ext < max_iter:\n size_a_prev = size_a\n for j in range(p):\n if idx_i[j] == 1:\n r = S[m, j]\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[j, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[j] = r - ilambda\n else:\n w1[j] = r + ilambda\n idx_a[size_a] = j\n size_a += 1\n idx_i[j] = 0\n else:\n w1[j] = 0\n\n w0[j] = w1[j]\n\n gap_ext = size_a - size_a_prev\n\n gap_int = 1\n iter_int = 0\n while gap_int > threshold and iter_int < max_iter:\n tmp1 = 0\n tmp2 = 0\n for j in range(size_a):\n w_idx = idx_a[j]\n r = S[m, w_idx] + w0[w_idx]\n\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n tmp2 += abs(w1[w_idx])\n else:\n w1[w_idx] = 0\n\n tmp1 += abs(w1[w_idx] - w0[w_idx])\n w0[w_idx] = w1[w_idx]\n gap_int = tmp1 / tmp2\n iter_int += 1\n\n junk_a = 0\n for j in range(size_a):\n w_idx = idx_a[j]\n if w1[w_idx] == 0:\n junk_a += 1\n idx_i[w_idx] = 1\n else:\n idx_a[j - junk_a] = w_idx\n size_a -= junk_a\n iter_ext += 1\n\n for j in range(size_a):\n w_idx = idx_a[j]\n x[cnz] = w1[w_idx]\n row_idx[cnz] = i * p + w_idx\n cnz += 1\n col_cnz[m + 1] = cnz\n\n return col_cnz, row_idx, x",
"def capacity_from_pt(self, grid, pt_1):\n assert pt_1 is not None\n\n def capacity(pt_2):\n # snr = dB_to_natural(self.tx_dbpower + self.dbgain(pt_1, pt_2) -\n # self.noise_dbpower)\n # C = np.log2(1 + snr)\n C = self.dbgain_to_capacity(self.dbgain(pt_1, pt_2))\n if (self.min_link_capacity\n is not None) and (C < self.min_link_capacity):\n C = 0\n return [C]\n\n return FunctionVectorField(grid=grid, fun=capacity)",
"def P2G_func(self, dt, P):\n p_C = ti.static(self.p_C)\n p_v = ti.static(self.p_v)\n p_x = ti.static(self.p_x)\n g_m = ti.static(self.g_m)\n g_v = ti.static(self.g_v)\n p_F = ti.static(self.p_F)\n p_Jp = ti.static(self.p_Jp)\n\n base = ti.floor(g_m.getG(p_x[P] - 0.5 * g_m.dx)).cast(Int)\n fx = g_m.getG(p_x[P]) - base.cast(Float)\n\n # Here we adopt quadratic kernels\n w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2]\n # dw = [fx - 1.5, -2.0 * (fx - 1), fx - 0.5]\n\n # # TODO affine would do this in P2G.. why\n # p_F[P] = (ti.Matrix.identity(Int, self.dim) + dt * p_C[P]) @ p_F[P]\n\n force = ti.Matrix.zero(Float, self.dim, self.dim)\n # want to decrease branching\n if self.p_material_id[P] == MaType.elastic:\n force = self.elasticP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.liquid:\n force = self.liquidP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.snow:\n force = self.snowP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.sand:\n force = self.sandP2Gpp(P, dt)\n\n affine = force + self.cfg.p_mass * p_C[P]\n for offset in ti.static(ti.grouped(self.stencil_range3())):\n # print(\"P2G: \", offset)\n dpos = g_m.getW(offset.cast(Float) - fx)\n\n weight = 1.0\n for d in ti.static(range(self.dim)):\n weight *= w[offset[d]][d]\n\n # dweight = ts.vecND(self.dim, self.cfg.inv_dx)\n # for d1 in ti.static(range(self.dim)):\n # for d2 in ti.static(range(self.dim)):\n # if d1 == d2:\n # dweight[d1] *= dw[offset[d2]][d2]\n # else:\n # dweight[d1] *= w[offset[d2]][d2]\n\n # force = - self.cfg.p_vol * kirchoff @ dweight\n # TODO ? AFFINE\n # g_v[base + offset] += self.cfg.p_mass * weight * (p_v[P] + p_C[P] @ dpos) # momentum transfer\n # TODO Got lots of simultaneous atomic here\n g_v[base + offset] += weight * (self.cfg.p_mass * self.p_v[P] + affine @ dpos)\n g_m[base + offset] += weight * self.cfg.p_mass\n\n # g_v[base + offset] += dt * force"
]
| [
"0.5900887",
"0.58707184",
"0.57448065",
"0.56907046",
"0.5612557",
"0.56111515",
"0.5553134",
"0.5529714",
"0.55141824",
"0.54726917",
"0.5471628",
"0.54607534",
"0.54519314",
"0.5421288",
"0.5413783",
"0.5371945",
"0.5369936",
"0.5324903",
"0.5300132",
"0.52990323",
"0.52960074",
"0.52836215",
"0.5281178",
"0.5266412",
"0.52537465",
"0.5235367",
"0.52271557",
"0.5220181",
"0.5197808",
"0.5190101"
]
| 0.70644534 | 0 |
Called when the mapper has finished, to allow for any final work to be done. | def finish(self):
logging.info(str(self) + ' Mapper finished.')
if self.next_mapper is not None:
logging.info(str(self) + ' Next: ' + str(self.next_mapper))
self.next_mapper.run()
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def finished(self):\n pass",
"def _finished(self) -> None:",
"def finish(self):\n pass",
"def finish(self):\n pass",
"def finished(self):\n raise NotImplementedError()",
"def finished(self):\r\n raise NotImplementedError",
"def finished(self):",
"def finish(self):\r\n\r\n self._is_finished = True",
"def Finish(self):\n pass",
"def finish(self) -> None:",
"def finish(self) -> None:",
"def finished(self):\n\t\telog(\"finished\")",
"def complete(self):\n pass",
"def mapper_updated(self):\n self.invalidate()\n return",
"def finish(self):",
"def finish(self):",
"def finalize(self):\n logger.debug(\"Generation Complete\")\n self.events.generation_complete()",
"def _finish(self):\n self.stage = {}",
"def finalize(self):\n return",
"def on_finish(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\n pass",
"def finalize(self):\r\n pass",
"def close(self):\n for m in self._mappers:\n m.close()\n self._mappers = []\n self._offsets = []\n self._sizes = []\n self._handler = None",
"def done(self):\n raise NotImplementedError()",
"def finish():\n pass"
]
| [
"0.6983338",
"0.67524016",
"0.6732718",
"0.6732718",
"0.6725664",
"0.6718488",
"0.6698824",
"0.6674772",
"0.6650086",
"0.65816665",
"0.65816665",
"0.6573483",
"0.6461058",
"0.6453406",
"0.6439004",
"0.6439004",
"0.6388175",
"0.63703626",
"0.6352022",
"0.63432056",
"0.6340175",
"0.6340175",
"0.6340175",
"0.6340175",
"0.6340175",
"0.6340175",
"0.6324267",
"0.6305488",
"0.62836945",
"0.6276607"
]
| 0.8608334 | 0 |
Writes updates and deletes entities in a batch. | def _batch_write(self):
if self.to_put:
db.put(self.to_put)
self.to_put = []
if self.to_delete:
db.delete(self.to_delete)
self.to_delete = [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_batch(self, batch):\n for item in batch:\n self.write_buffer.buffer(item)\n key = self.write_buffer.get_key_from_item(item)\n if self.write_buffer.should_write_buffer(key):\n self._write_current_buffer_for_group_key(key)\n self.increment_written_items()\n self._check_items_limit()",
"def flush_batch(self, batch):\n inserts = []\n replacements = []\n\n for action_type, data in batch:\n if action_type == processor.INSERT:\n inserts.append(data)\n elif action_type == processor.REPLACE:\n replacements.append(data)\n\n if inserts:\n write_rows(\n self.clickhouse,\n self.dist_table_name,\n inserts\n )\n\n if self.metrics:\n self.metrics.timing('inserts', len(inserts))\n\n if replacements:\n for key, replacement in replacements:\n self.producer.produce(\n self.replacements_topic,\n key=six.text_type(key).encode('utf-8'),\n value=json.dumps(replacement).encode('utf-8'),\n on_delivery=self.delivery_callback,\n )\n\n self.producer.flush()",
"def write(self, batch):\n cursor = connection.cursor()\n while batch:\n values = [\n \"(%s, %s)\" % (\n psycopg2.Binary(k),\n psycopg2.Binary(v)\n ) for k, v in batch[:self.BATCH_SIZE]]\n sql = \"INSERT INTO %s(k,v) VALUES %s\" % (self.table, \",\".join(values))\n batch = batch[self.BATCH_SIZE:]\n cursor.execute(sql)\n cursor.execute(\"COMMIT\")",
"def update_batch(self, *args, **kwargs):\n pass",
"def bulk_write(collection, iterable, job_id=None, unsafe=False):\n namespace = Namespace(collection.foreign_id)\n stage = get_stage(collection, OP_INDEX, job_id=job_id)\n entities = []\n for item in iterable:\n if not is_mapping(item):\n raise InvalidData(\"Failed to read input data\", errors=item)\n entity = model.get_proxy(item)\n entity = namespace.apply(entity)\n if not unsafe:\n entity = remove_checksums(entity)\n entities.append(entity)\n index_entities(stage, collection, entities)",
"def batch_write(client, resources, batch_size=MAX_DYNAMO_BATCH_SIZE, batch_counter_step=MAX_DYNAMO_BATCH_SIZE):\n idx = 0\n item_count = 0\n\n batch = defaultdict(list)\n for idx, batch_resources in enumerate(chunk(resources, batch_size)):\n batch.clear()\n for resource in batch_resources:\n batch[getmeta(resource).table_name(client)].append(\n {'PutRequest': {'Item': resource.to_dynamo_dict(skip_null_fields=True)}}\n )\n item_count += 1\n\n if (idx % batch_counter_step) == 0:\n logger.info(\"Loading batch: %s\", idx)\n\n client.batch_write_item(RequestItems=batch)\n\n logger.info(\"Loaded %s records in %s batches.\", item_count, idx + 1)",
"def _batch_update(self, query, mutation):\n logger.info(\"Performing batch update on %s. Mutation: %s\", query, mutation)\n modified = 0\n for doc in self.instances.find(query):\n with lock_instance(doc['_id']):\n pre_update_doc = self.instances.find_one({'_id' : doc['_id']})\n result = self.instances.update_one({'_id': doc['_id']}, mutation)\n assert result.modified_count == 1\n modified += 1\n updated_doc = self.instances.find_one({'_id': doc['_id']})\n instance = FixtureInstance.deserialize_mongodoc(updated_doc)\n try:\n self.axdb_client.update_fixture_instance(instance.axdbdoc())\n except Exception:\n logger.exception(\"Failed to persist updates for %s. Undoing cache update\", instance)\n self.instances.replace_one({'_id' : instance.id}, pre_update_doc)\n raise\n logger.info(\"%s fixture instances modified\", modified)",
"def flush_to_disk(self):\n logger.info(\"Flushing %s queries from in-memory cache to disk\", len(self.batch_writes))\n rows = self.memory_connection.execute(f\"\"\"\n SELECT hash_id, query, raw_query, domain, intent FROM queries\n WHERE rowid IN ({\",\".join(self.batch_writes)});\n \"\"\")\n self.disk_connection.executemany(\"\"\"\n INSERT OR IGNORE into queries values (?, ?, ?, ?, ?);\n \"\"\", rows)\n self.disk_connection.commit()\n self.batch_writes = []",
"def write(self, batch):\n time.sleep(self.WRITE_DELAY)",
"def flush(self):\n self.save()\n for obs in self.observation_set.all():\n obs.flush();",
"def upload_entities(self, batch):\n # TODO Include a Do Not Overwrite call\n results = None\n atlas_endpoint = self.endpoint_url + \"/entity/bulk\"\n\n payload = AtlasClient._prepare_entity_upload(batch)\n\n postBulkEntities = requests.post(\n atlas_endpoint,\n json=payload,\n headers=self.authentication.get_authentication_headers()\n )\n\n results = self._handle_response(postBulkEntities)\n\n return results",
"def update_batch_of_entitiies(self, username, entity_batch_data):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_UPDATE_ENTITY_BATCH, username + '|' + str(entity_batch_data))",
"def flush_batch(self, batch: Sequence[TResult]) -> None:\n pass",
"def commit(self):\n self._cur_batch.commit()\n self._cur_batch = None\n self._num_mutations = 0",
"def flush():\n with transaction.atomic():\n if voter_records:\n NCVoter.objects.bulk_create(voter_records)\n with transaction.atomic():\n # This looks weird. Let me explain.\n # All the unsaved ChangeTracker instances have references\n # to the NCVoter instances from *before* the NCVoter instances\n # were saved. So they do not know the voter instances now have\n # IDs from being inserted. This re-sets the voter on the change\n # object, ensuring it knows the ID of its voter and can be saved\n # properly.\n for c in change_records:\n c.voter = c.voter\n c.voter_id = c.voter.id\n ChangeTracker.objects.bulk_create(change_records)\n change_records.clear()\n voter_records.clear()",
"def _flush_batch(self) -> None:\n batch_len = len(self._current_batch)\n if batch_len == 0:\n self.logger.debug('Nothing to flush.')\n return\n\n self.logger.debug(f'Flushing batch size {batch_len}')\n\n with self.LOCK:\n to_process_batch = list(self._current_batch)\n self._current_batch = list()\n\n log_event = EventFactory.create_log_event(to_process_batch, self.logger)\n\n self.notification_center.send_notifications(enums.NotificationTypes.LOG_EVENT, log_event)\n\n if log_event is None:\n self.logger.exception('Error dispatching event: Cannot dispatch None event.')\n return\n\n try:\n self.event_dispatcher.dispatch_event(log_event)\n except Exception as e:\n self.logger.error(f'Error dispatching event: {log_event} {e}')",
"def __flush(self) -> None:\n assert not self.__closed\n assert self.__batch is not None, \"cannot flush without active batch\"\n\n logger.info(\n \"Flushing %s items (from %r)\",\n len(self.__batch.results),\n self.__batch.offsets,\n )\n\n self.__metrics.timing(\n \"process_message.normalized\",\n self.__batch.processing_time_ms / self.__batch.messages_processed_count,\n )\n\n batch_results_length = len(self.__batch.results)\n if batch_results_length > 0:\n logger.debug(\"Flushing batch via worker\")\n flush_start = time.time()\n self.__worker.flush_batch(self.__batch.results)\n flush_duration = (time.time() - flush_start) * 1000\n logger.info(\"Worker flush took %dms\", flush_duration)\n self.__metrics.timing(\"batch.flush\", flush_duration)\n self.__metrics.timing(\n \"batch.flush.normalized\", flush_duration / batch_results_length\n )\n\n logger.debug(\"Committing offsets for batch\")\n commit_start = time.time()\n offsets = {\n partition: offsets.hi for partition, offsets in self.__batch.offsets.items()\n }\n self.__commit(offsets)\n logger.debug(\"Committed offsets: %s\", offsets)\n commit_duration = (time.time() - commit_start) * 1000\n logger.debug(\"Offset commit took %dms\", commit_duration)\n\n self.__batch = None",
"def put(self, entity):\n self._cur_batch.put(entity)\n self._num_mutations += 1\n if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:\n self.commit()\n self.begin()",
"def save_all(self):\r\n for index in range(self.count()):\r\n self.save(index)",
"def save_to_kvstore(helper, name, entries, stats):\n helper.log_info('Saving {0} entries for MineMeld feed \"{1}\"'.format(\n len(entries), name))\n url = '{0}/batch_save'.format(_uri(helper))\n\n # We need to batch in groups of 500, the default.\n for i in range(0, len(entries), 500):\n resp = helper.send_http_request(\n url=url,\n headers=_headers(helper),\n method='POST',\n verify=False,\n payload=entries[i:i+500])\n resp.raise_for_status()",
"def test_flush_pk_given(n):\n session = Session(bind=engine)\n for chunk in range(0, n, 1000):\n session.add_all(\n [\n Customer(\n id=i + 1,\n name=\"customer name %d\" % i,\n description=\"customer description %d\" % i,\n )\n for i in range(chunk, chunk + 1000)\n ]\n )\n session.flush()\n session.commit()",
"def complete_write_transaction(self) -> None:\n self.batch.__exit__(*sys.exc_info())\n self.batch = self.genes.batch_writer()",
"def write_entries(self, entries):\n for entry in entries:\n self.write(entry)",
"def commit_updates(session, update_key, update_statements, table, commit_frequency = 1000):\n primary_key = table.primary_key.columns.values()[0]\n update_key = table.columns[update_key]\n u = table.update().where(primary_key==bindparam('pk')).values({update_key: bindparam('update')})\n numgroups = len(update_statements) / commit_frequency\n for ng in range(numgroups):\n if numgroups == 0:\n break\n chunk = update_statements[ng*commit_frequency:(ng+1)*commit_frequency]\n session.connection().execute(u, *chunk)\n print \"committing chunk\",ng+1,\"of\",numgroups,\"with length\",len(chunk),\"at\",datetime.now()\n session.commit()\n last_chunk = update_statements[numgroups*commit_frequency:]\n if last_chunk:\n print \"committing last\",len(last_chunk),\"records at\",datetime.now()\n session.connection().execute(u, *last_chunk)\n session.commit()",
"def flush(self):\n self._uow.commit()",
"def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n\n hbase = HbaseUtils(self._get_conn(config))\n project = config.project\n table_name = _table_id(project, table)\n\n b = hbase.batch(table_name)\n for entity_key, values, timestamp, created_ts in data:\n row_key = serialize_entity_key(\n entity_key,\n entity_key_serialization_version=config.entity_key_serialization_version,\n ).hex()\n values_dict = {}\n for feature_name, val in values.items():\n values_dict[\n HbaseConstants.get_col_from_feature(feature_name)\n ] = val.SerializeToString()\n if isinstance(timestamp, datetime):\n values_dict[HbaseConstants.DEFAULT_EVENT_TS] = struct.pack(\n \">L\", int(calendar.timegm(timestamp.timetuple()))\n )\n else:\n values_dict[HbaseConstants.DEFAULT_EVENT_TS] = timestamp\n if created_ts is not None:\n if isinstance(created_ts, datetime):\n values_dict[HbaseConstants.DEFAULT_CREATED_TS] = struct.pack(\n \">L\", int(calendar.timegm(created_ts.timetuple()))\n )\n else:\n values_dict[HbaseConstants.DEFAULT_CREATED_TS] = created_ts\n b.put(row_key, values_dict)\n b.send()",
"def batch_process(self, delete_list=[], update_list=[]):\n self.request_url = \"{0}/{1}\".format(self.API_URL, self.USER_BULK_ENDPOINT)\n payload = {\n 'updated': update_list,\n 'deleted': delete_list,\n }\n\n r = self.requests.post(\n self.request_url,\n data=json.dumps(payload),\n headers=self.default_headers,\n timeout=30\n )\n\n return r.status_code, r.json()",
"def _update_bulk(self, iterable):\n self.cursor.executemany(self.UPDATE, iterable)",
"def test_flush_no_pk(n):\n session = Session(bind=engine)\n for chunk in range(0, n, 1000):\n session.add_all(\n [\n Customer(\n name=\"customer name %d\" % i,\n description=\"customer description %d\" % i,\n )\n for i in range(chunk, chunk + 1000)\n ]\n )\n session.flush()\n session.commit()",
"def batch(self):\n return self._client.batch()"
]
| [
"0.70008594",
"0.68892026",
"0.666511",
"0.65495664",
"0.65008634",
"0.6464048",
"0.635089",
"0.63451797",
"0.6307946",
"0.6263735",
"0.6218856",
"0.6178519",
"0.61136186",
"0.5940264",
"0.59256804",
"0.5848141",
"0.5847394",
"0.5837007",
"0.5814465",
"0.5783356",
"0.5769271",
"0.5760071",
"0.57519233",
"0.567074",
"0.56688154",
"0.56451446",
"0.56223845",
"0.5608842",
"0.5599858",
"0.5596495"
]
| 0.76341254 | 0 |
Adds Card entities for the given ids, with the given parent. Adds in batches and requeues itself. | def create_cards(card_ids, box_key):
if len(card_ids) == 0:
return
BATCH_SIZE = 20
batch = card_ids[:BATCH_SIZE]
logging.info("Adding cards for %s (%d remaining). Batch: %s"%(box_key,len(card_ids),batch))
for id_tuple in batch:
key = '-'.join([str(p) for p in id_tuple])
card = models.Card.get_by_key_name(key, parent=box_key)
if card is None:
logging.info("Creating card for %s"%(str(id_tuple)))
card = models.Card(key_name=key, parent=box_key)
card.enabled = True
card.put()
deferred.defer(create_cards,
card_ids[BATCH_SIZE:],
box_key, _queue='cardcleaner') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, fetchables, depth=1):\n if fetchables:\n if isinstance(fetchables, collections.Sequence):\n for fetchable in fetchables:\n self.add(fetchable, depth)\n else:\n log.debug(\"Adding to queue: %s (depth=%s)\", fetchables, depth)\n self.q.append((fetchables, depth))",
"def add_cards(self, cards):\n self.get_cards().extend(cards)",
"def add_cards(self, cards):\n ints_or_objs = list(cards)\n for card in ints_or_objs:\n if isinstance(card, int):\n card = Card.get_or_create(card)\n elif not isinstance(card, Card):\n raise InvalidCardError\n self.cards.append(card)\n return self.cards",
"def _add_children(self, parent, parent_iter):\n parent_gadget = Gadget.from_widget(parent)\n if parent_gadget:\n children = parent_gadget.get_children()\n else:\n children = get_all_children(parent)\n\n for child in children:\n gadget = Gadget.from_widget(child)\n if not gadget:\n if isinstance(child, gtk.Container):\n self._add_children(child, parent_iter)\n else:\n continue\n\n elif not self._find_iter(parent_iter, gadget):\n child_iter = self._model.append(parent_iter, (gadget,))\n self._add_children(child, child_iter)",
"async def receive_from_parent_flow(self, inputs: List[Input]):\n if not inputs:\n return\n async with self.ctxhd_lock:\n ctx_keys = list(self.ctxhd.keys())\n self.logger.debug(f\"Receiving {inputs} from parent flow\")\n self.logger.debug(f\"Forwarding inputs to contexts {ctx_keys}\")\n for ctx in ctx_keys:\n await self.sadd(ctx, *inputs)",
"def add(self, asset_ids=None):\n if asset_ids is not None and isinstance(asset_ids, list):\n for h in asset_ids:\n self.asset_ids.append(h[:self.idlen_conf[\"asset_id\"]])",
"def insert(self, cards):\n try:\n self.deck.extend(cards)\n for card in cards:\n card._deck = self\n except TypeError:\n self.deck.append(cards)\n cards._deck = self",
"def create_cards(self, cards):\n\n self._collection.insert_many([self._constrain_keys(card) for card in cards])",
"def _addChildren(self, pid, chunks):\n if chunks[pid].type in [0, -1]:\n self._addEntity(pid, chunks)\n else:\n self._addPredicate(pid, chunks)",
"def add(self, cards):\n\n super().add(cards)\n self._update_value()",
"def add_child(self, child):\r\n \r\n self._children.append(child)\r\n self.update_batch(self._batch, self._group)",
"def put_ids_to_queue(ids_list):\n LOGGER.debug('pushing %s ads to the queue', len(ids_list))\n for advert_id in ids_list:\n fetch_single_advert.delay(advert_id)",
"def Send_newCards(self, cards): \n serialized = [c.serialize() for c in cards]\n self.Send({\"action\": \"newCards\", \"cards\": serialized})",
"def push_group(self, parent_data, parent_doc_type, es_obj_list, doc_type, refresh=True):\n parent_doc_id = self.push(parent_data, doc_type=parent_doc_type, refresh=refresh)\n if parent_doc_id is None:\n raise RuntimeError(\"Failed to create parent doc\")\n es_repr_list = []\n for es_obj in es_obj_list:\n doc_type, es_repr = self._validate_doc_and_get_type_and_repr(es_obj, doc_type)\n es_repr.update(parent_data)\n es_repr['parent_id'] = parent_doc_id\n es_repr_list.append(es_repr)\n self.push_bulk(es_repr_list, doc_type, refresh)",
"def add_pieces_and_movements_to_collection(collection_id, piece_ids, movement_ids):\n collection = Collection.objects.get(id=collection_id)\n for piece_id in piece_ids:\n piece = Piece.objects.get(id=piece_id)\n collection.add(piece)\n for movement_id in movement_ids:\n movement = Movement.objects.get(id=movement_id)\n collection.add(movement)",
"async def queue_add(\n self,\n ctx: commands.Context,\n players: List[Player],\n ) -> Optional[bool]:\n\n if ctx.guild.id in self.queue:\n self.queue[ctx.guild.id].queue += players\n else:\n self.queue[ctx.guild.id] = QueueManager(self.default_volume, players)\n\n return True",
"def processLink(self, parentId, *childrenIds):\n editor = self._parent\n parent = editor.findWithUUID(parentId)\n if parent:\n for childId in childrenIds:\n child = editor.findWithUUID(childId)\n if child:\n child.parent = parent\n # apply final callbacks\n self.finishedLoadingObject(childId, child)\n else:\n # shouldnt happen :)\n print(\"b2rex.processLink: cant find child to link!\")\n else:\n for childId in childrenIds:\n editor.add_callback('object.precreate', parentId, self.processLink,\n parentId, childId)",
"def cmd_add(arguments):\r\n\r\n # build the list of entities\r\n entities = { type : [ ] for type in entityTypes }\r\n type = \"\"\r\n param = 1 # skip \"add\" command\r\n while param < len(arguments):\r\n\r\n # get an entity type and check it's valid\r\n fullType = arguments[param]\r\n type = fullType\r\n subtype = None\r\n if \"/\" in type:\r\n (type, waste, subtype) = fullType.partition(\"/\")\r\n param += 1\r\n if type not in entityTypes:\r\n print \"ERROR: entity type [%s] is not valid\" % type\r\n return -1\r\n if subtype is not None and subtype not in subtypes[type]:\r\n print \"ERROR: entity subtype [%s] for type [%s] is not valid\" % (subtype, type)\r\n return -1\r\n\r\n # build the list of values\r\n eList = [ ]\r\n if arguments[param] == \"*\":\r\n\r\n # if the entity has subtypes and we have no subtype, iterate\r\n if subtype is None and subtypes[type]:\r\n # iterate subtypes\r\n for subtype in subtypes[type]:\r\n\r\n # set the fullType\r\n fullType = type + \"/\" + subtype\r\n\r\n # get the list of (ALL) ids from the database\r\n eList = getEntityIds(type, subtype)\r\n param += 1\r\n\r\n # attach the list to the type\r\n entities[fullType] = eList\r\n\r\n else:\r\n # no subtypes or subtype specified\r\n\r\n # get the list of (ALL) ids from the database\r\n eList = getEntityIds(type, subtype)\r\n param += 1\r\n\r\n # attach the list to the type\r\n entities[fullType] = eList\r\n else:\r\n # process the params\r\n while param < len(arguments):\r\n\r\n try:\r\n eList.append(int(arguments[param]))\r\n param += 1\r\n except ValueError:\r\n break\r\n # attach the list to the type\r\n entities[fullType] = eList\r\n\r\n # get a producer\r\n producer = getProducer()\r\n\r\n # start creating jobs\r\n jobCount = 0\r\n for fullType in entities:\r\n\r\n # separate type/subtype\r\n type = fullType\r\n if \"/\" in type:\r\n (type, waste, subtype) = fullType.partition(\"/\")\r\n\r\n # get the list\r\n elist = entities[fullType]\r\n\r\n # build jobs of up to 64 entries\r\n partialJobCount = generateJobs(producer, type, elist)\r\n\r\n # report\r\n if partialJobCount > 0:\r\n print \"added %5d jobs for entity %s\" % (partialJobCount, fullType)\r\n jobCount += partialJobCount\r\n\r\n # report the number of jobs created\r\n print \"\"\r\n print \"Added a total of %5d jobs in queue\" % jobCount\r\n print \"\"\r\n\r\n return 0",
"def add_to_batch(self, created_job_ids):\n batch_folder = BatchFolder(path=self.current_dir)\n if batch_folder.has_batch():\n batch: JobBatch = batch_folder.load()\n else:\n batch = JobBatch(job_ids=[], server=self.get_active_server())\n if batch.server.url != self.get_active_server().url:\n logger.info(\n \"A batch exists in this folder, but for a different server. \"\n \"Not saving job ids in batch\"\n )\n else:\n logger.info(\"Saving job ids in batch in current folder\")\n batch.job_ids = sorted(\n list(set(batch.job_ids) | set(created_job_ids))\n ) # add only unique new ids\n batch_folder.save(batch)",
"def add_parent_groups(ctx):\n asyncio.run(add_parent_groups_impl(ctx.obj[\"config\"]))",
"def grow(self, batch_ids, **combo_runner_opts):\n if isinstance(batch_ids, int):\n batch_ids = (batch_ids,)\n\n combo_runner_core(\n grow,\n combos=((\"batch_number\", batch_ids),),\n constants={\"verbosity\": 0, \"crop\": self},\n **combo_runner_opts,\n )",
"def append(\n self,\n ids: Iterable[str],\n preds: Iterable[str],\n targets: Iterable[str],\n ) -> None:\n self._ids += list(ids)\n self._preds += list(preds)\n self._targets += list(targets)",
"def add_card(self, added_cards):\n\n self.hand[:0] = added_cards",
"def add_ents(self, ents: Iterable['Entity']) -> None:\n ents = list(ents)\n self.entities.extend(ents)\n for item in ents:\n self.by_class[item['classname'].casefold()].add(item)\n self.by_target[item['targetname', ''].casefold() or None].add(item)\n if 'nodeid' in item:\n try:\n node_id = int(item['nodeid'])\n except (TypeError, ValueError):\n pass\n else:\n item['nodeid'] = str(self.node_id.get_id(node_id))",
"def insert_cards(self, cards: List[str], deck: str) -> None:\n deck = self.collection.decks.byName(deck)\n if deck is None:\n raise ValueError(\"Deck doesn't exist\")\n\n for card in cards:\n note = self._create_card(self.DEFAULT_MODEL)\n note.model()['did'] = deck['id'] # Make card's deck be `deck`\n note.fields[0] = card # fields=[content, tags]\n self.collection.addNote(note)\n # Card IDs are timestamps (integer milliseconds). Avoid collisions\n # by staggering insertion time\n time.sleep(0.002)\n \n self._remove_duplicates()\n self.collection.save() # Commit to database",
"def add_objects(self, objects):\n requests = []\n for obj in objects:\n requests.append({\"action\": \"addObject\", \"body\": obj})\n request = {\"requests\": requests}\n return self.batch(request)",
"def push(self, card, times=1):\n self._cards.extend(times * [card])",
"def _connect_items(self, container):\n for item in self.entity_json['items']:\n item_id = item['id']\n entity = self.world.entities[item_id]\n container.add(entity)",
"def add_children(self, children: List[Node], parent: Optional[Node] = None) -> None:\n if not parent:\n parent = self.root_node\n parent.children.extend(children)\n for child in children:\n child.parent = parent",
"def _addProteins(self, proteinIds, containerNames):\n proteinIds = AUX.toList(proteinIds)\n for containerName in containerNames:\n proteinContainer = getattr(self, containerName)\n proteinContainer.update(proteinIds)"
]
| [
"0.5433942",
"0.54215556",
"0.5285374",
"0.52513355",
"0.523385",
"0.5209448",
"0.52048165",
"0.5192483",
"0.50542176",
"0.50103396",
"0.49839032",
"0.49836466",
"0.49792188",
"0.49381772",
"0.49187857",
"0.48608288",
"0.48197132",
"0.48139533",
"0.48101467",
"0.4797596",
"0.47231582",
"0.47143674",
"0.47125003",
"0.47040227",
"0.46918762",
"0.46731383",
"0.46639138",
"0.46300286",
"0.46190977",
"0.45995113"
]
| 0.6199333 | 0 |
Make sure the incoming dict is a valid rower data frame, so the outgoing data is consistent. Check the validity of the incoming dict fields, making sure all the required fields exist and that the value of each key has the corresponding data type or format, so data consumers are guaranteed that the outgoing data is in a known format. | def _check_dict_validity(self, incoming_dict: dict):
# check key error
# check value error
for key in incoming_dict.keys():
# check invalid key.
if key not in self.all_valid_keys:
raise IncomingRowerDictInvalidKeyError("Incoming rower data dict has unknown key, data rejected. "
+ key)
# check value if key is valid.
value = incoming_dict.get(key, None)
if value is None:
if key in self.mandatory_keys:
# Mandatory keys should have value.
raise IncomingRowerDictInvalidKeyError("Incoming rower data dict has wrong key, data rejected. "
+ key)
else:
# Got the value, check the value.
if key in self.integer_keys:
# integer keys should be integer
if int(value) != value:
raise IncomingRowerDictInvalidValueError("Incoming rower data dict has wrong key, "
"data rejected. " + key + ":" + str(value))
if key not in self.negative_keys:
# non-negative keys should be non-negative
if value < 0:
raise IncomingRowerDictInvalidValueError("Incoming rower data dict has wrong key, "
"data rejected. " + key + ":" + str(value))
# make sure mandatory keys exist.
for m_key in self.mandatory_keys:
if m_key not in incoming_dict.keys():
raise IncomingRowerDictMissingKeyError('Incoming rower data dict has insufficient keys, '
'mandatory keys not found. '+m_key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_required_fields(dataframe):\n\n if dataframe is None:\n raise ValueError(\"It was not provided a valid Dataframe.\")",
"def _validate_row(self, row):\n\n # assume value.\n is_valid = True\n\n # test if each field in @row has the correct data type.\n tests = []\n for field, value in row.items():\n value_type, header_type = (type(value).__name__, \n self.required_headers[field].__name__)\n test = value_type == header_type\n if not test:\n err = \"Field '{}' not valid; expected '{}', got '{}'.\".format(field,\n header_type, value_type)\n self.logger.debug(err)\n tests.append(test)\n\n # if any test failed, set @is_valid to False.\n if False in tests:\n is_valid = False\n \n return is_valid",
"def _check_keys(dict):\n for key in dict.keys():\n if isinstance(dict[key], sio.matlab.mio5_params.mat_struct):\n dict[key] = _todict(dict[key])\n return dict",
"def _validate(self, obj):\n assert (self._confidence in obj.columns and self._predicted in obj.columns\n and self._groundtruth in obj.columns), \\\n \"Must at least have '%s', '%s' and '%s' columns.\" \\\n % (self._confidence, self._predicted, self._groundtruth)\n assert len(obj['groundtruth']) == len(obj['predicted']) == len(obj['confidence']), \\\n \"Dataframe columns are inconsistent \"\n\n if len(obj.index) < 2:\n self._logger.fatal(\"Stored procedure returned empty dataframe\")\n raise RuntimeError(\"Stored procedure returned empty dataframe\")\n\n self._logger.debug(obj.head)",
"def conform_input_data(rowdict):\n # rowdict['Value'] = float(rowdict['Value'])\n rowdict['TimeStamp'] = TS_to_date(rowdict['TimeStamp'][:19])\n for floatcolumn in ['LowPx','OpenPx','ClosePx','QuoteCount','HighPx','TradeCount']:\n if floatcolumn in rowdict:\n rowdict[floatcolumn] = float(rowdict[floatcolumn])\n return rowdict",
"def validate_data_frame(self):\n try:\n # Verify the data frame\n pandas.verify_data_frame(self.data_frame)\n except OnTaskDataFrameNoKey as exc:\n self.add_error(None, str(exc))\n return\n\n # Store the data frame in the DB.\n try:\n # Get frame info with three lists: names, types and is_key\n self.frame_info = pandas.store_temporary_dataframe(\n self.data_frame,\n self.workflow)\n except Exception as exc:\n self.add_error(\n None,\n _('Unable to process file ({0}).').format(str(exc)))",
"def _validate_data_for_sql_table(data: dict, table: dict):\n for key, val in data.items():\n if val is None:\n if not table[key].is_nullable:\n raise DataTypeError(message=f\"Column '{key}' is not nullable, but \\\n value provided is None.\")\n elif not isinstance(val, table[key].data_type):\n raise DataTypeError(message=f\"Invalid data type for '{key}'.\")\n elif isinstance(val, str):\n if isinstance(table[key].precision, int):\n if len(val) > table[key].precision:\n raise DataTypeError(message=f\"String of chars too long for '{key}'. \\\n It must be {table[key][3]} chars maximum.\")\n return",
"def __is_valid_dict(self, GRFData):\n\n if type(GRFData) is not dict:\n raise ValueError(\"Expected GRFData to be of type '{}', but received type '{}'.\".format(type(dict), type(GRFData)))\n\n for component in self.comp_list:\n if component not in GRFData.keys():\n raise ValueError(\"Component '{}' not found in GRFData.\".format(component))",
"def _validate_data(df):\n if constants.IMAGE_URI_KEY not in df.columns:\n # or label_col not in df.columns:\n raise AttributeError(\n 'DataFrame must contain image_uri column {}.')\n if constants.LABEL_KEY not in df.columns:\n raise AttributeError(\n 'DataFrame must contain label column.')\n if constants.SPLIT_KEY not in df.columns:\n raise AttributeError(\n 'DataFrame must contain split column.')\n if list(df.columns) != constants.IMAGE_CSV_COLUMNS:\n raise AttributeError(\n 'DataFrame column order must be {}'.format(\n constants.IMAGE_CSV_COLUMNS))",
"def validate_inputs(self, input_dict):\n if not self.in_distributed_mode:\n required_keys = {\n 'project_value_usd',\n 'foundation_cost_usd',\n 'construct_duration',\n 'num_hwy_permits',\n 'num_turbines',\n 'project_size_megawatts',\n 'hub_height_meters',\n 'num_access_roads',\n 'markup_contingency',\n 'markup_warranty_management',\n 'markup_sales_and_use_tax',\n 'markup_overhead',\n 'markup_profit_margin',\n 'site_facility_building_area_df'\n }\n found_keys = set(input_dict.keys())\n if len(required_keys - found_keys) > 0:\n err_msg = '{}: did not find all required keys in inputs dictionary. Missing keys are {}'\n raise ValueError(err_msg.format(type(self).__name__, required_keys - found_keys))",
"def _check_structure(input_dict, mandatory, model):\n\n # Check to see if the input dictionary has the keys for the mandatory metadata structure.\n for key, value in mandatory.items():\n if 'custom_fields' in input_dict:\n if key not in input_dict and key not in input_dict['custom_fields']:\n raise ValueError('input dictionary does not have mandatory key: {key}'.format(key=key))\n else:\n if key not in input_dict:\n raise ValueError('input dictionary does not have mandatory key: {key}'.format(key=key))\n # Check to see if the input dictionary has keys that are wrong.\n for key, value in input_dict.items():\n # Checks to see if keys of input dictionary are in the model dictionary.\n if key != 'custom_fields':\n if key not in model:\n raise ValueError('Unknown input dictionary key: {key}.'.format(key=key))\n\n # If the model dictionary key value is a list check to see if value in list are correct type.\n if type(value) is list:\n if type(value[0]) is not model[key][0]:\n err_message = 'input dictionary key: {ky} list type: {ty} is not {ref}'\n err_message = err_message.format(ky=key, ty=value[0], ref=model[key][0])\n raise ValueError(err_message)\n\n else:\n # Checks to see if the type of the value for key is correct, in comparison to the model dictionary.\n if type(value) is not model[key]:\n err_message = 'input dictionary key: {ky} type: {ty} is not {ref}'\n err_message = err_message.format(ky=key, ty=type(value), ref=model[key])\n raise ValueError(err_message)\n return True",
"def test_convert_dicts_to_teradata_rows():\n data = [\n {\n 'str_col': 'value1',\n 'int_col_1': 2,\n 'int_col_2': 3\n }, {\n 'str_col': 'value2',\n 'int_col_1': 5,\n 'int_col_2': 6\n }\n ]\n\n output = row_handling.convert_dicts_to_teradata_rows(data)\n\n assert output[1]['int_col_2'] == 6",
"def verifyData(self, expectedDict):\n pass",
"def assert_correct_and_equal(self, other: Union[pd.DataFrame, dict]):\n if isinstance(other, dict):\n other = pd.DataFrame.from_records(other)\n if not isinstance(other, pd.DataFrame):\n raise TypeError(\"other must be a dataframe or a dict!\")\n # Sort cols\n cols = list(self._data.columns) + [c for c in other.columns if c not in self._data.columns]\n other = other[cols]\n SampleDataSchema.to_schema().select_columns(self._data.columns).validate(other)\n assert_frame_equal(\n self._data.sort_values(by=list(self._data.columns)).reset_index(drop=True),\n other.sort_values(by=list(self._data.columns)).reset_index(drop=True),\n )",
"def validate_inputs(self, input_dict):\n required_keys = {\n 'start_delay_hours',\n 'mission_time_hours',\n 'critical_wind_speed_m_per_s',\n 'wind_height_of_interest_m',\n 'wind_shear_exponent',\n 'weather_window'\n }\n found_keys = set(input_dict.keys())\n if len(required_keys - found_keys) > 0:\n err_msg = '{}: did not find all required keys in inputs dictionary. Missing keys are {}'\n raise ValueError(err_msg.format(type(self).__name__, required_keys - found_keys))",
"def test_invalid_key_birthday(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birsthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def verify_and_clean_input(input_dict):\n \n output_dict = {}\n try:\n output_dict[\"Account ID\"] = int(input_dict[\"Account ID\"])\n if output_dict[\"Account ID\"] < 0:\n return False, {}, \"not a valid account id\"\n except ValueError:\n return False, {}, \"not a valid account id\"\n \n try:\n output_dict[\"Created On\"] = to_datetime(input_dict[\"Created On\"], errors='coerce').strftime(\"%x\")\n except ValueError:\n output_dict[\"Created On\"] = \"\"\n \n output_dict[\"First Name\"] = input_dict[\"First Name\"]\n \n return True, output_dict, \"\"",
"def conform_output_data(rowdict,fields_to_show=''):\n rowdict['TimeStamp'] = str(rowdict['TimeStamp'])\n if fields_to_show:\n rowdict= removed_fields(fields_to_show, rowdict)\n return rowdict",
"def structure_checker(df, expected_fields, coerce = True, add_only = False):\n logging.info('Comparing expected and actual structure...')\n #what's absent?\n missing_fields = np.setdiff1d(expected_fields, df.columns)\n \n logging.warning('The following fields are missing: ' + str(missing_fields))\n \n if coerce:\n logging.info('Coercing structure to ideal...')\n #force the structure to be ideal.\n for field in missing_fields:\n df[field] = pd.np.nan\n if not add_only:\n logging.info('Removing additional fields...')\n df = df[expected_fields]\n \n return df",
"def validate_dict(data_dict, entity):\r\n fields = []\r\n for key, value in data_dict.items():\r\n if not value:\r\n fields.append(key)\r\n continue\r\n if len(fields) > 0:\r\n return provide_field_value(entity, fields)\r\n elif key == hqAddKey:\r\n status = validate_hqadd(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == logoUrlKey:\r\n status = validate_logourl(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == type_key:\r\n status = validate_officeType(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == name_key:\r\n status = None\r\n if entity == party_key:\r\n status = validate_partyname(value)\r\n elif entity == office_key:\r\n status = validate_officeName(value)\r\n if not status == ok_str:\r\n return status\r\n if fields:\r\n return provide_field_value(entity, fields)\r\n return ok_str",
"def __validate_inputs(self):\n if self.train_df is None:\n raise ValueError(\"Dataframe cannot be null\")\n\n if (\n self.test_df is not None\n and self.train_df.shape[1] != self.test_df.shape[1]\n ):\n raise KeyError(\n \"Target variable in still present in one of the datasets or\"\n \" the number of columns in both test and train are not equal.\"\n )\n\n # target_label should not be in list of columns\n if self.target_label is None:\n warnings.warn(\n \"Parameter 'target_label' is empty. If not provided and is present in dataframe, it may get encoded. \"\n \"To mitigate, provide the target_label from dataframe or provide explicit list of columns for encoding \"\n \"via the 'cat_cols' parameter\",\n UserWarning,\n )\n if (\n self.target_label is not None\n and self.cat_cols is not None\n and (self.target_label in self.cat_cols)\n ):\n raise ValueError(\n f\"Target column: {self.target_label} will be encoded. Remove it from cat_cols if in there.\"\n )\n\n if self.ord_dict is not None:\n for key, mapping in self.ord_dict.items():\n if mapping is None or mapping == {}:\n raise ValueError(\n f\"Expected a weight mapping for ordinal column {key}.\"\n f\" Received {self.ord_dict[key]}\"\n )",
"def _validate_snak_dict(self, snak_dict: typedefs.SnakDict) -> None:\n _REQUIRED_KEYS = [\"snaktype\", \"property\"]\n for req_key in _REQUIRED_KEYS:\n if req_key not in snak_dict:\n raise ValueError(\n \"required snak_dict keys are {} but only found {}\".format(\n _REQUIRED_KEYS, list(snak_dict.keys())\n )\n )\n self.snaktype = snak_dict[\"snaktype\"]\n self.property_id = snak_dict[\"property\"]\n\n self.snak_datatype = None # type: Union[str, None]\n self.value_datatype = None # type: Union[str, None]\n self.datavalue = None # type: Union[WikidataDatavalue, None]\n\n if self.snaktype == \"value\":\n _REQUIRED_KEYS = [\"datavalue\", \"datatype\"]\n for req_key in _REQUIRED_KEYS:\n if req_key not in snak_dict:\n raise ValueError(\n \"required snak_dict keys are {} but only found {}\".format(\n _REQUIRED_KEYS, list(snak_dict.keys())\n )\n )\n self.snak_datatype = snak_dict[\"datatype\"]\n self.value_datatype = str(snak_dict[\"datavalue\"][\"type\"])\n self.datavalue = get_datavalue_from_snak_dict(snak_dict)\n\n elif self.snaktype == \"somevalue\" or self.snaktype == \"novalue\":\n self.snak_datatype = None\n self.value_datatype = None\n self.datavalue = None\n\n else:\n raise ValueError(\n 'snaktype must be one of [\"value\", \"somevalue\", \"novalue\"] but got {}'.format(\n self.snaktype\n )\n )",
"def _validate_data(self, table_definition, data):\n if len(data) == 0:\n # Length zero columns get converted on write.\n return\n\n columns_checked = set()\n\n for column_name, column_definition in table_definition.c.items():\n if column_name in data:\n expected_type = self._expected_type(column_definition)\n is_nullable_numeric = (column_definition.nullable and\n expected_type in [int, float])\n if is_nullable_numeric:\n data[column_name] = data[column_name].fillna(value=np.nan)\n actual_type = data[column_name].dtype\n is_pandas_extension = isinstance(actual_type, ExtensionDtype)\n if expected_type is int:\n self._check_int_type(actual_type, column_name,\n is_pandas_extension, table_definition)\n elif expected_type is float:\n self._check_float_type(actual_type, column_name,\n table_definition)\n elif expected_type is str:\n self._check_str_type(actual_type, column_name, data,\n table_definition)\n else:\n raise RuntimeError(f\"Unexpected type from column \"\n f\"definitions: {expected_type}.\")\n elif not (column_definition.primary_key or\n column_definition.nullable):\n raise DismodFileError(f\"Missing column in data for table \"\n f\"'{table_definition.name}': \"\n f\"'{column_name}'\")\n columns_checked.add(column_name)\n\n extra_columns = set(data.columns).difference(table_definition.c.keys())\n if extra_columns:\n raise DismodFileError(f\"extra columns in data for table \"\n f\"'{table_definition.name}': {extra_columns}\"\n )",
"def test_invalid_key_age(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Gender': 'F', 'Asge': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def check_flow_by_fields(flowby_df, flowbyfields):\n for k, v in flowbyfields.items():\n try:\n vLog.debug(\"fba activity %s data type is %s\",\n k, str(flowby_df[k].values.dtype))\n vLog.debug(\"standard %s data type is %s\", k, str(v[0]['dtype']))\n except:\n vLog.debug(\"Failed to find field %s in fba\", k)",
"def normalize_data(data):\n\n # 'alphanumeric' and 'description' use the empty string instead of None\n if 'alphanumeric' in data and data['alphanumeric'] is None:\n data['alphanumeric'] = \"\"\n if 'description' in data and data['description'] is None:\n data['description'] = \"\"\n\n # Values may be None to clear them or leave them blank.\n\n # Type is the one exception; it is required and may not be None.\n if 'type' in data:\n try:\n data['type'] = OdlcType.lookup(data['type'])\n except KeyError:\n raise ValueError('Unknown odlc type \"%s\"; known types %r' %\n (data['type'], OdlcType.names()))\n\n if 'latitude' in data and data['latitude'] is not None:\n try:\n data['latitude'] = float(data['latitude'])\n if data['latitude'] < -90 or data['latitude'] > 90:\n raise ValueError\n except ValueError:\n # Unable to convert to float or out-of-range\n raise ValueError('Invalid latitude \"%s\", must be -90 <= lat <= 90'\n % data['latitude'])\n\n if 'longitude' in data and data['longitude'] is not None:\n try:\n data['longitude'] = float(data['longitude'])\n if data['longitude'] < -180 or data['longitude'] > 180:\n raise ValueError\n except ValueError:\n # Unable to convert to float or out-of-range\n raise ValueError(\n 'Invalid longitude \"%s\", must be -180 <= lat <= 180' %\n (data['longitude']))\n\n if 'orientation' in data and data['orientation'] is not None:\n try:\n data['orientation'] = Orientation.lookup(data['orientation'])\n except KeyError:\n raise ValueError(\n 'Unknown orientation \"%s\"; known orientations %r' %\n (data['orientation'], Orientation.names()))\n\n if 'shape' in data and data['shape'] is not None:\n try:\n data['shape'] = Shape.lookup(data['shape'])\n except KeyError:\n raise ValueError('Unknown shape \"%s\"; known shapes %r' %\n (data['shape'], Shape.names()))\n\n if 'background_color' in data and data['background_color'] is not None:\n try:\n data['background_color'] = Color.lookup(data['background_color'])\n except KeyError:\n raise ValueError('Unknown color \"%s\"; known colors %r' %\n (data['background_color'], Color.names()))\n\n if 'alphanumeric_color' in data and data['alphanumeric_color'] is not None:\n try:\n data['alphanumeric_color'] = \\\n Color.lookup(data['alphanumeric_color'])\n except KeyError:\n raise ValueError('Unknown color \"%s\"; known colors %r' %\n (data['alphanumeric_color'], Color.names()))\n\n if 'autonomous' in data:\n if data['autonomous'] is not True and data['autonomous'] is not False:\n raise ValueError('\"autonmous\" must be true or false')\n\n if 'actionable_override' in data:\n if (data['actionable_override'] is not True and\n data['actionable_override'] is not False): # yapf: disable\n raise ValueError('\"actionable_override\" must be true or false')\n\n return data",
"def _check_inputvalues(self):\n # Check x, y and z are int or float dtypes\n # ie do not contain any unusable values like strings\n if not (self.x.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n if not (self.y.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (v.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")",
"def test_convert_dicts_to_teradata_rows_returns_empty_list():\n data = []\n output = row_handling.convert_dicts_to_teradata_rows(data)\n assert output == []",
"def _validate_plaincolumns(self):\n\n # assert tuples for plaincolumns and plaincolumns to be PlainColumn\n if not isinstance(self.plaincolumns, tuple):\n raise ValueError(\"PlainFrame was instantiated incorrectly. \"\n \"`plaincolumns` needs to be of type `tuple`. \"\n \"However, {} was encountered. Please use \"\n \"`PlainFrame.from_plain` instead for convenient \"\n \"instantiation and proper type casts.\"\n .format(type(self.plaincolumns)))\n\n not_plaincolumn = [type(column)\n for column in self.plaincolumns\n if not isinstance(column, PlainColumn)]\n\n if not_plaincolumn:\n raise ValueError(\"PlainFrame was instantiated incorrectly. \"\n \"Elements of `plaincolumns` needs to be of type \"\n \"`PlainColumn`. However, {} was encountered. \"\n \"Please use `PlainFrame.from_plain` instead for \"\n \"convenient instantiation and proper type casts.\"\n .format(not_plaincolumn))\n\n # assert equal number of values per column\n row_lenghts = {len(column.values) for column in self.plaincolumns}\n if len(row_lenghts) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"column. Please check provided input data.\")\n\n # assert unique column names\n duplicates = {x for x in self.columns if self.columns.count(x) > 1}\n if duplicates:\n raise ValueError(\"Duplicated column names encountered: {}. \"\n \"Please use unique column names.\"\n .format(duplicates))",
"def validate_dto(cls, data: dict) -> bool:\n\n required_keys = {'signature', 'signer'}\n return (\n cls.validate_dto_required(data, required_keys)\n and cls.validate_dto_all(data, required_keys)\n )"
]
| [
"0.6217236",
"0.61962247",
"0.61173457",
"0.611672",
"0.6115982",
"0.60280246",
"0.6014561",
"0.58647096",
"0.58622396",
"0.5823101",
"0.5781498",
"0.5753542",
"0.5747231",
"0.57242537",
"0.57188284",
"0.57178247",
"0.57135326",
"0.56972915",
"0.56814194",
"0.56690544",
"0.5654862",
"0.5638559",
"0.56328",
"0.56274253",
"0.55972886",
"0.5578765",
"0.5567216",
"0.5565386",
"0.55518615",
"0.5534288"
]
| 0.7237003 | 0 |
Parses an HTTP Error from the Google API and returns the error message. | def _get_error_message_from_httperror(err):
json_error = json.loads(str(err.content.decode()))
return json_error.get('error', {}).get('message', '') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getError(self):\n \n return self.resp[\"error\"]",
"def read_tapis_http_error(http_error_object):\n h = http_error_object\n # extract HTTP response code\n code = -1\n try:\n code = h.response.status_code\n assert isinstance(code, int)\n except Exception:\n # we have no idea what happened\n code = 418\n\n # extract HTTP reason\n reason = 'UNKNOWN ERROR'\n try:\n reason = h.response.reason\n except Exception:\n pass\n\n # Tapis APIs will give JSON responses if the target web service is at all\n # capable of fulfilling the request. Therefore, try first to extract fields\n # from the JSON response, then fall back to returning the plain text from\n # the response.\n err_msg = 'Unexpected encountered by the web service'\n status_msg = 'error'\n version_msg = 'unknown'\n try:\n j = h.response.json()\n if 'message' in j:\n err_msg = j['message']\n if 'status' in j:\n status_msg = j['status']\n if 'version' in j:\n version_msg = j['version']\n except Exception:\n err_msg = h.response.text\n\n httperror = '[{}] {}; message: {}; status: {}; version: {}; response.content: {}'\n return httperror.format(code, reason, err_msg, status_msg, version_msg,\n h.response.content)",
"def error(self, http_error):\n return HTTPResponse(str(http_error), status=http_error.status)",
"def _get_error_message(response):\n try:\n return response.json()[\"detail\"]\n except (KeyError, _JSONDecodeError):\n return response.text",
"def _processGETErr(self, e, request):\r\n if e.check(InvalidRequest):\r\n msg = e.getErrorMessage()\r\n code = httplib.BAD_REQUEST\r\n elif e.check(UnauthorizedLogin):\r\n msg = e.getErrorMessage()\r\n code = httplib.UNAUTHORIZED\r\n elif e.check(InternalError):\r\n e.printTraceback()\r\n msg = 'Internal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n else:\r\n e.printTraceback()\r\n msg = 'Fatal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n\r\n self._render_GET(request, code, 'text/plain; charset=utf-8', msg)",
"def handle_api_error(e):\n return f\"Failed to call Giphy API: {e}\", 500",
"def from_http_error(cls, e):\n assert isinstance(e, requests.HTTPError), \"Expected 'requests.HTTPError' object\"\n r = e.response\n if r.status_code == 400:\n raise BadRequest(format_exception(e))\n elif r.status_code == 401:\n raise Unauthorized(format_exception(e))\n elif r.status_code == 403:\n raise Forbidden(format_exception(e))\n elif r.status_code == 404:\n raise NotFound(format_exception(e))\n elif r.status_code == 405:\n raise NoMethod(format_exception(e))\n elif r.status_code == 409:\n raise Conflict(format_exception(e))\n elif r.status_code == 411:\n raise LengthRequired(format_exception(e))\n elif r.status_code == 412:\n raise PreconditionFailed(format_exception(e))\n elif r.status_code == 416:\n raise BadRange(format_exception(e))\n elif r.status_code == 500:\n raise InternalServerError(format_exception(e))\n elif r.status_code == 501:\n raise NotImplemented(format_exception(e))\n elif r.status_code == 502:\n raise BadGateway(format_exception(e))\n else:\n logger.error(\n 'Unhandled HTTPError status code {sc} -- {msg}.'.format(sc=r.status_code, msg=format_exception(e)))\n raise InternalServerError(format_exception(e))",
"def httperror( status_code=500, message=b'' ):",
"def get_http_error(status_code: int) -> Optional[Type[HTTPError]]:\n return _STATUS_CODE_TO_HTTP_ERRORS.get(status_code)",
"def _extract_error(self, resp):\n reason = resp.headers.get(\"reason\", None)\n full_response = None\n\n if reason is None:\n try:\n # if response is in json format\n reason = resp.json()[\"error\"][\"msg\"]\n except KeyError:\n # if json response has unexpected structure\n full_response = resp.content\n except ValueError:\n # otherwise we assume it's html\n reason, full_html = self._scrape_response(resp.headers, resp.content)\n full_response = unescape_html(full_html)\n\n msg = \"[Reason: %s]\" % reason\n\n if reason is None:\n msg += \"\\n%s\" % full_response\n\n return msg",
"def errmsg(r):\n return \"%s %s\\n\\n%s\" % (r.status, r.reason, r.raw)",
"def parse_response_error(html_text: str) -> str:\n html = BeautifulSoup(markup=html_text, features=\"html.parser\")\n inner_html = BeautifulSoup(markup=html.p.text, features=\"html.parser\")\n message = inner_html.text if inner_html.p is None else inner_html.p.text\n if \"face_not_found\" in message:\n message = \"Could not find a face in the image.\"\n elif \"multiple_faces\" in message:\n message = \"The image has more than one person.\"\n elif \"quality_failed\" in message:\n message = \"The provided image does not have enough quality.\"\n return message",
"def error(self):\n errors = self._info.get('error', {}).get('errors')\n if not errors:\n return None\n return ' '.join(err.get('message', 'unknown') for err in errors)",
"def parsed_error_msg(self):\r\n return self.error_msg",
"def parse_error (self, error_str):\r\n\t\t# Regex out the error and channel indices from the string\r\n\t\tob = re.match(ERROR_FORMAT, error_str)\r\n\t\t\r\n\t\t# If error_str doesn't match an error, return None\r\n\t\tif ob is None:\r\n\t\t\treturn None\r\n\t\t\r\n\t\t# Extract the two matched groups (i.e. the error and channel indices)\r\n\t\terrno,chno = ob.groups()\r\n\t\terrno = int(errno)\r\n\t\tchno = int(chno)\r\n\t\t\r\n\t\t# Get the error description; if none is defined, mark as unrecognised\r\n\t\terrdesc = self.error_desc_dict.get(errno, 'Unrecognised error code.').format(ch=chno)\r\n\t\t\r\n\t\treturn {'type':'err', 'id':errno, 'ch':chno, 'desc':errdesc, 'raw':error_str}",
"def handle_error_response(response_body):\n try:\n error_components = []\n error_data = json.loads(response_body)\n\n error_components.append(\"Error code {}\".format(error_data[\"error\"]))\n if \"error_description\" in error_data:\n error_components.append(\": {}\".format(error_data[\"error_description\"]))\n if \"error_uri\" in error_data:\n error_components.append(\" - {}\".format(error_data[\"error_uri\"]))\n error_details = \"\".join(error_components)\n # If no details could be extracted, use the response data.\n except (KeyError, ValueError):\n error_details = response_body\n\n raise exceptions.OAuthError(error_details, response_body)",
"def _extract_error(self, response):\r\n try:\r\n et = ElementTree.parse(response)\r\n error = et.findtext('body/pre')\r\n return error\r\n except ExpatError,e:\r\n return \"%s: %s (%d/%s)\" % (e,response.read(),response.status,response.reason)",
"def raise_for_status(response):\n http_error_msg = \"\"\n\n if 400 <= response.status_code < 500:\n http_error_msg = \"{} Client Error: {}\".format(\n response.status_code, response.reason\n )\n\n elif 500 <= response.status_code < 600:\n http_error_msg = \"{} Server Error: {}\".format(\n response.status_code, response.reason\n )\n\n if http_error_msg:\n try:\n more_info = response.json().get(\"message\")\n except ValueError:\n more_info = None\n if more_info and more_info.lower() != response.reason.lower():\n http_error_msg += \".\\n\\t{}\".format(more_info)\n raise requests.exceptions.HTTPError(http_error_msg, response=response)",
"def _parse_store_error(self, response):\n default_msg = \"Failure working with the Store: [{}] {!r}\".format(\n response.status_code, response.content\n )\n try:\n error_data = response.json()\n except ValueError:\n return default_msg\n\n try:\n error_info = [(error[\"message\"], error[\"code\"]) for error in error_data[\"error-list\"]]\n except (KeyError, TypeError):\n return default_msg\n\n if not error_info:\n return default_msg\n\n messages = []\n for msg, code in error_info:\n if code:\n msg += \" [code: {}]\".format(code)\n messages.append(msg)\n return \"Store failure! \" + \"; \".join(messages)",
"def print_http_error(error):\n if hasattr(error, \"msg\"):\n print \"%s msg '%s'.\" % (ERROR, error.msg)\n if hasattr(error, \"reason\"):\n print \"%s reason '%s'.\" % (ERROR, error.reason)\n if getattr(error, \"message\"):\n print \"%s message '%s'.\" % (ERROR, error.message)\n if hasattr(error, \"code\"):\n print \"%s error code '%d'.\" % (ERROR, error.code)",
"def error(self):\n error = self._wrapped.error\n if error:\n return error\n\n return self.json['response'].get('error')",
"def handle_errors(resp: requests.Response):\n error_text = resp.text\n if isinstance(resp.text, bytes):\n try:\n error_text = error_text.decode(UTF_ENCODING)\n except UnicodeDecodeError:\n error_text = error_text.decode(\"iso-8859-1\")\n if error_text != \"\":\n _raise_error(error_text)\n resp.raise_for_status()",
"def _bad_request_error(error_msg):\n return _error(error_msg, BAD_REQUEST_ERROR)",
"def auth_error(error):\n return jsonify(error.error), error.status_code",
"def return_request_error(error_message: str, http_status_code: int, response: Response):\n response.status_code = http_status_code\n return {\n 'error': error_message\n }",
"def __get_response_error(message, response):\n\n rjson = response.json()\n error_description = \"Code %s - %s\" %(str(response.status_code), rjson.get('message'))\n\n return {\n 'app_message': \"%s\" % (message),\n 'error_description': \"[%s] - %s\" % (message, error_description),\n 'code': response.status_code\n }",
"def error(logger_id, data):\n if data.status_code == 200:\n log.error(logger_id, _(\"CC1 - Problem with request: \") + data.url\n + _(\" obtain problem: \") + ast.literal_eval(data.text).get(DATA))\n else:\n log.error(logger_id, _(\"CC1 - Problem with request: \") + data.url)",
"def handle_error_response(resp):\n error_message = ''\n error_message_with_reason = ''\n try:\n error_message = (\n resp.json()\n .get('fireeyeapis', {})\n .get('description', '')\n .strip()\n )\n error_message = error_message.replace('\\n', '')\n if error_message:\n error_message_with_reason = f'Reason: {error_message}'\n except ValueError: # ignoring json parsing errors\n pass\n if resp.headers.get('Content-Type', '') == CONTENT_TYPE_ZIP:\n error_message = error_message_with_reason = resp.text\n\n status_code_messages = {\n 400: f\"{MESSAGES['BAD_REQUEST_ERROR']} {error_message_with_reason}\",\n 401: MESSAGES['AUTHENTICATION_ERROR'],\n 403: error_message,\n 404: error_message,\n 406: error_message,\n 407: MESSAGES['PROXY_ERROR'],\n 500: MESSAGES['INTERNAL_SERVER_ERROR'],\n 503: MESSAGES['INTERNAL_SERVER_ERROR'],\n }\n\n if resp.status_code in status_code_messages:\n demisto.debug(\n f'Response Code: {resp.status_code}, Reason: {status_code_messages[resp.status_code]}'\n )\n raise DemistoException(status_code_messages[resp.status_code])\n else:\n raise DemistoException(resp.raise_for_status())",
"def _parse_http_status(status_code, status_reason):\n for error in AZURE_HTTP_ERROR_CODES:\n if error == status_code:\n raise TypeError(\"Error {0}: {1}\".format(status_code, status_reason))",
"def handle_error(self, error):\n html = error.response.content\n raise SystemExit(\"API Error:\\n %s\" %\n \"\\n \".join(html.itertext()))"
]
| [
"0.6838421",
"0.6793689",
"0.6636548",
"0.6565121",
"0.64546394",
"0.63964623",
"0.63945633",
"0.6299346",
"0.62722117",
"0.6216416",
"0.6203617",
"0.61820084",
"0.6117928",
"0.61079687",
"0.61059207",
"0.6047966",
"0.60429734",
"0.60072476",
"0.60060865",
"0.5990251",
"0.59763217",
"0.5943081",
"0.59302396",
"0.5914873",
"0.5901664",
"0.58928424",
"0.5868568",
"0.5861907",
"0.585165",
"0.5824598"
]
| 0.73818463 | 0 |
run the server and wait until it returns | def run(self):
self.rpc_server.serve_forever(0.5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n self._server = self._get_server()\n self._server.serve_forever()",
"def run(self):\n self.__server.serve_forever()",
"def run():\n server = current_server()\n server._auto_stop = True\n return start()",
"def run(self):\n self.__rpc_server.run()",
"def main():\n s = start_server()\n accept_connection(s)",
"def serve(self):\n\t\timport thread\n\t\tthread.start_new_thread(self._server_thread, tuple())",
"def run_server(self):\n self.establish_connection()\n while True:\n self.receive_data(self.conn)",
"def run(self):\n self.connect()\n self.run_forever()",
"def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()",
"def run(self):\n try:\n while self._running:\n time.sleep(1)\n finally:\n self._exit()",
"def run(self):\n\n print(\"Running server on address: {}, port: {}\".format(self.ip_address, self.port))\n self.setup_for_run()\n\n try:\n read_list = [self.server]\n select_timeout = 1\n while True:\n # receive a connection request from client and get conn, addrr tuple\n readable, _, _= select.select(read_list, [], [], select_timeout)\n if self.server in readable:\n conn, addr = self.server.accept()\n # log connnection confirmation message\n print(addr[0] + \" connected\")\n # start a new client thread with the new conn and address, and create new struct\n self.addr_to_conn_struct_map[addr] = ConnStruct(conn, self.start_new_thread(conn, addr))\n # process msgs in queue\n self.process_queue()\n\n except KeyboardInterrupt:\n pass\n finally:\n self.shutting_down.set()\n # clean up all known client connections and threads\n for addr in self.addr_to_conn_struct_map:\n self.clean(addr, keep=True)\n print(\"Exiting Server Process, waiting for clients cleanup\")\n # wait for client threads to get the message and clean their sht\n time.sleep(1)\n # close server connection\n self.server.close()\n print(\"Done!\")",
"def run():\n\n # Construct a server.\n server = wsgiref.simple_server.make_server(\n _config[ 'address' ],\n _config[ 'port' ],\n application\n )\n\n # Run the server.\n server.serve_forever()\n\n # Return result.\n return 0",
"def wait_for_termination(self):\n self.server.wait_for_termination()",
"def run(self):\n while self.running:\n self.handle_request()",
"async def _main(self):\n while True:\n time.sleep(1)",
"def serve(self):\n\t\tself.keep_running=1\n\t\tif self.debug:\n\t\t\tprint \"server started\"\n\t\ttry:\n\t\t\twhile self.keep_running:\n\t\t\t\tself.handle_request()\n\t\tfinally:\n\t\t\tif self.debug:\n\t\t\t\tprint \"server finished\"\n\t\t\tself.keep_running=0\n\t\t\tself.close()",
"def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()",
"def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()",
"def start(self):\n threading.Thread(target=self.serve_forever).start()",
"def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])",
"async def run_sever_async(host: str = \"0.0.0.0\", port: int = 8000):\n runner = web.AppRunner(app_factory())\n site = web.TCPSite(runner, host, port)\n print(f\"guix-python-app: Runing server on {host}:{port}\")\n print(f\">> try: curl http://{host}:{port}/ping\")\n await site.start()",
"def run(self):\n if not self.running:\n self.loop.run_forever()",
"def server():",
"def server():",
"def _wait_for_server():\n start_time = time.time()\n\n while True:\n try:\n urllib.request.urlopen('http://localhost:5000/')\n break\n except Exception:\n time.sleep(.1)\n if time.time() - start_time > 2:\n raise",
"def wait(self):\n try:\n self._server.wait()\n except greenlet.GreenletExit:\n LOG.info(_(\"WSGI server has stopped.\"))",
"def run():\r\n log.debug('Starter::run()')\r\n try:\r\n # check specified port\r\n if not conf.port:\r\n raise Exception(\"Please specify port number! (use --port)\")\r\n Server(conf.port).run()\r\n except Exception as E:\r\n log.critical(E)",
"def run_forever(self):\n return self.gw.run()",
"def _run(self) -> None:\n asyncio.set_event_loop(self._server_loop)\n self._server_loop.run_until_complete(self._runner.setup())\n\n site = web.TCPSite(\n self._runner, self.host, self.port, ssl_context=self.ssl_context\n )\n self._server_loop.run_until_complete(site.start())\n\n # If the Server was initialized with port 0, determine what port the\n # underlying server ended up listening on\n if self.port == 0:\n site_server = cast(AsyncioServer, site._server)\n sockets = cast(List[Socket], site_server.sockets)\n socket = sockets[0]\n self.port = socket.getsockname()[1]\n\n self._startup_event.set()\n self._server_loop.run_forever()",
"def run(self):\n try:\n self.ssh_connection.connect()\n dns_response = self.query_dns_server()\n result = self.process_dns_response(dns_response)\n self.handle_result(result)\n\n except Exception as e:\n print(f\"Error: {str(e)}\")\n sys.exit(2)"
]
| [
"0.7389395",
"0.7237915",
"0.72022533",
"0.7041745",
"0.7013421",
"0.69024694",
"0.6892635",
"0.6891861",
"0.68075854",
"0.68056464",
"0.6782797",
"0.6745265",
"0.6696141",
"0.66879743",
"0.66873586",
"0.66545063",
"0.6602608",
"0.6602608",
"0.65958893",
"0.65953624",
"0.65850544",
"0.6564999",
"0.6554001",
"0.6554001",
"0.6552439",
"0.6542945",
"0.6517653",
"0.65118814",
"0.64993674",
"0.6488838"
]
| 0.7507121 | 0 |
test if there is something to read on the console | def _check_console_input(self):
if os.name == "nt":
if 0 == ctypes.windll.Kernel32.WaitForSingleObject(self.console_handle, 500):
return True
elif os.name == "posix":
            (inputready, _, _) = select.select([sys.stdin], [], [], 0.5)
if len(inputready) > 0:
return True
else:
raise Exception("%s platform is not supported yet" % os.name)
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_interactive():\n\n return sys.stdin.isatty()",
"def hastty():\n try:\n return sys.stdin and sys.stdin.isatty()\n except Exception: # pragma: no cover\n return False # i.e. no isatty method?",
"def chk_stdin(self):\t# check keyboard input\n\t\tdr, dw, de = select([sys.stdin], [], [], 0)\n\t\treturn dr",
"def is_raw_read(command): \n if command.startswith('<READ') and command.endswith('>') and \\\n is_valid_raw(command):\n return True\n else: \n return False\n # end if",
"def is_terminal(self):",
"def is_interactive():\n return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))",
"def is_tty(stream): # taken from catkin_tools/common.py # pragma: no cover\n return hasattr(stream, 'isatty') and stream.isatty()",
"def new_user_input(self):\n ready, _, _ = select([stdin], [], [], 0.0)\n return stdin in ready",
"def check_command(self):\n return self.process is not None and self.process.poll() is None",
"def isatty(self):\n return False",
"def line_event_loop(self, reader, writer):\n while True:\n line = reader.readline()\n if line == '':\n return True\n \n argv = line.split()\n \n if argv[0] == 'exit':\n return True\n elif argv[0] == 'echo':\n print argv[1:]\n elif argv[0] == 'filename':\n print argv[1]\n else:\n print 'Unrecognised command:', argv",
"def line_event_loop(self, reader, writer):\n while True:\n line = reader.readline()\n if line == '':\n return True\n \n argv = line.split()\n \n if argv[0] == 'exit':\n return True\n elif argv[0] == 'echo':\n print argv[1:]\n elif argv[0] == 'filename':\n print argv[1]\n else:\n print 'Unrecognised command:', argv",
"def start_console_reader():\n\n def console_reader():\n global console_input\n console_input = None\n\n while console_input is not False:\n sys.stderr.write(\"reading\\n\")\n if console_input is None:\n console_input = sys.stdin.readline()\n else:\n time.sleep(1)\n\n console_reader_thread = threading.Thread(target=console_reader)",
"def isTerminalRunning(self):\n return self.f4 is 'R'",
"def quiet(self):\n # do not print output if input ends in ';'\n try:\n if self.shell.input_hist[self.prompt_count].endswith(';\\n'):\n return True\n except IndexError:\n # some uses of ipshellembed may fail here\n pass\n return False",
"def readline(self) -> Optional[str]:",
"def has_more_commands(self):\n return not self.eof",
"def stderrConnectedToTerm():\n return sys.stderr.isatty()",
"def isTerminal(self) -> bool:\n ...",
"def readline(self) -> str | None:",
"def has_buffered_inputs(self):",
"def is_terminal(self) -> bool:\n pass",
"def wait_for_input(self):\n if self._dont_enter_interactive_mode:\n return\n stop = False\n while True:\n print(\">>> \", end='')\n try:\n command_str = input()\n except EOFError:\n print(\"Exiting interactive mode\")\n break\n stop = self.interpret_command(command_str)\n if stop:\n print(\"Exiting interactive mode\")\n break",
"def __can_read_command_line(self, pid):\n return os.path.isfile('/proc/%d/cmdline' % pid)",
"def read():\n print(command(\"R\"))",
"def console(self, msg, color):\r\n if self.__isInit != True:\r\n return",
"def isatty(self):\n\n return False",
"def _is_successful_read(self, lines):\n if (lines[0].strip()[-3:]) != 'YES': \n return 0\n return 1",
"def read_line():\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()",
"def read_line():\n # try reading a line, removing any extra whitespace\n try:\n line = sys.stdin.readline().strip()\n # i3status sends EOF, or an empty line\n if not line:\n sys.exit(3)\n return line\n # exit on ctrl-c\n except KeyboardInterrupt:\n sys.exit()"
]
| [
"0.67703044",
"0.6668972",
"0.6553596",
"0.64391434",
"0.64319617",
"0.63720024",
"0.6358427",
"0.63391805",
"0.61975825",
"0.6152854",
"0.6141509",
"0.6141509",
"0.60947686",
"0.6083325",
"0.6058859",
"0.60456157",
"0.60350746",
"0.5998256",
"0.59890354",
"0.5987575",
"0.597453",
"0.59604615",
"0.59281695",
"0.59070814",
"0.5892194",
"0.58760333",
"0.5871038",
"0.5868272",
"0.58236533",
"0.58236533"
]
| 0.7178151 | 0 |
read from the console, transfer to the server and write the answer | def run(self):
while self._go.isSet(): #while app is running
if self._check_console_input(): #if something to read on the console
cmd = sys.stdin.readline() #read it
                self.inq.put(cmd) #dispatch it to the server
response = self.outq.get(timeout=2.0) #wait for an answer
sys.stdout.write(response) #write the answer on the console
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_command(self, data):\n try:\n self.write(data)\n reply = self.read_line()\n \n if reply == \"{}\":\n pass\n else:\n print \"send_command: received bad reply %s\" % (reply)\n sys.exit(1)\n except Exception:\n raise",
"def interact(self):\n print('Ready to interact on socket connected with {}.'.format(self.remote_addr))\n try:\n # get initial input from user\n print('Enter input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n while True:\n if data.startswith('exit'):\n print('[*] Closing remote shell.')\n self.close()\n break\n # wait for response from target host\n recv_len = 1\n response = ''\n while recv_len:\n data = self.remote_socket.recv(4096)\n recv_len = len(data)\n response += data.decode()\n if recv_len < 4096:\n break\n print(response)\n # get further input from user\n print('Enter further input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n except Exception as e:\n print(e)\n print('[*] Closing remote shell.')\n self.close()",
"def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()",
"def run_cmd(server, client):\n msg = [client.get_command()]\n client.input_list += msg\n server.logger.info(\"RECEIVED INPUT {} : {}\".format(client.ip, msg[0]))\n if not client.username or not client.password:\n server.login_screen(client, msg)\n return\n loop_cmds(server, client, msg[0].split(';'))\n server.return_prompt(client)",
"def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)",
"def send_cmd(self):\n\n cmd = self.repl_input.get().encode()\n self.serial.write(cmd + b\"\\r\")\n self.repl_input.set(\"\")",
"def do_cmd(cmd,sock):\n\n buffer = ''\n \n # Write the command and wait one second.\n print 'writing command '+cmd \n sock.send(cmd+SBE37_NEWLINE)\n time.sleep(1)\n \n # Block to receive all data.\n # Continue reading if the received data does not include a prompt.\n # Break out when the received data ends in a prompt.\n while True:\n try:\n data = ''\n data = sock.recv(1024)\n buffer += data\n except:\n raise\n else:\n #print 'received '+str(len(data))+' bytes' \n if buffer.endswith(SBE37Prompt.COMMAND):\n break\n elif buffer.endswith(SBE37Prompt.AUTOSAMPLE):\n break\n elif buffer.endswith(SBE37Prompt.BAD_COMMAND):\n break\n\n return buffer",
"def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response",
"def send(self, serial_cmnd):\n self.sio.write(serial_cmnd+\"\\n\") # TextIOWrapper object converts the newline character to \"\\r\\n\", this is required by the device \n self.sio.flush() # it is buffering. required to get the data out *now*\n response = self.sio.readline()\n response = response.rstrip() # Trim the newline character\n if (response == \"ok\"):\n return True\n else:\n logging.debug(\"Board response:\" + response) \n return response",
"def server_do(self,input, connstream):\r\n pass",
"def run(self):\n # send data to client\n currProcessID = os.getpid()\n print \"PID: %s sending echo message to server: %s\" % (currProcessID,\n ECHO_MSG)\n sentDataLen = self.sock.send(ECHO_MSG)\n print \"Sent: %d characters so far...\" % sentDataLen\n\n # Display server response\n response = self.sock.recv(BUF_SIZE)\n print \"PID %s recieved: %s\" % (currProcessID, response[5:])",
"def respond(cmd,t,p):\n\tt.write(cmd)\n\treturn wait(t,p)",
"def readAndRespond(self):\n\n if self.ser.isOpen():\n try:\n #Try to read\n self.ser.flushOutput()\n response = self.ser.readline()\n self.parseString(response)\n print response\n #if response.strip() == \"up\":\n # self.moveArmUp()\n # print \"Moving Up!\"\n #elif response.strip() == \"down\":\n # self.moveArmDown()\n # print \"Moving Down!\"\n except Exception, e:\n print \"Error: \" + str(e)",
"def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response",
"def shell(s_socket):\r\n shellname = \"powershell\"\r\n bytes_value = to_bytes(len(shellname), 4, 'little')\r\n s_socket.send('o' + bytes_value + shellname)\r\n value = raw_input(shellname + \"#> \")\r\n while True:\r\n bytes_value = to_bytes(len(value), 4, 'little')\r\n s_socket.send('s' + bytes_value + value)\r\n print(s_socket.recv(20000))\r\n\r\n if 'exit' in value:\r\n break\r\n\r\n value = raw_input(shellname + \"#> \")",
"def echo_client(host, port):\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Connect the socket to the server\n server_address = (host, port)\n print \"Connecting to %s port %s\" % server_address\n sock.connect(server_address)\n\n while True:\n # Send data\n try:\n # Send data\n # message = raw_input()\n # print \"Sending %s\" % message\n # sock.sendall(message)\n # # Look for the response\n message = raw_input('\\ncontinue receive data? [Y / N]')\n sendata = raw_input('send something?\\n')\n if sendata is not None:\n sock.sendall(sendata)\n if message == 'N' or message == 'n':\n break\n \n data = sock.recv(1024)\n sock.sendall('reply:'+data)\n print(\"Received: %s\" ) % data\n except socket.errno, e:\n print \"Socket error: %s\" %str(e)\n except Exception, e:\n print \"Other exception: %s\" %str(e)\n # finally:\n # print \"Closing connection to the server\"\n # # sock.close()\n print('end connection\\n')\n sock.close()",
"def console():\r\n while True:\r\n interpret_command(input(\"POM> \"))",
"def communicate(host, port):\n s = socket.socket()\n s.connect((host, port))\n payload = sys.stdin.read().encode()\n s.sendall(payload)\n s.shutdown(socket.SHUT_WR)\n\n output = []\n while True:\n read = s.recv(READ_SIZE)\n if read:\n output.append(read.decode())\n else:\n break\n return ''.join(output)",
"def ask(self):\n subprocess.run([\"say\", \"-v\", \"Kyoko\", str(self.answer)])",
"def SendCmd(self, command):\r\n if not self.__CheckConnectStatus():\r\n print \"Non telnet connection!\"\r\n return False\r\n\r\n if command == None or command == False:\r\n print \"No valid command to run.\"\r\n return True\r\n else:\r\n command = str(command) + \"\\r\\n\"\r\n print self.prompt + command\r\n \r\n try:\r\n self.tn.read_very_eager() \r\n self.tn.write(command)\r\n p_Output = self.tn.read_until(self.prompt, self.timeout)\r\n print p_Output\r\n return p_Output\r\n\r\n except:\r\n print \"Write command failure\"\r\n return False",
"def command(s_socket):\r\n command = raw_input(\"#> \")\r\n bytes_value = to_bytes(len(command) + 5, 4, 'little')\r\n s_socket.send('c' + bytes_value + command)\r\n\r\n print(s_socket.recv(MAX_BUFFER_LENGTH))",
"async def handle_echo(reader, writer):\r\n addr = writer.get_extra_info('peername')\r\n message = f\"{addr} is connected !!!!\"\r\n CLIENT_DICTIONARY[addr[1]] = Server()\r\n print(message)\r\n while True:\r\n data = await reader.read(10000)\r\n message = data.decode().strip()\r\n if message == 'quit':\r\n CLIENT_DICTIONARY[addr[1]].removelog()\r\n break\r\n print(f\"Received {message} from {addr}\")\r\n reply = CLIENT_DICTIONARY[addr[1]].split(message)\r\n print(f\"Send: {reply}\")\r\n #hello = 'successful'\r\n if reply != '' or reply != 'None':\r\n writer.write(reply.encode())\r\n else:\r\n reply = '.'\r\n writer.write(reply.encode())\r\n await writer.drain()\r\n print(\"Close the connection\")\r\n writer.close()",
"def run(self, cmdline):\n self.send(cmdline+\"\\n\")\n rdata = '\\n'.join(self.recv_to_prompt())\n return rdata",
"def write(self):\n\n while self.dowrite:\n data = sys.stdin.readline()\n if (self.algo == \"rsa\"):\n data = self.ras_encrypt(data)\n if (self.algo == \"des\"):\n data = self.des_encrypt(data)\n if (self.algo == \"3des\"):\n data = self.triple_des_encrypt(data)\n if (self.algo == \"aes\"):\n data = self.aes_encrypt(data)\n self.conn.send(data)\n\n if (data.strip() == self.exitcode):\n self.conn.shutdown(socket.SHUT_RDWR)\n self.conn.close()\n self.dowrite = False",
"def send_data(sock):\n while True:\n data = sys.stdin.readline()\n sock.send(data.encode())",
"def send_msg(self, msg):\n self.proc.stdin.write(msg)",
"def sendCmd(self,cmd):\n self.ser.write(cmd.encode()+END.encode())\n out = self.ser.readline()\n return out",
"def _send_command(self, command):\n command = \"%s\\n\" % (command.strip())\n self.server.write(command)\n self.server.flush()\n\n #read the length of the result\n length = int(self.server.readline())\n output = self.server.read(length)\n\n result = pickle.loads(output)\n if result[0] == 'ok':\n return result[1]\n else:\n raise RobotCommandError(str(result))",
"def issue(self, cmd):\n self.send([cmd])\n return self.read_until_prompt()[1:] # drop the echo",
"def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())"
]
| [
"0.6864386",
"0.6665979",
"0.6607433",
"0.6399282",
"0.6332425",
"0.63022834",
"0.6229169",
"0.61992306",
"0.6194718",
"0.6157062",
"0.6156837",
"0.6120637",
"0.61120206",
"0.6109998",
"0.6108802",
"0.60982066",
"0.608361",
"0.60783035",
"0.60692763",
"0.60670555",
"0.6063523",
"0.60422134",
"0.60176826",
"0.6011047",
"0.59924924",
"0.5991598",
"0.5956189",
"0.5953467",
"0.59441996",
"0.5942327"
]
| 0.70131034 | 0 |
convert a tuple to a string | def _tuple_to_str(self, the_tuple):
ret = ""
for item in the_tuple:
ret += (" " + str(item))
return ret[1:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _tupstr(tuple_):\n return ', '.join(list(map(str, tuple_)))",
"def tupleStrFormat(tupl):\n string = \"this is a tuple (\"\n for element in tupl:\n string += str(element) + \", \"\n string += \")\"\n return string",
"def str_tuple(item):\n return \"{}:{}\".format(item[0], item[1])",
"def tuple_to_string(letter_word_pair):\n letter, word = letter_word_pair\n return '{letter}: {word}'.format(letter=letter, word=word)",
"def deg_tuple_to_str(tup):\n if len(tup) == 0:\n return \"()\"\n str = '('\n for x in tup:\n str += \"{0:.2f}, \".format(x)\n str = str[:-2] + ')'\n return str",
"def strtuple(iterable): \n string = ''\n function = type(strtuple)\n for i in iterable:\n if isinstance(i , function):\n string += i.__name__ + ', '\n else:\n string += str(i) + ', '\n string = string.rstrip(', ')\n string = '(' + string + ')'\n return string",
"def tuple_to_string(transcript_info):\n\n return \"\\t\".join(transcript_info.data_attributes())",
"def format_tuple(data):\n return \",\".join([str(item) for item in data])",
"def tupleToString(vector):\n string = '[%d](' % len(vector)\n for x in vector[:-1]:\n string += '%f,' % x\n string += '%f)' % vector[-1]\n return string",
"def main():\n sampleTuple = (100, 200, 300)\n print(tupleStrFormat(sampleTuple))",
"def parents_to_string(parent_tuple):\n return str(parent_tuple[0])+\" \"+str(parent_tuple[1])",
"def _convert_rgb_tuple_to_string(self, rgb_tuple):\n\n return ''.join([self._zero_pad_number(v) for v in rgb_tuple])",
"def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples",
"def serialize_tuple(self, obj):\n return '(' + ''.join([self.serialize(i) for i in obj]) + 't'",
"def tuple_to_string(tuptup):\n\n def join_deepest(tup, sep=';'):\n \"\"\" Recursive function to create the string representation for the deepest level of the\n tuptup list.\n\n Parameters\n ----------\n tup : object\n Element to join if list or list of lists.\n\n sep : str, optional\n Separation character to join the list elements by.\n\n Returns\n -------\n object\n List containing joined string in max depth. Str if input depth = 1.\n\n \"\"\"\n\n if not isinstance(tup, list):\n return tup\n if not isinstance(tup[0], list):\n return sep.join(tup)\n\n for idx, val in enumerate(tup):\n tup[idx] = join_deepest(val, sep)\n return tup\n\n tup = copy.deepcopy(tuptup)\n tup = join_deepest(tup, ';')\n tup = join_deepest(tup, '/')\n tup = join_deepest(tup, '|')\n return tup",
"def gen_type_tuple_string(self, name, node):\n return \"('{}', {})\".format(name, self.gen_type_string(node))",
"def ymd_tuple_to_string(t):\n return '%s_%s_%s' % t",
"def tostr (x):\n if isinstance (x, tuple):\n return tuple ( map (tostr, x))\n if isinstance(x, (float, numpy.float32,numpy.float64)):\n return float_to_str(x)\n return str(x)",
"def tuple2str(tagged_token, sep='/'):\n word, tag = tagged_token\n if tag is None:\n return word\n else:\n assert sep not in tag, 'tag may not contain sep!'\n return '%s%s%s' % (word, sep, tag)",
"def to_string_tuple(self):\n return \" \".join(self._left), \" \".join(self._right)",
"def encode_tuple1(value: tuple) -> bytes:\n raise NotImplementedError()",
"def encode_tuple2(value: tuple) -> bytes:\n raise NotImplementedError()",
"def encode_tuple3(value: tuple) -> bytes:\n raise NotImplementedError()",
"def species_tuple_to_string(species_tuple, roman_numerals=True):\n atomic_number, ion_number = species_tuple\n element_symbol = ATOMIC_NUMBER2SYMBOL[atomic_number]\n if roman_numerals:\n roman_ion_number = int_to_roman(ion_number+1)\n return '{0} {1}'.format(str(element_symbol), roman_ion_number)\n else:\n return '{0} {1:d}'.format(element_symbol, ion_number)",
"def _rgb_to_string(rgb_tup: tuple, alpha: int = 1) -> str:\n return f\"rgba({', '.join(map(str, rgb_tup))}, {alpha})\"",
"def _encode_tuple(self, time_tuple):\n\t\treturn self._encode_bit('1', time_tuple[0]) + self._encode_bit('0', time_tuple[1])",
"def val2str(val):\n # Return the input if it's a string\n if isinstance(val,str ): valstr=val\n # Handle types where spaces are added\n elif isinstance(val,tuple): valstr=repr(val).replace(', ',',')\n elif isinstance(val,list ): valstr=repr(val).replace(', ',',')\n elif isinstance(val,dict ): valstr=repr(val).replace(', ',',').replace(': ',':')\n # Otherwise use repr()\n else: valstr=repr(val)\n # Return output\n return valstr",
"def _tuple_to_cpppo_tags(cls, tags, serializer=':'):\n\n tags_string = ''\n for tag in tags:\n tags_string += str(tag[0])\n for field in tag[1:-1]:\n tags_string += serializer\n # print 'DEBUG _tuple_to_cpppo_tags field: ', field\n tags_string += str(field)\n\n tags_string += '='\n tags_string += str(tag[-1])\n tags_string += ' '\n # print('DEBUG enip server tags_string: ', tags_string)\n\n return tags_string",
"def __n3_to_str(triple):\n s, p, o = triple\n s = s.n3()\n p = p.n3()\n o = o.n3()\n if s.startswith('<') and s.endswith('>'):\n s = s[1:len(s) - 1]\n if p.startswith('<') and p.endswith('>'):\n p = p[1:len(p) - 1]\n if o.startswith('<') and o.endswith('>'):\n o = o[1:len(o) - 1]\n return (s, p, o)",
"def _tuple_to_cpppo_tag_multiple(cls, what, values=None, serializer=':'):\n tag_string = ''\n\n if values == None:\n for i in range(len(what)):\n tag_string += what[i][0] + EnipProtocol._SERIALIZER + str(what[i][1]) + \" \"\n else:\n for i in range(len(what)):\n tag_string += what[i][0] + EnipProtocol._SERIALIZER + str(what[i][1]) + \"=\" + str(values[i]) + \" \"\n\n return tag_string"
]
| [
"0.8968813",
"0.8027702",
"0.7950681",
"0.78714865",
"0.7861424",
"0.7818346",
"0.780113",
"0.7644711",
"0.74937075",
"0.7376981",
"0.7257783",
"0.7226776",
"0.7204182",
"0.7195983",
"0.71484184",
"0.7052116",
"0.6914404",
"0.68726766",
"0.67629915",
"0.6719136",
"0.66418886",
"0.65422004",
"0.6514208",
"0.6477726",
"0.64663136",
"0.6427695",
"0.6414355",
"0.625037",
"0.6238129",
"0.6233258"
]
| 0.8546605 | 1 |
execute the add_slave command | def _do_add_slave(self, args):
bus_type = args[1]
slave_id = int(args[2])
if bus_type == 'rtu':
self.server._servers[0].add_slave(slave_id)
elif bus_type == 'tcp':
self.server._servers[1].add_slave(slave_id)
return "{0}".format(slave_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def onSlave(self):",
"def add_slave(self, widget):\n self._slaves.add(widget)\n widget[tkc.STATE] = self._get_slaves_state()",
"def connect_subproc():\n return factory.connect_subproc([sys.executable, \"-u\", SERVER_FILE, \"-q\", \"-m\", \"stdio\"], \n SlaveService)",
"def getSlave(name):",
"def master_to_slave():\n print(\"Shifting from master to slave\")\n stop_master_worker()\n setup_slave_web()\n prepare_push()\n push_to_slave()\n stop_slave_web()\n start_slave_worker()\n print(\"DONE!\")",
"def addSlavePid(self, pid):\n if self._logger is not None:\n self._logger.debug('Adding slave PID ' + str(pid))\n if not pid in self._all_processes_pid: # Make sure we don't add twice a PID\n self._all_processes_pid += [pid] # Add",
"def slave_to_master():\n print(\"Shifting from slave to master\")\n stop_slave_worker()\n setup_slave_web()\n pull_from_slave()\n commit_pull_to_db()\n stop_slave_web()\n start_master_worker()\n print(\"DONE!\")",
"def slaveConnected(slaveName):",
"def add_slaves(self, *, user: str, identity_file: str, new_hosts: list):\n hosts = [self.master_ip] + self.slave_ips\n partial_func = functools.partial(\n add_slaves_node,\n services=self.services,\n user=user,\n identity_file=identity_file,\n cluster=self,\n new_hosts=new_hosts)\n run_against_hosts(partial_func=partial_func, hosts=hosts)\n\n master_ssh_client = get_ssh_client(\n user=user,\n host=self.master_ip,\n identity_file=identity_file)\n with master_ssh_client:\n for service in self.services:\n service.configure_master(\n ssh_client=master_ssh_client,\n cluster=self)",
"def connect_to_master():",
"def add_slave(self, slave_id):\r\n if (slave_id <= 0) or (slave_id > 255):\r\n raise Exception(\"Invalid slave id %d\" % slave_id)\r\n if not slave_id in self._slaves:\r\n self._slaves[slave_id] = MBSlave(slave_id, self.dom)\r\n return self._slaves[slave_id]\r\n else:\r\n raise DuplicatedKeyError(\"Slave %d already exists\" % slave_id)",
"def promote_slave_commands(self):\n return [\n \"RESET MASTER\",\n \"STOP SLAVE\",\n \"RESET SLAVE\",\n \"CHANGE MASTER TO MASTER_HOST = ''\",\n ]",
"def push_to_slave():\n print(\"Pushing to slave\")\n try:\n for tname in TABLES:\n with open(f'{tname}.db', 'rb') as f:\n print(f\"Pushing {tname}\")\n r = req.post(f\"{SLAVE_URL}/push_db/{tname}\", files={'file': f}, data={'key': HMA_KEY})\n if r.status_code != req.codes.ok:\n print(\"Something wrong with slave on push:\")\n print(r.text)\n return False\n return True\n except IOError:\n print(\"IO ERROR\")\n return False",
"def _do_add_block(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n name = args[3]\r\n block_type = int(args[4])\r\n starting_address = int(args[5])\r\n length = int(args[6])\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.add_block(name, block_type, starting_address, length)\r\n return name",
"def slaveof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SLAVEOF is not supported in cluster mode\")",
"def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")",
"def add_slaves_node(\n *,\n user: str,\n host: str,\n identity_file: str,\n services: list,\n cluster: FlintrockCluster,\n new_hosts: list):\n is_new_host = host in new_hosts\n\n client = get_ssh_client(\n user=user,\n host=host,\n identity_file=identity_file,\n wait=is_new_host)\n\n with client:\n if is_new_host:\n setup_node(\n ssh_client=client,\n services=services,\n cluster=cluster)\n\n for service in services:\n service.configure(\n ssh_client=client,\n cluster=cluster)",
"def start_slave_worker():\n print(\"Starting slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to start up\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def reset_slave():\n\n # Confirm slave status in case we need to refer to the values later\n slave_status()\n run_mysql_command(\"STOP SLAVE;\")\n\n with hide('everything'):\n # Store last known log file and position\n master_log_file = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Relay_Master_Log_File:' | awk '{ print $2 }'\")\n master_log_pos = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Exec_Master_Log_Pos:' | awk '{ print $2 }'\")\n\n if not master_log_file or not master_log_pos:\n abort(\"Failed to determine replication log file and position, aborting.\")\n\n # Forget log file and position\n run_mysql_command(\"RESET SLAVE;\")\n\n # Repoint log file and position to last known values\n run_mysql_command(\"CHANGE MASTER TO MASTER_LOG_FILE='{}', MASTER_LOG_POS={};\"\n .format(master_log_file, master_log_pos))\n run_mysql_command(\"START SLAVE;\")\n\n with hide('everything'):\n seconds_behind_master = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Seconds_Behind_Master:' | awk '{ print $2 }'\")\n\n # Compare as a string to ensure we got a non-nil value from MySQL\n if seconds_behind_master != '0':\n abort(\"Slave is still behind master by {} seconds; run mysql.slave_status to check status\"\n .format(seconds_behind_master))",
"def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()",
"def add(self, name, command):",
"def add_worker(ip: str = Argument(..., help=\"Server IP\"),\n key_ssh: str= Argument(..., help=\"Path to ssh key file\"),\n user_ssh: str = Argument(..., help=\"User in the server\"),\n hostname: str = Argument(..., help=\"Ex: ws01.example.com\"),\n mannager_ip: str = Argument(..., help=\"Mannager cluster IP\")):\n registers = os.getcwd() + '/commands/templates/manager_registers.txt'\n if os.path.exists(registers):\n with open(registers, 'r') as f:\n line = f.readline()\n while line:\n line = line.split(' ')\n line_ip = line[-3].split(':')[0]\n if line_ip == mannager_ip:\n echo(style(\"Connecting with Server\", fg=blue, bold=True))\n server = create_connection(user_ssh, ip, key_ssh)\n install_docker(server)\n install_docker_compose(server)\n init_service(hostname, server)\n server.run(' '.join(line[:-2]))\n break\n else:\n line = f.readline()\n\n msg = 'Not registers for the mannager server ip'\n echo(style(msg, fg=blue, bold=True))\n msg = 'Enter server user for of mannager node'\n user = prompt(style(msg, fg=blue, bold=True))\n msg = style('Enter path to ssh key file', fg=blue, bold=True)\n\n msg = style('Enter path to ssh key file', fg=blue, bold=True)\n key = prompt(msg)\n server = create_connection(user, mannager_ip, key)\n st = str(server.run('docker swarm join-token worker')).split()\n print(st)\n else:\n msg = 'Not registers for the mannager server ip'\n echo(style(msg, fg=blue, bold=True))\n\n msg = 'Enter server user for of mannager node'\n user = prompt(style(msg, fg=blue, bold=True))\n msg = style('Enter path to ssh key file', fg=blue, bold=True)\n key = prompt(msg)\n #server = create_connection(user, ip_mannager, key)",
"def _do_remove_slave(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n if bus_type == 'rtu':\r\n self.server._servers[0].remove_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n self.server._servers[1].remove_slave(slave_id)\r\n return \"\"",
"def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.add(args)",
"def _do_has_slave(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n try:\r\n if bus_type == 'rtu':\r\n self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n self.server._servers[1].get_slave(slave_id)\r\n except Exception:\r\n return \"0\"\r\n return \"1\"",
"def on_slave_report(client_id, data):",
"def run(self):\n self.node_id = CONFIG.node_id\n self.running = Event()\n if not CONFIG.master_pub or not CONFIG.master_repl:\n print colors.yellow(\"Master IP:port is not set in config file (%s)\"\n % CONFIG._fn)\n master_pub = raw_input(\"Enter Master PUB uri (IP or IP:port):\")\n if \":\" in master_pub:\n ip, _, port = master_pub.rpartition(\":\")\n else:\n ip = master_pub\n port = 5551\n CONFIG.update(\"General\", \"master_pub\", \"%s:%s\" % (ip,\n port))\n master_repl = raw_input(\"Enter Master REPLY uri (IP or IP:port), \"\n \"hit ENTER for default(%s:5552):\" % ip)\n if not master_repl:\n port = 5552\n elif \":\" in master_repl:\n ip, _, port = master_repl.rpartition(\":\")\n else:\n ip = master_repl\n port = 5552\n CONFIG.update(\"General\", \"master_repl\", \"%s:%s\" % (ip,\n port))\n CONFIG.reload()\n\n if not validate_address(CONFIG.master_pub) or \\\n not validate_address(CONFIG.master_repl):\n LOG.error('Server IP not present in config or is not valid.\\n'\n 'Check your config')\n exit(1)\n\n if not self.node_id:\n LOG.error(\"The node id not set in config. \"\n \"Run program with config option first\")\n exit(1)\n\n self.backend = self.transport_class.from_config(\n CONFIG, **vars(self.args))\n load_plugins(CONFIG)\n self.sessions = {}\n self.matcher = Matcher(self.node_id, self.backend.meta())\n\n LOG.info(\"Starting node\")\n self.details()\n self._sig_int = signal.getsignal(signal.SIGINT)\n self._sig_term = signal.getsignal(signal.SIGTERM)\n\n if os.name == 'nt':\n # Use Ctrl+C to invoke clean on Windows\n import win32api\n win32api.SetConsoleCtrlHandler(self.clean, True)\n else:\n signal.signal(signal.SIGINT, self._handle_terminate)\n signal.signal(signal.SIGTERM, self._handle_terminate)\n\n # Invoke clean for sessions\n signal.signal(signal.SIGHUP, self.clean)\n\n if not self.backend.prepare():\n LOG.info(\"Cannot start transport backend\")\n self._handle_terminate()\n exit(1)\n\n def request_processor():\n req_queue = self.backend.consume_queue('requests',\n ident=\"DISPATCHER\")\n poller = self.backend.create_poller(req_queue)\n while not self.running.is_set():\n try:\n ready = poller.poll(200)\n if not ready:\n continue\n if req_queue in ready:\n message = req_queue.recv()[0]\n if not message:\n continue\n job = JobTarget.build(message)\n if job:\n self.target_match(job)\n except ConnectionError:\n break\n except Exception:\n continue\n req_queue.close()\n\n Thread(target=request_processor).start()\n\n self.backend.loop()\n\n LOG.info(\"Node exited\")",
"def run(self):\n\t\t#check if master and slave are there\n\t\tinfo_file_name = self.CFG['INFO_FILE_NAME']\n\t\tbase_dir = self.CFG['BASE_DIR']\n\t\tsync_file_name = self.CFG['SYNC_FILE_NAME']\n\t\tmaster = self.getMaster(base_dir, info_file_name)\n\t\tslave = None\n\n\t\tif master != None :\n\t\t\tlogging.info(\"We have a master in {0}\".format(master['path']))\n\t\t\tslave = self.getSlave(base_dir, info_file_name, master['info']['signature'])\n\n\t\tif slave != None:\n\t\t\tlogging.info(\"We have a slave in {0}\".format(slave['path']))\n\n\t\tif master!= None and slave != None:\n\t\t\totherProcess = self.checkRunningProcessAndMark(sync_file_name)\n\n\t\t\tif otherProcess is False:\n\t\t\t\t#we can sync\n\t\t\t\t#TODO: what if the rsync command fails? \n\t\t\t\tcommand = \"rsync -avz --exclude={2} {0}/ {1}/\"\n\t\t\t\tcommand = command.format(master['path'], slave['path'], info_file_name)\n\t\t\t\tlogging.debug(command)\n\t\t\t\tresp = os.system(command)\n\t\t\t\tlogging.debug(resp)\n\t\t\t\tself.markSynced(master['path'], info_file_name, master['info'])\n\t\t\t\tself.markSynced(slave['path'], info_file_name, slave['info'])\n\t\t\t\tresp = os.system(\"rm {0}\".format(sync_file_name))\n\n\t\t\t\t#print command",
"def make_worker_run_script(master_private_ip: str, run_command: str):\n return (f'sudo -H -u ubuntu bash -c '\n f'\\'source /home/ubuntu/.bashrc && export PYTHONPATH=. && '\n 'MKL_NUM_THREADS=1 OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 '\n f'{run_command} '\n f'--master_host {master_private_ip} '\n f'--relay_socket_path {REMOTE_MASTER_SOCKET_PATH} '\n f'>> /home/ubuntu/user_data.log 2>&1\\'')",
"def onSlaveLost(self):"
]
| [
"0.66664743",
"0.6366341",
"0.62082016",
"0.6190436",
"0.6048615",
"0.6037338",
"0.5986998",
"0.59091246",
"0.59048486",
"0.573768",
"0.56949824",
"0.5680297",
"0.5538369",
"0.5517972",
"0.54603016",
"0.54316896",
"0.5403584",
"0.53891146",
"0.53774834",
"0.5355592",
"0.53412503",
"0.5337055",
"0.53288275",
"0.5325278",
"0.5324175",
"0.52948225",
"0.5294469",
"0.5275365",
"0.52471185",
"0.5236571"
]
| 0.72399515 | 0 |
execute the has_slave command | def _do_has_slave(self, args):
bus_type = args[1]
slave_id = int(args[2])
try:
if bus_type == 'rtu':
self.server._servers[0].get_slave(slave_id)
elif bus_type == 'tcp':
self.server._servers[1].get_slave(slave_id)
except Exception:
return "0"
return "1" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def slaveConnected(slaveName):",
"def slave_status():\n run_mysql_command(\"SHOW SLAVE STATUS\\G;\")",
"def onSlave(self):",
"def query_slave(self, slave_name=\"\"):\n\t\t#using the bus template find the location of the slave folders\n\n\t\t#see if the name matches up to any of the files\n\t\t#\"name\".v or \"name\".vhd\n\t\treturn False",
"def getSlave(name):",
"def slaveof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SLAVEOF is not supported in cluster mode\")",
"def slaves_found(self):\n return not (len(self.topology) and self.topology[0][1] == [])",
"def slave_to_master():\n print(\"Shifting from slave to master\")\n stop_slave_worker()\n setup_slave_web()\n pull_from_slave()\n commit_pull_to_db()\n stop_slave_web()\n start_master_worker()\n print(\"DONE!\")",
"def master_to_slave():\n print(\"Shifting from master to slave\")\n stop_master_worker()\n setup_slave_web()\n prepare_push()\n push_to_slave()\n stop_slave_web()\n start_slave_worker()\n print(\"DONE!\")",
"def remote_publishing_slave():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'SLAVE'",
"def run(self):\n\t\t#check if master and slave are there\n\t\tinfo_file_name = self.CFG['INFO_FILE_NAME']\n\t\tbase_dir = self.CFG['BASE_DIR']\n\t\tsync_file_name = self.CFG['SYNC_FILE_NAME']\n\t\tmaster = self.getMaster(base_dir, info_file_name)\n\t\tslave = None\n\n\t\tif master != None :\n\t\t\tlogging.info(\"We have a master in {0}\".format(master['path']))\n\t\t\tslave = self.getSlave(base_dir, info_file_name, master['info']['signature'])\n\n\t\tif slave != None:\n\t\t\tlogging.info(\"We have a slave in {0}\".format(slave['path']))\n\n\t\tif master!= None and slave != None:\n\t\t\totherProcess = self.checkRunningProcessAndMark(sync_file_name)\n\n\t\t\tif otherProcess is False:\n\t\t\t\t#we can sync\n\t\t\t\t#TODO: what if the rsync command fails? \n\t\t\t\tcommand = \"rsync -avz --exclude={2} {0}/ {1}/\"\n\t\t\t\tcommand = command.format(master['path'], slave['path'], info_file_name)\n\t\t\t\tlogging.debug(command)\n\t\t\t\tresp = os.system(command)\n\t\t\t\tlogging.debug(resp)\n\t\t\t\tself.markSynced(master['path'], info_file_name, master['info'])\n\t\t\t\tself.markSynced(slave['path'], info_file_name, slave['info'])\n\t\t\t\tresp = os.system(\"rm {0}\".format(sync_file_name))\n\n\t\t\t\t#print command",
"def reset_slave():\n\n # Confirm slave status in case we need to refer to the values later\n slave_status()\n run_mysql_command(\"STOP SLAVE;\")\n\n with hide('everything'):\n # Store last known log file and position\n master_log_file = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Relay_Master_Log_File:' | awk '{ print $2 }'\")\n master_log_pos = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Exec_Master_Log_Pos:' | awk '{ print $2 }'\")\n\n if not master_log_file or not master_log_pos:\n abort(\"Failed to determine replication log file and position, aborting.\")\n\n # Forget log file and position\n run_mysql_command(\"RESET SLAVE;\")\n\n # Repoint log file and position to last known values\n run_mysql_command(\"CHANGE MASTER TO MASTER_LOG_FILE='{}', MASTER_LOG_POS={};\"\n .format(master_log_file, master_log_pos))\n run_mysql_command(\"START SLAVE;\")\n\n with hide('everything'):\n seconds_behind_master = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Seconds_Behind_Master:' | awk '{ print $2 }'\")\n\n # Compare as a string to ensure we got a non-nil value from MySQL\n if seconds_behind_master != '0':\n abort(\"Slave is still behind master by {} seconds; run mysql.slave_status to check status\"\n .format(seconds_behind_master))",
"def test_return_to_assigned_master(\n mm_failover_master_1_salt_cli,\n mm_failover_master_2_salt_cli,\n salt_mm_failover_minion_1,\n salt_mm_failover_minion_2,\n run_salt_cmds,\n):\n returns = run_salt_cmds(\n [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],\n [salt_mm_failover_minion_1, salt_mm_failover_minion_2],\n )\n\n assert len(returns) == 2\n assert (mm_failover_master_1_salt_cli, salt_mm_failover_minion_1) in returns\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns",
"def slave(self):\n return self._slave_mode",
"def pull_from_slave():\n print(\"Pulling from slave\")\n r = req.get(f\"{SLAVE_URL}/prepare_pull\")\n if r.status_code != req.codes.ok:\n print(\"Something wrong with slave on prepare pull\")\n print(r.text)\n return False\n print(\"Prepared\")\n try:\n for tname in TABLES:\n with open(f'{tname}.db', 'wb') as f:\n print(f\"Pulling {tname}\")\n r = req.post(f\"{SLAVE_URL}/pull_db/{tname}\", data={'key': HMA_KEY})\n if r.status_code != req.codes.ok:\n print(\"Something went wrong\")\n print(r.text)\n return False\n f.write(r.content)\n return True\n except IOError:\n print(\"IO ERROR\")\n return False",
"def connect_subproc():\n return factory.connect_subproc([sys.executable, \"-u\", SERVER_FILE, \"-q\", \"-m\", \"stdio\"], \n SlaveService)",
"def on_cluster(cmds=[\"sbatch\"]):\n\n def cmd_exists(cmd):\n result = subprocess.call(\n \"type \" + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n return result == 0\n\n for cmd in cmds:\n if cmd_exists(\"sbatch\"):\n return True\n return False",
"def start_slave_worker():\n print(\"Starting slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to start the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to start up\n print(\"Waiting a bit\")\n time.sleep(10)\n return True",
"def test_master(busname):\n check_master_not_running()\n\n process = subprocess.Popen(['rebus_master', busname],\n stderr=subprocess.PIPE, bufsize=0)\n # wait for master bus to be ready\n # TODO look into race condition. Another SIGINT handler?\n time.sleep(2)\n output = process.stderr.read(1)\n process.send_signal(signal.SIGINT)\n process.wait()\n assert process.returncode == 0, output + process.stderr.read()",
"def onSlaveLost(self):",
"def check_hdfs_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/hdfs dfsadmin -report|grep Name |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1",
"def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()",
"def is_running(self,timeout=0):\n\n # wait for them to start\n import time\n st = time.time()\n still_waiting = 1\n while still_waiting:\n try:\n # Send a simple command to all workers\n # and wait till they handle it successfully\n self.exec_code(\"1==1\")\n except ClusterError:\n still_waiting = 1\n elapsed = time.time() - st\n if elapsed > timeout:\n # We've run out of time.\n return 0\n else:\n still_waiting = 0\n wait_time = time.time() - st\n # should we somehow dessiminate worker topology (ids)\n # to all machines here?\n return 1",
"def getSlave(self, base_path, filename='picloud.json', sign=''):\n\t\tslave = None\n\t\tfor l in listdir(base_path) :\n\t\t\tpath = base_path + \"/\" + l\n\t\t\tslave = self.checkIs('slave', path, sign, filename)\n\t\t\tif slave != None:\n\t\t\t\treturn slave;\n\n\t\treturn None;",
"def get_slave_port():\n return 9901",
"def _standby_clone():\n # manualy:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' [email protected]:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")",
"def executeOnMaster(self, cmd):\n if self._hostnameResolves(self.getManagementEndpoint()):\n ssh = SSHClient()\n ssh.load_system_host_keys()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(\n self.getManagementEndpoint(),\n username = self.config.get('ACS', \"username\"),\n port = 2200,\n key_filename = os.path.expanduser(self.config.get('SSH', \"privatekey\")))\n session = ssh.get_transport().open_session()\n self.log.debug(\"Session opened on master.\")\n self.log.debug(\"Executing on master: \" + cmd)\n\n AgentRequestHandler(session)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n stdin.close()\n \n result = \"\"\n for line in stdout.read().splitlines():\n self.log.debug(line.decude(\"utf-8\"))\n result = result + line.decode(\"utf-8\") + \"\\n\"\n for line in stderr.read().splitlines():\n self.log.error(line.decode(\"utf-8\"))\n else:\n self.log.error(\"Endpoint \" + self.getManagementEndpoint() + \" does not exist, cannot SSH into it.\")\n result = \"Exception: No cluster is available at \" + self.getManagementEndpoint()\n ssh.close()\n return result",
"def _do_add_slave(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n if bus_type == 'rtu':\r\n self.server._servers[0].add_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n self.server._servers[1].add_slave(slave_id)\r\n return \"{0}\".format(slave_id)",
"def check_yarn_service(master, ec2_opts, num_nodes):\n output = spark_ec2.ssh_read(master, ec2_opts, \"/root/ephemeral-hdfs/bin/yarn node -list -all |grep RUNNING |wc -l\")\n # Ok if one slave is down\n return int(output) >= int(num_nodes) - 1",
"def connect_to_master():"
]
| [
"0.7264953",
"0.68657476",
"0.6751939",
"0.67488265",
"0.65639305",
"0.63759196",
"0.6268738",
"0.5960661",
"0.5858432",
"0.5852638",
"0.5848737",
"0.58346575",
"0.5781598",
"0.5757251",
"0.5695663",
"0.56429327",
"0.5631435",
"0.56145155",
"0.56121534",
"0.5608739",
"0.55915636",
"0.55401987",
"0.55279475",
"0.5517813",
"0.54668564",
"0.5464656",
"0.54550666",
"0.5420827",
"0.54123497",
"0.54024404"
]
| 0.7234994 | 1 |
execute the remove_slave command | def _do_remove_slave(self, args):
bus_type = args[1]
slave_id = int(args[2])
if bus_type == 'rtu':
self.server._servers[0].remove_slave(slave_id)
elif bus_type == 'tcp':
self.server._servers[1].remove_slave(slave_id)
return "" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _do_remove_block(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n name = args[3]\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.remove_block(name)",
"def _do_remove_all_slaves(self, args):\r\n bus_type = args[1]\r\n if bus_type == 'rtu':\r\n self.server._servers[0].remove_all_slaves()\r\n elif bus_type == 'tcp':\r\n self.server._servers[1].remove_all_slaves()\r\n return \"\"",
"def rm_cmd(server, client, line):\n try:\n target = line.split(' ')[1].strip()\n except:\n client.send(client.container.exec_run(\"/bin/sh -c rm\")\n .decode(\"utf-8\"))\n return\n response = client.container.exec_run(\n \"/bin/sh -c cd {} && test -f {} && echo 0\"\n .format(client.pwd, target)).decode(\"utf-8\").strip()\n if response != \"0\":\n response = client.container.exec_run(\n \"/bin/sh -c cd {} && rm {}\".format(client.pwd, target))\n client.send(response)\n else:\n client.container.exec_run(\"/bin/sh -c cd {} && cp {} /tmp/\"\n .format(client.pwd, target))\n client.container.exec_run(\"/bin/sh -c cd {} && rm {}\"\n .format(client.pwd, target))",
"def reset_slave():\n\n # Confirm slave status in case we need to refer to the values later\n slave_status()\n run_mysql_command(\"STOP SLAVE;\")\n\n with hide('everything'):\n # Store last known log file and position\n master_log_file = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Relay_Master_Log_File:' | awk '{ print $2 }'\")\n master_log_pos = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Exec_Master_Log_Pos:' | awk '{ print $2 }'\")\n\n if not master_log_file or not master_log_pos:\n abort(\"Failed to determine replication log file and position, aborting.\")\n\n # Forget log file and position\n run_mysql_command(\"RESET SLAVE;\")\n\n # Repoint log file and position to last known values\n run_mysql_command(\"CHANGE MASTER TO MASTER_LOG_FILE='{}', MASTER_LOG_POS={};\"\n .format(master_log_file, master_log_pos))\n run_mysql_command(\"START SLAVE;\")\n\n with hide('everything'):\n seconds_behind_master = run(\"sudo -i mysql -e 'SHOW SLAVE STATUS\\G' | grep '^\\s*Seconds_Behind_Master:' | awk '{ print $2 }'\")\n\n # Compare as a string to ensure we got a non-nil value from MySQL\n if seconds_behind_master != '0':\n abort(\"Slave is still behind master by {} seconds; run mysql.slave_status to check status\"\n .format(seconds_behind_master))",
"def slaveDisconnected(slaveName):",
"def _do_remove_all_blocks(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.remove_all_blocks()",
"def promote_slave_commands(self):\n return [\n \"RESET MASTER\",\n \"STOP SLAVE\",\n \"RESET SLAVE\",\n \"CHANGE MASTER TO MASTER_HOST = ''\",\n ]",
"def remove_slaves(self, *, user: str, identity_file: str):\n self.load_manifest(user=user, identity_file=identity_file)\n\n partial_func = functools.partial(\n remove_slaves_node,\n user=user,\n identity_file=identity_file,\n services=self.services,\n cluster=self)\n hosts = [self.master_ip] + self.slave_ips\n\n run_against_hosts(partial_func=partial_func, hosts=hosts)",
"def __del__(self):\n self.slaves.end()",
"def main_remove(args):\n return remove_command(args.directory, args.name)",
"def clean_master():",
"def test_backup_remove_negative_args(self):\n remote_client = RemoteMachineShellConnection(self.backupset.backup_host)\n self.backup_create()\n self.backup_cluster()\n cmd = \"remove\"\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"cbbackupmgr remove [<args>]\", \"Expected error message not thrown\")\n cmd = \"remove --archive -c http://localhost:8091 -u Administrator -p password -r aa\"\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: --archive\", \"Expected error message not thrown\")\n cmd = \"remove --archive {0}\".format(self.backupset.directory)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Flag required, but not specified: -r/--repo\", \"Expected error message not thrown\")\n cmd = \"remove --archive {0} --repo\".format(self.backupset.directory)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertEqual(output[0], \"Expected argument for option: --repo\", \"Expected error message not thrown\")\n cmd = \"remove --archive xyz --repo {0}\".format(self.backupset.name)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n self.assertTrue(\"Removing backup repository failed: archive '{0}xyz' does not exist\".format(self.root_path) in output[-1],\n \"Expected error message not thrown\")\n cmd = \"remove --archive {0} --repo xyz\".format(self.backupset.directory)\n command = \"{0}/cbbackupmgr {1}\".format(self.cli_command_location, cmd)\n output, error = remote_client.execute_command(command)\n remote_client.log_command_output(output, error)\n remote_client.disconnect()\n self.assertIn(\"Backup Repository `xyz` not found\", output[-1])",
"def do_command(self, args):\n hostops = dbops.Hosts()\n hostops.delete(args)",
"def kill(self):\n \n self.killSlavePids()",
"def terminate_slaves(self):\n self.master.terminate_slaves()",
"def _remove_remote_node_data_bag():\n node_data_bag_path = os.path.join(node_work_path, 'data_bags', 'node')\n if exists(node_data_bag_path):\n sudo(\"rm -rf {0}\".format(node_data_bag_path))",
"def do_remove(self, arg):\n jail_destroy('remove', arg)",
"def onSlave(self):",
"def brokers_remove(**kwargs):\n _validate_components_prepared('brokers_remove')\n rabbitmq = _prepare_component_management('rabbitmq', kwargs['verbose'])\n\n remove_node = rabbitmq.add_missing_nodename_prefix(kwargs['remove_node'])\n nodes = rabbitmq.list_rabbit_nodes()\n complain_about_dead_broker_cluster(nodes)\n\n if remove_node in nodes['running_nodes']:\n logger.error(\n 'Broker nodes to be removed must be shut down. '\n 'If you recently shut down the node, please wait up to one '\n 'minute before re-running this command.'\n )\n sys.exit(1)\n\n if remove_node not in nodes['nodes']:\n logger.error(\n 'Broker node {node_name} not found in cluster. '\n 'Valid nodes are: {nodes}'.format(\n node_name=remove_node,\n nodes=', '.join(sorted(nodes['nodes'])),\n )\n )\n sys.exit(1)\n\n rabbitmq.remove_node(remove_node)\n logger.info('Broker {node} removed from cluster.'.format(\n node=remove_node,\n ))",
"def delete_cluster(self):",
"def killSlavePids(self):\n for pid in self._all_processes_pid:\n self._sudoKillSubprocessFromPid(pid)\n # The code below is commented out, we will just wipe out the whole self._all_processes_pid[] list below\n #while pid in self._all_processes_pid: self._all_processes_pid.remove(pid) # Remove references to this child's PID in the list of children\n if not self._slave_dhcp_client_proc is None:\n self._slave_dhcp_client_proc.wait() # Wait for sudo child (our only direct child)\n \n self._all_processes_pid = [] # Empty our list of PIDs\n \n self._slave_dhcp_client_pid = None \n self._slave_dhcp_client_proc = None",
"def slave_to_master():\n print(\"Shifting from slave to master\")\n stop_slave_worker()\n setup_slave_web()\n pull_from_slave()\n commit_pull_to_db()\n stop_slave_web()\n start_master_worker()\n print(\"DONE!\")",
"def master_to_slave():\n print(\"Shifting from master to slave\")\n stop_master_worker()\n setup_slave_web()\n prepare_push()\n push_to_slave()\n stop_slave_web()\n start_slave_worker()\n print(\"DONE!\")",
"def stop_slave_worker():\n print(\"Stopping slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on slave\")\n print(r.text)\n return False\n #wait a bit for the worker process to stop\n print(\"Waiting a bit\")\n time.sleep(2)\n return True",
"def remove(self, *args):\n self.__execute(self.pkgin_bin, \"remove\", *args)",
"def remote_cleanup(connections: ThreadingGroup, commands_file: str) -> None:\n connections.run(\n 'rm {file}'.format(file=commands_file)\n )\n connections.run(\n 'rm /tmp/evaneos_ssh__fabric_host'.format(file=commands_file)\n )",
"def remote_kill():",
"def onSlaveLost(self):",
"def _Uninstall(vm):\n vm.RemoteCommand('cd {0} && sudo make uninstall'.format(MPI_DIR))",
"def remove_slaves_node(\n *,\n user: str,\n host: str,\n identity_file: str,\n services: list,\n cluster: FlintrockCluster):\n ssh_client = get_ssh_client(\n user=user,\n host=host,\n identity_file=identity_file)\n\n for service in services:\n service.configure(\n ssh_client=ssh_client,\n cluster=cluster)"
]
| [
"0.645421",
"0.6409547",
"0.6328437",
"0.62247264",
"0.6103352",
"0.6079856",
"0.5915689",
"0.58712333",
"0.58550864",
"0.5795309",
"0.57882065",
"0.5786015",
"0.56886345",
"0.5667349",
"0.56095004",
"0.55818015",
"0.5518671",
"0.551404",
"0.5513143",
"0.5506423",
"0.5499367",
"0.5498291",
"0.546908",
"0.5463836",
"0.54514253",
"0.5433624",
"0.54033434",
"0.53991294",
"0.5385681",
"0.5378771"
]
| 0.758631 | 0 |
execute the add_block command | def _do_add_block(self, args):
bus_type = args[1]
slave_id = int(args[2])
name = args[3]
block_type = int(args[4])
starting_address = int(args[5])
length = int(args[6])
if bus_type == 'rtu':
slave = self.server._servers[0].get_slave(slave_id)
elif bus_type == 'tcp':
slave = self.server._servers[1].get_slave(slave_id)
slave.add_block(name, block_type, starting_address, length)
return name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def add_block(\n self,\n position: typing.Tuple[int, int, int],\n block_name: typing.Union[str, typing.Any],\n immediate=True,\n block_update=True,\n block_update_self=True,\n lazy_setup: typing.Callable[[typing.Any], None] = None,\n check_build_range=True,\n block_state=None,\n network_sync=True,\n ) -> typing.Optional[typing.Any]:\n raise NotImplementedError",
"def add(self, block: Block):\n self._buffer.append(block)",
"def addBlock(self, newBlock):\n newBlock.index = len(self.chain)\n newBlock.previousHash = self.chain[-1].hash\n newBlock.mineBlock(self.difficulty)\n self.chain.append(newBlock)\n self.writeBlocks()",
"def add_new_block(self):\n old_block = self.curr_block\n self.curr_block = self.gen_new_block()\n add_edge(old_block, self.curr_block)",
"def addBlock(self, block):\n if self.validateBlock(block, self.getLastBlock()):\n self.__chain.append(block)\n self.__currentTransactionsList = [] # Remove transactions from the list\n return True\n return False",
"def add(self, block):\n\n try:\n self.blocks[block.height]\n except:\n\n self.blocks[block.height] = [block]\n if self.current_height < block.height:\n self.current_height = block.height\n return\n\n if not block.hash() in [b.hash() for b in self.blocks[block.height]]:\n self.blocks[block.height].append(block)\n loggerutil.debug(\"fork detected for height:\" + str(block.height) +\n \"block candidats:\" + str(self.blocks[block.height]))\n if self.current_height < block.height:\n self.current_height = block.height",
"def add_basic_block(self, basic_block):\n self.basic_blocks.append(basic_block)\n basic_block.function = self",
"def add_block(self, block):\n if block.index >= len(self.blockchain):\n self.blockchain.append(block)\n else:\n self.blockchain[block.index] = block\n self.write_to_disk()",
"def new_block_call(self, event):\n self.push_job(flush=True)",
"def add_block(self, block_name, transactions, timestamp, hash_value):\n\n transacted_amount = 0\n for transaction in transactions:\n transacted_amount += transaction.amount\n self.add_transaction(block_name, transaction)\n\n cmd = \"\"\"INSERT INTO %s(%s, %s, %s, %s, %s)\n VALUES(?,?,?,?,?);\"\"\" %(TABLE_BLOCKCHAIN,\n COL_BLOCKCHAIN_BLOCK,\n COL_BLOCKCHAIN_TRANS_COUNT,\n COL_BLOCKCHAIN_AMOUNT,\n COL_BLOCKCHAIN_TIME,\n COL_BLOCKCHAIN_BLOCK_HASH)\n self.__dbcursor.execute(cmd, (block_name, len(transactions),\n transacted_amount, timestamp,\n hash_value))",
"def add_block(self, cxnode, code, **magic_vars):\n ast = cparse(code)\n # ast.show()\n generator = MagicCGenerator(cxnode, magic_vars)\n generator.indent_level = self.indent_level\n hdr = '\\n%s// %s\\n' % (' ' * self.indent_level,\n cxnode.__class__.__name__)\n self.code += hdr + generator.visit(ast)",
"def add_block(self, block, save=True):\n if block.hash in self.blocks:\n return False\n if not block.is_valid()[0]:\n return False\n if not block.height in self.chain:\n self.chain[block.height] = []\n if not block.hash in self.chain[block.height]:\n # add newer blocks to front so they show up first in UI\n self.chain[block.height] = [block.hash] + self.chain[block.height]\n if not block.hash in self.blocks:\n self.blocks[block.hash] = block\n for tx in block.transactions:\n self.all_transactions[tx.hash] = tx\n if not tx.hash in self.blocks_containing_tx:\n self.blocks_containing_tx[tx.hash] = []\n self.blocks_containing_tx[tx.hash].append(block.hash)\n for input_ref in tx.input_refs:\n if not input_ref in self.blocks_spending_input:\n self.blocks_spending_input[input_ref] = []\n self.blocks_spending_input[input_ref].append(block.hash)\n self._p_changed = True # Marked object as changed so changes get saved to ZODB.\n if save:\n transaction.commit() # If we're going to save the block, commit the transaction.\n return True",
"def test_add_block(self):\n txout = TxOut(tx = \"transaction_hash\",\n nout = 1,\n addr = \"bitcoin_address\",\n value = 133)\n\n block = Block(block_hash=\"block_hash\",\n height=100,\n vout=[txout,],)\n \n balance_processor = BalanceProcessor(storage=self.balance_storage)\n balance_processor.add_block(block)\n\n self.assertEqual(balance_processor.height, 100)\n self.assertEqual(balance_processor.get_balance(\"bitcoin_address\"), 133)\n \n # Commit only commits the data already flushed into storage\n balance_processor.commit()\n\n self.assertEqual(balance_processor.get_balance(\"bitcoin_address\"), 133)\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 0)\n\n # Add empty blocks until the first block is flushed into storage\n for x in range(200):\n block = Block(block_hash=\"block_hash_{}\".format(x),\n height=x+100)\n balance_processor.add_block(block)\n\n self.assertEqual(balance_processor.get_balance(\"bitcoin_address\"), 133)\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 133)\n balance_processor.commit()\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 133)\n storage_height = self.balance_storage.height\n\n # Create a new balance_processor and check balance hasn't changed\n new_processor = BalanceProcessor(storage=self.balance_storage)\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 133)\n self.assertEqual(new_processor.get_balance(\"bitcoin_address\"), 133)\n self.assertEqual(new_processor.height, storage_height)",
"def add_chat_block(self, vapor_id_or_ip, block_type, duration, unit_time, reason):\n identity = vapor_id_or_ip if len(vapor_id_or_ip) == 36 else vapor_id_or_ip.split(\":\")[0] \\\n if ':' in vapor_id_or_ip else vapor_id_or_ip\n cmd = '{}addChatBlock {} {} {} {} \"{}\"'.format(self.console, identity, block_type, duration,\n unit_time, Commands.aquote(reason))\n self.write_command(cmd)",
"def add_to_chain(self, block: Block):\n if self.proof_of_work(block):\n self.blocks.append(block)",
"def add(self, name, command):",
"def add(self, str):\n if str in self.settings['Core::Blocks']:\n return\n self.settings['Core::Blocks'].append(str)\n self.rebuild()\n self.settings.save()",
"def add_block(self, blk_json):\n block = Block.from_json(blk_json)\n with self.blockchain_lock:\n self._blockchain.add(block)",
"def add_block(self, name):\n\n if not self.RE_NAME.match(name):\n raise ValueError(u\"Invalid block name '{0}'\"\n .format(common.from_utf8(name)))\n\n if name in self._block_map:\n raise ValueError(u\"Block '{0}' already exists\"\n .format(common.from_utf8(name)))\n\n # add new block and index mapping\n self._block_map[name] = len(self._ast[2]) # must come first\n option_list = []\n block = [name, option_list]\n self._ast[2].append(block)",
"def run(self, parent, blocks):\r\n pass",
"def add_code_block(self, code_block: 'CodeElement') -> None:\n self.add_code(code_block.get_code())",
"def add_block(self, env):\n block_size = (0.04, 0.04, 0.04)\n block_pose = self.random_pose(env, block_size)\n block_urdf = 'assets/stacking/block.urdf'\n block_id = env.add_object(block_urdf, block_pose)\n self.object_points[block_id] = np.float32((0, 0, 0)).reshape(3, 1)\n self._IDs[block_id] = 'block'\n return block_id",
"def test_block_extra_batch(self):\n pass",
"def addBlock(self, data):\n #get the hashVal of last block in blockchain\n lastHash = self.chain[len(self.chain) - 1].hashVal\n timestamp = time()\n hashVal = Block.hashSHA(timestamp, lastHash, data, NONCE, DIFFICULTY)\n adding_block = Block(timestamp, lastHash, hashVal, data, NONCE, DIFFICULTY)\n \n self.chain.append(adding_block)\n return adding_block",
"def add_block(self, env, block_color, width, height):\n\n block_size = (0.04, 0.04, 0.04)\n block_urdf = \"stacking/block.urdf\"\n block_pose = self.get_random_pose(env, block_size)\n block_id = env.add_object(block_urdf, block_pose)\n pb.changeVisualShape(\n block_id, -1, rgbaColor=utils.COLORS[block_color] + [1])\n # (0, None): 0 means that the block is symmetric.\n # TODO(hagrawal): Not sure what None means. Update. This is kept\n # for CLIPort compatibility. We don't use it.\n self.blocks.append((block_id, (0, None)))\n block_pix = utils.xyz_to_pix(block_pose[0], self.bounds, self.pix_size)\n block_obj_info = {\n \"obj_id\": block_id,\n \"pose\": block_pose,\n \"size\": block_size,\n \"urdf\": block_urdf,\n \"color\": block_color,\n \"unknown_color\": block_color in utils.EVAL_COLORS,\n \"pix\": block_pix,\n \"region\": determine_region(block_pix[0], block_pix[1], width, height),\n }\n return block_obj_info",
"def put_block(self):\n self.blocks[self.editor_cursor_position[1]][\n self.editor_cursor_position[0]] = self.available_block_types[self.current_block_type]",
"def add_transaction(self, block, transaction):\n cmd = \"\"\"INSERT INTO %s(%s, %s, %s, %s, %s, %s)\n VALUES(?,?,?,?,?,?);\"\"\" %(TABLE_TRANSACTIONS,\n COL_TRANSACTION_BLOCK,\n COL_TRANSACTION_SENDER,\n COL_TRANSACTION_RECEIVER,\n COL_TRANSACTION_AMOUNT,\n COL_TRANSACTION_SUB_TIME,\n COL_TRANSACTION_VER_TIME)\n self.__dbcursor.execute(cmd, (block, transaction.sender,\n transaction.receiver,\n transaction.amount,\n transaction.submitted_time,\n transaction.verified_time))",
"def _add_block(self, block: Block, write_to_ledger, mined_ourselves):\n\n with self.lock:\n self.mining_flag = GIVEN_BLOCK\n\n with self.lock:\n if block.parent_hash in self.blocks:\n parent_node = self.blocks[block.parent_hash]\n block_node = BlockNode(block, parent_node)\n parent_node.add_child(block_node)\n\n self._update_latest_pointers(block_node) # Check if the new block makes a longer chain and switch to it\n\n self.log.debug(GREEN + \"%s:[%s] added block to fork %d at depth %d\" + NC, block.miner_key_hash[:6], time.ctime(block.create_time), block_node.fork_num, block_node.depth)\n # self.log.debug(\"Added block to blockchain\")\n elif block.is_root:\n block_node = BlockNode(block, None)\n self.root = block_node\n # self.log.debug(\"Added block as root\")\n self.log.debug(GREEN + \"%s:[%s] added block as root %d\" + NC, block.miner_key_hash[:6], time.ctime(block.create_time), block_node.tree_num)\n self._update_latest_pointers(block_node)\n # self.messages.clear()\n Blockchain.num_trees += 1\n\n self._add_block_msgs(block) # Add all new posts to message table\n if self.message_num % 10000 == 0:\n self._write_new_messages(self.message_num-10000) # Save new messages to file\n self.blocks[block.block_hash] = block_node\n self.total_blocks += 1\n\n # Update ledger.txt with newly added block\n if write_to_ledger:\n with open(self.ledger_file, 'a') as ledger:\n ledger.write(repr(block) + \"\\n\")\n\n if self.total_blocks % STATS_UPDATE_INTERVAL == 0: # Every few blocks update stats.txt\n self._write_stats_file()\n\n self.mining_flag = CONTINUE_MINING\n\n if not mined_ourselves:\n self._update_msg_queue(block)\n\n if time.time() - self.last_msg_update > MSG_UPDATE_DELAY:\n self._reinit_message_table()\n self.last_msg_update = time.time()\n\n return True",
"def commit_block(self, block):\n raise NotImplementedError('commit_block: Implementation of this method is required.')",
"def add_block(self, block, proof):\n # print(block.__dict__)\n try:\n previous_hash = self.last_block.hash\n except AttributeError:\n previous_hash = block.previous_hash\n\n if previous_hash != block.previous_hash:\n print(\"Hashes don't match\\n{}\\n{}\".format(previous_hash, block.previous_hash))\n return False\n\n if not self.is_valid_proof(block, proof):\n print(\"block is not valid\")\n return False\n\n block.hash = proof\n self.chain.append(block)\n return True"
]
| [
"0.65428215",
"0.637844",
"0.6291531",
"0.62718385",
"0.62567204",
"0.6255184",
"0.6248362",
"0.62457955",
"0.62122875",
"0.6192997",
"0.6187441",
"0.61778",
"0.6176169",
"0.61067283",
"0.6066772",
"0.60516053",
"0.6045066",
"0.6039789",
"0.6039594",
"0.6026823",
"0.60244447",
"0.59921694",
"0.59460986",
"0.59159243",
"0.5910895",
"0.58845025",
"0.58808994",
"0.58697045",
"0.5869272",
"0.5862947"
]
| 0.71566474 | 0 |
execute the remove_block command | def _do_remove_block(self, args):
bus_type = args[1]
slave_id = int(args[2])
name = args[3]
if bus_type == 'rtu':
slave = self.server._servers[0].get_slave(slave_id)
elif bus_type == 'tcp':
slave = self.server._servers[1].get_slave(slave_id)
slave.remove_block(name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_block(self, block):\n raise NotImplementedError()",
"async def remove_block(\n self,\n position: typing.Union[\n typing.Tuple[int, int, int],\n typing.Any,\n ],\n immediate: bool = True,\n block_update: bool = True,\n block_update_self: bool = True,\n network_sync=True,\n reason=None,\n ):\n raise NotImplementedError",
"def delete_block(self, block):\n raise NotImplementedError('delete_block')",
"def remove_from_block(self):\n self.enclosing_block.remove_ops([self])",
"def removeBlock(self, block: ghidra.program.model.mem.MemoryBlock, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...",
"def removeMemoryBlock(self, block: ghidra.program.model.mem.MemoryBlock) -> None:\n ...",
"def _do_remove_all_blocks(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.remove_all_blocks()",
"def delete(self, block, name):\n self._kvs.delete(self._key(block, name))",
"def remove_blocks(self, block_ids):\n self.smd3.remove_blocks(block_ids)\n self.logic.update(self.smd3)\n self.header.update(self.smd3)",
"def remove_chat_block(self, vapor_id_or_ip, block_type):\n identity = vapor_id_or_ip if len(vapor_id_or_ip) == 36 else vapor_id_or_ip.split(\":\")[0] \\\n if ':' in vapor_id_or_ip else vapor_id_or_ip\n cmd = \"{}removeChatBlock {} {}\".format(self.console, identity, block_type)\n self.write_command(cmd)",
"def remove(self):\n if self.function is None:\n raise IRError('Basic block is not in function')\n self.function.basic_blocks.remove(self)\n self.function = None",
"def remove():",
"def func(self):\n\n self.caller.execute_cmd('@del ' + self.caller.db.fbat + '-' + self.caller.db.lbat)\n\n #self.caller.msg(\"Command called!\")",
"def do_remove(self, loglevel=logging.DEBUG):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\t# Now get the run_order keys in order and go.\n\t\tself.log('PHASE: remove', level=loglevel)\n\t\tself.pause_point('\\nNow removing any modules that need removing', print_input=False, level=3)\n\t\t# Login at least once to get the exports.\n\t\tfor module_id in self.module_ids():\n\t\t\tmodule = self.shutit_map[module_id]\n\t\t\tself.log('considering whether to remove: ' + module_id, level=logging.DEBUG)\n\t\t\tif cfg[module_id]['shutit.core.module.remove']:\n\t\t\t\tself.log('removing: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not module.remove(self):\n\t\t\t\t\tself.log(self.print_modules(), level=logging.DEBUG)\n\t\t\t\t\tself.fail(module_id + ' failed on remove', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\telse:\n\t\t\t\t\tif self.build['delivery'] in ('docker','dockerfile'):\n\t\t\t\t\t\t# Create a directory and files to indicate this has been removed.\n\t\t\t\t\t\tself.send(' command mkdir -p ' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/module_record/' + module.module_id + ' && command rm -f ' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/module_record/' + module.module_id + '/built && command touch ' + shutit_global.shutit_global_object.shutit_state_dir_build_db_dir + '/module_record/' + module.module_id + '/removed', loglevel=loglevel, echo=False)\n\t\t\t\t\t\t# Remove from \"installed\" cache\n\t\t\t\t\t\tif module.module_id in self.get_current_shutit_pexpect_session_environment().modules_installed:\n\t\t\t\t\t\t\tself.get_current_shutit_pexpect_session_environment().modules_installed.remove(module.module_id)\n\t\t\t\t\t\t# Add to \"not installed\" cache\n\t\t\t\t\t\tself.get_current_shutit_pexpect_session_environment().modules_not_installed.append(module.module_id)\n\t\t\t\tself.logout(echo=False)",
"def removeBlock(self, aBlock: gp.Block):\n \n for y, row in iter(self.blocks):\n for x, block in iter(row):\n if block is aBlock:\n self.blocks[y][x] = None\n self.playerSprites.remove(aBlock.sprite)\n return",
"def remove(self, block):\n try:\n self.blocks[block.height]\n except:\n raise ValueError(\"cant remove block: \" + str(block))\n\n removed = False\n for b in self.blocks[block.height]:\n # only delete the one with equal hash\n if b.hash() == block.hash():\n self.blocks[block.height].remove(b)\n removed = True\n if self.blocks[block.height] == []:\n self.blocks.pop(block.height)\n\n if not removed:\n raise ValueError(\"cant remove block: \" + str(block))",
"def test_empty_reexecute_block_remove_in_unicorn_native_interface():\n\n binary = os.path.join(bin_location, \"tests\", \"cgc\", \"KPRCA_00052\")\n pov_file = os.path.join(bin_location, \"tests_data\", \"cgc_povs\", \"KPRCA_00052_POV_00000.xml\")\n output_initial_bytes = (\n b\"Enter system password: \\nWelcome to the CGC Pizzeria order management system.\\n1. Input Order\\n\"\n b\"2. Update Order\\n3. View One Orders\\n4. View All Orders\\n5. Delete Order\\n6. Clear All Orders\\n7. Logout\\n\"\n b\"Choice: Enter Pickup Name: Choose what the kind of pizza\\n1. Pizza Pie - The classic!\\n\"\n b\"2. Pizza Sub - All the fun, on a bun\\n3. Pizza Bowl - Our own twist\\nChoice: Select Size\\n1. Small\\n\"\n b\"2. Medium\\n3. Large\\nChoice: Successfully added a new Pizza Pie!\\nSelect an option:\\n1. Add Toppings\\n\"\n b\"2. Remove Toppings\\n3. Add Sauce\\n4. Remove Sauce\\n5. Finished With Pizza\\nChoice: Successfully added pizza!\"\n b\"\\n1. Add another Pizza\\n2. Quit\\nChoice: 0. Cancel\\n==================================================\\n \"\n b\"Item #1. Classic Pizza Pie, Size: SMALL\\n Selected Toppings\\n\\tNone\\n Sauce on the side\\n\\tNone\\n\"\n b\"--------------------------------------\\n\\t\\tCalories: 1000\\n\\t\\tCarbs : 222\\n\\nPizza length... = 1\\n\\t\\t\"\n b\"Estimated wait time: 36 minute(s)\\n==================================================\\nChoice: \"\n b\"Removed Item #1\\n1. Add another Pizza\\n2. Quit\\nChoice: Order successfully added!\\n1. Input Order\\n\"\n b\"2. Update Order\\n3. View One Orders\\n4. View All Orders\\n5. Delete Order\\n6. Clear All Orders\\n7. Logout\\n\"\n b\"Choice: 1 - pov: Ordered 0 pizza(s)\\n==================================================\\n\"\n b\"--------------------------------------\\n\\t\\tCalories: 0\\n\\t\\tCarbs : 0\\n\\n\"\n )\n add_options = {\n angr.options.UNICORN_HANDLE_CGC_RECEIVE_SYSCALL,\n angr.options.UNICORN_HANDLE_SYMBOLIC_ADDRESSES,\n angr.options.UNICORN_HANDLE_SYMBOLIC_CONDITIONS,\n }\n trace_cgc_with_pov_file(\n binary,\n \"tracer_empty_reexecute_block_remove_in_unicorn_native_interface\",\n pov_file,\n output_initial_bytes,\n add_options=add_options,\n )",
"def do_remove(self, arg):\n jail_destroy('remove', arg)",
"def bdev_zone_block_delete(client, name):\n params = {'name': name}\n return client.call('bdev_zone_block_delete', params)",
"def __delitem__(self, index):\n def _removeBlock(blockIndex):\n block = self._doc.findBlockByNumber(blockIndex)\n if block.next().isValid(): # not the last\n cursor = QTextCursor(block)\n cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)\n elif block.previous().isValid(): # the last, not the first\n cursor = QTextCursor(block.previous())\n cursor.movePosition(QTextCursor.EndOfBlock)\n cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)\n cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)\n else: # only one block\n cursor = QTextCursor(block)\n cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)\n cursor.removeSelectedText()\n\n if isinstance(index, int):\n index = self._checkAndConvertIndex(index)\n _removeBlock(index)\n elif isinstance(index, slice):\n \"\"\"List of indexes is reversed for make sure \n not processed indexes are not shifted during document modification\n \"\"\"\n start, stop, step = index.indices(self._doc.blockCount())\n if step > 0:\n start, stop, step = stop - 1, start - 1, step * -1\n\n for blockIndex in range(start, stop, step):\n _removeBlock(blockIndex)",
"def remove_block(self, name):\n\n if not self._ast or not name in self._block_map:\n raise ValueError(u\"Block '{0}' does not exist\"\n .format(common.from_utf8(name)))\n\n block_idx = self._block_map[name]\n\n # remove block\n self._ast[2].pop(block_idx)\n del self._block_map[name]",
"def remove():\n pass",
"def delete_item(self, usage_locator, user_id, force=False): # lint-amnesty, pylint: disable=arguments-differ\n if not isinstance(usage_locator, BlockUsageLocator) or usage_locator.deprecated:\n # The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.\n raise ItemNotFoundError(usage_locator)\n\n with self.bulk_operations(usage_locator.course_key):\n original_structure = self._lookup_course(usage_locator.course_key).structure\n block_key = BlockKey.from_usage_key(usage_locator)\n if original_structure['root'] == block_key:\n raise ValueError(\"Cannot delete the root of a course\")\n if block_key not in original_structure['blocks']:\n raise ValueError(\"Cannot delete block_key {} from course {}, because that block does not exist.\".format(\n block_key,\n usage_locator,\n ))\n index_entry = self._get_index_if_valid(usage_locator.course_key, force)\n new_structure = self.version_structure(usage_locator.course_key, original_structure, user_id)\n new_blocks = new_structure['blocks']\n new_id = new_structure['_id']\n parent_block_keys = self._get_parents_from_structure(block_key, original_structure)\n for parent_block_key in parent_block_keys:\n parent_block = new_blocks[parent_block_key]\n parent_block.fields['children'].remove(block_key)\n parent_block.edit_info.edited_on = datetime.datetime.now(UTC)\n parent_block.edit_info.edited_by = user_id\n parent_block.edit_info.previous_version = parent_block.edit_info.update_version\n parent_block.edit_info.update_version = new_id\n # remove the source_version reference\n parent_block.edit_info.source_version = None\n self.decache_block(usage_locator.course_key, new_id, parent_block_key)\n\n self._remove_subtree(BlockKey.from_usage_key(usage_locator), new_blocks)\n\n # update index if appropriate and structures\n self.update_structure(usage_locator.course_key, new_structure)\n\n if index_entry is not None:\n # update the index entry if appropriate\n self._update_head(usage_locator.course_key, index_entry, usage_locator.branch, new_id)\n result = usage_locator.course_key.for_version(new_id)\n else:\n result = CourseLocator(version_guid=new_id)\n\n if isinstance(usage_locator.course_key, LibraryLocator):\n self._flag_library_updated_event(usage_locator.course_key)\n\n self._emit_item_deleted_signal(usage_locator, user_id)\n\n return result",
"def remove(self, str):\n if str in self.settings['Core::Blocks']:\n self.settings['Core::Blocks'].remove(str)\n self.rebuild()\n self.settings.save()",
"def decache_block(self, course_key, version_guid, block_key):\n bulk_write_record = self._get_bulk_ops_record(course_key)\n if bulk_write_record.active:\n try:\n del bulk_write_record.modules[version_guid][block_key]\n except KeyError:\n pass",
"async def remove(message, client, extra_args):\n\n if await funnypts_transaction(message, client, extra_args, \"remove\"):\n await message.channel.send(\"BRUH, THAT WAS CRINGE. SOMEONE JUST REVOKED YOUR FUNNYPOINT\")",
"def example_deletion_with_block_lowering(self):\n i = 0\n while i < len(self.shrink_target.blocks):\n if not self.is_shrinking_block(i):\n i += 1\n continue\n\n u, v = self.blocks[i].bounds\n\n j = 0\n while j < len(self.shrink_target.examples):\n n = int_from_bytes(self.shrink_target.buffer[u:v])\n if n == 0:\n break\n ex = self.shrink_target.examples[j]\n if ex.start < v or ex.length == 0:\n j += 1\n continue\n\n buf = bytearray(self.shrink_target.buffer)\n buf[u:v] = int_to_bytes(n - 1, v - u)\n del buf[ex.start : ex.end]\n if not self.incorporate_new_buffer(buf):\n j += 1\n\n i += 1",
"def remove_a_result(self, idblock):\n self.resultPanel.remove_item(idblock)",
"def delete(self, flow):\n for parent in self.parents:\n parent.children.remove(self)\n for child in self.children:\n child.parents.remove(self)\n\n flow.blocks.remove(self)",
"def unblock_all(t):\n blocked_count = 0\n\n while True:\n blocked_user_ids = t.blocks.ids()[\"ids\"]\n if not blocked_user_ids:\n print(\"No more IDs to unblock\")\n break\n\n for user_id in blocked_user_ids:\n blocked_count = blocked_count + 1\n print(f\"{blocked_count}: {user_id}\")\n try:\n t.blocks.destroy(user_id=user_id, include_entities=False, skip_status=True)\n except:\n print(\"error\")"
]
| [
"0.7275056",
"0.71058",
"0.7038019",
"0.6806753",
"0.6744042",
"0.6390654",
"0.6365233",
"0.62796766",
"0.6235635",
"0.6175417",
"0.6071809",
"0.60490525",
"0.60421735",
"0.59872586",
"0.5936501",
"0.5895922",
"0.5890499",
"0.5844823",
"0.5836963",
"0.5827584",
"0.57998425",
"0.5789642",
"0.5777072",
"0.57542187",
"0.57356465",
"0.57216084",
"0.5720691",
"0.57166004",
"0.57047975",
"0.5681664"
]
| 0.75226086 | 0 |
execute the remove_all_blocks command | def _do_remove_all_blocks(self, args):
bus_type = args[1]
slave_id = int(args[2])
if bus_type == 'rtu':
slave = self.server._servers[0].get_slave(slave_id)
elif bus_type == 'tcp':
slave = self.server._servers[1].get_slave(slave_id)
slave.remove_all_blocks() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_blocks(draft):\n for symbol in draft.Blocks:\n if symbol.Name in blocks_to_delete:\n print(\"[-] %s, \\tdeleted\" % symbol.Name)\n symbol.delete()\n\n # for ball in draft.ActiveSheet.Balloons:\n if draft.Balloons:\n for ball in draft.Balloons:\n if ball.BalloonType == 7: # type 7 filter the triangle balloons.\n print(\"[-] %s, \\tdeleted\" % ball.Name)\n ball.Delete()\n else:\n pass",
"def remove_blocks(self, block_ids):\n self.smd3.remove_blocks(block_ids)\n self.logic.update(self.smd3)\n self.header.update(self.smd3)",
"def _do_remove_block(self, args):\r\n bus_type = args[1]\r\n slave_id = int(args[2])\r\n name = args[3]\r\n if bus_type == 'rtu':\r\n slave = self.server._servers[0].get_slave(slave_id)\r\n elif bus_type == 'tcp':\r\n slave = self.server._servers[1].get_slave(slave_id)\r\n slave.remove_block(name)",
"def clean_files(self):\n clean_cmds = []\n for server_params in self.runner.job.yaml_params.server_params:\n scm_mount = server_params.scm_mount.value\n self.log.info(\"Cleaning up the %s directory.\", str(scm_mount))\n\n # Remove the superblocks\n cmd = \"rm -fr {}/*\".format(scm_mount)\n if cmd not in clean_cmds:\n clean_cmds.append(cmd)\n\n # Dismount the scm mount point\n cmd = \"while sudo umount {}; do continue; done\".format(scm_mount)\n if cmd not in clean_cmds:\n clean_cmds.append(cmd)\n\n if self.runner.job.yaml_params.is_scm():\n scm_list = server_params.scm_list.value\n if isinstance(scm_list, list):\n self.log.info(\n \"Cleaning up the following device(s): %s.\",\n \", \".join(scm_list))\n # Umount and wipefs the dcpm device\n cmd_list = [\n \"for dev in {}\".format(\" \".join(scm_list)),\n \"do mount=$(lsblk $dev -n -o MOUNTPOINT)\",\n \"if [ ! -z $mount ]\",\n \"then while sudo umount $mount\",\n \"do continue\",\n \"done\",\n \"fi\",\n \"sudo wipefs -a $dev\",\n \"done\"\n ]\n cmd = \"; \".join(cmd_list)\n if cmd not in clean_cmds:\n clean_cmds.append(cmd)\n\n pcmd(self._hosts, \"; \".join(clean_cmds), True)",
"def test_remove_all_items(self):\n lib1 = self._create_library(slug=\"test-lib-rm-all\", title=\"Title 1\", description=\"Description\")\n self._add_block_to_library(lib1['id'], \"problem\", \"problem1\")\n self._add_block_to_library(lib1['id'], \"problem\", \"problem2\")\n assert len(LibraryBlockIndexer.get_items()) == 2\n\n LibraryBlockIndexer.remove_all_items()\n assert len(LibraryBlockIndexer.get_items()) == 0",
"def unblock_all(t):\n blocked_count = 0\n\n while True:\n blocked_user_ids = t.blocks.ids()[\"ids\"]\n if not blocked_user_ids:\n print(\"No more IDs to unblock\")\n break\n\n for user_id in blocked_user_ids:\n blocked_count = blocked_count + 1\n print(f\"{blocked_count}: {user_id}\")\n try:\n t.blocks.destroy(user_id=user_id, include_entities=False, skip_status=True)\n except:\n print(\"error\")",
"def remove_from_block(self):\n self.enclosing_block.remove_ops([self])",
"def clear_blockages(self):\n debug.info(3,\"Clearing all blockages\")\n self.rg.clear_blockages()",
"def remove_blocks(self, *vertices):\n for vertex in vertices:\n try:\n self.world[vertex] = None\n self.shown.pop(vertex)\n for vtx in self._shown[vertex]:\n vtx.delete()\n except KeyError:\n pass\n except IndexError:\n pass",
"def unblockAll():\n result = subprocess.Popen(\"/sbin/iptables -F INPUT 2>&1\", shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not flush INPUT chain. Error: %s.\" % (result))\n result = subprocess.Popen(\"/usr/sbin/ipset destroy 2>&1\", shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not destroy all ipsets. Error: %s.\" % (result))\n sys.exit(255)",
"def remove_block(self, block):\n raise NotImplementedError()",
"def _removeListFromAuxiliaries(self, assembly):\n del self.assembliesByName[assembly.getName()]\n for b in assembly:\n try:\n del self.blocksByName[b.getName()]\n except KeyError:\n runLog.warning(\n \"Cannot delete block {0}. It is not in the Core.blocksByName structure\"\n \"\".format(b),\n single=True,\n label=\"cannot dereference: lost block\",\n )",
"def clear(self):\n self.blocks.clear()",
"def consolidate_empty_blocks(self):\n new_blocks = []\n for block in self.blocks:\n if isinstance(block, BasicBlock) and not block.statements:\n self.remove_block(block)\n else:\n new_blocks.append(block)\n self.blocks = new_blocks",
"def remove_objects(self, objects):\n for sprite_group in self.sprite_level_blocks:\n sprite_group.remove(objects)",
"def func(self):\n\n self.caller.execute_cmd('@del ' + self.caller.db.fbat + '-' + self.caller.db.lbat)\n\n #self.caller.msg(\"Command called!\")",
"def remove():",
"def dropall_cmd():\n drop_all()\n print(\"all tables dropped\")",
"def launch_nuke(self, *args, **kwarg):\n for region_tag in self.region_tags:\n self.delete_all_spaces(region_tag)",
"def snap_delete_all(mnode):\n cmd = \"gluster snapshot delete all --mode=script\"\n return g.run(mnode, cmd)",
"def test_empty_reexecute_block_remove_in_unicorn_native_interface():\n\n binary = os.path.join(bin_location, \"tests\", \"cgc\", \"KPRCA_00052\")\n pov_file = os.path.join(bin_location, \"tests_data\", \"cgc_povs\", \"KPRCA_00052_POV_00000.xml\")\n output_initial_bytes = (\n b\"Enter system password: \\nWelcome to the CGC Pizzeria order management system.\\n1. Input Order\\n\"\n b\"2. Update Order\\n3. View One Orders\\n4. View All Orders\\n5. Delete Order\\n6. Clear All Orders\\n7. Logout\\n\"\n b\"Choice: Enter Pickup Name: Choose what the kind of pizza\\n1. Pizza Pie - The classic!\\n\"\n b\"2. Pizza Sub - All the fun, on a bun\\n3. Pizza Bowl - Our own twist\\nChoice: Select Size\\n1. Small\\n\"\n b\"2. Medium\\n3. Large\\nChoice: Successfully added a new Pizza Pie!\\nSelect an option:\\n1. Add Toppings\\n\"\n b\"2. Remove Toppings\\n3. Add Sauce\\n4. Remove Sauce\\n5. Finished With Pizza\\nChoice: Successfully added pizza!\"\n b\"\\n1. Add another Pizza\\n2. Quit\\nChoice: 0. Cancel\\n==================================================\\n \"\n b\"Item #1. Classic Pizza Pie, Size: SMALL\\n Selected Toppings\\n\\tNone\\n Sauce on the side\\n\\tNone\\n\"\n b\"--------------------------------------\\n\\t\\tCalories: 1000\\n\\t\\tCarbs : 222\\n\\nPizza length... = 1\\n\\t\\t\"\n b\"Estimated wait time: 36 minute(s)\\n==================================================\\nChoice: \"\n b\"Removed Item #1\\n1. Add another Pizza\\n2. Quit\\nChoice: Order successfully added!\\n1. Input Order\\n\"\n b\"2. Update Order\\n3. View One Orders\\n4. View All Orders\\n5. Delete Order\\n6. Clear All Orders\\n7. Logout\\n\"\n b\"Choice: 1 - pov: Ordered 0 pizza(s)\\n==================================================\\n\"\n b\"--------------------------------------\\n\\t\\tCalories: 0\\n\\t\\tCarbs : 0\\n\\n\"\n )\n add_options = {\n angr.options.UNICORN_HANDLE_CGC_RECEIVE_SYSCALL,\n angr.options.UNICORN_HANDLE_SYMBOLIC_ADDRESSES,\n angr.options.UNICORN_HANDLE_SYMBOLIC_CONDITIONS,\n }\n trace_cgc_with_pov_file(\n binary,\n \"tracer_empty_reexecute_block_remove_in_unicorn_native_interface\",\n pov_file,\n output_initial_bytes,\n add_options=add_options,\n )",
"def test_remove_blocks(tmp_path):\n fn1 = tmp_path / \"test.asdf\"\n fn2 = tmp_path / \"test2.asdf\"\n\n tree = {\"a\": np.zeros(3), \"b\": np.ones(1)}\n af = asdf.AsdfFile(tree)\n af.write_to(fn1)\n\n with asdf.open(fn1, mode=\"rw\") as af:\n assert len(af._blocks._internal_blocks) == 2\n af[\"a\"] = None\n af.write_to(fn2)\n\n with asdf.open(fn1, mode=\"rw\") as af:\n assert len(af._blocks._internal_blocks) == 2\n af[\"a\"] = None\n af.update()\n\n for fn in (fn1, fn2):\n with asdf.open(fn) as af:\n assert len(af._blocks._internal_blocks) == 1",
"def example_deletion_with_block_lowering(self):\n i = 0\n while i < len(self.shrink_target.blocks):\n if not self.is_shrinking_block(i):\n i += 1\n continue\n\n u, v = self.blocks[i].bounds\n\n j = 0\n while j < len(self.shrink_target.examples):\n n = int_from_bytes(self.shrink_target.buffer[u:v])\n if n == 0:\n break\n ex = self.shrink_target.examples[j]\n if ex.start < v or ex.length == 0:\n j += 1\n continue\n\n buf = bytearray(self.shrink_target.buffer)\n buf[u:v] = int_to_bytes(n - 1, v - u)\n del buf[ex.start : ex.end]\n if not self.incorporate_new_buffer(buf):\n j += 1\n\n i += 1",
"def delete(self, flow):\n for parent in self.parents:\n parent.children.remove(self)\n for child in self.children:\n child.parents.remove(self)\n\n flow.blocks.remove(self)",
"def forceload_removeall(server):\n screenCmd(server, 'forceload remove all')\n screenCmd(\n server, 'execute in compactmachines:compact_world run forceload remove all'\n )",
"def validate_blocks():\n # Validate the remesh blocks modifiers\n bpy.ops.object.apply_all_modifiers()\n \n date_1 = datetime.datetime.now()\n print(\"Start\")\n \n # Select the bottom faces\n Button_Operations.select_faces(radians(10))\n\n # Extrude the support\n bpy.ops.mesh.extrude_region_move(MESH_OT_extrude_region={\"use_normal_flip\":False, \"use_dissolve_ortho_edges\":False, \"mirror\":False}, TRANSFORM_OT_translate={\"value\":(0, 0, -20), \"orient_type\":'GLOBAL', \"orient_matrix\":((1, 0, 0), (0, 1, 0), (0, 0, 1)), \"orient_matrix_type\":'GLOBAL', \"constraint_axis\":(False, False, True), \"mirror\":False, \"use_proportional_edit\":False, \"proportional_edit_falloff\":'SMOOTH', \"proportional_size\":1, \"use_proportional_connected\":False, \"use_proportional_projected\":False, \"snap\":False, \"snap_target\":'CLOSEST', \"snap_point\":(0, 0, 0), \"snap_align\":False, \"snap_normal\":(0, 0, 0), \"gpencil_strokes\":False, \"cursor_transform\":False, \"texture_space\":False, \"remove_on_cancel\":False, \"release_confirm\":False, \"use_accurate\":False, \"use_automerge_and_split\":False})\n\n # Select all\n bpy.ops.mesh.select_all(action='SELECT')\n\n # Bissect and delete the element under the xy plane\n bpy.ops.mesh.bisect(plane_co=(0, 0, 0.01), plane_no=(0, 0, 1), use_fill=False, clear_inner=True, xstart=942, xend=1489, ystart=872, yend=874, flip=False) \n\n # Fill the hole and triangulate faces\n Button_Operations.manifold_and_triangulate()\n \n # Delete the copy\n object_to_delete = bpy.data.objects[\"temp_copy\"]\n bpy.data.objects.remove(object_to_delete, do_unlink=True) \n \n # Switch in object mode \n bpy.ops.object.mode_set(mode='OBJECT')",
"def remove(self):\n if self.function is None:\n raise IRError('Basic block is not in function')\n self.function.basic_blocks.remove(self)\n self.function = None",
"def delete_block(self, block):\n raise NotImplementedError('delete_block')",
"def deleteBolts(self):\n a = self.get_bolts()\n for i in self.get_bolts():\n if i.y>GAME_HEIGHT:\n a.remove(i)\n self.set_plyrbolts(0)\n elif i.y<=-BOLT_HEIGHT:\n a.remove(i)",
"async def remove_block(\n self,\n position: typing.Union[\n typing.Tuple[int, int, int],\n typing.Any,\n ],\n immediate: bool = True,\n block_update: bool = True,\n block_update_self: bool = True,\n network_sync=True,\n reason=None,\n ):\n raise NotImplementedError"
]
| [
"0.64979184",
"0.64848286",
"0.62811196",
"0.6276664",
"0.6214501",
"0.61948407",
"0.6133304",
"0.60413116",
"0.5981843",
"0.5847182",
"0.5838975",
"0.58206236",
"0.58180887",
"0.5778389",
"0.57099503",
"0.570558",
"0.56985164",
"0.5684649",
"0.5678892",
"0.5661721",
"0.5643133",
"0.5631017",
"0.5626673",
"0.55783314",
"0.5532971",
"0.55309385",
"0.5528909",
"0.55087906",
"0.54960936",
"0.5493268"
]
| 0.720971 | 0 |
install a function as a hook | def _do_install_hook(self, args):
hook_name = args[1]
fct_name = args[2]
hooks.install_hook(hook_name, self._hooks_fct[fct_name]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hook(callback):\n hooks.append(callback)",
"def hook(self, name):\r\n def wrapper(func):\r\n self.hooks.add(name, func)\r\n return func\r\n return wrapper",
"def install_hook(hook_id, proc):\n handle = user32.SetWindowsHookExA(hook_id, proc, None, 0)\n if not handle:\n # https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499-\n msg = \"Failed to install hook. errno=\" + str(GetLastError())\n logging.error(msg)\n return handle",
"def setup_hooks(self):\n pass",
"def add_hook(self, event, function):\n if event not in self.hooks:\n self.hooks[event] = []\n self.hooks[event].append(function)",
"def add_hook(f, h):\n if f in hooks:\n hooks[f] += [h]\n else:\n hooks[f] = [h]",
"def on_hook(self) -> None:",
"def register(name, fn=None):\n def _hook_add(func):\n if name not in _hooks:\n logger.debug(\"Creating new hook %s\" % name)\n _hooks[name] = []\n\n logger.debug('Registering hook %s for function %s' % (name, fn))\n _hooks[name].append(func)\n\n if fn is None:\n # Behave like a decorator\n def decorator(func):\n _hook_add(func)\n return func\n return decorator\n else:\n # Behave like a function, just register hook\n _hook_add(fn)",
"def add_hook(self, method, args=None, kwargs=None):\n self.hook.append((method, args, kwargs))",
"def register_mutate_hook(self, hook):\n self._mutate_hooks.add(hook)",
"def declare_hook(self, fct_name, fct):\r\n self._hooks_fct[fct_name] = fct",
"def add_run_hook(h):\n add_hook(run, h)",
"def exec_hooks(f):\n global hooks\n if f in hooks:\n for h in hooks[f]:\n h()",
"def set_hook(self, module):\n self.hook = module.register_forward_hook(self.hook_fn)",
"def setHook(self, module):\n self.hook = module.register_forward_hook(self.hook_fn)",
"def _hook(self):",
"def add_hook(self, name: str, callable):\n assert name in self.typenames, \"'{}' not in '{}'\".format(name, self.typenames)\n self.__hooks[name] = callable",
"def register(self, hook_url):\n raise NotImplementedError()",
"def set_hook(self,name,hook):\n\n # At some point in the future, this should validate the hook before it\n # accepts it. Probably at least check that the hook takes the number\n # of args it's supposed to.\n setattr(self.hooks,name,new.instancemethod(hook,self,self.__class__))",
"def before(hook_name, methods, kwargs):\n for hookimpl in methods:\n self._plugin2calls[hookimpl.plugin].add(hook_name)",
"def setup_hooks(self) -> None:\n install_hooks(self.record_interaction)",
"def _setup_hook(add_print=False, unit_test=False):\n # we can check many things, needed module\n # any others things before unit tests are started\n if add_print: # pragma: no cover\n print(\"Success: _setup_hook\")",
"def add_hook(**_kwargs):\n hook = import_hook.create_hook(\n transform_source=transform_source,\n transform_ast=transform_ast,\n hook_name=__name__,\n extensions=[\".🐍\"],\n )\n return hook",
"def register_method_before(fn, phase): # type: (Callable, str) -> None\n PackageMixinsMeta._methods_to_be_added[fn.__name__] = fn\n PackageMixinsMeta._add_method_before[phase].append(fn)",
"def register_main_hook(callback):\n main_hooks.append(callback)",
"def add_step_hook(h):\n add_hook(step, h)",
"def on_install(self, request, trigger_context):\n raise NotImplementedError",
"def registerInternalLinkHook(tag, function):\n mInternalLinkHooks[tag] = function",
"def add_hook(self, type_: str, hook: typing.Callable) -> typing.Callable:\n if type_ not in self._request_hooks:\n self._request_hooks[type_] = []\n\n self._request_hooks[type_].append(hook)\n return hook",
"def install_hook(context: Context) -> None:\n import git # import inside def for performance\n\n def is_bento_precommit(filename: str) -> bool:\n if not os.path.exists(filename):\n return False\n with open(filename) as f:\n lines = f.read()\n return constants.BENTO_TEMPLATE_HASH in lines\n\n # Get hook path\n repo = bento.git.repo(context.base_path)\n if repo is None:\n echo_error(\"Not a git project\")\n sys.exit(3)\n\n hook_path = git.index.fun.hook_path(\"pre-commit\", repo.git_dir)\n\n if is_bento_precommit(hook_path):\n echo_success(f\"Bento already installed as a pre-commit hook\")\n else:\n legacy_hook_path = f\"{hook_path}.pre-bento\"\n if os.path.exists(hook_path):\n # If pre-commit hook already exists move it over\n if os.path.exists(legacy_hook_path):\n raise Exception(\n \"There is already a legacy hook. Not sure what to do so just exiting for now.\"\n )\n else:\n # Check that\n shutil.move(hook_path, legacy_hook_path)\n\n # Copy pre-commit script template to hook_path\n template_location = os.path.join(\n os.path.dirname(__file__), \"../resources/pre-commit.template\"\n )\n shutil.copyfile(template_location, hook_path)\n\n # Make file executable\n original_mode = os.stat(hook_path).st_mode\n os.chmod(hook_path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n echo_success(f\"Added Bento to your git pre-commit hooks.\")"
]
| [
"0.71828526",
"0.69452894",
"0.6812174",
"0.6810592",
"0.6807268",
"0.6787101",
"0.6680161",
"0.6601569",
"0.65442866",
"0.6482846",
"0.6470736",
"0.6468058",
"0.6451436",
"0.63961804",
"0.63879573",
"0.6370445",
"0.6350216",
"0.6326162",
"0.6303307",
"0.63018394",
"0.6298826",
"0.62967473",
"0.6290436",
"0.6285388",
"0.62724036",
"0.6254621",
"0.62462324",
"0.6220988",
"0.6166892",
"0.61636144"
]
| 0.80193776 | 0 |
uninstall a function as a hook. If no function is given, uninstall all functions | def _do_uninstall_hook(self, args):
hook_name = args[1]
try:
hooks.uninstall_hook(hook_name)
except KeyError as exception:
LOGGER.error(str(exception)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def uninstall_hook(handle):\n if handle:\n user32.UnhookWindowsHookEx(handle)",
"def _uninstall(package_name, remove_all, app_id, cli, app):\n\n package_manager = _get_package_manager()\n err = package.uninstall(\n package_manager, package_name, remove_all, app_id, cli, app)\n if err is not None:\n emitter.publish(err)\n return 1\n\n return 0",
"def RemoveHooks(obj: object) -> None:\n for function in obj.__dict__.values():\n if not callable(function):\n continue\n\n hook_targets = getattr(function, \"HookTargets\", None)\n if hook_targets is None:\n continue\n\n for target in hook_targets:\n unrealsdk.RemoveHook(target, function.HookName)",
"def remove_custom_hook(hook_name):\n _CUSTOM_HOOKS.pop(hook_name)\n globals().pop(hook_name)",
"def on_uninstall(self, request, trigger_context):\n raise NotImplementedError",
"def remove_hook(self):\n for handle in self.handlers:\n handle.remove()",
"def remove_hook(self):\n for handle in self.handlers:\n handle.remove()",
"def del_functions(self, *args):\n if len(args) > 0:\n attrs = args\n else:\n self._user_function.clear()",
"def removeFunction(self, function: ghidra.program.model.listing.Function) -> None:\n ...",
"def PluginUninstall(self, packageName):\n pass",
"def uninstall_pointer_hook(self):\n\n\t\tself._interface.uninstall_pointer_hook()",
"def local_uninstall(environment):\n environment.remove_cleanup(\n environment.cfy.local.execute,\n args=['uninstall'],\n )\n result = environment.cfy.local.execute('uninstall')\n assert result['returncode'] == 0, (\n 'Uninstall workflow failed!'\n )",
"def del_hook(f, hindex):\n if f in hooks:\n hlist = hooks[f]\n if not (0 <= hindex and hindex < len(hooks[f])):\n print \"invalid argument; index should be in range [0, {:d}].\"\\\n .format(len(hooks[f]))\n else:\n hlist[hindex:hindex+1] = []\n else:\n print \"no hook is attached to\", f.__name__",
"def test_uninstall(self):\n pass",
"def del_hook(self, name: str):\n try:\n del self.__hooks[name]\n except KeyError:\n pass",
"def remove_hook(self, atom: Union[BaseAtom, str], callback):\n name = str(atom)\n\n if isinstance(atom, BaseAtom):\n name = name[:-1]\n\n if name not in self.hooks:\n return\n self.hooks[name].discard(callback)",
"def _uninstall():\n\tif not \"SCRIPTS\" in os.environ:\n\t\tprint \"Please set SCRIPTS environment variable.\"\n\t\tsys.exit(1)\n\t\n\tscript_dir = os.environ[\"SCRIPTS\"]\n\t\n\tif SCRIPT_NAME in os.listdir(script_dir):\n\t\tshutil.rmtree(os.path.join(script_dir, SCRIPT_NAME))\n\tfor name in EXEC_NAMES:\n\t\tif name in os.listdir(\"/bin/\"):\n\t\t\tos.system(\"sudo rm -f /bin/{}\".format(name))",
"def test_hook_unregister(self):\n self.assertEqual(list(self.registry), [])\n item = self.DummyItem(123)\n self.hook_cls(self.extension, item)\n\n self.extension.shutdown()\n self.assertEqual(list(self.registry), [])",
"def uninstall(portal, reinstall=False):\n if reinstall:\n return\n setup_tool = getToolByName(portal, 'portal_setup')\n return setup_tool.runAllImportStepsFromProfile(PROFILE_ID)",
"def uninstall_keyboard_hook(self):\n\n\t\tself._interface.uninstall_keyboard_hook()",
"def remove_handler ( handler_list, handler_function ):\n if handler_function in handler_list:\n handler_list.remove ( handler_function )",
"def uninstall_window_hook(self):\n\n\t\tif cls._interface:\n\t\t\tcls._interface.uninstall_window_hook()\n\t\telse:\n\t\t\traise NotImplementedError('Unsupported platform')",
"def remove_command(self, func):\n del self.commands[func.__name__]",
"def off_hook(self) -> None:",
"def del_run_hook(hindex):\n del_hook(run, hindex)",
"def del_microstep_hook(hindex):\n del_hook(microstep, hindex)",
"def uninstall_platform(self):\n if self.status: self.status.Warning(\"Uninstall Function Not Implemented Yet!\")",
"def deregister(self):\n if self.debug:\n print(\"%r.deregister()\" % (self,))\n if not self.func:\n if self.debug:\n print(\"already deregistered\")\n return\n try:\n self.tkApp.deletecommand(self.tclFuncName)\n except tkinter.TclError as e:\n if self.debug:\n print(\"deregistering failed: %r\" % (e,))\n pass\n self.func = None",
"def uninstall(self):\n getSiteManager.sethook(self.old)",
"def uninstall(args):\n scripts = get_console_scripts(args.package)\n for script in scripts:\n path = os.path.join(args.destination, script)\n logger.info('removing {0}'.format(path))\n os.remove(path)"
]
| [
"0.69406205",
"0.65433",
"0.6514345",
"0.6452318",
"0.631227",
"0.618152",
"0.618152",
"0.6086222",
"0.6082233",
"0.60555613",
"0.6052477",
"0.6052156",
"0.60201836",
"0.6006638",
"0.60027117",
"0.5980151",
"0.5978945",
"0.59783494",
"0.5975453",
"0.5919139",
"0.58928365",
"0.58918697",
"0.58871615",
"0.58806896",
"0.5867708",
"0.5853712",
"0.5851407",
"0.58491516",
"0.5847559",
"0.5841751"
]
| 0.73846674 | 0 |
change the verbosity of the server | def _do_set_verbose(self, args):
verbose = int(args[1])
self.server.set_verbose(verbose)
return "%d" % verbose | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verbosity(v):\n assert v in [0,1,2] # debug, warn, info\n GLOBAL['VERBOSITY'] = v",
"def set_verbosity():\n\n\tif conf.verbose is None:\n\t\tconf.verbose = 1\n\n\tconf.verbose = int(conf.verbose)\n\n\tif conf.verbose == 0:\n\t\tlogger.setLevel(logging.ERROR)\n\telif conf.verbose == 1:\n\t\tlogger.setLevel(logging.INFO)\n\telif conf.verbose == 2:\n\t\tlogger.setLevel(logging.DEBUG)\n\telif conf.verbose == 3:\n\t\tlogger.setLevel(CUSTOM_LOGGING.PAYLOAD)\n\telif conf.verbose == 4:\n\t\tlogger.setLevel(CUSTOM_LOGGING.TRAFFIC_OUT)\n\telif conf.verbose >= 5:\n\t\tlogger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)",
"def set_verbosity(self, verbosity):\n if verbosity == 0:\n self.__logger.setLevel(logging.CRITICAL)\n if verbosity == 1:\n self.__logger.setLevel(logging.ERROR)\n if verbosity == 2:\n self.__logger.setLevel(logging.WARNING)\n if verbosity == 3:\n self.__logger.setLevel(logging.INFO)\n if verbosity >= 4:\n self.__logger.setLevel(logging.DEBUG)",
"def turn_on_verbosity(self):\n self.m.setParam('OutputFlag', 1)",
"def set_verbose(verbosity: bool) -> None:\n global VERBOSE # pylint: disable=global-statement\n VERBOSE = verbosity",
"def verbose():\n GLOBAL['VERBOSE'] = True",
"def cli(verbose):\n level = (logging.WARNING, logging.INFO, logging.DEBUG)[min(verbose, 2)]\n logging.basicConfig(level=level)",
"def set_verbose(self, v):\n self._verbose = bool(v)",
"def enable_verbose(self):\n self.verbose = True",
"def do_verbose(self, arg):\n global verbose\n if verbose == 1:\n verbose = 0\n # prtin and add to log file \n logmsg = \" INFO: verbose mode disable\"\n log(logmsg)\n else:\n verbose = 1\n # prtin and add to log file \n logmsg = \" INFO: verbose mode enable\"\n log(logmsg)",
"def SetVerbose(new_verbose=True):\n global _verbose\n _verbose = new_verbose",
"def _set_verbose(value):\n global VERBOSE\n VERBOSE = value",
"def verbose(self, value):\n if value > self.DEBUG:\n value = self.DEBUG\n if value < self.NONE:\n value = self.NONE\n self._verbose = value",
"def setVerbose(newVal):\n global verbose\n verbose = newVal",
"def set_verbose(self, verbose):\r\n for srv in self._servers:\r\n srv.set_verbose(verbose)",
"def test_increase_verbosity(self):\n # Start from a known state.\n set_level(logging.INFO)\n assert get_level() == logging.INFO\n # INFO -> VERBOSE.\n increase_verbosity()\n assert get_level() == logging.VERBOSE\n # VERBOSE -> DEBUG.\n increase_verbosity()\n assert get_level() == logging.DEBUG\n # DEBUG -> SPAM.\n increase_verbosity()\n assert get_level() == logging.SPAM\n # SPAM -> NOTSET.\n increase_verbosity()\n assert get_level() == logging.NOTSET\n # NOTSET -> NOTSET.\n increase_verbosity()\n assert get_level() == logging.NOTSET",
"def _do_option(self, line: str) -> None:\n if line.startswith(\"option verbosity\"):\n self._verbosity = Level(int(line[len(\"option verbosity \") :]))\n _write(\"ok\")\n else:\n _write(\"unsupported\")",
"def Verbose(on_off=\"on\"):\n\n global verbose\n \n if on_off.isdigit():\n int_value = int(on_off)\n else:\n int_value = 1\n\n if on_off.lower() == \"off\":\n int_value = 0\n print \"Verbose disabled.\"\n elif on_off.lower() == \"on\":\n int_value = 1\n print \"Verbose enabled.\"\n \n if -1 < int_value < 3:\n verbose=int_value\n interface.VERBOSE=int_value\n else:\n raise TypeError",
"def __init__(self, verbosity: int = max_verbosity):\n self.verbosity = verbosity",
"def setVerbose(*args,**kwargs):\n verbose = args[0] if args else True\n if verbose:\n verbositySampleTools = 2\n verbosityPlotTools = 2\n verbosityVariableTools = 2\n verbositySelectionTools = 2\n verbosityWJ = 2",
"def set_verbose(verbose=\"ERROR\"):\n if verbose == \"INFO\":\n logger.setLevel(logging.INFO)\n elif verbose == \"DEBUG\":\n logger.setLevel(logging.DEBUG)\n elif verbose == \"ERROR\":\n logger.setLevel(logging.ERROR)\n else:\n print('Incorrect verbose level, option:[\"INFO\",\"DEBUG\",\"ERROR\"], use \"ERROR instead.\"')\n logger.setLevel(logging.ERROR)",
"def verbose():\n return Verbose.level()",
"def toggleVerbose(self):\n self.__VERBOSE = not self.__VERBOSE",
"def verbosity_for_session(request):\n return request.config.getoption(\"--verbosity-project\")",
"def verbosity(self):\n return self._get('verbosity')",
"def verbosity(self):\n return self._verbosity",
"def set_verbosity(self, value):\n for source in self._sources.itervalues():\n source.verbosity = value",
"def verbose():\n return _verbose",
"def setVerbose(self, v):\n return self._set(verbose=v)",
"def verbose(self, enabled=True):\r\n self.verbose = verbose"
]
| [
"0.7681478",
"0.72215545",
"0.71093506",
"0.6939538",
"0.6873078",
"0.676105",
"0.67317533",
"0.6667096",
"0.6627816",
"0.65958714",
"0.6590193",
"0.65867203",
"0.65688455",
"0.6464963",
"0.6452119",
"0.6382751",
"0.63656926",
"0.6328291",
"0.63164514",
"0.6315265",
"0.630789",
"0.6307454",
"0.6302284",
"0.62826747",
"0.6250783",
"0.62442225",
"0.62232274",
"0.6202416",
"0.6190059",
"0.6181821"
]
| 0.7388952 | 1 |
Get the current version or exit the process. | def version_or_exit(path):
with cd(path):
versioning_file = join(os.curdir, 'versioning.py')
try:
get_version = run_command(versioning_file)
if get_version.returncode:
abort(colors.red('versioning.py') + ' returned an error.')
else:
return get_version.stdout.strip()
except OSError:
abort(colors.red('versioning file not found: ') + versioning_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_version():\n click.echo(get_current_version_number())",
"def get_current_version(self):\n current_version = self.get_version(self.get_module_and_path(self._main_dir))\n return current_version",
"def latest_version(self) -> AwesomeVersion | None:\n return self.sys_updater.version_cli",
"def get_current_version(self):\n raise NotImplementedError(\"get_current_version is not implemented\")",
"def version(self):\n\n if self.running() is True:\n return APIConsumer.get(\"/version\").content\n else:\n return None",
"def get_version():\n return about.get_version()",
"async def get_version(self):\n\n # Display info message\n log.info(\"get_version\")\n\n # By default empty string\n version = \"\"\n\n # Run get version on the device\n output = await self.send_command(self.cmd_get_version)\n\n # Seek \"Version: \" on each line of the returned output\n for line in output.splitlines():\n\n log.info(f\"get_version: line: {line}\")\n\n # Is it the line with \"Version: \"\n if \"Version: \" in line:\n\n # Yes\n\n # Then take the version from this line\n version = line.split(\"Version: \")[1]\n\n # Break the loop\n break\n\n # Display info message\n log.info(f\"get_version: version: {version}\")\n\n # Return the version of the software of the device\n return version",
"def version():\n return Tns.exec_command(command='--version')",
"def get_current_version(self) -> str:\n raise NotImplementedError()",
"def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None",
"async def version(self):\n self.do(\"version\")\n return (await self.read(7)).strip()",
"def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)",
"async def get_version(self):\n\n # Display info message\n log.info(\"get_version\")\n\n # By default empty string\n version = \"\"\n\n # Run get version on the device\n output = await self.send_command(self.cmd_get_version)\n\n # Seek \"Version \" and \",\" to get the version in the returned output\n version = output.split(\"Version \")[1].split(\",\")[0]\n\n # Display info message\n log.info(f\"get_version: version: {version}\")\n\n # Return the version of the software of the device\n return version",
"def go_version(self):\n env = self.m.step.get_from_context('env', {})\n env.update(self.go_env)\n with self.m.step.context({'env': env}):\n self.m.run(\n self.m.step,\n 'go version',\n cmd=[self.go_exe, 'version'])\n self.m.run(\n self.m.step,\n 'env go version',\n cmd=['go', 'version'])",
"def get_current_version(self):\n #full_path = self._root.knob('name').value()\n full_path = os.path.normpath(\n self.comp.GetAttrs()['COMPS_FileName']\n ).replace('\\\\', '/')\n return self.get_version_from_full_path(full_path)",
"def get_version(self):\n return self.__make_api_call('get/version')",
"def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version",
"def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"",
"def do_get_version(self, arg):\n arg = arg\n print(self.phil.if_version)",
"def get_version():\n version_dict = {}\n exec(open(\"src/chimera/version.py\").read(), version_dict)\n return version_dict['version']",
"def _grab_version(self):\n version = self.vcs.version\n if not version:\n logger.critical(\"No version detected, so we can't do anything.\")\n sys.exit(1)\n self.data['version'] = version",
"def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]",
"def get_external_version(self, path=None):\n exe = self.command.split()[0] if path is None else path\n try:\n p = subprocess.Popen(exe + ' --version', shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n #p.wait() #block the rest\n stdo, stde = p.communicate()\n stdr = p.returncode\n if stdr > 0:\n raise RuntimeError(\"Could not check version of \" + exe + \" - Please check your installation and FRED2 \"\n \"wrapper implementation.\")\n except Exception as e:\n raise RuntimeError(e)\n return str(stdo).strip()",
"def version(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"version\")",
"def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''",
"def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value",
"def get_version():\n\n current_dir = os.path.dirname(os.path.realpath(__file__))\n version_path = os.path.join(current_dir, VERSION_FILE)\n\n with open(version_path, 'r') as version_fd:\n return version_fd.read().strip()",
"async def version(self):\n # [p]version\n\n await self.bot.say(\"Current version: \" + CoreAPI.get_version())",
"def main(self):\n logging.info(\"Doing release for %s\", self.version.raw)\n\n if self.version.branch is None:\n logging.debug(\"No branch, assuming '%s'. Override with --branch.\",\n self.options.branch)\n self.version.branch = self.options.branch\n\n # No version specified, assuming a snapshot release\n if self.options.version is None:\n self.do_release(\n version=MwVersion.new_snapshot(self.options.branch))\n return 0\n\n if self.options.previousversion:\n # Given the previous version on the command line\n self.do_release(version=self.version)\n return 0\n\n no_previous = False\n if self.version.prev_version is None:\n no_previous = True\n if not self.ask(\"No previous release found. Do you want to make a \"\n \"release with no patch?\"):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n if no_previous or self.options.no_previous:\n self.do_release(version=self.version)\n else:\n if not self.ask(\"Was %s the previous release?\" %\n self.version.prev_version):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n\n self.do_release(version=self.version)\n return 0",
"def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res"
]
| [
"0.781276",
"0.7021756",
"0.69878983",
"0.6981611",
"0.6781809",
"0.67792046",
"0.67674327",
"0.67511374",
"0.67059594",
"0.6689565",
"0.66695756",
"0.66640997",
"0.66595495",
"0.6645535",
"0.6642224",
"0.6629286",
"0.6607862",
"0.6600906",
"0.6595271",
"0.6594787",
"0.6563294",
"0.65490204",
"0.6544968",
"0.65370363",
"0.65358394",
"0.65198606",
"0.65139025",
"0.6511103",
"0.6499709",
"0.6473016"
]
| 0.707849 | 1 |
Roll back the tagging that was just done and inform the user. >>> rollback('not_a_tag') | def rollback(tag):
done = run_command(['git', 'tag', '-d', tag])
if done.returncode:
echo.bold(colors.red(str(done)))
sys.exit(done.returncode)
echo.cyan('Done:', done.stdout.strip()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rollback(self):\n pass",
"def rollback(self):\n self._rollback = True",
"def rollback(self):\n raise NotImplementedError",
"def rollback(self):\n raise TransactionRollback('rollback called outside of transaction')",
"def rollback(self, stage, enodes, exception):",
"def rollback(self):\n self._connection.execute_nonquery(\"sql\", \"ROLLBACK\", True)",
"def rollback(self):\r\n self.db.rollback()",
"def rollback(self):\n self.conn.rollback()",
"def rollback(self):\n\n if not self.is_active:\n return\n\n if self.is_context_active:\n raise states.RolledBack(self)\n else:\n self.__do_rollback()\n self._cleanup()",
"def rollback(self):\n self.db.rollback()",
"def rollback(self) -> None:\n with self.lock:\n self.wait(self._rollback_gen())",
"def _do_rollback(self):\n self.backend.rollback()",
"def RollBack(self):\r\n self.conn.rollback()",
"def rollback(self):\n try:\n if self._cur_batch:\n self._cur_batch.rollback()\n except ValueError:\n # ignore \"Batch must be in progress to rollback\" error\n pass\n self._cur_batch = None\n self._num_mutations = 0",
"def rollback(self):\n # PEP 249\n raise impala.error.NotSupportedError()",
"def rollback(self) -> None:\n if self._transaction is None:\n pass\n else:\n self._transaction.rollback(_to_root=True)",
"def rollback(self):\n self.success = False\n self.close()",
"def rollback(self):\n if self.dbmi.__name__ == \"psycopg2\":\n if self.connected:\n self.connection.rollback()",
"def rollback_action(args, kwargs, was_interrupted, result=None):\n raise NotImplementedError()",
"def rollback(self, rollback_to):\n raise NotImplementedError",
"def rollback_transaction(self, event=None):\n assert self._current_transaction\n\n # Store stacks\n undo_stack = list(self._undo_stack)\n\n erroneous_tx = self._current_transaction\n self._current_transaction = None\n try:\n with Transaction(self.event_manager):\n try:\n erroneous_tx.execute()\n except Exception as e:\n logger.error(\"Could not roolback transaction\")\n logger.error(e)\n finally:\n # Discard all data collected in the rollback \"transaction\"\n self._undo_stack = undo_stack\n\n self._action_executed()",
"def Rollback(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def rollback(self):\n self._session.rollback()\n\n return True",
"def rollback(self):\n conn = self.threadingLocal.connection\n if isinstance(conn, Transaction) and not conn._obsolete:\n self.threadingLocal.connection.rollback()",
"def rollback(self):\n return self.connection.rollback",
"def rollback_transaction(self):\n cursor = self._cursor()\n cursor.close()\n self._db.rollback()\n self._end_transaction()",
"def rollback(self):\n return self.connection.rollback()",
"def rollback():\n current_timestamp = current()\n previous_timestamp = previous()\n\n if previous_timestamp:\n execute(symlink, *(previous_timestamp, ))\n run('rm -rf %s' % os.path.join(env.releases_dir, current_timestamp))",
"def rollback(self):\n\t\traise GeneratorException(\"Not implemented\")",
"def savepoint_rollback(self, id):\n self.execute(\"ROLLBACK TO SAVEPOINT {}\".format(id))"
]
| [
"0.76122165",
"0.75165194",
"0.74412733",
"0.72169214",
"0.71961904",
"0.7160262",
"0.70686543",
"0.70201325",
"0.70071846",
"0.70071536",
"0.70067024",
"0.69872856",
"0.6894638",
"0.6878397",
"0.6830433",
"0.67013144",
"0.6643999",
"0.66095114",
"0.66077113",
"0.66047925",
"0.65556264",
"0.6512962",
"0.6397116",
"0.6389851",
"0.63857883",
"0.6365782",
"0.6355081",
"0.6349179",
"0.63155496",
"0.63099355"
]
| 0.8277594 | 0 |
Do a release step, possibly rolling back the tagging. >>> do_release_step('true', 'rollback_tag') | def do_release_step(command, tag, no_rollback=None):
echo.cyan('running:', command)
published = run_command(command)
if published.returncode:
echo.bold(colors.red('Failed:'))
echo.yellow(published.stderr)
echo.white(published.stdout)
if no_rollback:
echo.cyan(no_rollback)
else:
rollback(tag)
sys.exit(published.returncode) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def release_command(project_path=None, noop=None):\n\n if not sys.version_info.major == 3:\n noop or abort(colors.bold(\n 'Releases are only compatible with both Python2 and Python3 if done via Python3. Aborting since this is Python2.'\n ))\n\n auto_version = version_or_exit(project_path)\n\n if auto_version == '0':\n echo.bold('Tag-Version check failed:', colors.cyan(auto_version))\n abort('It looks like no (initial) version tag(s) exist(s).')\n\n released = '.dev' not in auto_version\n if released:\n echo.bold('Tag-Version check failed:', colors.cyan(auto_version))\n abort('Are you trying to re-release the current version tag?')\n\n dirty = 'dirty' in auto_version\n if dirty:\n echo.bold('Tag-Version check failed:', colors.red(auto_version))\n abort('You have to commit all changes before releasing.')\n\n #XXX: Check more? like branch... might move it to gitflow then\n\n echo.bold('Tag-Version check passed:', colors.green(auto_version))\n echo.bold('Bumping version... ', nl=False)\n\n if noop: return\n\n bump_result = run_command(join(project_path, 'setup.py bump'))\n if bump_result.returncode:\n echo.red(bump_result.stdout)\n echo.bold(colors.red(bump_result.stderr))\n sys.exit(bump_result.returncode)\n\n auto_version = version_or_exit(project_path)\n echo.bold('version is now:', colors.green(auto_version))\n\n tag = noop or bump_result.stdout.split('\\n')[-2].split()[-1]\n message = colors.bold('Do the release? (tag: %s)' % tag)\n if noop or click.confirm(message):\n do_publish(tag)\n else:\n noop or rollback(tag)",
"def release(context):\n print(f\"Starting a release of v{IMAGE_VER} on GitHub!\")\n run_cmd(context, exec_cmd=\"git checkout main\", pty=False, error_message=\"Failed to checkout main!\")\n\n run_cmd(context, exec_cmd=\"git pull origin main\", pty=False, error_message=\"Failed to pull from origin/main\")\n\n run_cmd(\n context, exec_cmd=f\"git tag v{IMAGE_VER}\", pty=False, error_message=f\"Failed to create the tag 'v{IMAGE_VER}'!\"\n )\n\n run_cmd(context, exec_cmd=\"git push --tags\", pty=False, error_message=f\"Failed to push the tag 'v{IMAGE_VER}'!\")",
"def main(self):\n logging.info(\"Doing release for %s\", self.version.raw)\n\n if self.version.branch is None:\n logging.debug(\"No branch, assuming '%s'. Override with --branch.\",\n self.options.branch)\n self.version.branch = self.options.branch\n\n # No version specified, assuming a snapshot release\n if self.options.version is None:\n self.do_release(\n version=MwVersion.new_snapshot(self.options.branch))\n return 0\n\n if self.options.previousversion:\n # Given the previous version on the command line\n self.do_release(version=self.version)\n return 0\n\n no_previous = False\n if self.version.prev_version is None:\n no_previous = True\n if not self.ask(\"No previous release found. Do you want to make a \"\n \"release with no patch?\"):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n if no_previous or self.options.no_previous:\n self.do_release(version=self.version)\n else:\n if not self.ask(\"Was %s the previous release?\" %\n self.version.prev_version):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n\n self.do_release(version=self.version)\n return 0",
"def create_release(config, args):\n yield config.repo.create_release(args.tag_name, name=args.name,\n target_commitish=args.get(\"target_commitish\"), body=args.get(\"body\"),\n draft=args.get_bool(\"draft\"), prerelease=args.get_bool(\"prerelease\"))",
"def rollback(tag):\n done = run_command(['git', 'tag', '-d', tag])\n if done.returncode:\n echo.bold(colors.red(str(done)))\n sys.exit(done.returncode)\n echo.cyan('Done:', done.stdout.strip())",
"def step(self, *args: Any, **kwargs: Any):\n return self._sim.step(HabitatSimActions.GRAB_RELEASE)",
"def tag_release():\n # We're assuming that setup.py has already been updated\n # manually or using scripts/release/bump-version so the\n # current version in setup.py is the version number we should tag.\n version_number = get_current_version_number()\n click.echo(\"Tagging %s release\" % version_number)\n subprocess.check_call(\n ['git', 'tag', '-a', version_number,\n '-m', 'Tagging %s release' % version_number],\n )",
"def cmake_release(session):\n _cmake(session, BUILD_TYPE_RELEASE)",
"def process_release(vb, options):\n if options.release_type:\n vb.set_release(type=options.release_type)\n\n if options.release_stack:\n vb.set_release(stack=options.release_stack)\n\n if options.release_version:\n vb.set_release(version=options.release_version)\n\n if options.release_build:\n vb.set_release(build=options.release_build)\n\n if options.release_compatible:\n vb.set_release(compatible=options.release_compatible)\n\n if options.release_notes:\n vb.set_release(notes=options.release_notes)\n\n if options.release_display:\n vb.set_release(display=options.release_display)\n\n if options.release_package_version:\n vb.set_release(package_version=options.release_package_version)",
"def release(c, dry_run=False):\n tox_args = \"--skip-pkg-install -e py37\" if not dry_run else \"\"\n c.run(f\"tox {tox_args}\")\n dry = \"--dry-run\" if dry_run else \"\"\n c.run(f\"bump2version {dry} --verbose patch\")\n\n if not dry_run:\n c.run(\"git push --tags\")",
"def rollback():\n current_timestamp = current()\n previous_timestamp = previous()\n\n if previous_timestamp:\n execute(symlink, *(previous_timestamp, ))\n run('rm -rf %s' % os.path.join(env.releases_dir, current_timestamp))",
"def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)",
"def delete_release(ctx, name):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Deleting release...', break_line=False)\n gh.delete_release(name=name)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise",
"def rollback_django(ref=None, debug=False, dirty=False):\n if has_version_info():\n #\n # To roll back we need to fetch the existing version, execute the\n # database rollback, and then do a deploy of a specific version\n #\n create_virtualenv()\n\n # Copy the new code... the one we want to back out, as we need the\n # migrations from this\n operations.fetch_render_copy(ref, debug, dirty)\n\n # Rollback the database\n migratedb(True)\n\n # Get the old code\n del env['tempdir']\n operations.fetch_render_copy(env.scm_tag[\"rollback\"], debug, dirty, True)\n\n pip_requirements()\n refresh_wsgi()\n\n else:\n abort(\"No version info present to allow rollback\")",
"def test_finish_release_merge_conflict_tag(self):\n version_filename = 'VERSION'\n new_version = '1.1\\n'\n\n gitflow = GitFlow(self.repo).init()\n fmgr = FeatureBranchManager(gitflow)\n fmgr.finish('even')\n fake_commit(self.repo, 'Overwrite version',\n filename=version_filename,\n change=new_version)\n\n # verify that the tag does not yet exist\n # \"v\" comes form \"versiontag\" prefix in the gitflow config for the \"release\" fixture\n self.assertNotIn('v1.0', self.repo.tags)\n\n mgr = ReleaseBranchManager(gitflow)\n taginfo = dict(\n message='Tagging version 1.0',\n )\n self.assertRaises(MergeError,\n mgr.finish, '1.0', tagging_info=taginfo)\n\n # verify that the tag exists, even though there was a failed merge\n self.assertIn('v1.0', self.repo.tags)\n\n # resolve the conflict\n # this is in favor of the change on develop\n write_file(filename=version_filename,\n append=False,\n change=new_version)\n gitflow.git.add(version_filename)\n gitflow.git.commit('-F.git/MERGE_MSG')\n # the release branch is still here\n self.assertIn('rel/1.0',\n [b.name for b in self.repo.branches])\n # finish the release again\n # this should skip the tagging, since that part previously succeeded\n mgr.finish('1.0', tagging_info=taginfo)\n # now the release branch is gone\n self.assertNotIn('rel/1.0',\n [b.name for b in self.repo.branches])\n\n # verify that the tag still exists\n self.assertIn('v1.0', self.repo.tags)",
"def release(filepath, github_account, force=False):\n repo = _git.clone_from_github(\n _REPO_PATH, join(filepath, _REPO_NAME), github_account=github_account)\n latest_tag = repo.latest_tag()\n version = _Version(latest_tag)\n if not _common.check_prerelease(repo, latest_tag, github_account, force):\n return\n _run_tests(repo)\n version.bump_minor()\n new_version = str(version)\n repo.tag(new_version)\n repo.push(tags=True)",
"def releaser_middle(data):\n\n import os\n import sys\n\n from zest.releaser.git import Git\n from zest.releaser.release import Releaser\n\n # Copied verbatim from zest.releaser, but with the cmd string modified to\n # use the -s option to create a signed tag\n def _my_create_tag(self, version):\n msg = \"Tagging %s\" % (version,)\n cmd = 'git tag -s %s -m \"%s\"' % (version, msg)\n if os.path.isdir('.git/svn'):\n print_(\"\\nEXPERIMENTAL support for git-svn tagging!\\n\")\n cur_branch = open('.git/HEAD').read().strip().split('/')[-1]\n print_(\"You are on branch %s.\" % (cur_branch,))\n if cur_branch != 'master':\n print_(\"Only the master branch is supported for git-svn \"\n \"tagging.\")\n print_(\"Please tag yourself.\")\n print_(\"'git tag' needs to list tag named %s.\" % (version,))\n sys.exit()\n cmd = [cmd]\n local_head = open('.git/refs/heads/master').read()\n trunk = open('.git/refs/remotes/trunk').read()\n if local_head != trunk:\n print_(\"Your local master diverges from trunk.\\n\")\n # dcommit before local tagging\n cmd.insert(0, 'git svn dcommit')\n # create tag in svn\n cmd.append('git svn tag -m \"%s\" %s' % (msg, version))\n return cmd\n\n # Similarly copied from zer.releaser to support use of 'v' in front\n # of the version number\n def _my_make_tag(self):\n from zest.releaser import utils\n from os import system\n\n if self.data['tag_already_exists']:\n return\n cmds = self.vcs.cmd_create_tag(self.data['version'])\n if not isinstance(cmds, list):\n cmds = [cmds]\n if len(cmds) == 1:\n print_(\"Tag needed to proceed, you can use the following command:\")\n for cmd in cmds:\n print_(cmd)\n if utils.ask(\"Run this command\"):\n print_(system(cmd))\n else:\n # all commands are needed in order to proceed normally\n print_(\"Please create a tag for %s yourself and rerun.\" % \\\n (self.data['version'],))\n sys.exit()\n if not self.vcs.tag_exists('v' + self.data['version']):\n print_(\"\\nFailed to create tag %s!\" % (self.data['version'],))\n sys.exit()\n\n # Normally all this does is to return '--formats=zip', which is currently\n # hard-coded as an option to always add to the sdist command; they ought to\n # make this actually optional\n def _my_sdist_options(self):\n return ''\n\n Git.cmd_create_tag = _my_create_tag\n Releaser._make_tag = _my_make_tag\n Releaser._sdist_options = _my_sdist_options",
"def release(self) -> pulumi.Output['outputs.ReleaseResponse']:\n return pulumi.get(self, \"release\")",
"def test_release_deployment_run(self):\n pass",
"def release(self, location): \n self.LogCommand() \n tclcode = \"stc::release \" + location\n\n result = self.Exec(tclcode) \n logging.debug(\" - Python result - \" + str(result))\n return result",
"async def test_finish_release_no_release(doof, repo_info, event_loop, mocker):\n get_release_pr_mock = mocker.patch('bot.get_release_pr', autospec=True, return_value=None)\n with pytest.raises(ReleaseException) as ex:\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=['finish', 'release'],\n loop=event_loop,\n )\n assert 'No release currently in progress' in ex.value.args[0]\n org, repo = get_org_and_repo(repo_info.repo_url)\n get_release_pr_mock.assert_called_once_with(GITHUB_ACCESS, org, repo)",
"def bump_postrelease(self: _R, inc: int = 1) -> _R:\n post = (VersionParts.POST, max(inc, 1))\n base_post: Optional[Tuple[str, int]] = self._version.post\n if base_post:\n post = (VersionParts.POST, max(base_post[1], 1) + inc)\n base = BaseVersion(\n epoch=0,\n release=self._version.release,\n pre=None,\n post=post,\n dev=None,\n local=None,\n )\n return self._replace(base)",
"def release(ctx, releaser, group, commit, app, yes, silent):\n if not group and not app:\n click.echo('Error: Missing option \"--group\" / \"-g\" or flag \"--app\" / \"-a\".')\n exit(4)\n\n if not releaser.is_valid_commit(commit):\n click.echo(f\"Invalid release commit: {commit}\")\n exit(2)\n if group and not releaser.is_valid_release_group(group):\n click.echo(f\"Invalid release group: {group}\")\n exit(3)\n\n if not silent:\n ctx.invoke(info)\n click.echo(\"Devices:\")\n ctx.invoke(show_devices_status)\n click.echo()\n\n # TODO: this doesn't account for both -a and -g being set\n group_name = f'release group \"{group}\"' if group else \"app\"\n\n confirm_text = f'Are you sure you want to set {group_name} to \"{commit}\"?'\n if not yes and not click.confirm(confirm_text):\n click.echo(\"Cancelled!\")\n exit(1)\n releaser.set_release(commit, group, app)",
"def util_sign_release():\n os.chdir(REPO_PATH)\n dr = DebRepo()\n keyname = dr.read_keyname()\n out, err = dr.sign_release(keyname)\n print(out)\n print(err)",
"def action_rollback(branch_dir, branch_props):\n\n # Make sure the revision arguments are present\n if not opts[\"revision\"]:\n error(\"The '-r' option is mandatory for rollback\")\n\n # Check branch directory is ready for being modified\n check_dir_clean(branch_dir)\n\n # Extract the integration info for the branch_dir\n branch_props = get_merge_props(branch_dir)\n # Get the list of all revisions already merged into this source-pathid.\n merged_revs = merge_props_to_revision_set(branch_props,\n opts[\"source-pathid\"])\n\n # At which revision was the src created?\n oldest_src_rev = get_created_rev(opts[\"source-url\"])\n src_pre_exist_range = RevisionSet(\"1-%d\" % oldest_src_rev)\n\n # Limit to revisions specified by -r (if any)\n revs = merged_revs & RevisionSet(opts[\"revision\"])\n\n # make sure there's some revision to rollback\n if not revs:\n report(\"Nothing to rollback in revision range r%s\" % opts[\"revision\"])\n return\n\n # If even one specified revision lies outside the lifetime of the\n # merge source, error out.\n if revs & src_pre_exist_range:\n err_str = \"Specified revision range falls out of the rollback range.\\n\"\n err_str += \"%s was created at r%d\" % (opts[\"source-pathid\"],\n oldest_src_rev)\n error(err_str)\n\n record_only = opts[\"record-only\"]\n\n if record_only:\n report('recording rollback of revision(s) %s from \"%s\"' %\n (revs, opts[\"source-url\"]))\n else:\n report('rollback of revision(s) %s from \"%s\"' %\n (revs, opts[\"source-url\"]))\n\n # Do the reverse merge(s). Note: the starting revision number\n # to 'svn merge' is NOT inclusive so we have to subtract one from start.\n # We try to keep the number of merge operations as low as possible,\n # because it is faster and reduces the number of conflicts.\n rollback_intervals = minimal_merge_intervals(revs, [])\n # rollback in the reverse order of merge\n rollback_intervals.reverse()\n for start, end in rollback_intervals:\n if not record_only:\n # Do the merge\n svn_command(\"merge --force -r %d:%d %s %s\" % \\\n (end, start - 1, opts[\"source-url\"], branch_dir))\n\n # Write out commit message if desired\n # calculate the phantom revs first\n if opts[\"commit-file\"]:\n f = open(opts[\"commit-file\"], \"w\")\n if record_only:\n f.write('Recorded rollback of revisions %s via %s from \\n' % \\\n (revs , NAME))\n else:\n f.write('Rolled back revisions %s via %s from \\n' % \\\n (revs , NAME))\n f.write('%s\\n' % opts[\"source-url\"])\n\n f.close()\n report('wrote commit message to \"%s\"' % opts[\"commit-file\"])\n\n # Update the set of merged revisions.\n merged_revs = merged_revs - revs\n branch_props[opts[\"source-pathid\"]] = str(merged_revs)\n set_merge_props(branch_dir, branch_props)",
"def cancel_release(release_id, fail=False):\n logger.info(\n f\"{'Canceling' if not fail else 'Failing'} release {release_id}\"\n )\n\n release = Release.objects.get(kf_id=release_id)\n studies = [study.kf_id for study in release.studies.all()]\n\n # In case another task has already canceled or failed this release\n if release.state in [\"canceled\", \"failed\"]:\n logger.warn(f\"Release is already marked as {release.state}\")\n return\n\n for task in release.tasks.all():\n # The task may have been the one to cause the cancel/fail\n # Don't try to change its state if it's already canceled/failed\n if task.state in [\"canceled\", \"failed\", \"rejected\"]:\n continue\n\n body = {\n \"action\": \"cancel\",\n \"task_id\": task.kf_id,\n \"release_id\": release.kf_id,\n \"studies\": studies,\n }\n try:\n requests.post(\n task.task_service.url + \"/tasks\",\n headers=headers(),\n json=body,\n timeout=settings.REQUEST_TIMEOUT,\n )\n except requests.exceptions.RequestException as err:\n ev = Event(\n event_type=\"error\",\n message=f\"request to cancel task failed: {err}\",\n release=release,\n task=task,\n task_service=task.task_service,\n )\n ev.save()\n\n task.cancel()\n task.save()\n\n try:\n if fail:\n release.failed()\n else:\n release.canceled()\n except django_fsm.TransitionNotAllowed as err:\n logger.info(f\"Tried to make an invalid transition: {err}\")\n release.save()",
"def publish_release(ctx):\n rel = _get_release()\n rel.update_release(rel.title, rel.raw_data[\"body\"], draft=False)",
"def _doReleaseBuild(self, farbconfig):\n print \"Building all releases ...\"\n try:\n rbr = runner.ReleaseBuildRunner(farbconfig)\n rbr.run()\n print \"Release build completed.\"\n except runner.ReleaseBuildRunnerError, e:\n print >>sys.stderr, e\n sys.exit(1)",
"def test_create_release(self):\n releases_before = self.hello_world_project.get_releases()\n latest_release = releases_before[0].tag_name\n count_before = len(releases_before)\n increased_release = \".\".join(\n [\n latest_release.rsplit(\".\", 1)[0],\n str(int(latest_release.rsplit(\".\", 1)[1]) + 1),\n ]\n )\n release = self.hello_world_project.create_release(\n tag=increased_release, name=\"test\", message=\"testing release\"\n )\n count_after = len(self.hello_world_project.get_releases())\n assert release.tag_name == increased_release\n assert release.title == \"test\"\n assert release.body == \"testing release\"\n assert count_before + 1 == count_after",
"def __Release(self, command, release, description):\n description = _NormalizeDescription(description)\n path = command.GetPath()\n if len(path) >= 2 and path[1] == 'internal':\n release = 'INTERNAL'\n return release, description"
]
| [
"0.6151928",
"0.6130211",
"0.6007178",
"0.59868795",
"0.58979225",
"0.5823386",
"0.5756305",
"0.5547832",
"0.5530814",
"0.5437972",
"0.5422568",
"0.53532875",
"0.53283924",
"0.5260167",
"0.5257902",
"0.52491486",
"0.5245861",
"0.5234293",
"0.5214079",
"0.5172153",
"0.5157206",
"0.51311827",
"0.51197684",
"0.51020414",
"0.5083999",
"0.50472426",
"0.5045366",
"0.504483",
"0.50295895",
"0.5027317"
]
| 0.7707624 | 0 |
menu menu page logic. displaying all the products in our DB | def menu(request):
cart = cartData(request)
cart_items = cart['cart_items']
# order = cart['order']
# items = cart['items']
# Get all our object
products = BobaProduct.objects.all()
# Dictionary to hold our products
context = {"products": products, "cart_items": cart_items}
return render(request, 'store/menu.html', context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __products_menu(self):\n log.debug(\"Displaying __products_menu\")\n # Get the products list from the db\n products = self.session.query(db.Product).filter_by(deleted=False).all()\n # Create a list of product names\n product_names = [product.name for product in products]\n # Insert at the start of the list the add product option, the remove product option and the Cancel option\n product_names.insert(0, self.loc.get(\"menu_all_cancel\"))\n product_names.insert(1, self.loc.get(\"menu_add_product\"))\n product_names.insert(2, self.loc.get(\"menu_delete_product\"))\n # Create a keyboard using the product names\n keyboard = [[telegram.KeyboardButton(product_name)] for product_name in product_names]\n # Send the previously created keyboard to the user (ensuring it can be clicked only 1 time)\n self.bot.send_message(self.chat.id, self.loc.get(\"conversation_admin_select_product\"),\n reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))\n # Wait for a reply from the user\n selection = self.__wait_for_specific_message(product_names, cancellable=True)\n # If the user has selected the Cancel option...\n if isinstance(selection, CancelSignal):\n # Exit the menu\n return\n # If the user has selected the Add Product option...\n elif selection == self.loc.get(\"menu_add_product\"):\n # Open the add product menu\n self.__edit_product_menu()\n # If the user has selected the Remove Product option...\n elif selection == self.loc.get(\"menu_delete_product\"):\n # Open the delete product menu\n self.__delete_product_menu()\n # If the user has selected a product\n else:\n # Find the selected product\n product = self.session.query(db.Product).filter_by(name=selection, deleted=False).one()\n # Open the edit menu for that specific product\n self.__edit_product_menu(product=product)",
"def products():\n\n\treturn render_template(\"products.html\")",
"def products(request):\n\n return render(request, \"core/products.html\", {\n \"products\": Product.objects.all()\n })",
"def product_management():\n sort_by = request.args.get(\"sort\")\n\n \"\"\"\n Sort method is from https://docs.mongodb.com/manual/reference/\n method/cursor.sort/index.html\n \"\"\"\n if sort_by:\n products = list(mongo.db.products.find().sort(sort_items(sort_by)))\n\n else:\n products = list(mongo.db.products.find().sort('name', 1))\n\n \"\"\"\n Pagination code is from https://gist.github.com/mozillazg/\n 69fb40067ae6d80386e10e105e6803c9\n \"\"\"\n page, per_page, offset = get_page_args(\n page_parameter='page', per_page_parameter='per_page', per_page=10)\n pagination_products = paginate_items(products, offset, per_page)\n pagination = paginate(products, page, per_page)\n\n return render_template(\n \"product_management.html\",\n page_title=\"Product Management\",\n products=pagination_products,\n pagination=pagination)",
"def products(request):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)",
"def open_products_page(catalog_menu):\n catalog_menu.open_products_page()",
"def all_products(request):\n products = Product.objects.all()\n return render(request, \"products.html\", {\"products\": products})",
"def all_products(request):\n\n products = Product.objects.all()\n return render(request, 'products.html', {'products': products})",
"def __order_menu(self):\n log.debug(\"Displaying __order_menu\")\n # Get the products list from the db\n products = self.session.query(db.Product).filter_by(deleted=False).all()\n # Create a dict to be used as 'cart'\n # The key is the message id of the product list\n cart: Dict[List[db.Product, int]] = {}\n # Initialize the products list\n for product in products:\n # If the product is not for sale, don't display it\n if product.price is None:\n continue\n # Send the message without the keyboard to get the message id\n message = product.send_as_message(w=self, chat_id=self.chat.id)\n # Add the product to the cart\n cart[message['result']['message_id']] = [product, 0]\n # Create the inline keyboard to add the product to the cart\n inline_keyboard = telegram.InlineKeyboardMarkup(\n [[telegram.InlineKeyboardButton(self.loc.get(\"menu_add_to_cart\"), callback_data=\"cart_add\")]]\n )\n # Edit the sent message and add the inline keyboard\n if product.image is None:\n self.bot.edit_message_text(chat_id=self.chat.id,\n message_id=message['result']['message_id'],\n text=product.text(w=self),\n reply_markup=inline_keyboard)\n else:\n self.bot.edit_message_caption(chat_id=self.chat.id,\n message_id=message['result']['message_id'],\n caption=product.text(w=self),\n reply_markup=inline_keyboard)\n # Create the keyboard with the cancel button\n inline_keyboard = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get(\"menu_all_cancel\"),\n callback_data=\"cart_cancel\")]])\n # Send a message containing the button to cancel or pay\n final_msg = self.bot.send_message(self.chat.id,\n self.loc.get(\"conversation_cart_actions\"),\n reply_markup=inline_keyboard)\n # Wait for user input\n while True:\n callback = self.__wait_for_inlinekeyboard_callback()\n # React to the user input\n # If the cancel button has been pressed...\n if callback.data == \"cart_cancel\":\n # Stop waiting for user input and go back to the previous menu\n return\n # If a Add to Cart button has been pressed...\n elif callback.data == \"cart_add\":\n # Get the selected product, ensuring it exists\n p = cart.get(callback.message.message_id)\n if p is None:\n continue\n product = p[0]\n # Add 1 copy to the cart\n cart[callback.message.message_id][1] += 1\n # Create the product inline keyboard\n product_inline_keyboard = telegram.InlineKeyboardMarkup(\n [\n [telegram.InlineKeyboardButton(self.loc.get(\"menu_add_to_cart\"),\n callback_data=\"cart_add\"),\n telegram.InlineKeyboardButton(self.loc.get(\"menu_remove_from_cart\"),\n callback_data=\"cart_remove\")]\n ])\n # Create the final inline keyboard\n final_inline_keyboard = telegram.InlineKeyboardMarkup(\n [\n [telegram.InlineKeyboardButton(self.loc.get(\"menu_all_cancel\"), callback_data=\"cart_cancel\")],\n [telegram.InlineKeyboardButton(self.loc.get(\"menu_done\"), callback_data=\"cart_done\")]\n ])\n # Edit both the product and the final message\n if product.image is None:\n self.bot.edit_message_text(chat_id=self.chat.id,\n message_id=callback.message.message_id,\n text=product.text(w=self,\n cart_qty=cart[callback.message.message_id][1]),\n reply_markup=product_inline_keyboard)\n else:\n self.bot.edit_message_caption(chat_id=self.chat.id,\n message_id=callback.message.message_id,\n caption=product.text(w=self,\n cart_qty=cart[callback.message.message_id][1]),\n reply_markup=product_inline_keyboard)\n\n self.bot.edit_message_text(\n chat_id=self.chat.id,\n message_id=final_msg.message_id,\n text=self.loc.get(\"conversation_confirm_cart\",\n 
product_list=self.__get_cart_summary(cart),\n total_cost=str(self.__get_cart_value(cart))),\n reply_markup=final_inline_keyboard)\n # If the Remove from cart button has been pressed...\n elif callback.data == \"cart_remove\":\n # Get the selected product, ensuring it exists\n p = cart.get(callback.message.message_id)\n if p is None:\n continue\n product = p[0]\n # Remove 1 copy from the cart\n if cart[callback.message.message_id][1] > 0:\n cart[callback.message.message_id][1] -= 1\n else:\n continue\n # Create the product inline keyboard\n product_inline_list = [[telegram.InlineKeyboardButton(self.loc.get(\"menu_add_to_cart\"),\n callback_data=\"cart_add\")]]\n if cart[callback.message.message_id][1] > 0:\n product_inline_list[0].append(telegram.InlineKeyboardButton(self.loc.get(\"menu_remove_from_cart\"),\n callback_data=\"cart_remove\"))\n product_inline_keyboard = telegram.InlineKeyboardMarkup(product_inline_list)\n # Create the final inline keyboard\n final_inline_list = [[telegram.InlineKeyboardButton(self.loc.get(\"menu_all_cancel\"),\n callback_data=\"cart_cancel\")]]\n for product_id in cart:\n if cart[product_id][1] > 0:\n final_inline_list.append([telegram.InlineKeyboardButton(self.loc.get(\"menu_done\"),\n callback_data=\"cart_done\")])\n break\n final_inline_keyboard = telegram.InlineKeyboardMarkup(final_inline_list)\n # Edit the product message\n if product.image is None:\n self.bot.edit_message_text(chat_id=self.chat.id, message_id=callback.message.message_id,\n text=product.text(w=self,\n cart_qty=cart[callback.message.message_id][1]),\n reply_markup=product_inline_keyboard)\n else:\n self.bot.edit_message_caption(chat_id=self.chat.id,\n message_id=callback.message.message_id,\n caption=product.text(w=self,\n cart_qty=cart[callback.message.message_id][1]),\n reply_markup=product_inline_keyboard)\n\n self.bot.edit_message_text(\n chat_id=self.chat.id,\n message_id=final_msg.message_id,\n text=self.loc.get(\"conversation_confirm_cart\",\n product_list=self.__get_cart_summary(cart),\n total_cost=str(self.__get_cart_value(cart))),\n reply_markup=final_inline_keyboard)\n # If the done button has been pressed...\n elif callback.data == \"cart_done\":\n # End the loop\n break\n # Create an inline keyboard with a single skip button\n cancel = telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton(self.loc.get(\"menu_skip\"),\n callback_data=\"cmd_cancel\")]])\n # Ask if the user wants to add notes to the order\n self.bot.send_message(self.chat.id, self.loc.get(\"ask_order_notes\"), reply_markup=cancel)\n # Wait for user input\n notes = self.__wait_for_regex(r\"(.*)\", cancellable=True)\n # Create a new Order\n order = db.Order(user=self.user,\n creation_date=datetime.datetime.now(),\n notes=notes if not isinstance(notes, CancelSignal) else \"\")\n # Add the record to the session and get an ID\n self.session.add(order)\n self.session.flush()\n # For each product added to the cart, create a new OrderItem\n for product in cart:\n # Create {quantity} new OrderItems\n for i in range(0, cart[product][1]):\n order_item = db.OrderItem(product=cart[product][0],\n order_id=order.order_id)\n self.session.add(order_item)\n # Ensure the user has enough credit to make the purchase\n credit_required = self.__get_cart_value(cart) - self.user.credit\n # Notify user in case of insufficient credit\n if credit_required > 0:\n self.bot.send_message(self.chat.id, self.loc.get(\"error_not_enough_credit\"))\n # Suggest payment for missing credit value if configuration allows refill\n if 
self.cfg.ccard[\"credit_card_token\"] != \"\" \\\n and self.cfg.appearance[\"refill_on_checkout\"] \\\n and self.Price(self.cfg.ccard[\"min_amount\"]) <= \\\n credit_required <= \\\n self.Price(self.cfg.ccard[\"max_amount\"]):\n self.__make_payment(self.Price(credit_required))\n # If afer requested payment credit is still insufficient (either payment failure or cancel)\n if self.user.credit < self.__get_cart_value(cart):\n # Rollback all the changes\n self.session.rollback()\n else:\n # User has credit and valid order, perform transaction now\n self.__order_transaction(order=order, value=-int(self.__get_cart_value(cart)))",
"def listall():\n # I like to define the query separately.\n query = db.product\n\n # List of additional links.\n links = []\n \n links.append(\n dict(header = \"Profit\",\n body = lambda row : produce_profit(row)\n )\n )\n links.append(\n dict(header = \"\",\n body = lambda row : produce_star_btn(row.id)\n )\n )\n links.append(\n dict(header = \"\",\n body = lambda row : produce_pls_minus_btn(row)\n )\n )\n links.append(\n dict(header='',\n body = lambda row : produce_poster_btns(row.id)\n \n )\n )\n \n # Let's get rid of some fields in the add form.\n if len(request.args) > 0 and request.args[0] == 'new':\n db.product.prod_poster.readable = False\n db.product.prod_post_time.writable = False\n db.product.prod_sold.writable = False\n db.product.prod_starred.readable, db.product.prod_starred.writable =False, False\n # Grid definition.\n grid = SQLFORM.grid(\n query, \n field_id = db.product.id, # Useful, not mandatory.\n fields = [db.product.id, db.product.prod_name,\n db.product.prod_in_stock, db.product.prod_sold,\n db.product.prod_price, db.product.prod_cost], \n headers = {'product.prod_name': 'Product Name',\n 'product.prod_in_stock':'In Stock',\n 'product.prod_sold':'Sold', \n 'product.prod_price':'Price', \n 'product.prod_cost':'Cost'},\n links = links,\n # And now some generic defaults.\n details=False,\n create=True, editable=False, deletable=False,\n csv=False, \n user_signature=True, # We don't need it as one cannot take actions directly from the form.\n )\n return dict(grid=grid)",
"def listProducts(request):\n form_product = forms.ProductForm()\n Products = productBll.listProduct()\n paginator = Paginator(Products, LIST_COUNT)\n \n page = request.GET.get('page')\n if page == None :\n page=1\n \n try:\n ProductList = paginator.page(page)\n except PageNotAnInteger:\n ProductList = paginator.page(1)\n except EmptyPage:\n ProductList = paginator.page(paginator.num_pages)\n return render_to_response('admin/product/listProducts.html',{'form': form_product,'IMAGE_FILE_PATH':IMAGE_FILE_PATH,'ProductList':ProductList},context_instance=RequestContext(request))",
"def box_menu(request):\n\n products = Product.objects.all()\n sizes = Size.objects.all()\n forsixs = Forsix.objects.all()\n categories = None\n\n if request.GET:\n category_name = request.GET['box']\n products = products.filter(category__name=category_name)\n categories = Category.objects.filter(name=category_name)\n\n context = {\n 'products': products,\n 'forsixs': forsixs,\n 'sizes': sizes,\n 'categories_selected': categories,\n }\n\n return render(request, 'products/shop.html', context)",
"def index(request):\n\n products = Top_selling_product.objects.all()\n context = {'products':products}\n\n return render(request, 'home/index.html',context)",
"def buns_menu(request):\n\n products = Product.objects.all()\n sizes = Size.objects.all()\n forsixs = Forsix.objects.all()\n categories = None\n\n if request.GET:\n category_name = request.GET['buns']\n products = products.filter(category__name=category_name)\n categories = Category.objects.filter(name=category_name)\n\n context = {\n 'products': products,\n 'forsixs': forsixs,\n 'sizes': sizes,\n 'categories_selected': categories,\n }\n\n return render(request, 'products/shop.html', context)",
"def all_products(request):\n\n products = Product.objects.all()\n categories = None\n query = None\n sort = None\n direction = None\n heading = 'Products & Services'\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products = products.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products = products.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n products = products.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n if len(categories) == 1:\n heading = categories[0].friendly_name\n else:\n for category in categories:\n if 'products' in category.name:\n heading = 'Products & Services'\n break\n else:\n if 'services' in category.name:\n heading = 'Services'\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"Please enter search criteria!\")\n return redirect(reverse('home'))\n\n queries = Q(\n name__icontains=query) | Q(description__icontains=query)\n products = products.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'products': products,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n 'heading': heading,\n }\n\n return render(\n request, 'products/products.html', context)",
"def all_products_view(request):\n products = Product.objects.all()\n paginator = Paginator(products, 6)\n page_number = request.GET.get('page', 1)\n page = paginator.page(page_number)\n\n \"\"\"render a products html page and within that page we will have access to products, so all_products\"\"\"\n\n if page.has_next():\n\n next_url = f'?page={page.next_page_number()}'\n\n else:\n\n next_url = ''\n\n if page.has_previous():\n\n prev_url = f'?page={page.previous_page_number()}'\n\n else:\n\n prev_url = ''\n\n \n return render(request, 'products.html', {'page': page, 'next_page_url': next_url, 'prev_page_url': prev_url})",
"def view_products():\n min_id = (Product.select().order_by(Product.product_id.asc()).get()).product_id\n max_id = (Product.select().order_by(Product.product_id.desc()).get()).product_id\n print(f\"\\nPlease select id between {min_id} & {max_id}\")\n id = int(input(\"Select product id: \"))\n while id not in range(min_id, max_id+1):\n print(\"Your selection must be between {} and {}\".format(min_id, max_id))\n id = int(input(\"Select product id: \"))\n print(f\"\"\"\\n-Product: {Product.get_by_id(id).product_name}\n-Quantity: {Product.get_by_id(id).product_quantity}\n-Price: {Product.get_by_id(id).product_price} cents\n-Date updated: {Product.get_by_id(id).date_updated}\\n\"\"\")\n input(\"\\nPress ENTER to continue\")\n clear()",
"def cakes_menu(request):\n\n products = Product.objects.all()\n sizes = Size.objects.all()\n forsixs = Forsix.objects.all()\n categories = None\n\n if request.GET:\n category_name = request.GET['cakes_deserts'].split(',')\n products = products.filter(category__name__in=category_name)\n categories = Category.objects.filter(name__in=category_name)\n\n context = {\n 'products': products,\n 'forsixs': forsixs,\n 'sizes': sizes,\n 'categories_selected': categories,\n }\n\n return render(request, 'products/shop.html', context)",
"def display(auth_context):\n\n products = product_catalog.list_products()\n # Get promoted products recommended by the AutoML model.\n promos = product_catalog.get_promos()\n return render_template('product_catalog.html',\n products=products,\n promos=promos,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)",
"def specialoccasion(request):\n products = Product.objects.all()\n return render(request, \"specialoccasion.html\", {\"products\": products})",
"def get_all_products(self):\n\t\tpass",
"def product_list_view(request):\n queryset = Product.objects.all()\n context = {\n \"object_list\": queryset\n }\n\n return render(request, \"products/product_list.html\", context)",
"def all_products(request):\n\n products_list = Product.objects.all().order_by('id')\n query = None\n collections = None\n collection_page = None\n sort = None\n direction = None\n query_page = None\n \n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products_list = products_list.annotate(lower_name=Lower('name'))\n\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products_list = products_list.order_by(sortkey)\n\n if 'collection' in request.GET:\n collections = request.GET['collection'].split(',')\n products_list = products_list.filter(collection__name__in=collections)\n collections = Collection.objects.filter(name__in=collections)\n collection_page = request.GET['collection']\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search criteria!\\\n You were automatically redirected to All Products page.\")\n return redirect(reverse('products'))\n query_page = request.GET['q']\n \n queries = Q(name__icontains=query) | Q(description__icontains=query)\n products_list = products_list.filter(queries)\n\n if 'on_sale' in request.GET:\n products_list = products_list.filter(on_sale=True)\n \n current_sorting = f'{sort}_{direction}'\n total = len(products_list)\n paginator = Paginator(products_list, 12)\n page_number = request.GET.get('page')\n products = paginator.get_page(page_number)\n\n context = {\n 'products': products,\n 'current_collections': collections,\n 'collection_page': collection_page,\n 'search_term': query,\n 'query_page': query_page,\n 'current_sorting': current_sorting,\n 'sort': sort,\n 'direction': direction,\n 'total': total,\n }\n\n return render(request, 'products/products.html', context)",
"def menu_products(self, app: object, entry: str) -> None:\n while True:\n if self.back:\n break\n else:\n self.cmd_products = app.view_prod(entry)\n print(\"-\" * 50)\n for key, element in self.cmd_products.items():\n print(f\"{key} : {element}\")\n entry = input(\n \"\\nEntrer un chiffre pour sélectionner le produit correspondant : \"\n )\n if entry in self.cmd_products:\n if entry == \"0\":\n break\n else:\n self.menu_saving(app, entry)\n else:\n print(\"\\nCommande incorrecte\")",
"def search_prod(self):\n\n # Connection to the SQL database\n self.db_connect = pymysql.connect(\"localhost\",\"root\",\"\",\"mydb\")\n db = self.db_connect.cursor()\n \n # Category selection display loop\n choice = 0\n while choice is 0:\n choice = input(\"\\nVoulez vous afficher la liste de catégories ? O/n \")\n if choice.upper() == \"N\":\n print(\"\\nRetour au menu principal\")\n break\n elif choice.upper() == \"O\":\n choice_cat = True\n print(\"\\n Liste des catégories:\\n\")\n # Defining the query to return the list of categories\n show_list_cat = \"SELECT category_id, category_name \\\n FROM `mydb`.`Categories`\"\n # Command to execute the request\n db.execute(show_list_cat)\n # Variable assignment loop\n for ligne in db.fetchall():\n list_id_cat = ligne[0]\n list_name_cat = ligne[1]\n # View category list\n print(list_id_cat,'-', list_name_cat)\n\n # Product Selection Loop\n choice_cat = 0\n while choice_cat == 0:\n choice_cat = input(\"\\nSélectionner une catégorie de produit (Q pour annuler): \")\n\n if choice_cat.upper() == \"Q\":\n choice = False\n break\n\n elif choice_cat.isdigit() == False or int(choice_cat) >= 29 \\\n or int(choice_cat) == 0:\n print(\"\\nMerci de bien vouloir entrer un chiffre compris entre 1 et 28\\n\")\n time.sleep(2)\n choice_cat = 0\n \n # Definition of the request to return \n # the list of products of the selected category\n show_list_prod_select = f\"SELECT product_id, \\\n product_name, \\\n product_nutri \\\n FROM `mydb`.`Products` \\\n WHERE Categories_category_id = {choice_cat}\"\n \n # Execution command of the SQL query\n db.execute(show_list_prod_select)\n for ligne in db.fetchall():\n list_id_prod = ligne[0]\n list_name_prod = ligne[1]\n nutriscore_prod = ligne[2]\n # Uppercase display\n nutriscore_prod = nutriscore_prod.upper()\n # Display of the list of products and nutriscores\n print(list_id_prod,\"-\", list_name_prod, \\\n \"- Nutriscore:\",nutriscore_prod)\n \n # SQL query to determine the maximum and minimum value\n # the product ID for the selected category\n vmax = f\"SELECT MAX(product_id) \\\n FROM `mydb`.`Products` \\\n WHERE Categories_category_id = {choice_cat}\"\n db.execute(vmax)\n for maxv in db.fetchall():\n max_id_prod = maxv[0]\n\n vmin = f\"SELECT MIN(product_id) \\\n FROM `mydb`.`Products` \\\n WHERE Categories_category_id = {choice_cat}\"\n db.execute(vmin)\n for minv in db.fetchall():\n min_id_prod = minv[0]\n \n # Display loop for selecting the product to substitute\n choice_sub = 0\n while choice_sub == 0:\n choice_sub = input(\"\\nSélectionner un produit à substituer (Q pour annuler): \")\n\n if choice_sub.upper() == \"Q\":\n choice = 0\n break\n # Selects the product selection choice for the selected category\n elif choice_sub.isdigit() == False or \\\n int(choice_sub) > max_id_prod or \\\n int(choice_sub) < min_id_prod or int(choice_sub) == 0:\n print(\"\\nMerci de bien vouloir entrer un chiffre compris dans la liste des produits\")\n time.sleep(2)\n choice_sub = 0\n\n # Definition of the request to return the list of\n # Substitute products of the selected category\n show_list_sub_select = f\"SELECT * FROM `mydb`.`Products`\\\n WHERE product_id = {choice_sub}\"\n \n # Execution command of the SQL query\n db.execute(show_list_sub_select)\n for ligne_sub in db.fetchall():\n sub_name = ligne_sub[1]\n sub_nutri = ligne_sub[3]\n # Uppercase display\n sub_nutri = sub_nutri.upper()\n \n # Definition of the request to return the product \n # with the highest nutriscores of the selected category\n list_best_nutri = 
f\"SELECT * FROM `mydb`.`Products`\\\n WHERE Categories_category_id = {choice_cat} \\\n AND product_nutri <= '{sub_nutri}' \\\n ORDER BY product_nutri ASC\"\n \n db.execute(list_best_nutri)\n result = db.fetchall()\n best = (result)[0]\n bestnutri = (best)[3]\n # Uppercase display\n bestnutri = bestnutri.upper()\n # Case where the user chooses one of the products \n # with the highest nutriscore of the category\n if sub_nutri == bestnutri:\n print(\"\\nLe produit sélectionné a le meilleur nutriscore de cette catégorie\")\n print(\"Veuillez sélectionner un autre produit dans la liste\")\n choice_sub = 0\n # Case where the user selects a substitutable product \n else:\n best_id = (best)[0]\n best_name = (best)[1]\n best_nutriscore = (best)[3]\n best_nutriscore = best_nutriscore.upper()\n best_shop = (best)[2]\n best_url = (best)[4]\n # Returns the selected product and its substitute\n print(\"\\nProduit sélectionné:\",sub_name,\\\n \". Nutriscore:\",sub_nutri)\n print(\"Substitue:\",best_name,\". Nutriscore:\",\\\n best_nutriscore)\n print(\"Magasin(s):\",best_shop)\n print(\"Infos produit:\",best_url)\n # Backup loop in the SQL database\n save_choice1 = 0\n while save_choice1 is 0:\n save_choice1 = input(\"\\nSouhaitez-vous sauvegarder le produit dans la base ? O/n \")\n if save_choice1.upper() == \"N\":\n print(\"\\nRetour au menu principal\")\n time.sleep(2)\n break\n elif save_choice1.upper() == \"O\":\n # Insertion request in the \"Substitute\" table\n save_sub = \"INSERT IGNORE INTO \\\n `mydb`.`Substitute` \\\n (save_product_id, save_product_sub_name) \\\n VALUES ('{}','{}')\".format(best_id, sub_name)\n db.execute(save_sub)\n self.db_connect.commit()\n print(\"\\nLe produit a bien été sauvegardé dans vos favoris\")\n print(\"Retour au menu principal\")\n time.sleep(2)\n break",
"def products():\n username = session['username']\n api_key = session['api_key']\n url = 'https://consumernotebook.com/api/v1/products/?username={0}&apikey={1}'.format(username, api_key)\n r = requests.get(url)\n products = []\n if r.status_code != 200:\n error = \"{0} error. Are you sure you entered a valid API key?\".format(r.status_code)\n return render_template('products.html', error=error)\n else:\n products_json = json.loads(r.content)\n for product in products_json[u'objects']:\n products.append(product[u'title'])\n return render_template('products.html', products=products)",
"def shop_all(request):\n shop_items = Product.objects.all()\n categories = None\n query = None\n sort = None\n direction = None\n queried_category = None\n\n if request.GET:\n # checks whether a sort parameter exists and orders by selected\n # criteria if so\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey == 'lower_name'\n shop_items = shop_items.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n shop_items = shop_items.order_by(sortkey)\n\n # checks whether category parameter exists and splits categories\n # into a list and filters each one if it does\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n shop_items = shop_items.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n # Renders the category name on the pagewhen user views all items\n # in one category\n for category in categories:\n queried_category = category.friendly_name\n\n # checks whether search query exists and returns results containing\n # keywords\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search terms!\")\n return redirect(reverse('shop'))\n\n queries = Q(friendly_name__icontains=query) | Q(description__icontains=query)\n shop_items = shop_items.filter(queries)\n\n sort_by = f'{sort}_{direction}'\n\n context = {\n 'shop_items': shop_items,\n 'search_term': query,\n 'categories': categories,\n 'sort_by': sort_by,\n 'queried_category': queried_category,\n }\n\n return render(request, 'shop/shop.html', context)",
"def products_list(driver, login_action, open_products_page, products_page, logger):\n try:\n return products_page.all_products_list()\n except logger.on_exception(exception, driver):\n print(exception)",
"def processProductsRequest(self):\n\n\t\t# Use the simple page renderer to create the body content\n\t\treturn self.render_simple_page('Products')",
"def all_products(request):\n\n products = Product.objects.filter(is_holiday=False)\n query = None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products = products.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products = products.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n products = products.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search critera.\\\n Please try again.\")\n return redirect(reverse('products'))\n\n queries = Q(name__icontains=query) | \\\n Q(description__icontains=query)\n products = products.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'products': products,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'products/all_products.html', context)"
]
| [
"0.73752975",
"0.724429",
"0.69932497",
"0.6988591",
"0.6971659",
"0.692478",
"0.69133264",
"0.688549",
"0.673323",
"0.6712486",
"0.66611415",
"0.6580116",
"0.6567587",
"0.6560556",
"0.6493791",
"0.64319545",
"0.6419612",
"0.63676864",
"0.6316506",
"0.62987536",
"0.6239047",
"0.6234494",
"0.6226155",
"0.6171915",
"0.61562294",
"0.61268073",
"0.61087626",
"0.6108293",
"0.6106579",
"0.61042607"
]
| 0.7492175 | 0 |
guestChat If the user is not authenticated, they will be redirected to this site where they can input a guest name and enter the chatbox | def guestChat(request):
form = GuestChat()
if request.method == "POST":
form = GuestChat(request.POST)
if form.is_valid():
guestName = form.cleaned_data.get('guest_name')
return render(request, 'chat/room.html', {"guestName": guestName})
context = {"form": form}
return render(request, 'chat/guestUser.html', context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def chat():\n username = request.cookies.get('username')\n\n if username != None and username != \"\":\n return r.renderContent('chat.html', name=username)\n return redirect('/login')",
"def chat():\n name = session.get('name', '')\n room = session.get('room', '')\n if name == '' or room == '':\n return redirect(url_for('.index'))\n return render_template('chat.html', name=name, room=room)",
"def chat():\n name = session.get('name', '')\n room = session.get('room', '')\n if name == '' or room == '':\n return redirect(url_for('.index'))\n return render_template('chat.html', name=name, room=room)",
"def chat():\n name = session.get('name', '')\n room = session.get('room', '')\n if name == '' or room == '':\n return redirect(url_for('.index'))\n return render_template('chat.html', name=name, room=room)",
"def guest_setup(roomid,name):\n token = channel.create_channel(name + roomid) \n template_values = {\n \"roomid\":roomid,\n \"token\": channel.create_channel(name + roomid),\n \"yourname\": name\n }\n return render_template(\"guest.html\",values=template_values)",
"def chat(request):\n message = '{}: {}'.format(request.form['user'], request.form['message'])\n if message:\n ChatNamespace.broadcast('message', message)\n return Response()",
"def view_contact_chat(self):\n if self._user.chats == {}:\n print(\"No chats to be viewed yet\")\n self.homepage()\n \n print('-=' * 30)\n chats = self._user.list_chats()\n user_choice = self._int_input_in_range(\"Pick whose contact chat to be viewed: \"\n ,range_ = (1, len(chats)))\n if not user_choice:\n return self.homepage()\n \n chat, contact = chats[user_choice - 1]\n chat_content = chat.get_content(self._user)\n print('-=' * 12 + \" Chat Window \" + '-=' * 12)\n if chat_content != []:\n for line in chat_content:\n print(line.rstrip()) \n else:\n print('This chat is empty, send your first msg now')\n \n user_choice = self._int_input_in_range(' (1) Send new msg \\n (2) Back to homepage \\n Your choice: '\n , range_ = (1,2))\n if user_choice == 1:\n print('HINT: send (0) to exist the chat window')\n return self._send_msg(contact)\n else:\n return self.homepage()",
"def online():\r\n if current_user.is_authenticated:\r\n session['name'] = current_user.username\r\n name = current_user.username\r\n else:\r\n name = 'Guest' + str(secrets.token_hex(8))\r\n session['name'] = name\r\n # room = session.get('room', '123')\r\n return render_template('online.html', name=name)",
"def main():\n return redirect('chat')",
"def greet_guest():\n print('Welcome')",
"def login_bot(self):\n pass",
"def enter_username(self):",
"def _send_msg(self, contact):\n msg_content = input('{} :'.format(self._user.username))\n if msg_content == '0': \n return self.homepage()\n self._user.send_msg(contact, msg_content)\n\n return self._send_msg(contact)",
"def greet_user():\n username = get_stored_username()\n if username:\n print(\"Welcome back, \" + username['Name'] + \"!\")\n else:\n username = get_new_username()\n print(\"We'll remember you when you come back, \" + username + \"!\")",
"def login_anonymously(self):\n username = b\"anonymous\"\n password = b\"anonymous\"\n self.send_cmd(b\"USER \" + username + B_CRLF)\n self.send_cmd(b\"PASS \" + password + B_CRLF)",
"def get_into_site():\n\n ans = user_handler.get_guest_unique_user_name()\n auth.guest_registering(ans)\n return ans",
"def greet_user():\n username = load_user_data()\n if username != None:\n print(\"Welcome back, \" + username)\n else:\n register_user()",
"def greet_user():\n\tusername = get_stored_username()\n\tif username:\n\t\tprint(\"Welcome back, \" + username + \"!\")\n\telse:\n\t\tusername = get_new_username()\n\t\tprint(\"We'll remember you when you come back, \" + username + \"!\")",
"def login(self):\n identity = request.environ.get('repoze.who.identity')\n came_from = str(request.GET.get('came_from', '')) or \\\n url('/')\n if identity:\n redirect(url(came_from))\n else:\n c.came_from = came_from\n c.login_counter = request.environ['repoze.who.logins'] + 1\n return render('/forms/login.mako')",
"def greet_user():\n\tusername = get_stored_username()\n\tif username:\n\t\tprint(f\"WElcome back {username}!\")\n\telse:\n\t\tusername = get_new_username()\n\t\tprint(f\"We'll remember you when you come back again {username}! \")",
"def welcome(request):\n # flag che indica se sono presenti oppure no nuovi messaggi nella casella di posta\n new_messages = False\n if request.user.is_authenticated:\n # controllo le liste di attesa a cui è prenotato l'utente\n controlList(request.user)\n messaggi = Messaggio.objects.filter(userDestinatario=request.user)\n for msg in messaggi:\n if msg.letto == False:\n new_messages = True\n return render(request, 'main_page/main_page.html', {'new_messages':new_messages})",
"def chat():\n kwargs = {\"title\": u\"chat channel\", \"entries\": log.getLogEntries()}\n return render_template(\"chat.html\", **kwargs)",
"def defaultlanding():\n #send user to description page if not logged in\n if not g.user:\n return redirect(url_for('description'))\n #display leaderboard for competition if logged in\n return redirect(url_for('leaderboard'))",
"async def shell_chat(self, user: discord.Member, message: str):\n author = user.nick or user.name\n message = message.replace('\\n', '').replace('/', '').replace('§', '')\n self.console(f'say <{author}> {message}')",
"def post_login(self, came_from=lurl('/')):\n if not request.identity:\n login_counter = request.environ.get('repoze.who.logins', 0) + 1\n redirect('/login', params=dict(came_from=came_from, __logins=login_counter))\n return\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)",
"def welcome_page():\n \n username = session.get('username')\n reset()\n if request.method == 'POST':\n if not username:\n session['username'] = request.form['username']\n if username:\n return redirect(url_for('question_page'))\n return render_template('welcome.html', username=username)",
"def me():\n if g.USER:\n return redirect(url_for(\"profile\", username=g.USER.username))\n return redirect(url_for(\"home\"))",
"def user_page(username):\n\n user = User.query.get_or_404(username)\n feedback = Feedback.query.filter_by(recipient=username).all()\n \n if \"user\" not in session:\n flash(\"Not logged in\")\n return redirect('/login')\n else:\n return render_template('user_page.html', user=user, feedback=feedback)",
"def post_login(self, came_from=lurl('/')):\n if not request.identity:\n login_counter = request.environ.get('repoze.who.logins', 0) + 1\n redirect('/login',\n params=dict(came_from=came_from, __logins=login_counter))\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n\n # Do not use tg.redirect with tg.url as it will add the mountpoint\n # of the application twice.\n return HTTPFound(location=came_from)",
"def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)"
]
| [
"0.7034232",
"0.6945806",
"0.6945806",
"0.6945806",
"0.62233275",
"0.6180299",
"0.60441524",
"0.60381275",
"0.603254",
"0.5890163",
"0.58691007",
"0.57892066",
"0.57755506",
"0.5752947",
"0.5749893",
"0.57432944",
"0.57223237",
"0.57203776",
"0.5709425",
"0.5697157",
"0.5693043",
"0.5630236",
"0.56033546",
"0.55904526",
"0.55864525",
"0.5567869",
"0.5540501",
"0.550397",
"0.5496182",
"0.54885566"
]
| 0.7114216 | 0 |
Computes the Wasserstein distance of order 2 between two Gaussian distributions | def wass_gaussians(mu1, mu2, Sigma1, Sigma2):
d = mu1.shape[0]
if d == 1:
w2 = (mu1 - mu2)**2 + (np.sqrt(Sigma1) - np.sqrt(Sigma2))**2
else:
prodSigmas = Sigma2**(1/2)*Sigma1*Sigma2**(1/2)
w2 = np.linalg.norm(mu1 - mu2)**2 + np.trace(Sigma1 + Sigma2 - 2*(prodSigmas)**(1/2))
return np.sqrt(w2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_distribution_distance(freqs1, freqs2):\n A = np.array([freqs1, freqs2])\n p_value = calculate_chi_square_p_value(A)\n return 1 - p_value",
"def distributions_EMD(d1, d2):\n return ss.wasserstein_distance(d1.get_probs(), d2.get_probs()) / len(d1.get_probs())",
"def dist_sph(w1, w2):\n r = w1.norm(2, -1)\n theta = torch.sum((w1*w2), -1)/r**2\n return torch.acos(theta)",
"def wasserstein_distance_1d(pers_diag_1, pers_diag_2) -> float:\n wasserstein_distance = d.wasserstein_distance(pers_diag_1[1], pers_diag_2[1],\n q=1, delta=0.2)\n return wasserstein_distance",
"def compute_grassman_distance(Y1, Y2):\n Q1, _ = jnp.linalg.qr(Y1)\n Q2, _ = jnp.linalg.qr(Y2)\n\n _, sigma, _ = jnp.linalg.svd(Q1.T @ Q2)\n sigma = jnp.round(sigma, decimals=6)\n return jnp.linalg.norm(jnp.arccos(sigma))",
"def GetDist(feature_1, feature_2):\n return np.linalg.norm(feature_1 - feature_2)",
"def hellinger_weighted(mu1, sigma1, pi1, mu2, sigma2, pi2):\n sigma1norm = np.linalg.norm(sigma1)\n sigma2norm = np.linalg.norm(sigma2)\n X0 = np.zeros(mu1.shape)\n i = 2 * (sigma1norm**(1.0/4)) * (sigma2norm**(1.0/4)) * np.sqrt(2*np.pi) *\\\n gmm.mulnormpdf(X0, mu1-mu2, 2*sigma1 + 2*sigma2)\n #return np.sqrt(pi1*pi2) * (1-2*i)\n return 1-i[0]",
"def wasserstein_distance(x, y, w=None, safe=True, normalize=True):\n if w is None:\n w = np.arange(x.shape[0])\n weights = np.diff(w)\n if normalize:\n x = x/np.sum(x)\n y = y/np.sum(y)\n if safe:\n assert (x.shape == y.shape == w.shape)\n np.testing.assert_almost_equal(np.sum(x), np.sum(y))\n assert ((x >= 0).all())\n assert ((y >= 0).all())\n assert ((weights >= 0).all())\n cx = np.cumsum(x)[:-1]\n cy = np.cumsum(y)[:-1]\n return np.sum(weights * np.abs(cx - cy)) / (w[-1] - w[0])",
"def KolmogorovSmirnoff_statistics(dd1, dd2):\n cum1 = dd1.cumulative_distribution()\n cum2 = dd2.cumulative_distribution()\n minimum = max(cum1[0][0], cum2[0][0])\n maximum = max(cum1[-1][0], cum2[-1][0])\n index1 = len(cum1) - 1\n index2 = len(cum2) - 1\n summa1 = summa2 = 0\n\n difference = 0\n for i in reversed(range(minimum, maximum+1)):\n if cum1[index1][0] == i:\n summa1 = cum1[index1][1]\n index1 -= 1\n if cum2[index2][0] == i:\n summa2 = cum2[index2][1]\n index2 -= 1\n if abs(summa1 - summa2) > difference:\n difference = abs(summa1 - summa2)\n return difference",
"def _sliced_wasserstein(a, b, random_sampling_count, random_projection_dim):\n s = array_ops.shape(a)\n means = []\n for _ in range(random_sampling_count):\n # Random projection matrix.\n proj = random_ops.random_normal(\n [array_ops.shape(a)[1], random_projection_dim])\n proj *= math_ops.rsqrt(\n math_ops.reduce_sum(math_ops.square(proj), 0, keep_dims=True))\n # Project both distributions and sort them.\n proj_a = math_ops.matmul(a, proj)\n proj_b = math_ops.matmul(b, proj)\n proj_a = _sort_rows(proj_a, s[0])\n proj_b = _sort_rows(proj_b, s[0])\n # Pairwise Wasserstein distance.\n wdist = math_ops.reduce_mean(math_ops.abs(proj_a - proj_b))\n means.append(wdist)\n return math_ops.reduce_mean(means)",
"def hellinger_dist(v1, v2):\n if len(v1) != len(v2):\n raise ValueError(\"Vectors should have the same size! \")\n return sqrt( sum( map(lambda e: \n (sqrt(e[0])-sqrt(e[1]))**2, zip(v1,v2))))/sqrt(2)",
"def dist(a, b):\n return np.sum((a-b)**2.0)**.5",
"def dist2D(a, b):\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5",
"def hellinger_distance(doca, docb, axis=1):\n return np.sum((doca**.5 - docb**.5)**2, axis=axis)",
"def vf_wasserstein_distance(x, y, critic):\n return torch.mean(critic(x)) - torch.mean(critic(y))",
"def distance(p1, p2):\n return np.linalg.norm(p2-p1)",
"def distance(a, b):\n return (np.sum((a - b)**2))**0.5",
"def get_wmd_dist(s1, s2, model):\r\n s1 = s1.lower().strip().split()\r\n s2 = s2.lower().strip().split()\r\n\r\n distance = model.wmdistance(s1, s2)\r\n return distance",
"def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))",
"def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))",
"def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))",
"def _calc_distance(hmm1, hmm2, seqs2):\n p12 = hmm1.calc_loglikelihood(seqs2)\n p22 = hmm2.calc_loglikelihood(seqs2)\n # calc total number of elements in all sequences\n # TODO: consider the case when number of elements vary from seq to seq\n n_elements = len(seqs2) * len(seqs2[0])\n return (p22 - p12) / n_elements",
"def _calc_distance(r1, r2):\n return np.linalg.norm(r1 - r2)",
"def _pairwise_dist(self,s1,s2):\n\n return 0.0",
"def _sliced_wasserstein_svd(a, b):\n s = array_ops.shape(a)\n # Random projection matrix.\n sig, u = linalg_ops.svd(array_ops.concat([a, b], 0))[:2]\n proj_a, proj_b = array_ops.split(u * sig, 2, axis=0)\n proj_a = _sort_rows(proj_a[:, ::-1], s[0])\n proj_b = _sort_rows(proj_b[:, ::-1], s[0])\n # Pairwise Wasserstein distance.\n wdist = math_ops.reduce_mean(math_ops.abs(proj_a - proj_b))\n return wdist",
"def dist_2D(v1, v2):\n return ((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2 )**(0.5)",
"def distance(p1, p2):\n return np.linalg.norm(np.array(p1) - np.array(p2))",
"def get_dist_sqrd(self, other):\n return (self.x - other[0])**2 + (self.y - other[1])**2",
"def hellinger(mu1, sigma1, mu2, sigma2, s=0.5):\n sigma1inv = np.linalg.inv(sigma1)\n sigma2inv = np.linalg.inv(sigma2)\n sigma1inv_sigma2 = np.dot(sigma1inv, sigma2)\n sigma2inv_sigma1 = np.dot(sigma2inv, sigma1)\n N = sigma1.shape[0]\n I = np.diag(np.ones(N))\n d = np.linalg.det(s*I+(1-s)*sigma1inv_sigma2)**(-s/2) *\\\n np.linalg.det((1-s)*I+s*sigma2inv_sigma1)**(-(1-s)/2) *\\\n np.exp(0.5*(_maha(s*np.dot(sigma2inv, mu2) + (1-s) *\\\n np.dot(sigma1inv, mu1), s*sigma2inv + (1-s)*sigma1inv) -\\\n s * _maha(mu2,sigma2)-(1-s)*_maha(mu1,sigma1)))\n return d",
"def distance(p1,p2):\n import numpy as np\n x = np.sqrt(sum(np.power(p2-p1,2)))\n return(x)"
]
| [
"0.6888003",
"0.6883681",
"0.67507505",
"0.6649754",
"0.6564868",
"0.65519416",
"0.6545447",
"0.6508782",
"0.6508671",
"0.6506747",
"0.64647543",
"0.64203846",
"0.639461",
"0.6382358",
"0.6368685",
"0.6355784",
"0.6326227",
"0.62741095",
"0.6272526",
"0.6272526",
"0.62541515",
"0.6244973",
"0.6241147",
"0.62306273",
"0.62275726",
"0.62251717",
"0.6221166",
"0.62147325",
"0.6207945",
"0.6196093"
]
| 0.7291682 | 0 |
Computes the Hilbert distance of order p | def hilbert_distance(X, Y, p=2):
# We consider N_X = N_Y
xordered = X[HilbertCode_caller.hilbert_order_(X.T)]
yordered = Y[HilbertCode_caller.hilbert_order_(Y.T)]
hilbert_dist = (np.abs(xordered - yordered) ** p).sum()
hilbert_dist /= X.shape[0]
hilbert_dist = hilbert_dist ** (1/p)
return hilbert_dist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def HammingDistance(p, q):\r\n if len(p) != len(q):\r\n return -1\r\n dist = 0\r\n #zip(AB,CD) gives (('A','C'),('B','D'))\r\n for first, second in zip(p, q):\r\n if first != second:\r\n dist = dist + 1\r\n return dist",
"def ham_dist(p, q):\n count = 0\n for i in range(len(p)):\n if p[i] != q[i]:\n count += 1\n return count",
"def ham_dist(p, q):\n count = 0\n for i in range(len(p)):\n if p[i] != q[i]:\n count += 1\n return count",
"def distancia_puntos(c, p):\n return sqrt((c[0] - p[0]) ** 2 + (c[1] - p[1]) ** 2)",
"def hamdist(inp):\n\treturn sum(c1 != c2 for c1, c2 in itertools.izip(inp[0],inp[1]))",
"def hamming_distance(p, q):\n result = 0\n for x, y in zip(p, q):\n if x != y:\n result += 1\n return result + abs(len(p) - len(q))",
"def distances(p):\n\n if not isinstance(p, Pharmacophore):\n raise TypeError(\"Expected Pharmacophore, got %s instead\" %\n type(p).__name__)\n\n dist = np.array(p.edges)\n\n for i in range(p.numnodes):\n for j in range(i):\n if dist[i][j] == 0:\n dist[i][j] = dist[j][i] = float(\"inf\")\n\n for i in range(len(dist)):\n compute = False\n for j in range(i):\n if dist[i][j] == float(\"inf\"):\n compute = True\n break\n if compute:\n queue = [k for k in range(p.numnodes)]\n while queue:\n queue.sort(key=lambda x: dist[i, x])\n u = queue[0]\n del queue[0]\n for v in np.where(p.edges[u] > 0)[0]:\n if v in queue:\n alt = dist[i, u] + p.edges[u, v]\n if alt < dist[i, v]:\n dist[i, v] = dist[v, i] = alt\n return dist",
"def TR_algo2(p, vd=2):\n # h will contain the Hilbert index\n h = 0\n # ve and vd contain the entry point and dimension of the current subcube\n # we choose here a main traversal direction N-2 (i.e. z for a cube) to match\n # the illustrations\n ve = 0\n for i in range(M-1, -1, -1):\n # the cell label is constructed in two steps\n # 1. extract the relevant bits from p\n l = [bit_component(px, i) for px in p]\n # 2. construct a integer whose bits are given by l\n l = sum( [lx*2**j for j, lx in enumerate(l)] )\n # transform l into the current subcube\n l = T(ve, vd, l)\n # obtain the gray code ordering from the label l\n w = inverse_gc(l)\n # compose (see [TR] lemma 2.13) the transform of ve and vd\n # with the data of the subcube\n ve = ve ^ (rotate_left(e(w), vd+1))\n vd = (vd + d(w) + 1) % N\n # move the index to more significant bits and add current value\n h = (h << N) | w\n return h",
"def euclidian_distance(p):\n return(np.sqrt(sum([(p[0][i]-p[1][i])**2 for i, _ in enumerate(p)])))",
"def dist(p, q):\n return ((p[0] - q[0])**2 + (p[1] - q[1])**2 + (p[2] - q[2])**2)**0.5",
"def dist(p,q):\n return math.sqrt((p[0] - q[0]) ** 2+(p[1] - q[1]) ** 2)",
"def distance_to(self, p):\n sign = 1 # -1 if self.is_inside(p) else 1\n dist = min([tri.distance_to(p) for tri in self.triangles])\n return sign * dist",
"def cphase(h1, h2):\n\n for h in (h1, h2):\n h.assert_ket_space()\n\n field = h1.base_field\n\n d = h1.dim()\n if h2.dim() != d:\n raise HilbertError('spaces must be of the same dimension')\n\n ret = (h1*h2).O.array()\n for (j, a) in enumerate(h1.index_iter()):\n for (k, b) in enumerate(h2.index_iter()):\n ret[{ h1: a, h1.H: a, h2: b, h2.H: b }] = field.fractional_phase(j*k, d)\n return ret",
"def _phi(self, x, d, p):\n ks = np.arange(self.p + 1)\n ks = ks[np.where(2 * (self.p - ks) - d >= 0)][:, np.newaxis]\n return np.sum(\n binom(self.p, ks)\n * (-1) ** ks\n * x[np.newaxis, :] ** (2 * (self.p - ks) - d)\n * perm(2 * (self.p - ks), d),\n axis=0,\n )",
"def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))",
"def sym_distance(p, q):\n p = np.asarray(p)\n q = np.asarray(q)\n return np.minimum(norm(p - q), norm(p + q))",
"def distance(p1, p2):\n return math.hypot(p2[0] - p1[0], p2[1] - p1[1])",
"def dist(p0, p1):\n return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)",
"def _phi_int(self, x, d, p):\n ks = np.arange(self.p + 1)\n ks = ks[np.where(2 * (self.p - ks) - d >= 0)][:, np.newaxis]\n return np.sum(\n binom(self.p, ks)\n * (-1) ** ks\n * x[np.newaxis, :] ** (2 * (self.p - ks) - d + 1)\n * perm(2 * (self.p - ks), d)\n / (2 * (self.p - ks) - d + 1),\n axis=0,\n )",
"def hyperboloidDist(point1, point2):\n return np.arccosh(-minkowskiDot(point1, point2))",
"def euclidean_distance_3(P1, P2):\r\n return (P1[0]-P2[0])**2+(P1[1]-P2[1])**2+(P1[2]-P2[2])**2",
"def tridiagonal_matrix_algorithm(l, d, u, b): # noqa:E741\n n = l.size\n cp = np.zeros(n)\n dp = np.zeros(n)\n x = np.zeros(n)\n cp[0] = u[0]/d[0]\n dp[0] = b[0]/d[0]\n for k in range(1, n):\n cp[k] = u[k] / (d[k]-l[k]*cp[k-1])\n dp[k] = (b[k]-l[k]*dp[k-1]) / (d[k]-l[k]*cp[k-1])\n x[-1] = dp[-1]\n for k in range(n-2, -1, -1):\n x[k] = dp[k] - cp[k]*x[k+1]\n return x",
"def distance(p1, p2):\n return math.sqrt((math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2)))",
"def compute_dist(p_1, p_2):\n return sqrt((p_2[0] - p_1[0])**2 + (p_2[1] - p_1[1])**2 +\n (p_2[2] - p_1[2])**2)",
"def distance(p1, p2):\r\n return math.hypot(p1[0] - p2[0], p1[1] - p2[1])",
"def dist(self, a, b, l):\n # works for non-arrays\n return sum( ((i-j)/k)**2 for i,j,k in zip(a, b, l) )",
"def dist(self, p):\n return math.sqrt((p.x - self.x)**2 + (p.y - self.y)**2)",
"def dist(p1,p2):\n\n return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)",
"def DKL(p, q,eps=1e-12):\n return -Hshannon(p,eps=eps) + NLL(p, q,eps=eps)",
"def distance(p1, p2):\n return np.linalg.norm(p2-p1)"
]
| [
"0.6240977",
"0.6224513",
"0.6224513",
"0.62134373",
"0.61977607",
"0.61486363",
"0.60876817",
"0.608004",
"0.5990324",
"0.59267354",
"0.5843678",
"0.580374",
"0.5779593",
"0.57608706",
"0.56451637",
"0.56375206",
"0.563657",
"0.5633568",
"0.5630969",
"0.5597321",
"0.5574527",
"0.555948",
"0.5548169",
"0.5544327",
"0.55382264",
"0.55297697",
"0.5524312",
"0.5522262",
"0.5503853",
"0.55005354"
]
| 0.80400836 | 0 |
Computes the swapping distance | def swap_distance(X, Y, n_sweeps=10000, tol=1e-8, p=2):
# We consider N_X = N_Y
if p == 2:
M = ot.dist(X, Y) # Cost matrix
o1 = HilbertCode_caller.hilbert_order_(X.T)
o2 = HilbertCode_caller.hilbert_order_(Y.T)
permutation = o2[np.argsort(o1)]
total_cost = list(map(lambda k: M[k, permutation[k]], range(X.shape[0])))
total_cost = np.array(total_cost).sum()
previous_total_cost = total_cost
i_sweep = 0
while i_sweep < n_sweeps:
i_sweep += 1
# permutation, total_cost = swapsweep_py(permutation, M, total_cost) # Slow!
permutation, total_cost = swapsweep.swapsweep(permutation, M, total_cost)
error = np.abs(total_cost - previous_total_cost) / X.shape[0]
if error < tol:
break
previous_total_cost = total_cost
swap_distance = total_cost / X.shape[0]
swap_distance = swap_distance ** (1/p)
return swap_distance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def switch_distance(pair_1, pair_2):\r\n\th1_1=pair_1[0]\r\n\th2_1=pair_2[0]\r\n\th2_2=pair_2[1]\r\n\tcnt_1=0\r\n\tcnt_2=0\r\n\t# print h2_1, h1_1\r\n\tfor i in xrange(0,len(pair_1[0])):\r\n\t\tif h2_1[i]!=h1_1[i]:\r\n\t\t\th2_1, h2_2=switch(h2_1, h2_2, i)\r\n\t\t\t# print h2_1, h2_2, i\r\n\t\t\tcnt_1+=1\r\n\tprint h2_1, h1_1\r\n\th1_1=pair_1[0]\r\n\th2_1=pair_2[0]\r\n\th2_2=pair_2[1]\r\n\tfor i in xrange(0,len(pair_1[0])):\r\n\t\tif h2_2[i]!=h1_1[i]:\r\n\t\t\th2_1, h2_2=switch(h2_1, h2_2, i)\r\n\t\t\t# print h2_1, h2_2, i\r\n\t\t\tcnt_2+=1\r\n\t# print cnt_1, cnt_2\r\n\treturn cnt_1 if cnt_1<cnt_2 else cnt_2",
"def _pairwise_dist(self,s1,s2):\n\n return 0.0",
"def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance",
"def distance_swap(cities, index_a, index_b):\n index_A = min(index_a, index_b)\n index_B = max(index_a, index_b)\n \n (index_A_previous, index_A_next) = compute_swap_indices(index_A, len(cities))\n (index_B_previous, index_B_next) = compute_swap_indices(index_B, len(cities))\n \n distances = []\n # These two distances are common to the two sub-cases\n distances.append(cities[index_A_previous].distance_to_city_in_km(cities[index_A]))\n distances.append(cities[index_B].distance_to_city_in_km(cities[index_B_next]))\n if index_A == index_B_previous:\n # B is following A in the list: the distance between A and B must not\n # be counted twice.\n # ---x---A---B---x---\n distances.append(cities[index_A].distance_to_city_in_km(cities[index_B]))\n else:\n # B is not following A in the list: all distances must be counted\n # ---x---A---x--- ... ---x---B---x---\n distances.append(cities[index_A].distance_to_city_in_km(cities[index_A_next]))\n distances.append(cities[index_B_previous].distance_to_city_in_km(cities[index_B]))\n\n return sum(distances)",
"def _pairwise_distance(x):\n x_inner = -2*torch.matmul(x, x.transpose(2, 1))\n x_square = torch.sum(torch.mul(x, x), dim=-1, keepdim=True)\n return x_square + x_inner + x_square.transpose(2, 1)",
"def hamming_dist(v1, v2):\r\n edits = (v1 != v2)\r\n return edits.sum()",
"def _clifford_swap(cls, slot_i, slot_j) -> Tensor:\n\n return Tensor(\n {\n Tensor._merge_keys((slot_j,), (slot_i,)): -1,\n Tensor._merge_keys(): 2 * cls.symmetric_bilinear_form(slot_i, slot_j),\n }\n )",
"def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)",
"def _distorted_distance(self):\n distance = 0\n for i, pixel in enumerate(self.training_set):\n distance += self._euclid_distance(\n pixel, self.clusters[self.labels[i]], axis=0)\n return distance",
"def new_distance(self, prev_len, prev, sol, i, l):\n swapped1 = i\n swapped2 = l\n if swapped2 == self.file_size-1:\n remove = self.dist_matrix[prev[swapped1 - 1]][prev[swapped1]] \\\n + self.dist_matrix[prev[swapped2]][prev[0]]\n add = self.dist_matrix[sol[swapped1 - 1]][sol[swapped1]] + self.dist_matrix[sol[swapped2]][sol[0]]\n attempt = prev_len - remove + add\n else:\n remove = self.dist_matrix[prev[swapped1 - 1]][prev[swapped1]] \\\n + self.dist_matrix[prev[swapped2+1]][prev[swapped2]]\n add = self.dist_matrix[sol[swapped1 - 1]][sol[swapped1]] + self.dist_matrix[sol[swapped2+1]][sol[swapped2]]\n attempt = prev_len - remove + add\n return attempt",
"def _edit_dist(s1, s2):\r\n dist = 0\r\n for i in range(len(s1)):\r\n if s1[i] != s2[i]:\r\n dist += 1\r\n return dist",
"def direct_distance(a, b):\n\n if a[0] == b[0]:\n return abs(a[1] - b[1]) - 1\n if a[1] == b[1]:\n return abs(a[0] - b[0]) - 1\n return abs(a[0] - b[0]) - 1",
"def swap(a, b, state, target):\n # a = random.randrange(0, 200, 2)\n # b = random.randrange(0, 200, 2)\n new_state = ''\n for i in range(len(state)):\n if i == a:\n new_state += state[b]\n continue\n if i == b:\n new_state += state[a]\n continue\n new_state += state[i]\n # print(new_state)\n res = solving(int(new_state[0]), int(new_state[2]), new_state[1])\n for i in range(2, len(new_state) - 2, 2):\n res = solving(res, int(new_state[i + 2]), new_state[i + 1])\n # print(\"Distance from target: \", target - res)\n return new_state, abs(target - res)",
"def distance(brd1,brd2):\n\n step=brd1[1,0]-brd1[0,0]\n return np.sum(np.abs(brd1[:,1]-brd2[:,1]))*step",
"def swap(self):\n if self.cnt_swap == 0:\n i = self.swaplist[self.cnt_swap][0]\n j = self.swaplist[self.cnt_swap][1]\n self.b[i], self.b[j] = self.b[j], self.b[i]\n self.f[i], self.f[j] = self.f[j], self.f[i]\n elif self.cnt_swap < self.nb_swaps:\n i = self.swaplist[self.cnt_swap - 1][0]\n j = self.swaplist[self.cnt_swap - 1][1]\n self.b[i], self.b[j] = self.b[j], self.b[i]\n self.f[i], self.f[j] = self.f[j], self.f[i]\n i = self.swaplist[self.cnt_swap][0]\n j = self.swaplist[self.cnt_swap][1]\n self.b[i], self.b[j] = self.b[j], self.b[i]\n self.f[i], self.f[j] = self.f[j], self.f[i]\n else:\n return 0\n self.cnt_swap += 1\n return 1",
"def dist(a, b, i, j):\n return np.sqrt(sqrSum(a, b, i, j))",
"def dist(a, b):\n return np.sum((a-b)**2.0)**.5",
"def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))",
"def dist(self, a, b, l):\n # works for non-arrays\n return sum( ((i-j)/k)**2 for i,j,k in zip(a, b, l) )",
"def _heuristic(a, b):\n return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2",
"def distance(a,b):\n n, m = len(a), len(b)\n if n > m:\n # Make sure n <= m, to use O(min(n,m)) space\n a,b = b,a\n n,m = m,n\n current = range(n+1)\n for i in range(1,m+1):\n previous, current = current, [i]+[0]*n\n for j in range(1,n+1):\n add, delete = previous[j]+1, current[j-1]+1\n change = previous[j-1]\n if a[j-1] != b[i-1]:\n change = change + 1\n current[j] = min(add, delete, change)\n return current[n]",
"def hamming_distance(lhs,rhs):\n return len([(x,y) for x,y in zip(lhs,rhs) if x !=y])",
"def distances(self):",
"def dist2D(a, b):\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5",
"def flat_dist(z1, z1b, z2, z2b):\n assert z1 >= z1b\n assert z2 >= z2b\n if z1 > z2 and z1b <= z2:\n return 0\n elif z1 > z2:\n return z1b - z2\n elif z2 > z1 and z2b >= z2:\n return 0\n else:\n return z2b - z1",
"def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])",
"def _get_distance_diff(self, input):\n nbatch = input.shape[0]\n in1 = input.unsqueeze(1).expand(\n nbatch, self.nelec, self.nelec, self.ndim)\n in2 = input.unsqueeze(2).expand(\n nbatch, self.nelec, self.nelec, self.ndim)\n dist = torch.pow(in1 - in2, 2).sum(3)\n return dist",
"def distance(a, b):\n return (np.sum((a - b)**2))**0.5",
"def calc_dist(self, neighboring_pos):\n vec = np.array([i[1] - i[0] for i in zip(self.pos, neighboring_pos)])\n dist = np.linalg.norm(vec)\n return vec, dist",
"def dist(self, one, two):\n return sum((one[0] != two[0], one[1] != two[1]))"
]
| [
"0.6582465",
"0.6532841",
"0.63852835",
"0.63033766",
"0.6189577",
"0.6141517",
"0.60874605",
"0.6075062",
"0.60507214",
"0.5982342",
"0.59569436",
"0.59408253",
"0.59205955",
"0.5896892",
"0.5882448",
"0.58802503",
"0.5877402",
"0.58580816",
"0.5843558",
"0.58246493",
"0.5818703",
"0.58150446",
"0.58016723",
"0.5792988",
"0.5787376",
"0.57619524",
"0.5754256",
"0.57499117",
"0.5743266",
"0.5735667"
]
| 0.69632035 | 0 |
Cleans passed string to either return a valid SVG RGB HEX notation or an empty string. | def cleanup_passed_color_value(s):
reo = re.compile('[0-9a-f]')
cannotBeCleaned = ''
if s[0] == '#' and len(s) in [4,7] and reo.match(s[1:]):
return s
if s in colorNamesAndCodes:
col = colorNamesAndCodes[s]
if reo.match(col[1:]):
return col
else:
return cannotBeCleaned
if len(s) in [3,6] and reo.match(s):
return '#' + s
if len(s) == 2 and reo.match(s):
return '#' +s +s +s
return cannotBeCleaned | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_hex(text): \n return re.sub(r'&.*?;', r'', text)",
"def clean_xml_string(s):\n return VALID_XML_CHARS_REGEX.sub(\"\", s)",
"def clean_str(string):\n #just return string if already cleaned\n return string",
"def stripColor(self, s):\n return _stripColorRe.sub('', s)",
"def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()",
"def _dehex(s):\n import re\n import binascii\n\n # Remove all non-hexadecimal digits\n s = re.sub(br'[^a-fA-F\\d]', b'', s)\n # binscii.unhexlify works in Python 2 and Python 3 (unlike\n # thing.decode('hex')).\n return binascii.unhexlify(s)",
"def _hashsanitize(bytesin):\n # Used for converting raw byte data into a hex string. If the byte isn't a hex digit, use nothing instead.\n return \"\".join([x if x.lower() in 'abcdef0123456789' else '' for x in bytesin])",
"def _strip_ansi(s):\n if isinstance(s, str):\n return _ansi_codes.sub(r\"\\4\", s)\n else: # a bytestring\n return _ansi_codes_bytes.sub(r\"\\4\", s)",
"def __set_has_hexadecimal(text=str):\n reg_ex = constants.HEXADECIMAL_REG_EX_PATTERN\n if reg_ex.search(text) is None:\n return text\n return reg_ex.sub(constants.QUESTION_HAS_HEXADECIMAL_KEY, text)",
"def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()",
"def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s",
"def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s",
"def test_empty_string(self):\n self.assertEqual(hash_str(\"\", salt=\"\").hex()[:6], \"e3b0c4\")",
"def _cleanup_string(self, bytes):\n try:\n b = bytes.index(b'\\x00')\n except ValueError:\n return bytes.decode('latin-1').strip()\n else:\n return bytes[:b].decode('latin-1').strip()",
"def cstrip(inString):\n zeroDex = inString.find('\\x00')\n if zeroDex == -1:\n return inString\n else:\n return inString[:zeroDex]",
"def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip().lower()",
"def replace_gaiji_hex(self, utf8_string):\n #print re.findall(\"{0x[0-9a-f]+}\", utf8_string)\n return re.sub(r'{0x([0-9a-f]+)}', \n self.replace_gaiji_hex_sub_helper, utf8_string)",
"def isHex(string, needHexPrefix):\n return (True)",
"def clean(string):\r\n if string is None or not string: return ''\r\n string = html.unescape(string)\r\n string = unicodedata.normalize('NFC', string)\r\n string = unescape(string)\r\n string = html.escape(string)\r\n string = unicodedata.normalize('NFC', string)\r\n return string",
"def strip_raw_ansi(string, parser=ANSI_PARSER):\n string = string or \"\"\n return parser.strip_raw_codes(string)",
"def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")",
"def sanitize_unicode(value):\n return re.sub(\"[\\x00-\\x08\\x0B\\x0C\\x0E-\\x1F\\uD800-\\uDFFF\\uFFFE\\uFFFF]\", \"\", value)",
"def clean_str_vn(string):\n string = re.sub(r\"[~`@#$%^&*-+]\", \" \", string)\n def sharp(str):\n b = re.sub('\\s[A-Za-z]\\s\\.', ' .', ' '+str)\n while (b.find('. . ')>=0): b = re.sub(r'\\.\\s\\.\\s', '. ', b)\n b = re.sub(r'\\s\\.\\s', ' # ', b)\n return b\n string = sharp(string)\n string = re.sub(r\" : \", \":\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \"\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()",
"def test_hexlify_empty_script():\n assert uflash.hexlify('') == ''",
"def from_hex_str(value):\n \n return SHex(value)",
"def sanitizte_color(value):\n if len(value) == 7 and value[0] == '#':\n return \"#%06x\" % int(value[1:], 16)\n raise ValueError('invalid color')",
"def clean_string(value):\n\treturn re.sub(r'[^a-zA-Z0-9_.]', '', str(value))",
"def hexstring(self):\n if self.current != b\"<\":\n self.on_parser_error(\"Hexadecimal string expected\")\n self.next()\n token = b''\n self.maybe_spaces_or_comments()\n while self.is_hex_digit:\n token += self.next()\n self.maybe_spaces_or_comments()\n\n ch = self.next()\n if ch != b'>':\n self.on_parser_error(\"Wrong hexadecimal string\")\n if len(token) % 2:\n # if there is an odd number of digits - the last one should be assumed 0\n token += b'0'\n return HexString(token.decode(DEFAULT_ENCODING).upper())",
"def test_is_valid_hex(self):\n self.assertTrue(is_valid_hex('#aabb11'))\n self.assertTrue(is_valid_hex('#000'))\n self.assertTrue(is_valid_hex('#aaa'))\n self.assertFalse(is_valid_hex('black'))\n self.assertFalse(is_valid_hex('bl(ack'))",
"def clean_string(raw_string):\n if raw_string == None:\n return\n\n clean_string = raw_string.strip('\\n')\n clean_string = ' '.join(clean_string.split())\n return clean_string"
]
| [
"0.60601455",
"0.59946215",
"0.59191805",
"0.58995914",
"0.581662",
"0.5805634",
"0.5798346",
"0.5726138",
"0.57169926",
"0.5714296",
"0.5709046",
"0.5709046",
"0.57067734",
"0.56963587",
"0.56107944",
"0.5573957",
"0.5571539",
"0.5549539",
"0.55424464",
"0.5520134",
"0.5516258",
"0.5512318",
"0.5509272",
"0.5509167",
"0.54740757",
"0.54635775",
"0.5444915",
"0.5430019",
"0.54224765",
"0.54140985"
]
| 0.60755706 | 0 |
Prints debugging information when the script encounters an illegal color. | def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ):
print ""
print "Error: are the passed in colors valid?"
print " - passed in background-color '" + enteredBGColor + "' was converted to '" + convertedBGColor + "'."
print " - passed in foreground-color '" + enteredFGColor + "' was converted to '" + convertedFGColor + "'."
print "" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_err(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 31, **kwargs)",
"def print_warn(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 33, **kwargs)",
"def error_debug(input):\n print(\"\\033[1;31;40m{}\\033[0m\".format(input))",
"def print_debug(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 36, **kwargs)",
"def test_colors_fail_uncalibrated(self):\n command = ('{0} -b 100 -e {1} {2} {2} {3}').format(\n os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_ev' + HEN_FILE_EXTENSION),\n 3, 5, 10)\n with pytest.raises(ValueError) as excinfo:\n hen.colors.main(command.split())\n\n assert \"No energy information is present \" in str(excinfo.value)",
"def is_invalid():\n print(colored('Invalid input\\n', 'red', attrs=['bold']))",
"def failure(self, message=''):\n print(colored(message, 'red'))",
"def print_failure(text):\n\n print(colorize(text, Colors.FAIL))",
"def check_color_scoping(self):\n for mapping in self.OrderedColorMappings:\n if mapping.token.text not in self.ColorDefinitions:\n raise Exception(\"%d:%d Color %s is never defined\" % (mapping.token.line, mapping.token.col, mapping.token.text))",
"def magic_color_info(self,parameter_s = ''):\n \n self.shell.rc.color_info = 1 - self.shell.rc.color_info\n self.magic_colors(self.shell.rc.colors)\n print 'Object introspection functions have now coloring:',\n print ['OFF','ON'][self.shell.rc.color_info]",
"def print_green(msg: str = None) -> None:\n if msg is None:\n raise Exception(\"msg was not defined\")\n\n print(Fore.GREEN + msg)\n print(Style.RESET_ALL + \"\", end=\"\")",
"def print_yellow(msg: str = None) -> None:\n if msg is None:\n raise Exception(\"msg was not defined\")\n\n print(Fore.YELLOW + msg)\n print(Style.RESET_ALL + \"\", end=\"\")",
"def error(text):\n print(red(\"✘ {0}\".format(text)))\n sys.stdout.flush()",
"def print_warning(text):\n weechat.prnt(\"\", (\"%s[vimode.py] %s\" % (weechat.color(\"red\"), text)))",
"def fail():\n sys.stdout.write('%s[ fail ]%s\\n' % (colors.RED, colors.RESET))",
"def err(msg):\n print(colored.red(\"[ERROR]: {0}\".format(msg)))",
"def print_red(msg: str = None) -> None:\n if msg is None:\n raise Exception(\"msg was not defined\")\n\n print(Fore.RED + msg)\n print(Style.RESET_ALL + \"\", end=\"\")",
"def print_disabled(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 30, **kwargs)",
"def offline_error():\n\n colored('No available internet connection\\n', 'red')",
"def error(text):\n return color_str(text, 'RED')",
"def color_print(message, color, newline='\\n'):\n sys.stderr.write('%s%s%s%s' % (color, message, ANSI_NORMAL, newline))",
"def error(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['error']:\n self.print_lines(self.colored(('red', 'bold'), lines))",
"def print_with_color(message: str, color: str):\n import sys\n print(color + message + constant.Color.ENDC, file=sys.stderr)",
"def test_weirdColorFormatting(self):\n self.assertAssembledEqually(\"\\x031kinda valid\", A.fg.black[\"kinda valid\"])\n self.assertAssembledEqually(\n \"\\x03999,999kinda valid\", A.fg.green[\"9,999kinda valid\"]\n )\n self.assertAssembledEqually(\n \"\\x031,2kinda valid\", A.fg.black[A.bg.blue[\"kinda valid\"]]\n )\n self.assertAssembledEqually(\n \"\\x031,999kinda valid\", A.fg.black[A.bg.green[\"9kinda valid\"]]\n )\n self.assertAssembledEqually(\n \"\\x031,242 is a special number\",\n A.fg.black[A.bg.yellow[\"2 is a special number\"]],\n )\n self.assertAssembledEqually(\"\\x03,02oops\\x03\", A.normal[\",02oops\"])\n self.assertAssembledEqually(\"\\x03wrong\", A.normal[\"wrong\"])\n self.assertAssembledEqually(\"\\x031,hello\", A.fg.black[\"hello\"])\n self.assertAssembledEqually(\"\\x03\\x03\", A.normal)",
"def color_sample():\r\n env = dict()\r\n setup_quiet_build(env)\r\n for item in env.iteritems():\r\n print item[0],item[1]",
"def print_warn(msg):\n print('{}{}'.format(colorama.Fore.YELLOW, msg))",
"def debug(self, msg):\n debug_msg = self._debug_color\n debug_msg += \"[SHOULDER_DEBUG] \" + msg\n debug_msg += self._reset_color\n self.logger.debug(debug_msg)",
"def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))",
"def error(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))",
"def print_warn(*args):\n print(CYELLOW2 + str(*args) + CEND)"
]
| [
"0.68145615",
"0.6723713",
"0.66837156",
"0.64507365",
"0.6412046",
"0.64085376",
"0.6315633",
"0.6304557",
"0.62818974",
"0.61877924",
"0.6137318",
"0.6117421",
"0.6116814",
"0.6102645",
"0.60926074",
"0.6007336",
"0.60038733",
"0.59981924",
"0.5976492",
"0.5952483",
"0.58773273",
"0.585635",
"0.58358294",
"0.5833018",
"0.5818926",
"0.5805821",
"0.57786834",
"0.57615733",
"0.5754168",
"0.5746734"
]
| 0.7021701 | 0 |
If a tfq_simulate op is asked to simulate states given circuits acting on different numbers of qubits, the op should return a tensor padded with zeros up to the size of the largest circuit. The padding should be physically correct, such that samples taken from the padded states still match samples taken from the original circuit. | def test_simulate_state_output_padding(self, all_n_qubits):
circuit_batch = []
for n_qubits in all_n_qubits:
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch += util.random_circuit_resolver_batch(qubits, 1)[0]
tfq_results = tfq_simulate_ops.tfq_simulate_state(
util.convert_to_tensor(circuit_batch), [],
[[]] * len(circuit_batch))
# Don't use batch_util here to enforce consistent padding everywhere
# without extra tests.
sim = cirq.Simulator()
manual_padded_results = []
for circuit in circuit_batch:
result = sim.simulate(circuit)
wf = result.final_state_vector
blank_state = np.ones(
(2**max(all_n_qubits)), dtype=np.complex64) * -2
blank_state[:wf.shape[0]] = wf
manual_padded_results.append(blank_state)
self.assertAllClose(tfq_results, manual_padded_results, atol=1e-5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_sampling_output_padding(self, all_n_qubits, n_samples):\n op = tfq_simulate_ops.tfq_simulate_samples\n circuits = []\n expected_outputs = []\n for n_qubits in all_n_qubits:\n this_expected_output = np.zeros((n_samples, max(all_n_qubits)))\n this_expected_output[:, max(all_n_qubits) - n_qubits:] = 1\n this_expected_output[:, :max(all_n_qubits) - n_qubits] = -2\n expected_outputs.append(this_expected_output)\n circuits.append(\n cirq.Circuit(*cirq.X.on_each(\n *cirq.GridQubit.rect(1, n_qubits))))\n results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits),\n [n_samples]).numpy()\n self.assertAllClose(expected_outputs, results)",
"def _GetDefaultPaddings(self, inputs):\n return tf.zeros(\n tf.concat([tf.shape(inputs)[:-1], [1]], 0), dtype=inputs.dtype)",
"def _GetDefaultPaddings(self, inputs):\n return tf.zeros(\n tf.concat([tf.shape(inputs)[:-1], [1]], 0), dtype=inputs.dtype)",
"def test_Pad3D27():\n input_shape = (1, 1, 2, 2, 2)\n pad = 2\n mode = \"replicate\"\n res = [\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n ]\n ]\n ]\n\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def test_simulate_state_inputs(self):\n n_qubits = 5\n batch_size = 5\n symbol_names = ['alpha']\n qubits = cirq.GridQubit.rect(1, n_qubits)\n circuit_batch, resolver_batch = \\\n util.random_symbol_circuit_resolver_batch(\n qubits, symbol_names, batch_size)\n\n symbol_values_array = np.array(\n [[resolver[symbol]\n for symbol in symbol_names]\n for resolver in resolver_batch])\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'programs must be rank 1'):\n # programs tensor has the wrong shape.\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor([circuit_batch]), symbol_names,\n symbol_values_array)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_names must be rank 1'):\n # symbol_names tensor has the wrong shape.\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), np.array([symbol_names]),\n symbol_values_array)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_values must be rank 2'):\n # symbol_values tensor has the wrong shape.\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), symbol_names,\n np.array([symbol_values_array]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_values must be rank 2'):\n # symbol_values tensor has the wrong shape 2.\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array[0])\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Unparseable proto'):\n # programs tensor has the right type, but invalid value.\n tfq_simulate_ops.tfq_simulate_state(['junk'] * batch_size,\n symbol_names,\n symbol_values_array)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Could not find symbol in parameter map'):\n # symbol_names tensor has the right type, but invalid value.\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), ['junk'],\n symbol_values_array)\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # programs tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_state([1] * batch_size, symbol_names,\n symbol_values_array)\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # symbol_names tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), [1], symbol_values_array)\n\n with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):\n # symbol_values tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), symbol_names,\n [['junk']] * batch_size)\n\n with self.assertRaisesRegex(TypeError, 'missing'):\n # too few tensors.\n # pylint: disable=no-value-for-parameter\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), symbol_names)\n # pylint: enable=no-value-for-parameter\n\n # TODO (mbbrough): determine if we should allow extra arguments ?\n with self.assertRaisesRegex(TypeError, 'positional arguments'):\n # pylint: disable=too-many-function-args\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, [])\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='do not match'):\n # wrong symbol_values size.\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array[:int(batch_size * 0.5)])\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='cirq.Channel'):\n # 
attempting to use noisy circuit.\n noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))\n tfq_simulate_ops.tfq_simulate_state(\n util.convert_to_tensor([noisy_circuit for _ in circuit_batch]),\n symbol_names, symbol_values_array)",
"def pad(input, pad_size):\n if not pad_size:\n return input\n return tf.pad(input, [[0,0],[pad_size, pad_size],[pad_size, pad_size],[0,0]], 'REFLECT')",
"def _zero_state_tensors(state_size, batch_size, dtype):\n def get_state_shape(s):\n \"\"\"Combine s with batch_size to get a proper tensor shape.\"\"\"\n c = _concat(batch_size, s)\n c_static = _concat(batch_size, s, static=True)\n size = array_ops.zeros(c, dtype=dtype)\n size.set_shape(c_static)\n return size\n return nest.map_structure(get_state_shape, state_size)",
"def wrap_pad(input, size):\n M1 = tf.concat([input[:, :, -size[1]:, :], input, input[:, :, 0:size[1], :]], 2)\n M1 = tf.concat([M1[:, -size[0]:, :, :], M1, M1[:, 0:size[0], :, :]], 1)\n return M1",
"def test_Pad3D17():\n input_shape = (1, 2, 3)\n pad = [2, 1]\n mode = \"replicate\"\n res = [[[1, 1, 1, 2, 3, 3], [4, 4, 4, 5, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")",
"def test_four_qubit_random_circuit(self, device, tol):\n n_wires = 4\n dev = device(n_wires)\n dev_def = qml.device(\"default.qubit\", wires=n_wires)\n\n if dev.name == dev_def.name:\n pytest.skip(\"Device is default.qubit.\")\n\n if dev.shots is not None:\n pytest.skip(\"Device is in non-analytical mode.\")\n\n gates = [\n qml.PauliX(wires=0),\n qml.PauliY(wires=1),\n qml.PauliZ(wires=2),\n qml.S(wires=3),\n qml.T(wires=0),\n qml.RX(2.3, wires=1),\n qml.RY(1.3, wires=2),\n qml.RZ(3.3, wires=3),\n qml.Hadamard(wires=0),\n qml.Rot(0.1, 0.2, 0.3, wires=1),\n qml.CRot(0.1, 0.2, 0.3, wires=[2, 3]),\n qml.Toffoli(wires=[0, 1, 2]),\n qml.SWAP(wires=[1, 2]),\n qml.CSWAP(wires=[1, 2, 3]),\n qml.U1(1.0, wires=0),\n qml.U2(1.0, 2.0, wires=2),\n qml.U3(1.0, 2.0, 3.0, wires=3),\n qml.CRX(0.1, wires=[1, 2]),\n qml.CRY(0.2, wires=[2, 3]),\n qml.CRZ(0.3, wires=[3, 1]),\n ]\n\n layers = 3\n np.random.seed(1967)\n gates_per_layers = [np.random.permutation(gates).numpy() for _ in range(layers)]\n\n def circuit():\n \"\"\"4-qubit circuit with layers of randomly selected gates and random connections for\n multi-qubit gates.\"\"\"\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))\n\n qnode_def = qml.QNode(circuit, dev_def)\n qnode = qml.QNode(circuit, dev)\n\n assert np.allclose(qnode(), qnode_def(), atol=tol(dev.shots))",
"def pad(x, system_shape, pad_size):\n res = unpad(tf.tile(x, (1,)+(3,)*len(pad_size)),\n tuple(s-p for s, p in zip(system_shape, pad_size)))\n return res",
"def fixed_padding(inputs, kernel_size, rate=1):\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n return padded_inputs",
"def test_Pad3D18():\n input_shape = (1, 2, 3, 4)\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [1, 1, 1, 2, 3, 4, 4],\n [5, 5, 5, 6, 7, 8, 8],\n [9, 9, 9, 10, 11, 12, 12],\n [9, 9, 9, 10, 11, 12, 12],\n ],\n [\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [13, 13, 13, 14, 15, 16, 16],\n [17, 17, 17, 18, 19, 20, 20],\n [21, 21, 21, 22, 23, 24, 24],\n [21, 21, 21, 22, 23, 24, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCHW\")",
"def pad_to_match(feature, target_length, rank, constant_values):\n padding_list = []\n target_length = tf.maximum(target_length, tf.shape(feature)[1])\n for r in range(rank):\n if r == 1:\n padding_list.append([0, target_length - tf.shape(feature)[1]])\n else:\n padding_list.append([0, 0])\n return tf.pad(feature, padding_list, constant_values=constant_values,\n name=\"pad_to_match\")",
"def padding_tensor(sequences, max_length=1000000):\n # get the number of sequences\n num = len(sequences)\n # get the maximum length (clip too long sequences)\n max_len = min(max([s.shape[0] for s in sequences]), max_length)\n # define new output dimensions\n out_dims = (num, max_len, *sequences[0].shape[1:])\n # create output_tensor with new dimensionality\n out_tensor = sequences[0].data.new(*out_dims).fill_(0)\n # create new mask_tensor with the corresponding mask\n mask = sequences[0].data.new(*out_dims).fill_(0)\n # iterate over the sequences\n logger.info('Start padding breaths....')\n with tqdm(\n total=len(sequences),\n bar_format=\"{desc:<5.5}{percentage:3.0f}%|{bar:100}{r_bar}\",\n ascii=True\n ) as pbar:\n for i, tensor in enumerate(sequences):\n # get the length of the current breath\n length = min(tensor.size(0), max_len)\n # add all valid breaths\n print(tensor)\n input('before')\n out_tensor[i, :length] = tensor[:length, :]\n # for the breaths that are \"too short\" padd with last value\n out_tensor[i, length:] = 0\n print(out_tensor)\n input('after')\n # create mask\n mask[i, :length] = 1\n # update progressbar\n pbar.update(1)\n\n # return result\n return max_len, out_tensor, mask",
"def test_Pad3D26():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n [[[1, 2], [1, 2], [3, 4], [5, 6]], [[1, 2], [1, 2], [3, 4], [5, 6]], [[7, 8], [7, 8], [9, 10], [11, 12]]],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NDHWC\", data=data)",
"def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)",
"def unitary_builder(qubit_register, circuit): \n \n no_of_qubits = math.log(next(x for x in qubit_register.shape if x != 1), 2)\n qubit_ordering = []\n operations_in_slice = []\n operation_list = None\n for slice in circuit:\n for step in slice[\"operations\"]:\n qubit_ordering.extend(step[1])\n operations_in_slice.extend([step[0]])\n identity_operation_count = int(no_of_qubits - len(qubit_ordering))\n operations_in_slice.extend([qeye(2)] * identity_operation_count)\n qubit_ordering.extend([x for x in range(int(no_of_qubits)) if x not in qubit_ordering])\n operation_slice = tensor(operations_in_slice).permute(qubit_ordering)\n if operation_list is None:\n operation_list = [operation_slice]\n else:\n operation_list.extend([operation_slice])\n qubit_ordering = []\n operations_in_slice = [] \n \n circuit_unitary = reduce((lambda x, y: x * y), operation_list)\n \n return circuit_unitary",
"def call(self, inputs):\n if self._padding_mode == 'zero':\n # tf.pad interprets this as paddings per dimension. We only need to pad\n # the middle dimension which is why the other two values are pairs of 0s.\n paddings = ((0, 0), self._padding, (0, 0))\n outputs = tf.pad(inputs, paddings)\n elif self._padding_mode == 'wrap':\n outputs = tf.concat([\n inputs[:, (-self._padding[0]):, :],\n inputs,\n inputs[:, :self._padding[1], :]\n ], axis=1)\n elif self._padding_mode == 'repeat':\n outputs = tf.concat([\n tf.repeat(inputs[:, :1, :], self._padding[0], axis=1),\n inputs,\n tf.repeat(inputs[:, -1:, :], self._padding[1], axis=1)\n ], axis=1)\n else:\n raise ValueError(f'Padding mode {self._padding_mode} not supported.')\n return outputs",
"def pad_model():\n\n inputs = tf.keras.Input(shape=(10, 10, 3,))\n x = tf.keras.layers.Conv2D(16, (1, 1))(inputs)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]))\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [1, 1]]))\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]), constant_values=2)\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.pad(x, tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]), mode='SYMMETRIC')\n x = tf.keras.layers.Conv2D(8, (2, 2))(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"pad_model\")(x)\n return outputs",
"def test_simulate_expectation_inputs(self):\n n_qubits = 5\n batch_size = 5\n symbol_names = ['alpha']\n qubits = cirq.GridQubit.rect(1, n_qubits)\n circuit_batch, resolver_batch = \\\n util.random_symbol_circuit_resolver_batch(\n qubits, symbol_names, batch_size)\n\n symbol_values_array = np.array(\n [[resolver[symbol]\n for symbol in symbol_names]\n for resolver in resolver_batch])\n\n pauli_sums = util.random_pauli_sums(qubits, 3, batch_size)\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'programs must be rank 1'):\n # Circuit tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor([circuit_batch]), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_names must be rank 1.'):\n # symbol_names tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), np.array([symbol_names]),\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_values must be rank 2.'):\n # symbol_values_array tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n np.array([symbol_values_array]),\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'symbol_values must be rank 2.'):\n # symbol_values_array tensor has too few dimensions.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array[0],\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'pauli_sums must be rank 2.'):\n # pauli_sums tensor has too few dimensions.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, util.convert_to_tensor(list(pauli_sums)))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'pauli_sums must be rank 2.'):\n # pauli_sums tensor has too many dimensions.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[[x]] for x in pauli_sums]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Unparseable proto'):\n # circuit tensor has the right type but invalid values.\n tfq_simulate_ops.tfq_simulate_expectation(\n ['junk'] * batch_size, symbol_names, symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Could not find symbol in parameter map'):\n # symbol_names tensor has the right type but invalid values.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), ['junk'],\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'qubits not found in circuit'):\n # pauli_sums tensor has the right type but invalid values.\n new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)]\n new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size)\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in new_pauli_sums]))\n\n with 
self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n 'Unparseable proto'):\n # pauli_sums tensor has the right type but invalid values 2.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, [['junk']] * batch_size)\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # circuits tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_expectation(\n [1.0] * batch_size, symbol_names, symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # symbol_names tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), [0.1234],\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):\n # symbol_values tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n [['junk']] * batch_size,\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(TypeError, 'Cannot convert'):\n # pauli_sums tensor has the wrong type.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array, [[1.0]] * batch_size)\n\n with self.assertRaisesRegex(TypeError, 'missing'):\n # we are missing an argument.\n # pylint: disable=no-value-for-parameter\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array)\n # pylint: enable=no-value-for-parameter\n\n with self.assertRaisesRegex(TypeError, 'positional arguments'):\n # pylint: disable=too-many-function-args\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]), [])\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='do not match'):\n # wrong op size.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums\n ][:int(batch_size * 0.5)]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='do not match'):\n # wrong symbol_values size.\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor(circuit_batch), symbol_names,\n symbol_values_array[:int(batch_size * 0.5)],\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n with self.assertRaisesRegex(tf.errors.InvalidArgumentError,\n expected_regex='cirq.Channel'):\n # attempting to use noisy circuit.\n noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))\n tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor([noisy_circuit for _ in pauli_sums]),\n symbol_names, symbol_values_array,\n util.convert_to_tensor([[x] for x in pauli_sums]))\n\n res = tfq_simulate_ops.tfq_simulate_expectation(\n util.convert_to_tensor([cirq.Circuit() for _ in pauli_sums]),\n symbol_names, symbol_values_array.astype(np.float64),\n util.convert_to_tensor([[x] for x in pauli_sums]))\n self.assertDTypeEqual(res, np.float32)",
"def test_Pad3D16():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"replicate\"\n res = [[[1, 1, 2, 3, 3, 3], [4, 4, 5, 6, 6, 6]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")",
"def _generate_zero_filled_state(batch_size_tensor, state_size, dtype):\n if batch_size_tensor is None or dtype is None:\n raise ValueError(\n 'batch_size and dtype cannot be None while constructing initial state: '\n 'batch_size={}, dtype={}'.format(batch_size_tensor, dtype))\n\n def create_zeros(unnested_state_size):\n flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()\n init_state_size = [batch_size_tensor] + flat_dims\n return array_ops.zeros(init_state_size, dtype=dtype)\n\n if nest.is_sequence(state_size):\n return nest.map_structure(create_zeros, state_size)\n else:\n return create_zeros(state_size)",
"def test_zero_pad():\r\n # Freely assume that time is the last dimension:\r\n ts1 = np.empty((64, 64, 35, 32))\r\n NFFT = 64 \r\n zp1 = utils.zero_pad(ts1, NFFT)\r\n npt.assert_equal(zp1.shape[-1], NFFT)\r\n\r\n # Try this with something with only 1 dimension:\r\n ts2 = np.empty(64)\r\n zp2 = utils.zero_pad(ts2, NFFT)\r\n npt.assert_equal(zp2.shape[-1], NFFT)",
"def test_mismatched_starts(cell_cls):\n with tf.Graph().as_default():\n with tf.Session() as sess:\n pos_enc = positional_encoding(5, 6, dtype=tf.float64)\n in_seq = tf.get_variable('in_seq',\n shape=(3, 5, 6),\n initializer=tf.truncated_normal_initializer(),\n dtype=tf.float64)\n cell = cell_cls(pos_enc, num_layers=3, num_heads=2, hidden=24)\n _, states_1 = tf.nn.dynamic_rnn(cell, in_seq[:, :1], dtype=tf.float64)\n _, states_2 = tf.nn.dynamic_rnn(cell, in_seq[:, :2], dtype=tf.float64)\n _, states_3 = tf.nn.dynamic_rnn(cell, in_seq[:, :3], dtype=tf.float64)\n new_states = tuple(tf.stack([s2[0], s3[1], s1[2]], axis=0)\n for s1, s2, s3 in zip(states_1, states_2, states_3))\n\n full_seq, _ = tf.nn.dynamic_rnn(cell, in_seq, dtype=tf.float64)\n expected = tf.stack([full_seq[0, 2:4], full_seq[1, 3:5], full_seq[2, 1:3]], axis=0)\n\n inputs = tf.stack([in_seq[0, 2:4], in_seq[1, 3:5], in_seq[2, 1:3]], axis=0)\n actual, _ = tf.nn.dynamic_rnn(cell, inputs, initial_state=new_states)\n\n sess.run(tf.global_variables_initializer())\n\n actual, expected = sess.run((actual, expected))\n\n assert not np.isnan(actual).any()\n assert not np.isnan(expected).any()\n assert actual.shape == expected.shape\n assert np.allclose(actual, expected)",
"def _zero_state_dynamic_length(self, batch_size):\n p = self.params\n assert p.enable_value_proj, 'Value projection must be enabled.'\n\n dtype = py_utils.FPropDtype(p)\n context_len = p.left_context - 1 + p.right_context\n per_head_dim = p.hidden_dim // p.num_heads\n\n key_state = tf.zeros([batch_size, context_len, p.num_heads, per_head_dim],\n dtype)\n value_state = tf.zeros_like(key_state, dtype)\n # At the beginning, all positions are masked out.\n masks = tf.zeros([batch_size, context_len], tf.bool)\n state0 = py_utils.NestedMap(key=key_state, value=value_state, masks=masks)\n if p.right_context > 0:\n query_right = p.right_context // p.query_stride\n state0.query = tf.zeros(\n [batch_size, query_right, p.num_heads, per_head_dim], dtype)\n state0.out_masks = tf.zeros([batch_size, query_right], tf.bool)\n # This is used only if the caller of the layer uses skip_connection in\n # the layer's client code.\n state0.skip_conn_input = tf.zeros([batch_size, query_right, p.hidden_dim],\n dtype)\n return state0",
"def test_Pad3D6():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 2, 1, 0]).astype('int32')\n pad = [1, 0, 1, 2, 1, 0]\n mode = \"constant\"\n res = [\n [\n [\n [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 1, 2], [0, 3, 4], [0, 5, 6], [0, 0, 0], [0, 0, 0]],\n [[0, 0, 0], [0, 7, 8], [0, 9, 10], [0, 11, 12], [0, 0, 0], [0, 0, 0]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def _fixed_padding(inputs, kernel_size, rate=1):\n kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),\n kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]\n pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]\n pad_beg = [pad_total[0] // 2, pad_total[1] // 2]\n pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],\n [pad_beg[1], pad_end[1]], [0, 0]])\n return padded_inputs",
"def test_Pad3D25():\n input_shape = (1, 1, 2, 3, 2)\n # pad = np.array([1, 0, 1, 0, 0, 1]).astype('int32')\n pad = [1, 0, 1, 0, 0, 1]\n mode = \"replicate\"\n res = [\n [\n [\n [[1, 1, 2], [1, 1, 2], [3, 3, 4], [5, 5, 6]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n [[7, 7, 8], [7, 7, 8], [9, 9, 10], [11, 11, 12]],\n ]\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)",
"def _prepare_onnx_paddings__tensorrt(g, input, pad):\n ctx = FUNCTION_REWRITER.get_context()\n torch_version = version_parse(torch.__version__)\n if torch_version.major == 1 and torch_version.minor < 10:\n return ctx.origin_func(g, input, pad)\n # The desired order of paddings is\n # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.\n # n is the dimension of input.\n # Assume zero-dimensions in the beginning, pad the \"pad\" sequence with\n # zeros in the beginning\n pad_len = torch.onnx.symbolic_opset9.size(\n g, pad, g.op('Constant', value_t=torch.tensor([0])))\n # Set extension = [0] * (dim * 2 - len(pad))\n rank = sym_help._get_tensor_rank(input)\n if rank is None:\n rank = g.op('Size', g.op('Shape', input))\n else:\n rank = g.op('Constant', value_t=torch.tensor(rank, dtype=torch.int64))\n extension = g.op(\n 'Sub',\n g.op('Mul', rank,\n g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))),\n pad_len)\n # Concat pad with extension: paddings = [dim_n_begin, dim_n_end,\n # dim_n-1_begin, dim_n-1_end, 0, 0, ... ]\n # Currently ONNX only supports int64 type for Pad\n pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n paddings = g.op(\n 'Concat',\n pad,\n g.op(\n 'ConstantOfShape',\n extension,\n value_t=torch.tensor([0], dtype=torch.int64)),\n axis_i=0)\n # Reshape and reverse order and collate first beginnings and then ends\n # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],\n # [..., 0, dim_n-1_end, dim_n_end]]\n # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin,\n # ..., 0, dim_n - 1_end, dim_n_end]\n\n # replace original Constant-Transpose-Constant with Slices and Concat.\n paddings = torch.onnx.symbolic_opset10.flip(g, paddings, [0])\n begins = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[1], ends=[0xffff], steps=[2])\n ends = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[0], ends=[0xffff], steps=[2])\n paddings = g.op('Concat', begins, ends, axis_i=0)\n padding_c = g.op(\n 'Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n return padding_c"
]
| [
"0.6290017",
"0.5681582",
"0.5681582",
"0.5676849",
"0.5593831",
"0.5576278",
"0.5497892",
"0.5487373",
"0.5466293",
"0.540858",
"0.54031944",
"0.539102",
"0.5358094",
"0.5357061",
"0.5343899",
"0.5330886",
"0.5322747",
"0.52873987",
"0.52870905",
"0.5285144",
"0.52784127",
"0.5272989",
"0.52727515",
"0.52679753",
"0.5264918",
"0.5257962",
"0.5255796",
"0.52552617",
"0.52382576",
"0.5234522"
]
| 0.7703024 | 0 |
Check that the sampling ops pad outputs correctly | def test_sampling_output_padding(self, all_n_qubits, n_samples):
op = tfq_simulate_ops.tfq_simulate_samples
circuits = []
expected_outputs = []
for n_qubits in all_n_qubits:
this_expected_output = np.zeros((n_samples, max(all_n_qubits)))
this_expected_output[:, max(all_n_qubits) - n_qubits:] = 1
this_expected_output[:, :max(all_n_qubits) - n_qubits] = -2
expected_outputs.append(this_expected_output)
circuits.append(
cirq.Circuit(*cirq.X.on_each(
*cirq.GridQubit.rect(1, n_qubits))))
results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits),
[n_samples]).numpy()
self.assertAllClose(expected_outputs, results) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_padding_necessary(self, signal: np.array) -> bool:\n if len(signal) < self.number_expected_samples:\n return True\n else:\n return False",
"def test_pad():\n x = randtool(\"float\", -10, 10, [3, 2, 1, 2])\n pad = [1, 1, 2, 3]\n mode = \"constant\"\n value = 2.0\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.88523461, 1.99072967, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 4.45995261, 9.40579439, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 6.43138915, 0.55102135, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -3.37046541, -2.92035609, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.41939397, 1.11828761, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -6.68411074, -4.09524338, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)",
"def test_sample_outputs_range(self):\n context = {'class_label': tf.zeros((_BATCH_SIZE,), dtype=tf.int32)}\n sample_dict = self.model.sample(\n _BATCH_SIZE, max_sample_length=_MAX_SAMPLE_LENGTH_VERTS,\n context=context)\n with self.session() as sess:\n sess.run(tf.global_variables_initializer())\n sample_dict_np = sess.run(sample_dict)\n in_range = np.logical_and(\n 0 <= sample_dict_np['vertices'],\n sample_dict_np['vertices'] <= 2**_QUANTIZATION_BITS).all()\n self.assertTrue(in_range)",
"def check_sample_decode(self):\n sample_size = len(self.sample_decode.sample_decode_list)\n log_line = \"epoch={0}, global step={1}, sample size={2}\".format(self.sample_decode_info.epoch,\n self.sample_decode_info.global_step, sample_size).encode('utf-8')\n self.log_writer.write(\"{0}\\r\\n\".format(log_line))\n print(log_line)\n \n for i, sample_decode in enumerate(self.sample_decode.sample_decode_list):\n sample_input = sample_decode[\"sample_input\"]\n log_line = \"sample {0} - input: {1}\".format(i+1, sample_input).encode('utf-8')\n self.log_writer.write(\"{0}\\r\\n\".format(log_line))\n print(log_line)\n sample_output = sample_decode[\"sample_output\"]\n log_line = \"sample {0} - output: {1}\".format(i+1, sample_output).encode('utf-8')\n self.log_writer.write(\"{0}\\r\\n\".format(log_line))\n print(log_line)\n sample_reference = sample_decode[\"sample_reference\"]\n log_line = \"sample {0} - reference: {1}\".format(i+1, sample_reference).encode('utf-8')\n self.log_writer.write(\"{0}\\r\\n\".format(log_line))\n print(log_line)",
"def test_sample_from_extra_bounds_good(self):\n dim = Real(\"yolo\", \"norm\", 0, 2, low=-5, high=+5, shape=(4, 4))\n for _ in range(8):\n samples = dim.sample(8)\n for sample in samples:\n assert sample in dim",
"def test_make_sampled_format(self):\n for num_inputs in [1, 3]:\n for num_outputs in [1, 2, 4]:\n for num_time_steps in [4, 10, 12]:\n # Generate data\n # P=2 format [0, 1, 2, 3, ...]\n sample_interval = 2\n dt_system = np.random.random()\n dt_sample = sample_interval * dt_system\n outputs = np.random.random(\n (num_time_steps, num_outputs, num_inputs))\n time_steps = make_time_steps(\n num_time_steps, sample_interval)\n time_values = time_steps * dt_system\n\n # Compute using modred\n my_ERA = era.ERA()\n time_steps_computed, outputs_computed =\\\n era.make_sampled_format(time_values, outputs)\n #self.assertEqual(dt_system_computed, dt_system)\n\n # Reference values\n num_time_steps_true = (num_time_steps - 1) * 2\n time_steps_true = make_time_steps(num_time_steps_true, 1)\n outputs_true = np.zeros(\n (num_time_steps_true, num_outputs, num_inputs))\n outputs_true[::2] = outputs[:-1]\n outputs_true[1::2] = outputs[1:]\n\n # Compare values\n np.testing.assert_equal(\n time_steps_computed, time_steps_true)\n np.testing.assert_equal(outputs_computed, outputs_true)\n\n # Test that if there is a wrong time value, get an error\n time_values[num_time_steps // 2] = -1\n self.assertRaises(\n ValueError, era.make_sampled_format, time_values,\n outputs)",
"def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)",
"def check_sample_correctishness_bc01(f):\n\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, channels, rows,\n cols).astype(config.floatX) * 2. - 3.\n top_down_v = rng.randn(batch_size, channels, rows / pool_rows,\n cols / pool_cols).astype(config.floatX)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,\n theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes many\n # different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. / float(buckets)\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]), (hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[2]):\n for j in xrange(ps.shape[3]):\n for l in xrange(channels):\n p = ps[k, l, i, j]\n h = hs[k, l, i*pool_rows:(i+1)*pool_rows,\n j*pool_cols:(j+1)*pool_cols]\n assert h.shape == (pool_rows, pool_cols)\n assert p == h.max()\n assert h.sum() <= 1\n\n 
\"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"",
"def test_pad2():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)",
"def verify_psample_stats(dut, params):\n output = psample_stats(dut, params.keys())\n if not output:\n st.log(\"Observed empty output\")\n return False\n entries = filter_and_select(output, None, params)\n if not entries:\n st.log(\"PSAMPLE STATS VERIFICATION FAILED\")\n return False\n return True",
"def test_bits_per_sample(self):\n test_bits_per_sample = 24\n self.encoder._bits_per_sample = test_bits_per_sample\n self.assertEqual(self.encoder._bits_per_sample, test_bits_per_sample)",
"def test_qpu_0_shots():\n _aws_device(wires=2, shots=0)",
"def _expected_inputs():\n return 1",
"def _test_sampsize(t):\n return t.shape[1] != len(t.ids(axis='sample'))",
"def test_pad6():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 1, 3, 0, 2, 0)\n mode = \"replicate\"\n data_format = \"NDHWC\"\n res = np.array(\n [\n [\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)",
"def test_pad3():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)",
"def _VerifyValues(\n self,\n input_sizes=None,\n filter_sizes=None,\n out_backprop_sizes=None,\n strides=None,\n dilations=None,\n padding=None,\n data_format_src=\"NHWC\",\n data_format_dst=\"NHWC\",\n expected=None,\n ):\n\n total_size_1 = np.prod(input_sizes)\n total_size_2 = np.prod(out_backprop_sizes)\n x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)\n x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(\n out_backprop_sizes\n )\n strides = [1] + strides + [1]\n if dilations is not None:\n dilations = [1] + dilations + [1]\n\n expected = np.reshape(expected, filter_sizes)\n\n # Convert between data formats.\n x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src, data_format_dst)\n x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src, data_format_dst)\n input_sizes = test_utils.PermuteDimsBetweenDataFormats(\n input_sizes, data_format_src, data_format_dst\n )\n out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(\n out_backprop_sizes, data_format_src, data_format_dst\n )\n strides = test_utils.PermuteDimsBetweenDataFormats(\n strides, data_format_src, data_format_dst\n )\n if dilations is not None:\n dilations = test_utils.PermuteDimsBetweenDataFormats(\n dilations, data_format_src, data_format_dst\n )\n\n with self.session() as sess:\n t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)\n t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)\n with self.test_scope():\n tensor = gen_nn_ops.conv2d_backprop_filter(\n input=t1,\n filter_sizes=filter_sizes,\n out_backprop=t2,\n strides=strides,\n dilations=dilations,\n padding=padding,\n data_format=data_format_dst,\n )\n\n value = sess.run(tensor, {t1: x1, t2: x2})\n self.assertAllEqual(filter_sizes, value.shape)\n self.assertAllClose(expected, value, 1e-3)",
"def _VerifyValues(\n self,\n input_sizes=None,\n filter_sizes=None,\n out_backprop_sizes=None,\n strides=None,\n dilations=None,\n padding=None,\n data_format_src=\"NHWC\",\n data_format_dst=\"NHWC\",\n expected=None,\n ):\n\n total_size_1 = np.prod(filter_sizes)\n total_size_2 = np.prod(out_backprop_sizes)\n x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)\n x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(\n out_backprop_sizes\n )\n strides = [1] + strides + [1]\n if dilations is not None:\n dilations = [1] + dilations + [1]\n\n expected = np.reshape(expected, input_sizes)\n\n # Convert between data formats.\n expected = test_utils.ConvertBetweenDataFormats(\n expected, data_format_src, data_format_dst\n )\n x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src, data_format_dst)\n input_sizes = test_utils.PermuteDimsBetweenDataFormats(\n input_sizes, data_format_src, data_format_dst\n )\n out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(\n out_backprop_sizes, data_format_src, data_format_dst\n )\n strides = test_utils.PermuteDimsBetweenDataFormats(\n strides, data_format_src, data_format_dst\n )\n if dilations is not None:\n dilations = test_utils.PermuteDimsBetweenDataFormats(\n dilations, data_format_src, data_format_dst\n )\n\n with self.session() as sess:\n t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)\n t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)\n with self.test_scope():\n out = gen_nn_ops.conv2d_backprop_input(\n input_sizes=input_sizes,\n filter=t1,\n out_backprop=t2,\n strides=strides,\n dilations=dilations,\n padding=padding,\n data_format=data_format_dst,\n )\n\n value = sess.run(out, {t1: x1, t2: x2})\n self.assertAllEqual(input_sizes, value.shape)\n self.assertAllClose(expected, value, 1e-3)",
"def init_expected_outputs(data, no_labels=26):\n expected_outputs = np.zeros((data.shape[0], no_labels))\n \n for i in range(0,data.shape[0]): \n expected_outputs[i, data[i].astype(int)]=1\n\n return expected_outputs",
"def check_sample_correctishness_channelwise(f):\n\n batch_size = 27\n pool_size = 4\n n = pool_size * 21\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, n).astype(config.floatX) * 3.5 - 5.\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n z_th = T.matrix()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, pool_size, top_down_th, theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes\n # many different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. / float(buckets)\n print(pv.min(), pv.max())\n print(hv.min(), hv.max())\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n print(lower_lim, upper_lim)\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n # from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]),(hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[1]):\n p = ps[k, i]\n h = hs[k, i*pool_size:(i+1)*pool_size]\n assert h.shape == (pool_size,)\n assert p == h.max()\n assert h.sum() <= 1\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"",
"def testAllInputOptions(self):\n num_batches = 5\n num_channels = 3\n num_rows = 20\n num_cols = 30\n for pseudo_random in True, False:\n for overlapping in True, False:\n tensor_shape = (num_batches, num_rows, num_cols, num_channels)\n # random tensor with value in [-500.0, 500.0)\n rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500\n self._ValidateFractionalMaxPoolResult(\n rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,\n overlapping)",
"def test_Pad3D24():\n input_shape = (1, 2, 3, 4)\n # pad = np.array([2, 1, 2, 1]).astype('int32')\n pad = [2, 1, 2, 1]\n mode = \"replicate\"\n res = [\n [\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [9, 10, 11, 12]],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n [\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n [21, 22, 23, 24],\n [21, 22, 23, 24],\n ],\n ]\n ]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NHWC\", data=data)",
"def test_pass_dummy_scans(algo_dummy_scans, dummy_scans, expected_out):\n skip_vols = pass_dummy_scans(algo_dummy_scans, dummy_scans)\n\n assert skip_vols == expected_out",
"def test_does_not_sample_twice_ppswor(self):\n with self.assertRaises(ValueError):\n s = private_sampling.ThresholdSample(\n 1.0, private_sampling.PpsworSamplingMethod)\n s.process(\"a\", math.log(FAILURE_PROBABILITY_INVERSE, math.e))\n s.process(\"a\", 1)",
"def test_combine_nsamples_different_shapes():\n test_sample_1 = np.ones((2, 13, 21))\n test_sample_2 = np.ones((3, 13, 21))\n pytest.raises(ValueError, utils.combine_nsamples, test_sample_1, test_sample_2)",
"def _VerifyValues(\n self,\n input_sizes=None,\n filter_sizes=None,\n strides=None,\n dilations=None,\n padding=None,\n data_format_src=\"NHWC\",\n data_format_dst=\"NHWC\",\n expected=None,\n ):\n\n total_size_1 = np.prod(input_sizes)\n total_size_2 = np.prod(filter_sizes)\n x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)\n x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)\n strides = [1] + strides + [1]\n if dilations is None:\n dilations = [1, 1]\n dilations = [1] + dilations + [1]\n\n # Convert between data formats.\n expected = test_utils.ConvertBetweenDataFormats(\n expected, data_format_src, data_format_dst\n )\n x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src, data_format_dst)\n input_sizes = test_utils.PermuteDimsBetweenDataFormats(\n input_sizes, data_format_src, data_format_dst\n )\n strides = test_utils.PermuteDimsBetweenDataFormats(\n strides, data_format_src, data_format_dst\n )\n dilations = test_utils.PermuteDimsBetweenDataFormats(\n dilations, data_format_src, data_format_dst\n )\n\n with self.session() as sess:\n t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)\n t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)\n with self.test_scope():\n out = nn_ops.conv2d(\n t1,\n t2,\n strides=strides,\n padding=padding,\n data_format=data_format_dst,\n dilations=dilations,\n )\n\n value = sess.run(out, {t1: x1, t2: x2})\n self.assertAllClose(expected, value, 1e-3)",
"def check_topology(self, ninputs: \"int\", noutputs: \"int\") -> \"bool\":\n return _beamforming_swig.randomsampler_sptr_check_topology(self, ninputs, noutputs)",
"def test_output_heads_error_catching():\n output_dims_that_should_break = [[\"linear\", 2, 2, \"SAME\", \"conv\", 3, 4, \"SAME\"], [[[\"lstm\", 3], [\"gru\", 4]]],\n [[2, 8]], [-33, 33, 33, 33, 33]]\n for output_dim in output_dims_that_should_break:\n with pytest.raises(AssertionError):\n RNN(input_dim=5, layers_info=[[\"gru\", 20], [\"lstm\", 8], output_dim],\n hidden_activations=\"relu\", output_activation=\"relu\")\n output_activations_that_should_break = [\"relu\", [\"relu\"], [\"relu\", \"softmax\"]]\n for output_activation in output_activations_that_should_break:\n with pytest.raises(AssertionError):\n RNN(input_dim=5, layers_info=[[\"gru\", 20], [\"lstm\", 8], [[\"linear\", 5], [\"linear\", 2], [\"linear\", 5]]],\n hidden_activations=\"relu\", output_activation=output_activation)",
"def test_pad_8():\n paddle.disable_static()\n x = np.array([[[[1.0, 3.0], [-3.0, 1.0]]]])\n pad = [1, 1, 1, 2]\n mode = \"constant\"\n value = np.array(2.0)\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 1.0, 3.0, 2.0],\n [2.0, -3.0, 1.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ]\n ]\n ]\n )\n exp = paddle.nn.functional.pad(\n x=paddle.to_tensor(x), pad=pad, mode=mode, value=paddle.to_tensor(value), data_format=data_format\n )\n assert np.allclose(exp.numpy(), res)",
"def can_sample(self, batch_size):\n return batch_size + 1 <= self.num_in_buffer"
]
| [
"0.6232269",
"0.6190101",
"0.6067233",
"0.5857253",
"0.5819368",
"0.58028764",
"0.57961124",
"0.57630396",
"0.5727364",
"0.5590215",
"0.55863315",
"0.55732685",
"0.5556439",
"0.55498695",
"0.554983",
"0.55409205",
"0.5534093",
"0.55317795",
"0.55132616",
"0.55058515",
"0.5488409",
"0.54642373",
"0.5451638",
"0.5449678",
"0.54417825",
"0.54379797",
"0.5426063",
"0.54252255",
"0.5422706",
"0.5415554"
]
| 0.6402059 | 0 |
Returns new subLightCurve, choosing ndays with maximum RMS variation | def best_sublc(self, ndays, npoints=600, chunksize=300,
flat_order=3, **kwargs):
x_full = self.x_full
y_full = self.y_full
N = len(x_full)
cadence = np.median(x_full[1:] - x_full[:-1])
window = int(ndays / cadence)
stepsize = window//50
i1 = 0
i2 = i1 + window
max_std = 0
max_i1 = None
max_i2 = None
while i2 < N:
x = x_full[i1:i2].copy()
y = y_full[i1:i2].copy()
x, y, _ = sigma_clip(x, y, y, 5)
p = np.polyfit(x, y, flat_order)
y -= np.polyval(p, x)
std = np.std(y)
if std > max_std:
max_i1 = i1
max_i2 = i2
max_std = std
i1 += stepsize
i2 += stepsize
x, y, yerr = (x_full[max_i1:max_i2],
y_full[max_i1:max_i2],
self.yerr_full[max_i1:max_i2])
newname = self.name + '_{:.0f}d'.format(ndays)
if 'sub' not in kwargs:
kwargs['sub'] = window//npoints
return LightCurve(x, y, yerr, chunksize=chunksize,
name=newname, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sigma_high_low(rate, dt=1 * units.ns, time_window_high_low=5 * units.ns):\n def obj(sigma):\n return get_high_low_rate(sigma, dt=dt, time_window_high_low=time_window_high_low) - rate\n res = opt.brentq(obj, 0, 10)\n return res",
"def lCurve(self): \n\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n if (self.tstart is not None) and (self.tstop is not None):\n lcTab = lcTab[ (self.tstart <= lcTab['mjd']) & (lcTab['mjd'] <= self.tstop)]\n lcTab = lcTab[lcTab['flux'] != -1.] # avoid undone analyses\n\n timeMJD = lcTab['mjd']\n tref = int(np.floor( timeMJD[0] / 100.0)) * 100 # round to lowest hundred\n timeMJD -= tref\n ts = lcTab['ts']\n detect = lcTab['ts'] >= self.tsmin\n undet = lcTab['ts'] < self.tsmin\n flux = lcTab['flux'][detect]\n fluxerr = lcTab['fluxerr'][detect]\n upperl = lcTab['upperlim'][undet]\n upperl[upperl == -1.] = 0. # for when it failed\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux, upperl), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n lcplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n lcplt.figname = os.path.join(self.workpath, 'LightCurve.pdf')\n lcplt.xlabel = r'Time (MJD $-$ {})'.format(tref)\n lcplt.ylabel = [r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale))), r'TS']\n lcplt.hline = [None, self.tsmin]\n deltaY = max(np.concatenate((flux+fluxerr, upperl), axis=0)) - min(np.concatenate((flux-fluxerr, upperl), axis=0))\n lcplt.ymin = [(min(np.concatenate((flux-fluxerr, upperl-upperl*0.1), axis=0)) - 0.05*deltaY) / scale, min(ts) - 0.05*(max(ts)-min(ts))]\n lcplt.ymax = [(max(np.concatenate((flux+fluxerr, upperl), axis=0)) + 0.05*deltaY) / scale, max(ts) + 0.05*(max(ts)-min(ts))]\n deltaX = (timeMJD[-1] + lcTab['mjderr'][-1]) - (timeMJD[0] - lcTab['mjderr'][0]) \n lcplt.xmin = timeMJD[0] - lcTab['mjderr'][0] - 0.05*deltaX\n lcplt.xmax = timeMJD[-1] + lcTab['mjderr'][-1] + 0.05*deltaX\n lcplt.fill = [item for sublist in zip( timeMJD[detect]-lcTab['mjderr'][detect], timeMJD[detect]+lcTab['mjderr'][detect] ) for item in sublist]\n lcplt.shadecol= self.loran \n if len(flux) == 0:\n lcplt.mksize = [2, 2]\n lcplt.ymode = ['linear', 'linear']\n lcplt.color = ['gray', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [True, False]\n lcplt.multiplot(x = [ timeMJD[undet], timeMJD ],\n y = [ upperl/scale, ts ],\n xerr = [ lcTab['mjderr'][undet], lcTab['mjderr']],\n yerr = [ upperl/scale*0.1, None])\n else:\n lcplt.mksize = [2, 2, 2]\n lcplt.ymode = ['linear', 'linear', 'linear']\n lcplt.color = ['gray', 'black', 'black']\n lcplt.prop = [3, 1]\n lcplt.limit = [[True, False], False]\n lcplt.multiplot(x = [ [timeMJD[undet], timeMJD[detect]], timeMJD ],\n y = [ [upperl/scale, flux/scale], ts ],\n xerr = [ [lcTab['mjderr'][undet], lcTab['mjderr'][detect]], lcTab['mjderr']],\n yerr = [ [upperl/scale*0.1, fluxerr/scale], None])\n lcplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(lcplt.figname)) \n return",
"def _psp_max_time(rise, decay, rise_power):\n return rise * np.log(1 + (decay * rise_power / rise))",
"def periodicity_metric(light_curve_rms, sm_phase_rms):\n return (sm_phase_rms ** 2) / (light_curve_rms ** 2)",
"def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor",
"def AverageTrueRangeStopLoss(self, timeperiod = 14, multiplier = 2):\r\n stopLoss = ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod)\r\n \r\n plus_dm = ta.PLUS_DM(self.data.high,self.data.low, timeperiod)\r\n minus_dm = ta.MINUS_DM(self.data.high,self.data.low, timeperiod)\r\n \r\n if plus_dm > minus_dm:\r\n stopLoss = self.data.close - multiplier * stopLoss\r\n else:\r\n stopLoss = self.data.close + multiplier * stopLoss\r\n \r\n\r\n stopLoss.dropna(inplace=True) \r\n \r\n return stopLoss",
"def smoothmax(r, eps=1e-4):\n\treturn conditional(gt(r, eps), r-eps/2, conditional(lt(r, 0), 0, r**2/(2*eps)))",
"def get_keff(self, take_final=False):\n \n # find max in rec (or final value, don't force as default)\n if take_final:\n _rec_max = self.rec_curve[-1, 1]\n else:\n _rec_max = numpy.max(self.rec_curve[:, 1])\n \n _rec_min = self.rec_curve[0, 1] \n #Bit of a cheat - take the first point. Will be wrong in the case of \n #very fast recovery compared to 1st interval. But in this case, _rec_min and _rec_max \n #should be similar and caught below\n \n if _rec_min > 0.95 * _rec_max:\n print (\"No recovery because too little desensitization (fast limit)\")\n print (\"Setting k_eff = 1000\")\n self.k_eff = 1000 #We could certainly not measure a rate this fast\n \n else:\n _half_rec_amp = _rec_max - 0.5 * (_rec_max - _rec_min)\n _near_idx = (numpy.abs(self.rec_curve[:, 1] - _half_rec_amp)).argmin()\n _near_value = self.rec_curve [_near_idx, 1]\n\n #interpolate\n #must be a smarter way to combine the two possibilities?\n if _near_value > _half_rec_amp:\n #true half time was before our nearest neighbor\n _left = self.rec_curve[_near_idx - 1, 1]\n _right = self.rec_curve[_near_idx, 1]\n _tl = self.rec_curve[_near_idx - 1, 0]\n _tr = self.rec_curve[_near_idx, 0]\n #inverse of time difference scaled by normalized (point-threshold distance)\n self.k_eff = 1 / (_tr - (_tr - _tl) * float(_right - _half_rec_amp)/(_right - _left))\n\n elif _near_value < _half_rec_amp:\n #true half time was after our nearest neighbor\n _left = self.rec_curve[_near_idx, 1]\n _right = self.rec_curve[_near_idx + 1, 1]\n _tl = self.rec_curve[_near_idx, 0]\n _tr = self.rec_curve[_near_idx + 1, 0]\n #as above rearranged to approach from below.\n self.k_eff = 1 / (_tl + (_tr - _tl) * float(_half_rec_amp - _left)/(_right - _left))\n\n elif _near_value == _half_rec_amp:\n\n self.k_eff = 1 / self.rec_curve[near_hi_idx, 0]",
"def SuperTrend(df, period, multiplier, ohlc=['open', 'high', 'low', 'close']):\n\n ATR(df, period, ohlc=ohlc) \n atr = 'ATR_' + str(period) \n st = 'ST_' + str(period) + '_' + str(multiplier) \n stx = 'STX_' + str(period) + '_' + str(multiplier) \n \"\"\" \n SuperTrend Algorithm : \n BASIC UPPERBAND = (HIGH + LOW) / 2 + Multiplier * ATR \n BASIC LOWERBAND = (HIGH + LOW) / 2 - Multiplier * ATR \n FINAL UPPERBAND = IF( (Current BASICUPPERBAND < Previous FINAL UPPERBAND) or (Previous Close > Previous FINAL UPPERBAND)) \n THEN (Current BASIC UPPERBAND) ELSE Previous FINALUPPERBAND) \n FINAL LOWERBAND = IF( (Current BASIC LOWERBAND > Previous FINAL LOWERBAND) or (Previous Close < Previous FINAL LOWERBAND)) \n THEN (Current BASIC LOWERBAND) ELSE Previous FINAL LOWERBAND) \n SUPERTREND = IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close <= Current FINAL UPPERBAND)) THEN \n Current FINAL UPPERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL UPPERBAND) and (Current Close > Current FINAL UPPERBAND)) THEN \n Current FINAL LOWERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close >= Current FINAL LOWERBAND)) THEN \n Current FINAL LOWERBAND \n ELSE \n IF((Previous SUPERTREND = Previous FINAL LOWERBAND) and (Current Close < Current FINAL LOWERBAND)) THEN \n Current FINAL UPPERBAND \n \"\"\" \n # Compute basic upper and lower bands \n df['basic_ub'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 + multiplier * df[atr] \n df['basic_lb'] = (df[ohlc[1]] + df[ohlc[2]]) / 2 - multiplier * df[atr]\n\n # Compute final upper and lower bands \n df['final_ub'] = 0.00 \n df['final_lb'] = 0.00 \n for i in range(period, len(df)): \n df['final_ub'].iat[i] = df['basic_ub'].iat[i] if df['basic_ub'].iat[i] < df['final_ub'].iat[i - 1] or df['Close'].iat[i - 1] > df['final_ub'].iat[i - 1] else df['final_ub'].iat[i - 1] \n df['final_lb'].iat[i] = df['basic_lb'].iat[i] if df['basic_lb'].iat[i] > df['final_lb'].iat[i - 1] or df['Close'].iat[i - 1] < df['final_lb'].iat[i - 1] else df['final_lb'].iat[i - 1] \n # Set the Supertrend value \n df[st] = 0.00 \n for i in range(period, len(df)): \n df[st].iat[i] = df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df['Close'].iat[i] <= df['final_ub'].iat[i] else 0\n df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_ub'].iat[i - 1] and df['Close'].iat[i] > df['final_ub'].iat[i] else 0\n df['final_lb'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df['Close'].iat[i] >= df['final_lb'].iat[i] else 0\n df['final_ub'].iat[i] if df[st].iat[i - 1] == df['final_lb'].iat[i - 1] and df['Close'].iat[i] < df['final_lb'].iat[i] else 0.00 \n # Mark the trend direction up/down \n df[stx] = np.where((df[st] > 0.00), np.where((df[ohlc[3]] < df[st]), 'down', 'up'), np.NaN)\n\n # Remove basic and final bands from the columns \n df.drop(['basic_ub', 'basic_lb', 'final_ub', 'final_lb'], inplace=True, axis=1) \n df.fillna(0, inplace=True)\n\n return df",
"def lightcurve(self):\n return NGCLightCurve(self['corotid'])",
"def nrmse_range(self) -> float:\n\n return float(self.rmse() / (np.max(self.true) - np.min(self.true)))",
"def curvature_max(self):\n return 1.0 / self.radius_min",
"def substract_given_gaussian(wavelength, spectrum, centre, peak=0, sigma=0, flux=0, search_peak=False, allow_absorptions = False,\n lowlow= 20, lowhigh=10, highlow=10, highhigh = 20, \n lmin=0, lmax=0, fmin=0, fmax=0, plot=True, fcal=False, verbose = True, warnings=True): \n do_it = False\n # Check that we have the numbers!\n if peak != 0 and sigma != 0 : do_it = True\n\n if peak == 0 and flux != 0 and sigma != 0:\n #flux = peak * sigma * np.sqrt(2*np.pi)\n peak = flux / (sigma * np.sqrt(2*np.pi))\n do_it = True \n\n if sigma == 0 and flux != 0 and peak != 0 :\n #flux = peak * sigma * np.sqrt(2*np.pi)\n sigma = flux / (peak * np.sqrt(2*np.pi)) \n do_it = True \n \n if flux == 0 and sigma != 0 and peak != 0 :\n flux = peak * sigma * np.sqrt(2*np.pi)\n do_it = True\n\n if sigma != 0 and search_peak == True: do_it = True \n\n if do_it == False:\n print(\"> Error! We need data to proceed! Give at least two of [peak, sigma, flux], or sigma and force peak to f[centre]\")\n s_s = spectrum\n else:\n # Setup wavelength limits\n if lmin == 0 :\n lmin = centre-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = centre+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((spectrum[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to centre\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > centre-lowlow and w_spec[i] < centre-lowhigh) or (w_spec[i] > centre+highlow and w_spec[i] < centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > centre-lowlow and w_spec[i] < centre-lowhigh) or (w_spec[i] > centre+highlow and w_spec[i] < centre+highhigh) ) \n \n # Linear Fit to continuum \n try: \n mm,bb = np.polyfit(w_cont, f_cont, 1)\n except Exception:\n bb = np.nanmedian(spectrum)\n mm = 0.\n if verbose or warnings: \n print(\" WARNING! Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n # c_cont = mm*np.array(w_cont)+bb \n # rms continuum\n # rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n if search_peak:\n # Search for index here w_spec(index) closest to line\n try:\n min_w = np.abs(np.array(w_spec)-centre)\n mini = np.nanmin(min_w)\n peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n flux = peak * sigma * np.sqrt(2*np.pi) \n if verbose: print(\" Using peak as f[\",np.round(centre,2),\"] = \",np.round(peak,2),\" and sigma = \", np.round(sigma,2), \" flux = \",np.round(flux,2))\n except Exception:\n if verbose or warnings: print(\" Error trying to get the peak as requested wavelength is \",np.round(centre,2),\"! Ignoring this fit!\")\n peak = 0.\n flux = -0.0001\n \n no_substract = False\n if flux < 0:\n if allow_absorptions == False:\n if np.isnan(centre) == False:\n if verbose or warnings : print(\" WARNING! This is an ABSORPTION Gaussian! 
As requested, this Gaussian is NOT substracted!\")\n no_substract = True\n if no_substract == False: \n if verbose: print(\" Substracting Gaussian at {:7.1f} with peak ={:10.4f} sigma ={:6.2f} and flux ={:9.4f}\".format(centre, peak,sigma,flux))\n \n gaussian_fit = gauss(w_spec, centre, peak, sigma)\n \n \n index=0\n s_s=np.zeros_like(spectrum)\n for wave in range(len(wavelength)):\n s_s[wave]=spectrum[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n if plot: \n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at line\n plt.axvline(x=centre, color='k', linestyle='-', alpha=0.8)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(centre+highlow, centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(centre-lowlow, centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical lines to emission line\n #plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n #plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n #plt.plot(w_spec, residuals, 'k')\n #plt.title('Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show() \n plt.close()\n \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,spectrum, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n else:\n s_s = spectrum\n return s_s",
"def scale(curve):\n return curve/rmsd(curve)",
"def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n 
gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb",
"def get_most_volatile(prices):\n # TODO: Fill in this function.\n #I have tried to select the specific column and then apply the standard deviation to \n # check the volatility to a column to see how it works.\n \n \n price_modified=prices.groupby(prices['ticker'])\n # print(price_modified.price.rolling(2).std())",
"def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n Qth = np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id])\n f_hat = self.mu[bandit.id]#computing moving_average here \n estimates.append(max(Qth, f_hat))\n return self.bandits[np.argmax(estimates)]",
"def lightCurve(self, time, filters):\n\n lcMags = np.zeros(time.size, dtype=float)\n\n rise = np.where(time <= self.peakTime)\n lcMags[rise] += self.riseSlope*time[rise]-self.riseSlope*self.peakTime\n decline = np.where(time > self.peakTime)\n lcMags[decline] += self.declineSlope*(time[decline]-self.peakTime)\n\n for key in self.peaks.keys():\n fMatch = np.where(filters == key)\n lcMags[fMatch] += self.peaks[key]\n\n return lcMags",
"def __init__(self, maxsteps=None, lrate=None, eps=None, verbose=None):\r\n\r\n self.maxsteps= maxsteps\r\n self.lrate = lrate\r\n self.nochange= eps\r\n\tself.verbose = verbose",
"def integrated(self, t, exptime=30.0 / 60.0 / 24.0, resolution=100):\n\n # deal with the edge case of only a single time point being passed\n try:\n t.shape\n except AttributeError:\n t = np.array([t])\n\n # don't waste time on this if the light curve is a constant\n if self.__class__.__name__ == 'constant':\n return self.model(np.array(t))\n\n # create a high-resolution subsampled timeseries\n nudges = np.linspace(-exptime / 2.0, exptime / 2.0, resolution)\n subsampled = t.reshape(1, t.shape[0]) + nudges.reshape(nudges.shape[0], 1)\n\n # make sure the average is photon-weighted (as opposed to magnitude weighted)\n flux = 10 ** (-0.4 * self.model(subsampled))\n mag = -2.5 * np.log10(flux.mean(0))\n assert (mag.shape == t.shape)\n return mag",
"def lapse(self):\n pass",
"def rmse(rslt):\n # Antibugging\n assert (isinstance(rslt, dict))\n\n # Distribute information\n x_internal = rslt['AUX']['x_internal']\n start_internal = rslt['AUX']['init_values']\n\n # Calculate statistic\n rslt = ((x_internal - start_internal) ** 2).mean()\n\n # Antibugging\n assert (np.isfinite(rslt))\n assert (rslt > 0.0)\n\n # Finishing\n return rslt",
"def sigma2_RG(self):\n sigma = np.sqrt(self.cosmo.gs_spectral_moment(l=2,RG=self.RG))\n return sigma",
"def scale_sky_spectrum(wlm, sky_spectrum, spectra, cut_sky=4., fmax=10, fmin=1, valid_wave_min=0, valid_wave_max=0, \n fibre_list=[100,200,300,400,500,600,700,800,900], plot=True, verbose=True, warnings=True): \n \n# # Read sky lines provided by 2dFdr\n# sky_line_,flux_sky_line_ = read_table(\"sky_lines_2dfdr.dat\", [\"f\", \"f\"] )\n# # Choose those lines in the range\n# sky_line=[]\n# flux_sky_line=[]\n# valid_wave_min = 6240\n# valid_wave_max = 7355\n# for i in range(len(sky_line_)):\n# if valid_wave_min < sky_line_[i] < valid_wave_max:\n# sky_line.append(sky_line_[i])\n# flux_sky_line.append(flux_sky_line_[i])\n \n \n if valid_wave_min == 0: valid_wave_min = wlm[0]\n if valid_wave_max == 0: valid_wave_max = wlm[-1]\n \n if verbose: print(\"\\n> Identifying sky lines using cut_sky =\",cut_sky,\", allowed SKY/OBJ values = [\",fmin,\",\",fmax,\"]\")\n if verbose: print(\" Using fibres = \",fibre_list)\n\n peaks,peaks_name,peaks_rest,continuum_limits=search_peaks(wlm,sky_spectrum, plot=plot, cut=cut_sky, fmax=fmax, only_id_lines=False, verbose=False) \n\n ratio_list=[]\n valid_peaks=[]\n \n if verbose: print(\"\\n Sky line Gaussian ratio Flux ratio\")\n n_sky_lines_found=0\n for i in range(len(peaks)):\n sky_spectrum_data=fluxes(wlm,sky_spectrum, peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n \n sky_median_continuum = np.nanmedian(sky_spectrum_data[11])\n \n object_spectrum_data_gauss=[]\n object_spectrum_data_integrated=[] \n median_list=[]\n for fibre in fibre_list: \n object_spectrum_flux=fluxes(wlm, spectra[fibre], peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n object_spectrum_data_gauss.append(object_spectrum_flux[3]) # Gaussian flux is 3\n object_spectrum_data_integrated.append(object_spectrum_flux[7]) # integrated flux is 7\n median_list.append(np.nanmedian(object_spectrum_flux[11]))\n object_spectrum_data=np.nanmedian(object_spectrum_data_gauss)\n object_spectrum_data_i=np.nanmedian(object_spectrum_data_integrated)\n \n object_median_continuum=np.nanmin(median_list) \n \n if fmin < object_spectrum_data/sky_spectrum_data[3] * sky_median_continuum/object_median_continuum < fmax :\n n_sky_lines_found = n_sky_lines_found + 1\n valid_peaks.append(peaks[i])\n ratio_list.append(object_spectrum_data/sky_spectrum_data[3])\n if verbose: print(\"{:3.0f} {:5.3f} {:2.3f} {:2.3f}\".format(n_sky_lines_found,peaks[i],object_spectrum_data/sky_spectrum_data[3], object_spectrum_data_i/sky_spectrum_data[7])) \n\n\n #print \"ratio_list =\", ratio_list\n #fit = np.polyfit(valid_peaks, ratio_list, 0) # This is the same that doing an average/mean\n #fit_line = fit[0]+0*wlm\n fit_line =np.nanmedian(ratio_list) # We just do a median\n #fit_line = fit[1]+fit[0]*wlm\n #fit_line = fit[2]+fit[1]*wlm+fit[0]*wlm**2\n #fit_line = fit[3]+fit[2]*wlm+fit[1]*wlm**2+fit[0]*wlm**3\n \n \n if plot:\n plt.plot(valid_peaks,ratio_list,\"+\")\n #plt.plot(wlm,fit_line)\n plt.axhline(y=fit_line, color='k', linestyle='--')\n plt.xlim(valid_wave_min-10, valid_wave_max+10) \n #if len(ratio_list) > 0:\n plt.ylim(np.nanmin(ratio_list)-0.2,np.nanmax(ratio_list)+0.2)\n plt.title(\"Scaling sky spectrum to object spectra\")\n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n plt.ylabel(\"OBJECT / SKY\")\n plt.minorticks_on()\n plt.show()\n plt.close()\n \n if verbose: print(\" Using this fit to scale sky spectrum to object, the median value is \",np.round(fit_line,3),\"...\") \n \n sky_corrected = sky_spectrum * fit_line\n\n# plt.plot(wlm,sky_spectrum, 
\"r\", alpha=0.3)\n# plt.plot(wlm,sky_corrected, \"g\", alpha=0.3)\n# plt.show()\n# plt.close()\n \n return sky_corrected, np.round(fit_line,3)",
"def get_sigma_high_low_simulated(rate, simulation_name=\"NTU+cheb_1\"):\n def obj(sigma):\n return get_high_low_rate_simulated(sigma, simulation_name) - rate\n res = opt.brentq(obj, 0, 10)\n return res",
"def _calc_sigma(self):\n if self.data is None:\n self._initial_blur()\n previous = self.init_sigma\n incr = 0\n self.sigmas = [(previous, incr)]\n for i in range(1, self.scale_per_octave + 3):\n sigma_abs = self.init_sigma * (self.dest_sigma / self.init_sigma) ** (1.0 * i / (self.scale_per_octave))\n increase = previous * sqrt((self.dest_sigma / self.init_sigma) ** (2.0 / self.scale_per_octave) - 1.0)\n self.sigmas.append((sigma_abs, increase))\n previous = sigma_abs\n logger.debug(\"Sigma= %s\" % self.sigmas)",
"def eady_growth_rate(data):\n N2 = ixr.brunt_vaisala(data)\n f = 2.0*omega*xruf.sin(xruf.deg2rad(data.lat))\n\n dz = ixr.domain.calculate_dz(data)\n du = ixr.domain.diff_pfull(data.ucomp, data)\n\n N = xruf.sqrt(N2.where(N2 > 0))\n\n egr = 0.31*du/dz*f/N\n return np.abs(egr)",
"def rmsd(curve):\n sum_of_squares = sum(sum(pow(curve,2)))\n return pow(sum_of_squares/curve.shape[0],0.5)",
"def sigma0_RG(self):\n sigma = np.sqrt(self.cosmo.gs_spectral_moment(l=0,RG=self.RG))\n return sigma",
"def test_variance_of_slope_sums():\n\n ticker = 'GOOG'\n main_df = pd.read_pickle(settings.settings_dict['stock_data_path'])\n\n main_df = sample_slopes.create_slope_sum(main_df)\n\n slope_sums = main_df[ticker + \"slope_sum\"]\n\n print np.mean(main_df[ticker + \"slope_sum\"])\n print np.std(main_df[ticker + \"slope_sum\"])\n\n std = pd.rolling_std(slope_sums, window=20)\n\n _, ax2 = plt.subplots()\n\n ax2.plot(slope_sums)\n ax2.plot(slope_sums + std)\n ax2.plot(slope_sums - std)\n plt.legend(['Slope_Sum ', 'Slope_Sum +1 Std', 'Slope_Sum -1 Std'])\n plt.title(ticker + ' varrience of slope sum')\n plt.show()"
]
| [
"0.5439135",
"0.52744055",
"0.5117991",
"0.5101039",
"0.5043701",
"0.5000911",
"0.49971765",
"0.49746352",
"0.4958558",
"0.49205273",
"0.48910326",
"0.48699352",
"0.48592845",
"0.48264438",
"0.48117554",
"0.48070383",
"0.48056588",
"0.4789352",
"0.47843328",
"0.47834954",
"0.47716516",
"0.47594807",
"0.4748188",
"0.4714465",
"0.47033632",
"0.470234",
"0.46997917",
"0.4679731",
"0.46688974",
"0.46661422"
]
| 0.65983933 | 0 |
Pick an agent at random, step it, bump counts. | def step(self):
self.agents[random.randint(self.get_agent_count())].step()
self.steps += 1
self.time += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()",
"def random_agent(self, state):\n\t\trndint = random.randint\n\t\treturn self.state[state][rndint(0, len(self.state[state]))]",
"def random_agent(bandit, iterations):\n\n for i in range(iterations):\n a = random.choice(bandit.actions)\n r = bandit.sample(a)\n yield a, r",
"def get_random_agent_reward(env, steps=50000):\n step = 0\n total_rew = 0\n env.reset()\n while step < steps:\n # Interact.\n act = env.action_space.sample()\n _, rew, _, done = env.step(act)\n\n # Update counters.\n total_rew += rew\n step += 1\n if done:\n env.reset()\n\n return total_rew / steps",
"def step(self, num_agent):\n # Save experience / reward\n # memory.add(state, action, reward, next_state, done)\n\n self.n_steps = (self.n_steps + 1) % UPDATE_EVERY ###\n # Learn, if enough samples are available in memory\n if len(memory) > BATCH_SIZE and self.n_steps == 0: ###\n experiences = memory.sample()\n self.learn(experiences, GAMMA, num_agent)\n \n self.n_steps += 1",
"def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))",
"def step(self):\n self.age += 1\n self.move_agent()\n self.sugar -= self.metabolism\n\n # Eat sugar\n available_sugar = self.get_sugar(self.pos).amount\n self.sugar += available_sugar\n# self.total_sugar_in_field -= available_sugar\n # Set sugar in current cell to zero\n self.get_sugar(self.pos).eat_sugar() \n \n \n \n if self.sugar == 0:\n self.model.remove_agent(self)\n \n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n \n \n if self.reproduction_and_death:\n if self.age > self.max_age: # Agent dies\n # Tax inheritance\n self.model.inheritance_tax_agent(self)\n \n if self.model.spawn_at_random:\n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n self.model.remove_agent(self) #agent dies\n \n \n else:\n #spawn new agent\n self.gen += 1\n if self.sugar != 0:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.sugar)\n else:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.model.starting_sugar)\n \n self.model.remove_agent(self) #agent dies",
"def step(self):\n #_increment timers\n for agent in self.agents:\n agent.tick()\n\n # choose agent pair\n agentA, agentB = self.choose()\n\n # interact\n agentA.step(agentB)\n agentB.step(agentA)\n\n # log results\n self.logger.log(agentA, agentB)\n\n # increment counters\n self.steps += 1\n self.time += 1",
"def simulate(agent, steps, initialize=None):\n grid, r, c = random_world()\n if initialize:\n state = initialize()\n result = 0\n for t in range(steps):\n result += score(grid)\n percept = get_percept(grid, r, c)\n if initialize:\n action, *state = agent(percept, *state)\n else:\n action = agent(percept)\n r, c = apply(grid, r, c, action)\n return result",
"def RandomVacuumAgent():\n return Agent(RandomAgentProgram(['Right',\n 'Left',\n 'Suck',\n 'NoOp']))",
"def step(self, timestep):\n (agent_id, agent_traits) = self.model.get_random_agent()\n (neighbor_id, neighbor_traits) = self.model.get_random_neighbor_for_agent(agent_id)\n\n prob = analysis.calc_probability_interaction_axelrod(agent_traits, neighbor_traits)\n\n if prob == 0.0:\n return\n elif prob == 1.0:\n return\n else:\n draw = npr.random()\n if draw < prob:\n differing_features = analysis.get_different_feature_positions_axelrod(agent_traits, neighbor_traits)\n old_agent_traits = list(agent_traits)\n if len(differing_features) == 1:\n random_feature = differing_features[0]\n else:\n rand_feature_num = npr.randint(0, len(differing_features))\n random_feature = differing_features[rand_feature_num]\n neighbor_trait = neighbor_traits[random_feature]\n agent_traits[random_feature] = neighbor_trait\n #log.debug(\"agent %s: old: %s neighbor: %s post: %s differing: %s feature: %s val: %s \", agent_id, old_agent_traits, neighbor_traits, agent_traits,differing_features, random_feature, neighbor_trait )\n self.model.set_agent_traits(agent_id, agent_traits)\n\n # track the interaction and time\n self.model.update_interactions(timestep)\n else:\n # no interaction given the random draw and probability, so just return\n #log.debug(\"no interaction\")\n return",
"def step(self, timestep):\n add_rate = self.sc.add_rate\n\n (agent_id, agent_traits) = self.model.get_random_agent()\n (neighbor_id, neighbor_traits) = self.model.get_random_neighbor_for_agent(agent_id)\n\n\n\n if agent_traits == neighbor_traits:\n return\n elif agent_traits.isdisjoint(neighbor_traits):\n return\n elif neighbor_traits.issubset(agent_traits):\n return\n else:\n prob = analysis.calc_probability_interaction_extensible(agent_traits, neighbor_traits)\n draw = npr.random()\n if draw < prob:\n neighbor_diff_traits = analysis.get_traits_differing_from_focal_extensible(agent_traits, neighbor_traits)\n #log.debug(\"neighbor_diff_traits: %s\", neighbor_diff_traits)\n neighbor_random_diff_trait = random.sample(neighbor_diff_traits, 1)\n add_draw = npr.random()\n if add_draw < add_rate:\n # we add the neighbor's trait, without replacing an existing trait\n agent_traits.add(neighbor_random_diff_trait[0])\n #log.debug(\"adding trait w/o replacement: %s\", neighbor_random_diff_trait[0])\n self.model.set_agent_traits(agent_id, agent_traits)\n else:\n # we replace an existing trait with the neighbor's trait\n focal_trait_to_replace = random.sample(agent_traits, 1)\n #log.debug(\"replacing trait %s with %s\", focal_trait_to_replace[0], neighbor_random_diff_trait[0])\n agent_traits.remove(focal_trait_to_replace[0])\n agent_traits.add(neighbor_random_diff_trait[0])\n self.model.set_agent_traits(agent_id, agent_traits)\n\n # track the interaction and time\n self.model.update_interactions(timestep)\n else:\n # no interaction given the random draw and probability, so just return\n #log.debug(\"no interaction\")\n return",
"def beatRandom(yourAgent):\n\n print(\"\")\n try:\n r = RandomPlayer()\n p = yourAgent()\n game = Board(r, p, 7, 7)\n output_b = game.copy()\n winner, move_history, termination = game.play_isolation(time_limit=1000, print_moves=True)\n print(\"\\n\", winner, \" has won. Reason: \", termination)\n # Uncomment to see game\n # print game_as_text(winner, move_history, termination, output_b)\n except NotImplementedError:\n print('CustomPlayer Test: Not Implemented')\n except:\n print('CustomPlayer Test: ERROR OCCURRED')\n print(traceback.format_exc())\n \n print()",
"def _migrate(self):\n\t\tchoice_list = [s for s in self.site.neighbors if s != self.site]\n\t\tif len(choice_list) > 0: \n\t\t\tchoosed = numpy.random.choice(choice_list)\n\t\t\tif choosed.resource > self.site.resource:\n\t\t\t\tchoosed.add_agent(self)",
"def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)",
"def RandomAgentProgram(actions):\n return lambda percept: random.choice(actions)",
"def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])",
"def take_one_step(self):\n\t\tfor i in range(len(self.agents)):\n\t\t\tself.agents[i].action(0)",
"def choose_target(self, agents):\n\n number_of_suspects = [0]*(len(agents))\n number_of_suspects_per_agent = []\n\n index = 0\n for a1 in agents:\n if not a1.is_impostor():\n for a2 in agents:\n if self.km.suspects(a1.agent_id, a2.agent_id):\n number_of_suspects[index] = number_of_suspects[index] + 1\n else:\n number_of_suspects[index] = 999999\n number_of_suspects_per_agent.append((a1.agent_id,number_of_suspects[index]))\n index = index + 1\n\n self.target = min(number_of_suspects_per_agent, key = lambda t: t[1])[0]",
"def aggressive_action(self):\n if not self.agent.done:\n if not self.opponenet.done:\n self.EV = self.opponenet.pumps + np.random.randint(1,5)\n else:\n if not self.stopCount:\n if self.opponenet.cashed:\n self.EV = self.agent.pumps + np.random.randint(1,5)\n elif self.opponenet.popped:\n self.EV = self.agent.pumps + np.random.randint(1,10)\n self.stopCount = True\n self.action_gating()",
"def _generate_random_agent(self):\n\n new_random_agent = list(self._all_waypoints)\n random.shuffle(new_random_agent)\n return tuple(new_random_agent)",
"def totem_random():\n random_head()\n random_head()\n random_head()",
"def agent_step(self, reward, state):\n prev_val= self.state[self.prevAction]\n self.state[self.prevAction]=prev_val+self.alpha*(reward-prev_val)\n val=max(self.state)\n index=self.state.index(val)\n self.prevAction=index\n i=random.uniform(0,1)\n if i < 1-self.prob:\n self.prevAction=index\n return index\n else:\n index=random.randint(0,self.num_bandits-1)\n self.prevAction=index",
"def specific_reset(self) -> None:\n self.old_velocity = 0.\n self.agent.specific_reset()\n max_dist_to_origin = 4.\n min_dist_to_origin = 2\n\n agent_pos = np.random.uniform(-max_dist_to_origin, max_dist_to_origin, 2)\n positioning_done = False\n while not positioning_done:\n agent_pos = np.random.uniform(-max_dist_to_origin,\n max_dist_to_origin, 2)\n if min_dist_to_origin <= np.linalg.norm(agent_pos) <= max_dist_to_origin:\n positioning_done = True\n\n # adjust the height of agent\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # set agent orientation in forward run direction\n y = angle2pos(self.agent.get_position(), np.zeros(3)) + np.pi / 2\n y += self.agent.init_rpy[2]\n quaternion = self.bc.getQuaternionFromEuler([0, 0, y])\n self.agent.set_orientation(quaternion)",
"def hesitant_action(self):\n if not self.agent.done:\n if not self.opponenet.done:\n self.EV = self.opponenet.pumps - np.random.randint(1,5)\n else:\n if self.opponenet.cashed:\n self.EV = self.opponenet.pumps + 1\n elif self.opponenet.popped:\n if not self.stopCount:\n if self.agent.pumps == 0:\n self.EV = np.random.randint(1,10)\n else:\n self.EV = self.agent.pumps\n self.stopCount = True\n self.action_gating()",
"def step(self, actions, agent_id=0):\n self._last_state = self._current_state\n\n # TODO\n # action = actions.discrete_actions[0]-1\n action = actions.argmax()\n\n done = 0\n if self._stage == 0: # is fixation\n if action == 0:\n reward = 0.\n else:\n reward = -1.\n self._current_state = 1\n self._stage = 1\n elif self._stage == 1: # is first stage, use prob_transition\n if action == 1 or action == 2:\n if np.random.random() < self._prob_transition[0][action-1]:\n self._current_state = 2\n else:\n self._current_state = 3\n reward = 0.\n else: # pick a next state at random\n reward = -1.\n self._current_state = np.random.random() < 0.5 and 2 or 3\n self._stage = 2\n else: # is second stage, use prob_reward\n # Given an action (arm pulled), sample reward, return\n if action == 1 or action == 2:\n current_prob_rewards = self._prob_reward[self._current_state-2]\n self._best_reward = self._max_reward*np.max(current_prob_rewards)\n thisProb = current_prob_rewards[action-1]\n if np.random.random() < thisProb:\n # print(\"give reward\")\n reward = self._max_reward\n else:\n reward = 0.0\n else:\n reward = -1.\n\n self._total_reward += reward\n self._best_total_reward += self._best_reward\n self._stage = 0\n self._current_state = 0\n self._trial += 1\n self._since_flipped += 1\n # if more than self._min_stable trials since flipping, certain chance of flipping prob rews\n if (self._since_flipped >= self._min_stable) and (np.random.random() <= self._flip_prob):\n self._randomize()\n self._since_flipped = 0\n\n\n self._last_action = np.zeros(self._num_arms)\n self._last_action[action] = 1\n # conditions to end episode\n if self._step >= self._steps_per_ep-1:\n self._state = READY_TO_END_EPISODE\n done = 1\n\n self._step += 1\n self._prev_reward = reward\n\n obs = self._current_state\n reset = done == 1. or self._step == MAX_FRAMES\n\n # print(np.array([[obs]]).shape)\n\n # print(reward, self._stage)\n return np.array([obs]), reward, done, reset",
"def test_counter_agent(self):\n config = {\n 'name': 'CounterAgent',\n 'network_params': {\n 'path': join(ROOT, 'test.gexf')\n },\n 'agent_type': 'CounterModel',\n 'states': [{'times': 10}, {'times': 20}],\n 'max_time': 2,\n 'num_trials': 1,\n 'environment_params': {\n }\n }\n s = simulation.from_config(config)\n env = s.run_simulation(dry_run=True)[0]\n assert env.get_agent(0)['times', 0] == 11\n assert env.get_agent(0)['times', 1] == 12\n assert env.get_agent(1)['times', 0] == 21\n assert env.get_agent(1)['times', 1] == 22",
"def execute(self, agent: Agent, state: SimState) -> None:\n if agent.state() is not AgentState.INFECTIVE:\n return\n\n if np.random.random() < state.remove_prob():\n if np.random.random() < state.lethality():\n agent.set_state(AgentState.DEAD)\n else:\n agent.set_state(AgentState.IMMUNE)\n else:\n agent.update_sick_days()",
"def step(self):\n try:\n self.agents.sort(key=lambda x: x.dist)\n except Exception as e:\n print(e)\n\n for agent in self.agents:\n try:\n agent.step()\n except Exception as e:\n print(e)\n\n\n # Removes agents if they reach exit\n for exit in self.model.exits:\n x, y = exit.pos[0] * 6 + 1, exit.pos[1] * 6 + 1\n if agent.node == (x, y):\n try:\n agent.saved()\n except Exception as e:\n print(e)",
"def step(self, timestep):\n\n (agent_id, agent_traits) = self.model.get_random_agent()\n (neighbor_id, neighbor_traits) = self.model.get_random_neighbor_for_agent(agent_id)\n\n prob = analysis.calc_probability_interaction_axelrod(agent_traits, neighbor_traits)\n\n if prob == 0.0:\n return\n elif prob == 1.0:\n return\n else:\n draw = npr.random()\n if draw < prob:\n differing_features = analysis.get_different_feature_positions_axelrod(agent_traits, neighbor_traits)\n old_agent_traits = list(agent_traits)\n if len(differing_features) == 1:\n random_feature = differing_features[0]\n else:\n rand_feature_num = npr.randint(0, len(differing_features))\n random_feature = differing_features[rand_feature_num]\n neighbor_trait = neighbor_traits[random_feature]\n agent_traits[random_feature] = neighbor_trait\n #log.debug(\"agent %s: old: %s neighbor: %s post: %s differing: %s feature: %s val: %s \", agent_id, old_agent_traits, neighbor_traits, agent_traits,differing_features, random_feature, neighbor_trait )\n self.model.set_agent_traits(agent_id, agent_traits)\n\n # track the interaction and time\n self.model.update_interactions(timestep)\n else:\n # no interaction given the random draw and probability, so just return\n #log.debug(\"no interaction\")\n return\n\n # now do the independent drift step\n draw2 = npr.random()\n if draw2 < self.model.simconfig.drift_rate:\n old_agent_traits = list(agent_traits)\n rand_feature_num = npr.randint(0, len(agent_traits))\n rand_trait_val = npr.randint(0, self.model.simconfig.num_traits)\n agent_traits[rand_feature_num] = rand_trait_val\n log.debug(\"drift event: old: %s new: %s\", old_agent_traits, agent_traits)\n self.model.set_agent_traits(agent_id, agent_traits)"
]
| [
"0.6958311",
"0.67038995",
"0.66255635",
"0.6525697",
"0.63827926",
"0.6380983",
"0.6380704",
"0.63505244",
"0.63434833",
"0.6333988",
"0.6211217",
"0.6080819",
"0.600359",
"0.5950171",
"0.59233344",
"0.59233344",
"0.5905067",
"0.5897467",
"0.58860046",
"0.58852065",
"0.58824897",
"0.58719957",
"0.58613294",
"0.58408767",
"0.5838779",
"0.58332884",
"0.5815256",
"0.58117354",
"0.5785019",
"0.56795484"
]
| 0.757041 | 0 |
Return agent's spilling state. | def spilling(self):
return self._spilling | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getPacmanState( self ):\n return self.data.agentStates[0].copy()",
"def get_agent_state(self):\n return self.world_state",
"def get_state(self):\n\t\treturn Job(SDK.PrlVm_GetState(self.handle)[0])",
"def get_state(self):\n return self.env.sim.get_state()",
"def get_tools_state(self):\n\t\treturn Job(SDK.PrlVm_GetToolsState(self.handle)[0])",
"def gsteady(self, Ppump):\n return(self.steadystate(Ppump)[1])",
"def lease_state(self) -> str:\n return pulumi.get(self, \"lease_state\")",
"def get_state(self):\n return self.agents, self.foods, self.viruses, self.masses, self.time",
"def state(self):\n\t\tif self._state in JOB_PS:\n\t\t\treturn JOB_PS[self._state]\n\t\telse:\n\t\t\treturn str(self._state)",
"def get_state(self):\r\n alarm = self._alarm()\r\n return alarm.state",
"def process_state(self):\n process = self._get_process()\n if not self.is_on:\n process = StateOptions.NONE\n return self._update_feature(WashDeviceFeatures.PROCESS_STATE, process)",
"def get_state(self):\n return self._env.get_state()",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")",
"def provisioning_state(self) -> str:\n return pulumi.get(self, \"provisioning_state\")"
]
| [
"0.5813141",
"0.57949114",
"0.5761483",
"0.5691754",
"0.5646033",
"0.562968",
"0.5607904",
"0.5590146",
"0.557208",
"0.5541229",
"0.5484805",
"0.54764456",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887",
"0.54454887"
]
| 0.705317 | 0 |
Agent has been selected and a grain of sand is added to it. If the cell exceeds its capacity, it adds itself to the model's spill queue. The model handles distributing the spill to adjacent cells. | def step(self):
self.grains += 1
if self.grains > self.spill_size:
print('spill -> ', self.agent_id)
self.model.spill(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def spill(self, agent):\n self.spill_list.append(agent)",
"def step(self):\n self.age += 1\n self.move_agent()\n self.sugar -= self.metabolism\n\n # Eat sugar\n available_sugar = self.get_sugar(self.pos).amount\n self.sugar += available_sugar\n# self.total_sugar_in_field -= available_sugar\n # Set sugar in current cell to zero\n self.get_sugar(self.pos).eat_sugar() \n \n \n \n if self.sugar == 0:\n self.model.remove_agent(self)\n \n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n \n \n if self.reproduction_and_death:\n if self.age > self.max_age: # Agent dies\n # Tax inheritance\n self.model.inheritance_tax_agent(self)\n \n if self.model.spawn_at_random:\n self.gen += 1\n x = self.model.random.randrange(self.model.grid.width)\n y = self.model.random.randrange(self.model.grid.height)\n new_pos = (x,y)\n \n self.model.add_agent(Consumer, new_pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.model.vision, self.model.metabolism, self.model.starting_sugar)\n self.model.remove_agent(self) #agent dies\n \n \n else:\n #spawn new agent\n self.gen += 1\n if self.sugar != 0:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.sugar)\n else:\n self.model.add_agent(Consumer, self.pos, f\"{self.unique_id.split('-')[0]}-{self.gen}\", self.gen, self.vision, self.metabolism, self.model.starting_sugar)\n \n self.model.remove_agent(self) #agent dies",
"def add_to_simulation(self,agent):\n self.agents[agent.name] = agent\n self.network.add_node(agent)\n \n #agent given a grid queue at initialization\n grid_queue = [gq for gq in self.grid_queues.values() if gq.accepts(agent)][agent.sex]\n agent.grid_queue = grid_queue.index\n self.add_to_grid_queue(agent)",
"def __init__(self, agent_id, model, spill_size):\n\n super().__init__(agent_id, model)\n self._row = agent_id[0]\n self._col = agent_id[1]\n self._spill_size = spill_size\n self._grains = 0\n self._spilling = False",
"def add_to_grid_queue(self, agent):\n self.pipes[agent.grid_queue].send(\"add\")\n self.pipes[agent.grid_queue].send(agent)",
"def eat(self):\n if self._energy < self._eat_cost:\n return\n\n cell = self._env.get_cell(self._i, self._j)\n if cell.get_stored() == 0:\n return\n cell.consume()\n self._energy = self._energy + cell.get_food_energy()\n\n self._energy = self._energy - self._eat_cost\n\n self._env.simulate()",
"def add_gear_piece(self):\n self.__num_gear_collected += 1",
"def step(self):\n # amt_selected = \\\n # int(self.population_size * self.part_selected) \n\n # spawning_pool = [] # list of dna selected for reproduction\n new_data =[]\n \n sorted_dna = sorted(self.data, \n key=lambda dna: dna.fitness_function(dna),\n reverse=True)\n \n \n \n\n # mutation\n for dna in sorted_dna:\n dna.mute(self.mutation_probability)\n\n # crossover\n while len(new_data) < \\\n self.population_size - (self.population_size % 2):\n\n d1 = copy.copy(self.pick())\n d2 = copy.copy(self.pick())\n times = 2\n for i in range(times):\n d1.crossover(d2)\n\n new_data += [d1, d2]\n\n\n\n\n\n if (self.population_size % 2) == 1:\n new_data.append(copy.deepcopy(self.pick()))\n\n assert(len(self.data) == len(new_data))\n\n for i in range(len(new_data)):\n self.data[i].data = new_data[i]",
"def add_cell(self, cell: Cell):\r\n assert isinstance(cell, Cell)\r\n assert -self.pmax <= cell.gain <= self.pmax\r\n\r\n self[cell.gain].append(cell)\r\n cell.bucket_num = cell.gain + self.pmax\r\n if cell.gain > self.max_gain:\r\n self.max_gain = cell.gain",
"def add_river_greedy(me, lm, material_dict, imgs, rounded_river,\n min_length):\n print(\" Building random river...\")\n cell_source = get_river_source(material_dict)\n if not(cell_source):\n print(\"no cell source\")\n return\n xi,yi = cell_source\n cell_source = lm.cells[xi][yi]\n path = [cell_source]\n cell_xy = cell_source\n maxn = 1000\n it = 0\n should_finish = False\n margin = 0.01\n lake_probability = 0.5\n while True:\n if it > maxn:\n break\n elif \"water\" in cell_xy.material.name.lower():\n break\n elif should_finish:\n break\n it += 1\n section_length = random.randint(2,10)\n if random.random() < 0.5:\n sign = 1\n else:\n sign = -1\n if random.random() < 0.5:\n dx, dy = sign, 0\n else:\n dx, dy = 0, sign\n## print(dx,dy,section_length)\n ################################################\n for i in range(section_length):\n if should_finish:\n break\n x = cell_xy.coord[0] + dx\n y = cell_xy.coord[1] + dy\n new_cell = lm.get_cell_at(x,y)\n if new_cell is None:\n break\n elif new_cell.h - margin > cell_xy.h:\n if cell_xy.material.name != new_cell.material.name:\n break\n elif new_cell in path:\n break\n elif new_cell.name != \"river\":\n is_valid = True\n for neigh in new_cell.get_neighbors_von_neuman():\n if neigh:\n if not(neigh is cell_xy):\n if neigh.name == \"river\":\n is_valid = False\n break\n elif \"water\" in neigh.material.name.lower():\n should_finish = True\n elif neigh in path:\n is_valid = False\n break\n if is_valid:\n cell_xy = new_cell\n path.append(new_cell)\n## print(\"OK\",dx,dy,section_length)\n else:\n break\n else:\n break\n #4) change the end to first shallow shore cell\n actual_path = []\n for cell in path:\n if cell.name == \"river\":\n break\n actual_path.append(cell)\n if \"water\" in cell.material.name.lower():\n break\n else: #LAKE ?\n next_to_water = False\n for neigh in cell.get_neighbors_von_neuman():\n if neigh:\n if \"water\" in neigh.material.name.lower():\n next_to_water = True\n break\n if next_to_water:\n break\n if len(actual_path) < min_length:\n return\n if actual_path[0].material.name == actual_path[-1].material.name:\n return\n elif not(\"water\" in actual_path[-1].material.name.lower()):\n if random.random() < lake_probability:\n pass\n else:\n return\n #build images of river\n objs = {}\n for delta in imgs: #imgs[(dx,dy)][zoom]\n river_obj = MapObject(me, imgs[delta][0], \"river\", 1.)\n river_obj.is_ground = True\n river_obj.lm = lm\n objs[delta] = river_obj\n #5) add river cells to map and layer\n for i,cell in enumerate(actual_path):\n prepare_cell_for_river(lm, cell)\n dx,dy,corner = get_path_orientation(i, cell, actual_path)\n if rounded_river:\n c = objs.get((dx,dy,corner))\n else:\n c = objs.get((dx,dy,None))\n if not c:\n raise Exception(\"No river object for delta\", dx, dy, corner)\n assert cell.name != \"river\"\n c = c.add_copy_on_cell(cell)\n cell.name = \"river\"\n lm.static_objects.append(c)\n\n if actual_path:\n## print(\"RIVER BUILT:\", [cell.coord for cell in actual_path])\n if not(\"water\" in actual_path[-1].material.name.lower()):\n for neigh in actual_path[-1].get_neighbors_moore():\n if neigh and neigh.name != \"river\":\n prepare_cell_for_river(lm, neigh)\n river_obj = MapObject(me, imgs[(0,0,None)][0], \"river\", 1.)\n river_obj.is_ground = True\n river_obj.lm = lm\n river_obj = river_obj.add_copy_on_cell(neigh)\n neigh.name = \"river\"\n lm.static_objects.append(river_obj)\n return objs",
"def sprout(self):\n\t\tif numpy.random.rand() < self.reproduction_prob: \n\t\t\tsprout_agents_list = self.agents_list\n\t\t\tsprout_skill = self.skill\n\t\t\tsprout_stock = self.stock * self.inheritance\n\t\t\tself.stock -= sprout_stock\n\t\t\tsprout_stock_max = self.stock_max\n\t\t\tsprout_consumption_demanded = self.consumption_demanded\n\t\t\tsprout_reproduction_prob = self.reproduction_prob\n\t\t\tsprout_inheritance = self.inheritance\n\t\t\tsprout_strategy = self.strategy\n\t\t\tsprout_site = self.site\n\t\t\tsprout_threshold_debt = self.threshold_debt\n\t\t\tsprout_threshold_death = self.threshold_death\n\t\t\tsprout_interest_rate = self.interest_rate\n\t\t\tself.agents_list.append(Agent(sprout_agents_list, \n\t\t\t\t\t\t\t\t\t\tsprout_skill, \n\t\t\t\t\t\t\t\t\t\tsprout_stock, \n\t\t\t\t\t\t\t\t\t\tsprout_stock_max, \n\t\t\t\t\t\t\t\t\t\tsprout_consumption_demanded, \n\t\t\t\t\t\t\t\t\t\tsprout_reproduction_prob,\n\t\t\t\t\t\t\t\t\t\tsprout_inheritance, \n\t\t\t\t\t\t\t\t\t\tsprout_strategy,\n\t\t\t\t\t\t\t\t\t\tsprout_site, \n\t\t\t\t\t\t\t\t\t\tsprout_threshold_debt, \n\t\t\t\t\t\t\t\t\t\tsprout_threshold_death,\n\t\t\t\t\t\t\t\t\t\tsprout_interest_rate))",
"def place_building(self, building):\n if self.environment.grid.is_cell_empty(building.pos):\n self.environment.grid.place_agent(building, building.pos)\n self.environment.agents['residences'].append(building)\n else:\n try:\n self.available_cells.remove(building.pos)\n except:\n pass",
"def grow(self):\n if self.water_level == 0:\n self.plant += FOOD_GROWTH * random()\n if self.plant > LEVEL_MAX:\n self.plant = LEVEL_MAX",
"def grow(self):\n self.starve = 500 # useful to avoid looping AI snakes (they die younger -> bad fitness)\n self.body.append(self.old_tail) # that's why I keep old_tail",
"def __init__(\n self,\n width=20,\n height=20,\n initial_sheep=100,\n initial_wolves=50,\n sheep_reproduce=0.04,\n wolf_reproduce=0.05,\n wolf_gain_from_food=20,\n grass=False,\n grass_regrowth_time=30,\n sheep_gain_from_food=4,\n ):\n super().__init__()\n # Set parameters\n self.width = width\n self.height = height\n self.initial_sheep = initial_sheep\n self.initial_wolves = initial_wolves\n self.sheep_reproduce = sheep_reproduce\n self.wolf_reproduce = wolf_reproduce\n self.wolf_gain_from_food = wolf_gain_from_food\n self.grass = grass\n self.grass_regrowth_time = grass_regrowth_time\n self.sheep_gain_from_food = sheep_gain_from_food\n\n self.schedule = RandomActivationByTypeFiltered(self)\n self.grid = mesa.space.MultiGrid(self.width, self.height, torus=True)\n self.datacollector = mesa.DataCollector(\n {\n \"Wolves\": lambda m: m.schedule.get_type_count(Wolf),\n \"Sheep\": lambda m: m.schedule.get_type_count(Sheep),\n \"Grass\": lambda m: m.schedule.get_type_count(\n GrassPatch, lambda x: x.fully_grown\n ),\n }\n )\n\n # Create sheep:\n for i in range(self.initial_sheep):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n energy = self.random.randrange(2 * self.sheep_gain_from_food)\n sheep = Sheep(self.next_id(), (x, y), self, True, energy)\n self.grid.place_agent(sheep, (x, y))\n self.schedule.add(sheep)\n\n # Create wolves\n for i in range(self.initial_wolves):\n x = self.random.randrange(self.width)\n y = self.random.randrange(self.height)\n energy = self.random.randrange(2 * self.wolf_gain_from_food)\n wolf = Wolf(self.next_id(), (x, y), self, True, energy)\n self.grid.place_agent(wolf, (x, y))\n self.schedule.add(wolf)\n\n # Create grass patches\n if self.grass:\n for agent, (x, y) in self.grid.coord_iter():\n fully_grown = self.random.choice([True, False])\n\n if fully_grown:\n countdown = self.grass_regrowth_time\n else:\n countdown = self.random.randrange(self.grass_regrowth_time)\n\n patch = GrassPatch(self.next_id(), (x, y), self, fully_grown, countdown)\n self.grid.place_agent(patch, (x, y))\n self.schedule.add(patch)\n\n self.running = True\n self.datacollector.collect(self)",
"def sing(self):\n if self._energy < self._sing_cost:\n return\n\n self._energy = self._energy - self._sing_cost\n self._env.simulate()",
"def GAStep(self):\n\n self.updateMatingPool()\n self.newGeneration()",
"def fill_up(self):\n self.fuel = self.gas_tank_size",
"def inc_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain += 1\r\n cell.yank()",
"def forage(self):\n if self._energy < self._forage_cost:\n return\n\n cell = self._env.get_cell(self._i, self._j)\n if cell.get_newly() > 0:\n cell.store(1)\n cell.decrease_newly(1)\n\n self._energy = self._energy - self._forage_cost\n self._env.simulate()",
"def build_grains(self):\n\t\ttime = datetime.datetime.now()\n\t\tif self.probability == 0:\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state != 0 :\n\t\t\t\t\tcontinue\n\t\t\t\telif self.check_empty_neighbours(cell):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\t\n\t\t\t\t\tneighbours = self.get_neighbours(cell)\n\t\t\t\t\tgrains = [0 for i in range(self.grains)]\n\t\t\t\t\tfor i in range(1,self.grains+1):\n\t\t\t\t\t\tfor neighbour in neighbours:\n\t\t\t\t\t\t\tif neighbour.state == i and neighbour.timestamp < time:\n\t\t\t\t\t\t\t\tgrains[i] = grains[i] + 1\n\t\t\t\t\tif grains == [0 for i in range(self.grains)]:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tnew_grain = 0\n\t\t\t\t\tfor i in range(self.grains):\n\t\t\t\t\t\tif grains[i] >= new_grain:\n\t\t\t\t\t\t\tnew_grain = i\n\t\t\t\t\tcell.change_state(time, new_grain)\n\t\t\t\t\tself.empty_cells = self.empty_cells - 1\n\t\telse:\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state != 0 :\n\t\t\t\t\tcontinue\n\t\t\t\telif self.check_empty_neighbours(cell):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tneighbours = self.get_neighbours(cell)\n\t\t\t\t\tif self.decide_changing(cell,neighbours,5, time):\n\t\t\t\t\t\tneighbours = self.get_nearest_neighbours(cell)\n\t\t\t\t\t\tif self.decide_changing(cell,neighbours,3, time):\n\t\t\t\t\t\t\tneighbours = self.get_further_neighbours(cell)\n\t\t\t\t\t\t\tif self.decide_changing(cell,neighbours,3, time):\n\t\t\t\t\t\t\t\tneighbours = self.get_neighbours(cell)\n\t\t\t\t\t\t\t\tgrains = [0 for i in range(self.grains)]\n\t\t\t\t\t\t\t\tfor i in range(1,self.grains+1):\n\t\t\t\t\t\t\t\t\tfor neighbour in neighbours:\n\t\t\t\t\t\t\t\t\t\tif neighbour.state == i and neighbour.timestamp < time:\n\t\t\t\t\t\t\t\t\t\t\tgrains[i] = grains[i] + 1\n\t\t\t\t\t\t\t\tif grains == [0 for i in range(self.grains)]:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tnew_grain = 0\n\t\t\t\t\t\t\t\tfor i in range(self.grains):\n\t\t\t\t\t\t\t\t\tif grains[i] >= new_grain:\n\t\t\t\t\t\t\t\t\t\tnew_grain = i\n\t\t\t\t\t\t\t\trandom_number = random.random() * 100\n\t\t\t\t\t\t\t\tif random_number <= self.probability:\n\t\t\t\t\t\t\t\t\tcell.change_state(time, new_grain)\n\t\t\t\t\t\t\t\t\tself.empty_cells = self.empty_cells - 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcontinue",
"def distribute_waterbag(self):\n # Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n while ptclsMade < self.npart:\n ranU = 0.0\n while ranU <= 0:\n ranU = random.random()\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n trialH = np.sqrt(ranU)\n newH = self.emit*trialH\n y0 = np.sqrt(newH)\n #self.emittance = newH\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n #xMax = yMax\n\n trialValue = 1e10\n while trialValue >= newH:\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n\n initialValue = trialValue\n if initialValue < newH:\n pMag = np.sqrt(2*(newH - initialValue))\n pDir = 2*np.pi* random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n else:\n print(\"Initial value generated exceeds limiting H. Sampling new value.\")\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)",
"def add(self, idx):\n self.g += graph[self.visited[-1], self.not_visited[idx]]\n self.visited.append(self.not_visited.pop(idx))\n if len(self.not_visited) > 0:\n self.h = minimum_spanning_arborescence(self)\n else:\n self.h = 0",
"def add(self, node):\n cost = self.costfn(node)\n heapq.heappush(self.heap, (cost, node))\n self.states[node.state] = node",
"def step(self):\n y = np.random.rand(self.p.lambda_, self.p.d).T\n x = self.p.m.reshape(-1, 1) * y\n f = np.array(list(map(sum, x)))\n self.p.used_budget += self.p.lambda_\n self.p.population = Population(x, y, f)\n self.p.m_old = self.p.m.copy()\n self.p.m *= np.linalg.norm(y, axis=1).reshape(-1, 1)\n self.p.adapt()\n self.p.old_population = self.p.population.copy()",
"def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()",
"def mark_mine(self, cell):\n if cell in self.cells:\n self.mines.add(cell)\n self.cells.remove(cell)\n self.count -= 1",
"def take(self):\n print(\"You fill the kettle with water.\")\n inventory.remove('kettle')\n collect('filled kettle')",
"def take(self, agent):\n\n parent_folder = self.FolderID\n\n # Check if we have inventory turned on\n if not(parent_folder and agent.settings.ENABLE_INVENTORY_MANAGEMENT):\n logger.warning(\"Inventory not available, please enable settings.ENABLE_INVENTORY_MANAGEMENT\")\n return \n\n if not(parent_folder):\n # locate Object folder\n objects_folder = [ folder for folder in agent.inventory.folders if folder.Name == 'Objects' ]\n if objects_folder:\n parent_folder = objects_folder[0].FolderID\n else:\n logger.warning(\"Unable to locate top-level Objects folder to take item into inventory.\")\n return\n\n self.derez(agent, 4, parent_folder, uuid.uuid4(), agent.ActiveGroupID)",
"def add_guest(self, src: int, weight: float):\r\n if not self.has_guest(src):\r\n self.guests[src] = weight"
]
| [
"0.70932084",
"0.61752456",
"0.6172666",
"0.57589626",
"0.55338156",
"0.54990155",
"0.5498288",
"0.5404687",
"0.5382121",
"0.5283981",
"0.5247643",
"0.5209323",
"0.51918066",
"0.5176298",
"0.5169959",
"0.5154262",
"0.51332146",
"0.5123966",
"0.5116275",
"0.508177",
"0.5068731",
"0.50677645",
"0.5056098",
"0.5046295",
"0.4988042",
"0.4948517",
"0.49372566",
"0.4928814",
"0.4925404",
"0.49097538"
]
| 0.70183915 | 1 |
Add agent to model's spill queue. | def spill(self, agent):
self.spill_list.append(agent) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_to_grid_queue(self, agent):\n self.pipes[agent.grid_queue].send(\"add\")\n self.pipes[agent.grid_queue].send(agent)",
"def add_to_simulation(self,agent):\n self.agents[agent.name] = agent\n self.network.add_node(agent)\n \n #agent given a grid queue at initialization\n grid_queue = [gq for gq in self.grid_queues.values() if gq.accepts(agent)][agent.sex]\n agent.grid_queue = grid_queue.index\n self.add_to_grid_queue(agent)",
"def append(self, agent):\n self.agents.append(agent)",
"def add(self, agent):\n self._agents[agent.unique_id] = agent\n self.logger.add(agent)",
"def step(self):\n\n self.grains += 1\n\n if self.grains > self.spill_size:\n print('spill -> ', self.agent_id)\n self.model.spill(self)",
"def addGhostAgent(self, agent):\n self.ghostAgents.append(agent)",
"def add_transport(self, agent):\n with self.simulation_mutex:\n self.get(\"transport_agents\")[agent.name] = agent",
"def add_manager(self, agent):\n with self.simulation_mutex:\n self.get(\"manager_agents\")[agent.name] = agent",
"def _add_agent_to_graph(self, agent: mantrap.agents.base.DTAgent):\n from data import Node\n is_robot = agent.is_robot\n\n # In Trajectron each node has a certain type, which is either robot or pedestrian, an id and\n # state data. Enforce the Trajectron id to the internal ids format, to be able to query the\n # results later on.\n agent_history = agent.history\n acc_history = agent.compute_acceleration(agent_history, dt=self.dt)\n\n node_data = self._create_node_data(state_history=agent_history, accelerations=acc_history)\n node_tye = self._gt_env.NodeType.PEDESTRIAN if not is_robot else self._gt_env.NodeType.ROBOT\n node = Node(node_type=node_tye, node_id=agent.id, data=node_data, is_robot=is_robot)\n if is_robot:\n self._gt_scene.robot = node\n self._gt_scene.nodes.append(node)\n\n # Re-Create online environment with recently appended node.\n self._online_env = self.create_online_env(env=self._gt_env, scene=self._gt_scene)",
"def request_post_attention(self, agent):\n self.agents_to_settle.add(agent)",
"def add(self, state, action, reward, next_state, done):\r\n e = self.experience(state, action, reward, next_state, done)\r\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)",
"async def add(self, probes: Iterable[Probe]) -> None:\n for probe in probes:\n logger.info('probe-add: %s', probe)\n schedule = Schedule(probe, trio.current_time())\n self.index[probe] = schedule\n heapq.heappush(self.queue, schedule)\n async with self.queue_changed:\n self.queue_changed.notify()",
"def add_agent(self, environment_name, agent_name, agent_params):\n assert environment_name in self._environment_dict\n assert self._is_sweep is False or self._is_sweep is None\n self._is_sweep = False\n if agent_name in self._experiment_structure[environment_name]:\n raise AttributeError(\n f'An experiment for environment {environment_name} and builders {agent_name} already exists.'\n )\n\n environment_builder_params = self._environment_dict[environment_name]['build_params']\n\n try:\n exp = self._create_experiment(environment_name, environment_builder_params, agent_name, agent_params)\n self._experiment_structure[environment_name][agent_name] = exp\n except AttributeError as e:\n self.logger.error(\n f'Unable to create experiment for the environment {environment_name} and agent {agent_name}'\n )\n self.logger.exception(e)",
"def _push_queue(self):\n\n self.add_cons_vars(self._var_queue, sloppy=self.sloppy)\n self.add_cons_vars(self._cons_queue, sloppy = self.sloppy)\n\n if len(self._var_queue) > 0:\n self.regenerate_variables()\n if len(self._cons_queue) > 0:\n self.regenerate_constraints()\n\n self._var_queue = list()\n self._cons_queue = list()",
"def add(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n self.priorities.append(max(self.priorities, default=1)) # Save all new experiences as maximum priority",
"def add_agent(self, agent):\n\t\tif not (agent in self.agents_in_site):\n\t\t\tif (agent.site != None):\n\t\t\t\tagent.site.agents_in_site.remove(agent) \n\t\t\tself.agents_in_site.append(agent)\n\t\t\tagent.site = self",
"def add_reward(self, reward):\n self.quest_node['reward'] = reward\n self.reward = reward\n graph.push(self.quest_node)",
"def add_request_to_queue(self,request):\n self.queue.append(request)",
"def add(self, node):\n cost = self.costfn(node)\n heapq.heappush(self.heap, (cost, node))\n self.states[node.state] = node"
]
| [
"0.7249907",
"0.724359",
"0.6364773",
"0.6199888",
"0.610778",
"0.59546876",
"0.59015363",
"0.58994764",
"0.5738974",
"0.5690717",
"0.5538615",
"0.5536676",
"0.5536676",
"0.5536676",
"0.5536676",
"0.5536676",
"0.5536676",
"0.5536676",
"0.5536676",
"0.5536676",
"0.5536676",
"0.5536676",
"0.552642",
"0.54671663",
"0.5439537",
"0.53953516",
"0.53935874",
"0.52779317",
"0.5244252",
"0.51930594"
]
| 0.82388806 | 0 |
Process spill_list and advance the model one step. | def step(self):
for c in self.spill_list:
self._schedule.step() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step(self):\n\n self.grains += 1\n\n if self.grains > self.spill_size:\n print('spill -> ', self.agent_id)\n self.model.spill(self)",
"def spill(self, agent):\n self.spill_list.append(agent)",
"def step(self, memories):\n return",
"def _preprocess(self, observation_list, reward_list, done_list,\n action_list, next_observation_list) -> (numpy.array, numpy.array):\n q_value: numpy.ndarray = self.model.predict(self._observation_list_preprocessor(next_observation_list))\n\n for i, reward in enumerate(reward_list):\n if done_list[i]:\n q_value[i][action_list[i]] = reward\n else:\n q_value[i][action_list[i]] = reward + self.gamma * numpy.max(q_value[i])\n\n return self._observation_list_preprocessor(observation_list), q_value",
"def process_batch(self, batch):\n # shapes are [time, ...original dims...]\n v_global = np.stack(batch[:,0]) # [time, agents, l_state_one_agent]\n # note that *_local objects have shape\n # [time, agents, ...original dim...]\n obs_others = np.stack(batch[:,1]) # [time,agents,h,w,c] or [time, agents, obs_others]\n v_local = np.stack(batch[:,2]) # [time,agents,l]\n actions = np.stack(batch[:,3]) # [time,agents]\n reward = np.stack(batch[:,4]) # [time]\n reward_local = np.stack(batch[:,5]) # [time,agents]\n v_global_next = np.stack(batch[:,6]) # [time, agents, l_state_one_agent]\n obs_others_next = np.stack(batch[:,7]) # [time,agents,h,w,c]\n v_local_next = np.stack(batch[:,8]) # [time,agents,l]\n done = np.stack(batch[:,9]) # [time]\n goals = np.stack(batch[:,10]) # [time, agents, l_goal]\n\n batch = None\n \n n_steps = v_global.shape[0]\n \n # For all global quantities, for each time step,\n # duplicate values <n_agents> times for\n # batch processing of all agents\n reward = np.repeat(reward, self.n_agents, axis=0)\n\n # In-place reshape for *_local quantities,\n # so that one time step for one agent is considered\n # one batch entry\n if self.experiment == 'sumo':\n obs_others.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n obs_others_next.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n elif self.experiment == 'particle':\n obs_others.shape = (n_steps*self.n_agents, self.l_obs_others)\n obs_others_next.shape = (n_steps*self.n_agents, self.l_obs_others)\n v_local.shape = (n_steps*self.n_agents, self.l_obs)\n reward_local.shape = (n_steps*self.n_agents)\n v_local_next.shape = (n_steps*self.n_agents, self.l_obs)\n\n actions_1hot, actions_others_1hot = self.process_actions(n_steps, actions)\n \n return n_steps, v_global, obs_others, v_local, actions_1hot, actions_others_1hot, reward, reward_local, v_global_next, obs_others_next, v_local_next, done, goals",
"def pre_compute(self, e_list):\n\t\tpass",
"def step(self, n, dlist):\n pass",
"def process(self, sess):\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = next(self.rollout_provider)\n batch = process_rollout(rollout, gamma=self.config.discount)\n\n should_compute_summary = (self.task == 0 \n and self.local_steps % self.config.summary_every == 0)\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op, self.global_step]\n else:\n fetches = [self.train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.r: batch.r,\n self.w: batch.w,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(\n tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n self.local_steps += 1",
"def prepare_next_batch(self) -> None:\n if not (\n self._forward_succesful and self._clip_succesful and self._noise_succesful\n ):\n raise RuntimeError(\n \"An error occured during model training. The model.prepare_next_batch() \"\n \" method must be called after model.forward(), model.clip_and_accumulate() \"\n \" and model.noise_gradient().\"\n )\n for model in self.models:\n for target_param, source_param in zip(\n model.parameters(), self.wrapped_model.parameters()\n ):\n target_param.data = source_param.data\n self._steps_taken += 1\n self._forward_succesful = self._clip_succesful = self._noise_succesful = False\n if self.watchdog:\n self.watchdog.inform(self._steps_taken)",
"def forc_model(self):\n lag1_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag1_by_store')\n lag2_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag2_by_store')\n for add in self.X.address1.unique():\n add_mask = self.X.address1 == add\n foo = self.X[ add_mask ].sort_values('visit_date', ascending=False)\n top_index = foo.index[0]\n clust = int(foo.cluster.values[0])\n # get values from last visit for store\n base_input = foo[self.model_mask_cols].values[0]\n base_actual = self.y[top_index]\n lag2_val = base_input[lag1_loc]\n lag1_val = base_actual\n\n for i in range(1, self.num_periods + 1):\n model = self.model_list[clust]\n inputs = base_input\n inputs[lag1_loc] = lag1_val\n inputs[lag2_loc] = lag2_val\n \n pred = model.predict(inputs.reshape(1, -1))\n self._update_cust_table(add, i, pred)\n \n lag2_val = lag1_val\n lag1_val = pred",
"def __process_smarty_request_list(self, request_list, address_input_data ):\n assert(len(address_input_data) == self.__total_addresses_in_request_list)\n\n processed_address_list = []\n address_iterator = iter(address_input_data)\n for unprocessed_request in request_list: \n params = {}\n processed_request = self.send_request(params, unprocessed_request)\n for lookup in processed_request:\n candidates = lookup.result\n address = next(address_iterator)\n if len(candidates) == 0:\n address.is_valid = False\n # TODO: add invalid addresses to list here\n else:\n address.longitude = candidates[0].metadata.longitude\n address.latitude = candidates[0].metadata.latitude\n address.line_1 = candidates[0].delivery_line_1\n address.line_2 = candidates[0].last_line\n address.is_valid = True\n self.num_addresses_processed+=1\n processed_address_list.append(address) \n return processed_address_list",
"def prepare_data(self, train_data, **kwargs):\n data_len = len(train_data[\"done\"])\n for index in range(data_len):\n if self.multi_step == 1:\n self.buff.add(train_data[\"cur_state\"][index],\n train_data[\"action\"][index],\n train_data[\"reward\"][index],\n train_data[\"next_state\"][index],\n float(train_data[\"done\"][index])) # Add replay buffer",
"def _process(self):\n self.kwargs[\"collect\"].process_scan_form_data(self.kwargs[\"data\"])",
"def movingAverage(listOfList=None, no_of_steps=100, needReorderData=True, reduceType=1, minNoOfTotal=10,\\\n\t\t\t\tminValueForFraction=0.8):\n\tif needReorderData:\n\t\tsortData = reOrderListOfListByFirstMember(listOfList=listOfList)\n\t\tlistOfList = sortData.listOfList\n\tfirstList = sortData.listOfList[0]\n\tstep = (firstList[-1]-firstList[0])/float(no_of_steps)\n\tstepIndex2Data = {}\n\tnoOfLists = len(listOfList)\n\tfor j in xrange(len(firstList)):\n\t\tx = firstList[j]\n\t\tstepIndex = int(x/step)\t#figure which bracket/bag all the data from this column should fall into.\n\t\tif stepIndex not in stepIndex2Data:\n\t\t\tstepIndex2Data[stepIndex] = PassingData(listOfList=[])\n\t\t\tfor i in xrange(noOfLists):\n\t\t\t\tstepIndex2Data[stepIndex].listOfList.append([])\n\t\t#y = y_ls[j]\n\t\t#stepIndex2Data[stepIndex].listOfList[0].append(x)\n\t\tfor i in xrange(noOfLists):\n\t\t\tvalueAtThatList = listOfList[i][j]\n\t\t\tstepIndex2Data[stepIndex].listOfList[i].append(valueAtThatList)\n\t\n\tstepIndexList = stepIndex2Data.keys()\n\tstepIndexList.sort()\n\t\n\tfractionFunction = lambda ls: sum([a>=minValueForFraction for a in ls])/float(len(ls))\n\treduceType2Function = {1: numpy.median, 2: numpy.mean, 3: fractionFunction}\n\treduceFunction = reduceType2Function.get(reduceType, numpy.median)\n\t\n\tn_x_ls = []\n\treturnListOfList = []\n\tfor i in xrange(noOfLists):\n\t\treturnListOfList.append([])\n\t\n\tfor stepIndex in stepIndexList:\n\t\tdata = stepIndex2Data.get(stepIndex)\n\t\tsubListOfList = data.listOfList\n\t\tif len(subListOfList[0])<minNoOfTotal:\n\t\t\tcontinue\n\t\t\n\t\tfor i in xrange(noOfLists):\n\t\t\tif i==0 and reduceType==3:\n\t\t\t\t_reduceFunction = numpy.median\n\t\t\telse:\n\t\t\t\t_reduceFunction = reduceFunction\n\t\t\t\n\t\t\treduce_value = _reduceFunction(subListOfList[i])\n\t\t\treturnListOfList[i].append(reduce_value)\n\treturn PassingData(listOfList=returnListOfList)",
"def predict(self, smiles_list):\n data = list(enumerate(smiles_list))\n num_data = len(data)\n num_sub_proc = min(self.num_sub_proc, num_data)\n\n q1 = Queue()\n manager = Manager()\n return_dict = manager.dict()\n proc_master = Process(target=self.creator,\n args=(q1, data, num_sub_proc))\n proc_master.start()\n\n # create slave process\n procs = []\n for sub_id in range(0, num_sub_proc):\n proc = Process(target=self.worker, args=(q1, return_dict))\n procs.append(proc)\n proc.start()\n\n q1.close()\n q1.join_thread()\n proc_master.join()\n for proc in procs:\n proc.join()\n keys = sorted(return_dict.keys())\n\n result_dict = dict()\n docking_score_list = list()\n if self.rescoring:\n docking_re_list = list()\n\n for key in range(num_data):\n if key in keys:\n result_dict0 = return_dict[key]\n if 'docking' in result_dict0:\n docking_score = result_dict0['docking']\n else:\n docking_score = np.array([99.999], dtype=np.float32)\n\n if self.rescoring:\n if 'docking_re' in result_dict0:\n docking_re = result_dict0['docking_re']\n else:\n docking_re = np.array([99.999], dtype=np.float32)\n\n else:\n docking_score = np.array([99.999], dtype=np.float32)\n if self.rescoring:\n docking_re = np.array([99.999], dtype=np.float32)\n\n docking_score_list += [docking_score]\n if self.rescoring:\n docking_re_list += [docking_re]\n\n result_dict['docking'] = docking_score_list\n if self.rescoring:\n result_dict['docking_re'] = docking_re_list\n\n if self.use_my_module:\n self.my_class.predict(self, smiles_list, result_dict, return_dict)\n\n return result_dict",
"def entrance(datapoints, params_list) -> RecordThunkIter:\n size = len(params_list)\n effectful = zip(datapoints, params_list)\n\n def g():\n \"\"\" SIDE EFFECTFUL \"\"\"\n data, param = next(effectful)\n return ((MakeDict.make_spot_record(record, param)\n for record in data)\n if data is not None\n else iter([]))\n\n for _ in range(size):\n yield g",
"def process(self, sess):\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op, self.global_step]\n else:\n fetches = [self.train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n self.local_steps += 1",
"def _next_batch(self, loader) -> list:\n return self.mover.move(loader.__next__())",
"def Step(self, state, action, reward, next_state, done, limit=-1):\n self.Memory.addMulti(state, action, reward, next_state, done, limit)",
"def process(self, lists, subqueries):\n pass",
"def _next_batch(self, loader: CustomIterator) -> list:\n return self.mover.move(loader.__next__())",
"def process(self, sess):\n sess.run(self.p_sync) # copy weights from shared to local\n\n rollout = self.pull_batch_from_queue()\n\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.p_train_op, self.global_step]\n else:\n fetches = [self.p_train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.p_state_in[0]: batch.features[0],\n self.local_network.p_state_in[1]: batch.features[1],\n }\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n\n # Update the distctiminator\n sess = tf.get_default_session()\n self.local_steps += 1",
"def _step(\n self,\n states: np.ndarray,\n actions: List[np.ndarray],\n rewards: np.ndarray,\n next_states: np.ndarray,\n dones: np.ndarray,\n ) -> None:\n self.memory.add(states, np.concatenate(actions), rewards, next_states, dones)\n self.step_count += 1\n\n if (\n len(self.memory) > self.batch_size\n and (self.step_count % self.update_every) == 0\n ):\n self._optimize()",
"def batchstore(self, reward, next_obs):\n self.count_oa[self.current_obs, self.current_act] += 1\n self.count_oao[self.current_obs, self.current_act, next_obs] += 1\n self.reward_oa[self.current_obs, self.current_act] += reward\n \n # updating the value table, estiamting the current state-action values\n self.valQoa[self.current_obs, self.current_act]\\\n += self.alpha * ((1-self.gamma) * reward\\\n + self.gamma * np.dot(self.X[next_obs], self.valQoa[next_obs])\\\n - self.valQoa[self.current_obs, self.current_act])\n\n self.next_obs = next_obs # just for consistency checking\n \n self.ret = (1-self.gamma)*reward + self.gamma * self.ret\n self.batch_step += 1\n self.total_step += 1",
"def make_gaf_list( self, pageslips_list ):\n new_item_list = []\n pageslip_count = 0\n for item in pageslips_list:\n try:\n parser = utility_code.Parser()\n record_number = utility_code.parseRecordNumber(item)\n book_barcode = parser.parse_bookbarcode( item )\n las_delivery_stop = utility_code.parseJosiahPickupAtCode(item)\n las_customer_code = parser.parse_josiah_location_code( item )\n patron_name = utility_code.parsePatronName(item)\n patron_barcode = utility_code.parsePatronBarcode(item)\n title = parser.parse_title( item )\n las_date = utility_code.prepareLasDate()\n note = parser.parse_note( item )\n full_line = '''\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\"''' % ( record_number, book_barcode, las_delivery_stop, las_customer_code, patron_name, patron_barcode, title, las_date, note )\n new_item_list.append( full_line )\n pageslip_count = pageslip_count + 1\n if pageslip_count % 10 == 0:\n log.debug( '`%s` pageslips processed so far...' % pageslip_count )\n except Exception, e:\n subject = 'annex process pageslips problem'\n message = 'iterating through item_list; problem with item \"%s\"; exception is: %s' % ( item, unicode(repr(e)) )\n logger.error( message )\n m = Mailer( subject, message )\n m.send_email()\n log.info( '`%s` items parsed' % pageslip_count )\n log.debug( 'new_item_list, ```%s```' % pprint.pformat(new_item_list) )\n return new_item_list",
"def replay(self):\n # Start only have enough memories\n if len(self.memory) < self.train_start:\n return\n\n batch_size = min(self.batch_size, len(self.memory))\n\n # Use mini_batch, sampling form the memory\n mini_batch = random.sample(self.memory, batch_size)\n\n # Since we are suing batch, we need to collect input and target\n input_update = np.zeros((batch_size, self.input_shape[0]))\n target_update = np.zeros((batch_size, self.output_num))\n\n for i in range(batch_size):\n state, action, reward, next_state, done = mini_batch[i]\n target = self.model.predict(state)[0]\n\n # Add future discounted reward\n if not done:\n # Use target_model here, because we want to keep the weights\n # not changing in one complete game\n target[action] = (1 - ALPHA) * reward + ALPHA * \\\n (self.gamma * np.amax(self.target_model.\n predict(next_state)[0]))\n else:\n target[action] = reward\n\n # Record the info into batch collection\n input_update[i] = state\n target_update[i] = target\n\n # Update model (also use a batch)\n self.model.fit(input_update, target_update, batch_size=batch_size,\n epochs=1, verbose=0)",
"def test_skip_list_no_skip(self):\n mock_sqr = SequenceRun()\n mock_sqr.instrument_run_id = TestConstant.instrument_run_id.value\n\n mock_workflow = Workflow()\n mock_workflow.wfr_id = f\"wfr.{_rand(32)}\"\n mock_workflow.type_name = WorkflowType.BCL_CONVERT.value\n mock_workflow.end_status = WorkflowStatus.SUCCEEDED.value\n mock_workflow.sequence_run = mock_sqr\n mock_workflow.output = \"\"\n\n when(fastq_update_step).perform(...).thenReturn(\"FASTQ_UPDATE_STEP\")\n when(google_lims_update_step).perform(...).thenReturn('GOOGLE_LIMS_UPDATE_STEP')\n when(dragen_wgs_qc_step).perform(...).thenReturn('DRAGEN_WGS_QC_STEP')\n when(dragen_tso_ctdna_step).perform(...).thenReturn('DRAGEN_TSO_CTDNA_STEP')\n when(dragen_wts_step).perform(...).thenReturn('DRAGEN_WTS_STEP')\n\n skiplist = {\n 'global': [],\n 'by_run': {}\n }\n\n results = orchestrator.next_step(mock_workflow, skiplist, None)\n logger.info(results)\n\n self.assertTrue('DRAGEN_WGS_QC_STEP' in results)\n self.assertTrue('DRAGEN_TSO_CTDNA_STEP' in results)\n self.assertTrue('DRAGEN_WTS_STEP' in results)",
"def evaluate_batch(self, pipelines):",
"def forward_batch(self,batcher, phase=0):\n pass",
"def source_input(env, \r\n number, \r\n counter,\r\n generation,\r\n generation_list_come,\r\n generation_list_wait,\r\n generation_list_begin,\r\n generation_list_finish,\r\n df_simtime,\r\n generation_list_name,\r\n g1_list_name): \r\n# global g1_list_name\r\n for i in range(number):\r\n if i == 0:\r\n t = generation_list_come[i]#到达时间服从指数分布,此处的t为间隔时间\r\n else:\r\n t = generation_list_come[i] - generation_list_come[i-1]\r\n yield env.timeout(t)\r\n serve_time = np.random.choice(df_simtime['sim_time'])#得到模拟数据\r\n # print(serve_time)\r\n c = document(env, \r\n g1_list_name[i], \r\n generation, \r\n counter, \r\n time_in_fac,\r\n generation_list_begin,\r\n generation_list_wait,\r\n generation_list_finish,\r\n serve_time,\r\n generation_list_name)\r\n env.process(c)"
]
| [
"0.5933351",
"0.5655595",
"0.5262983",
"0.5020804",
"0.49819398",
"0.49550286",
"0.49170116",
"0.4878095",
"0.48729232",
"0.486313",
"0.48542383",
"0.4846436",
"0.48377305",
"0.48368984",
"0.48039612",
"0.47895807",
"0.47695318",
"0.47682866",
"0.47671297",
"0.47658867",
"0.47426534",
"0.4701569",
"0.46847516",
"0.46827808",
"0.46796256",
"0.4677893",
"0.4673661",
"0.46711242",
"0.4644671",
"0.46385142"
]
| 0.6663731 | 0 |
Returns the cost of a particular sequence of actions. If those actions include an illegal move, return 999999. | def getCostOfActions(self, actions):
if actions == None: return 999999
x, y = self.getStartState()
cost = 0
for action in actions:
# Check figure out the next state and see whether its' legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
cost += self.costFn((x, y))
return cost | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getCostOfActions(self, actions):\n if actions == None: return 999999\n x,y= self.getStartState()\n cost = 0\n for action in actions:\n # Check figure out the next state and see whether its' legal\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]: return 999999\n cost += self.costFn((x,y))\n return cost",
"def getCostOfActions(self, actions):\n if actions == None: return 999999\n x,y= self.getStartState()\n cost = 0\n for action in actions:\n # Check figure out the next state and see whether its' legal\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]: return 999999\n cost += self.costFn((x,y))\n return cost",
"def getCostOfActions(self, actions):\n if actions == None: return 999999\n initstat,goals1= self.getStartState()\n x,y=initstat\n cost = 0\n for action in actions:\n # Check figure out the next state and see whether its' legal\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]: return 999999\n cost += self.costFn((x,y))\n return cost",
"def getCostOfActions(self, actions):\n x, y = self.getStartState()[0]\n cost = 0\n for action in actions:\n # figure out the next state and see whether it's legal\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]:\n return 999999\n cost += 1\n return cost",
"def getCostOfActions(gameState, pos, actions):\n if actions == None: return 999999\n x,y = pos\n cost = 0\n print \"ACTIONS Performed: \", actions\n for action in actions:\n # Check figure out the next state and see whether its' legal\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n print \"Checking\", (x,y)\n if gameState.data.layout.walls[x][y]: print \"Wall at\", (x, y); return 999999\n cost += 1 #self.costFn((x,y))\n return cost",
"def getCostOfActions(self, actions):\n x,y= self.getStartState()[0]\n cost = 0\n for action in actions:\n # figure out the next state and see whether it's legal\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]:\n return 999999\n cost += 1\n return cost",
"def getCostOfActions(self, actions):\n x,y= self.getStartState()[0]\n cost = 0\n for action in actions:\n # figure out the next state and see whether it's legal\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]:\n return 999999\n cost += 1\n return cost",
"def getCostOfActions(self, actions):\n x,y= self.getStartState()[0]\n cost = 0\n for action in actions:\n # figure out the next state and see whether it's legal\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]:\n return 999999\n cost += 1\n return cost",
"def actionsCost(self, actions):\r\n\r\n if (actions is None):\r\n return 999999\r\n\r\n x, y = self.startingState()\r\n cost = 0\r\n\r\n for action in actions:\r\n # Check figure out the next state and see whether its' legal\r\n dx, dy = Actions.directionToVector(action)\r\n x, y = int(x + dx), int(y + dy)\r\n if (self.walls[x][y]):\r\n return 999999\r\n\r\n cost += self.costFn((x, y))\r\n\r\n return cost",
"def getCostOfActions(self, actions):\n if actions == None: return 999999\n x, y = self.startingPosition\n for action in actions:\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]: return 999999\n return len(actions)",
"def getCostOfActions(self, actions):\n if actions == None: return 999999\n x, y = self.startingPosition\n for action in actions:\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]: return 999999\n return len(actions)",
"def get_action_cost(state: tuple, action: int) -> float:\n assert len(state) == 3 and state[0] < 5 and state[1] < 4 and state[2] < 3 and action < 3\n if state[0] == 0:\n return 0.0\n if state[0] == 1 and action == 0:\n return -20.0 + 10.0 * 0.5\n return -20.0 # Penalty = 20 due to Team Number = 9",
"def getCostOfActions(self, actions):\n if actions == None: return 999999\n x,y= self.startingPosition\n for action in actions:\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]: return 999999\n return len(actions)",
"def actionsCost(self, actions):\n\n if (actions is None):\n return 999999\n\n x, y = self.startingPosition\n for action in actions:\n dx, dy = Actions.directionToVector(action)\n x, y = int(x + dx), int(y + dy)\n if self.walls[x][y]:\n return 999999\n\n return len(actions)",
"def _cost(self, action):\n return float(self.spacecraft.crashed)",
"def get_cost_of_actions_helper(actions, board, starting_point):\n total_cost = 0\n for action in actions:\n cost = action.piece.get_num_tiles()\n total_cost += cost\n return total_cost",
"def get_cost(self, action: Action) -> N:\n pass",
"def bcost(action):\r\n # An action is an (a, b, arrow) tuple; a and b are\r\n # times; arrow is a string.\r\n a, b, arrow = action\r\n return max(a, b)",
"def bcost(action):\n # An action is an (a, b, arrow) tuple; a and b are\n # times; arrow is a string.\n a, b, arrow = action\n return max(a, b)",
"def getAction(self, gameState: GameState):\n _max = float(\"-inf\")\n action = None\n for move in gameState.getLegalActions(0):\n util = minimax(self.evaluationFunction, 1, 0,\n gameState.generateSuccessor(0, move), self.depth)\n if util > _max or _max == float(\"-inf\"):\n _max = util\n action = move\n\n return action",
"def step_cost(self, state, action, result=None):\n return 1 # Override this if actions have different costs",
"def action_cost(self, action):\n\n\t\tif len(action) == 1:\n\n\t\t\t# Only 1 person is crossing\n\t\t\tpersonI = action[0]\n\t\t\treturn self.crossingTime[personI]\n\n\t\telif len(action) == 2:\n\n\t\t\t# 2 people are crossing\n\t\t\tpersonI = action[0]\n\t\t\tpersonJ = action[1]\n\t\t\tctPersonI = self.crossingTime[personI] # the Crossing Time of the first person\n\t\t\tctPersonJ = self.crossingTime[personJ] # the Crossing Time of the second person\n\t\t\treturn max(ctPersonI, ctPersonJ)",
"def path_cost(self, c, state1, action, state2):\n # print(c)\n accao, direcao = action.split()\n if accao == WALK:\n return c + 1\n elif accao == PUSH:\n return c + 1",
"def getCost(self, state, action):\n util.raiseNotDefined()",
"def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1",
"def path_cost(self, c, state1, action, state2):\n # This should probably just be 1 every state....\n return c + 1",
"def path_cost(self, c, state1, move, state2):\n # THIS WAS TAKEN DIRECTLY FROM THE AIMA code provided by the textbook\n current_cost = c\n\n return current_cost + 1",
"def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n # actions.remove(Directions.STOP)\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n for idx,a in enumerate(actions):\n baby = self.getSuccessor(gameState, a)\n qsum = [self.evaluate(baby, action) for action in baby.getLegalActions(self.index)]\n values[idx] += min(qsum) \n\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n foodLeft = len(self.getFood(gameState).asList())\n if foodLeft <= 2:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n return bestAction\n\n return random.choice(bestActions)",
"def _cost(self, action):\n raise NotImplementedError",
"def path_cost(self, c, state1, action, state2):\n\t\treturn c + 1"
]
| [
"0.7820553",
"0.7820553",
"0.78143483",
"0.78090173",
"0.7793637",
"0.778846",
"0.778846",
"0.778846",
"0.7624677",
"0.7528754",
"0.7528754",
"0.750788",
"0.7489202",
"0.7310127",
"0.71119225",
"0.7026165",
"0.6846151",
"0.6645367",
"0.6599338",
"0.65375865",
"0.6534645",
"0.65156955",
"0.6496414",
"0.6435141",
"0.64002126",
"0.64002126",
"0.63966316",
"0.6395226",
"0.6295306",
"0.6292643"
]
| 0.78454506 | 0 |
Your heuristic for the FoodSearchProblem goes here. This heuristic must be consistent to ensure correctness. First, try to come up with an admissible heuristic; almost all admissible heuristics will be consistent as well. If using A* ever finds a solution that is worse than what uniform cost search finds, your heuristic is not consistent, and probably not admissible! On the other hand, inadmissible or inconsistent heuristics may find optimal solutions, so be careful. The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a Grid (see game.py) of either True or False. You can call foodGrid.asList() to get a list of food coordinates instead. If you want access to info like walls, capsules, etc., you can query the problem. For example, problem.walls gives you a Grid of where the walls are. If you want to store information to be reused in other calls to the heuristic, there is a dictionary called problem.heuristicInfo that you can use. For example, if you only want to count the walls once and store that | def foodHeuristic(state, problem):
    position, foodGrid = state
    "*** YOUR CODE HERE ***"
    """
    My heuristic simply takes the maximum of the real (maze) distances from the state to every node that contains food.
    I tried several heuristics and this is the one that expands the fewest nodes for me, although it is not the best in running time.
    It took me a long time to realize there was already a function that computes the real distance between two nodes.
    NOTE: DO NOT RUN THIS ON MAZES LARGER THAN tinySearch. The algorithm takes a very long time.
    """
    max_distance = 0  # Initialize the maximum to 0
    for food in foodGrid.asList():  # Yields each food as a node (x, y), but only the nodes that contain food
        distance = mazeDistance(position, food, problem.startingGameState)  # Real distance from the state to a food
        if max_distance < distance:  # Keep track of the maximum
            max_distance = distance
    return max_distance
    # The following heuristic also worked, and in fact took much less time, but the autograder gave me 2/4
    # because it expanded more than 12,000 nodes.
    # return len(foodGrid.asList()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def foodHeuristic(state, problem):\n import itertools\n\n\n\n def manhattan(startPosition, targetPosition):\n xy1 = startPosition\n xy2 = targetPosition\n return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n\n position, foodGrid = state\n\n return len(foodGrid.asList())\n #\n # \"\"\"\n # The below algorithm is from:\n # https://stackoverflow.com/questions/9994913/pacman-what-kinds-of-heuristics-are-mainly-used\n #\n # Find real/manhattan distance between two currently furthest fruits in labyrinth - let's call that x.\n # Find real/manhattan distance from current Pacman position to the closer of previous two fruits - let's call that y.\n # Then, answer is just: x + y.\n # The interpretation of this x + y formula could be something like this:\n #\n # x - either way, you will have to travel this distance, at least at the end\n # y - while you are at the some of the two furthest fruits, it's better to collect\n # the food that is near to it so you don't have to go back\n # \"\"\"\n # maxFoodPairDistance = 0\n #\n # if len(foodGrid.asList()) >= 2:\n #\n # #calculate manhattan/real distance between each pair of food (all permutations in foodGrid) and find the maximum of them, and\n # #store the pair with max distance in maxFoodPair\n # for foodPair in itertools.permutations(foodGrid.asList(),2):\n # #foodPairDistance = mazeDistance(foodPair[0], foodPair[1], problem.startingGameState)\n # foodPairDistance = manhattan(foodPair[0], foodPair[1])\n # if foodPairDistance >= maxFoodPairDistance:\n # maxFoodPairDistance = foodPairDistance\n # maxFoodPair = foodPair\n #\n # #get the real distance between pacman and nearest food among the max distance food pair we get above. Using real distance instead\n # #of manhattan distance here just to \"reduce\" the number of nodes expand to get additional point. But that's a bit of a cheating\n # #because the mazeDistance function use of breadth First search - which itself is a search with nodes expansion not counted here\n # #minPacmanToFoodDistance = min([mazeDistance(position, foodPosition, problem.startingGameState) for foodPosition in maxFoodPair])\n # minPacmanToFoodDistance = min([manhattan(position, foodPosition) for foodPosition in maxFoodPair])\n #\n # #When only one food left, just return the real distance between pacman and food\n # elif len(foodGrid.asList()) == 1:\n # foodPosition = foodGrid.asList()[0]\n # #minPacmanToFoodDistance = mazeDistance(position, foodPosition, problem.startingGameState)\n # minPacmanToFoodDistance = manhattan(position, foodPosition)\n # else:\n # minPacmanToFoodDistance = 0\n #\n # return minPacmanToFoodDistance + maxFoodPairDistance",
"def foodHeuristic(state, problem):\n\n position, foodGrid = state\n\n # *** Your Code Here ***\n if len(foodGrid.asList()) == 0: # If no food, then no need to go on\n return 0\n trackHeuristic = []\n # Manhattan dist between curr node position and all foods\n # If there is food, iterate through all available foods\n for food in foodGrid.asList():\n currentHeuristic = distance.manhattan(position, food)\n trackHeuristic.append(currentHeuristic)\n return max(trackHeuristic)",
"def foodHeuristic(state, problem):\n util.raiseNotDefined()",
"def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):\n self.goal=(1,1)\n self.goals=[]\n self.walls = gameState.getWalls()\n self.startState = gameState.getPacmanPosition()\n if start != None: self.startState = start\n\n n=0\n try:\n for j in range(1, 40):\n n=j\n x=gameState.hasWall(1, j)\n except:\n n=n\n m=0\n try:\n for i in range(1, 40):\n m=i\n x=gameState.hasWall(i, 1)\n except:\n m=m\n print('maze dimension: ',m,'x',n)\n\n for i in range(1,m):\n for j in range(1,n):\n if (gameState.hasFood(i,j)):\n if(gameState.getNumFood()==1):\n self.goal=(i,j)\n else:\n x=(i,j)\n self.goals.append(x)\n\n #print('goals',self.getFoodPositions())\n self.costFn = costFn\n self.visualize = visualize\n #x=getFoodPosition(gameState)\n #print(\"food positions: \" )\n print(\"[R12] Initial position of pacman is \"+str(gameState.getPacmanPosition()))\n print(\"[R10] Number of foods is \"+str(gameState.getNumFood()))\n if(gameState.getNumFood()>1):\n print(\"[R10] Final goal positions are \", self.goals)\n else:\n print(\"[R10] Final goal position is \"+str(self.goals))\n print(\"[R11] Ghost Positions is/are \"+str(gameState.getGhostPositions()))\n print(\"[R15] has the game food? \"+str(gameState.hasFood(*goal)))\n if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):\n print('Warning: this does not look like a regular search maze')\n\n # For display purposes\n self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE",
"def betterEvaluationFunction(currentGameState):\n pos = currentGameState.getPacmanPosition() # current position\n food = currentGameState.getFood() # food grid\n legalMoves = currentGameState.getLegalActions() # possible moves\n successors = [currentGameState.generatePacmanSuccessor(action) for action in legalMoves]\n successorPos = [currentGameState.generatePacmanSuccessor(action).getPacmanPosition() for action in legalMoves] # position of possible next state\n\n sFood = [s.getNumFood() for s in successors]\n if sFood:\n avgFood = currentGameState.getNumFood() - float(sum(sFood))/len(sFood)\n else:\n avgFood = 0\n\n numFood = 0\n for s in successorPos:\n if food[s[0]][s[1]]:\n numFood += 1\n # counts food pellets around current position\n\n pellets = currentGameState.getCapsules() # positions of power pellets\n if pellets:\n pelletDistance = [util.manhattanDistance(pos, d) for d in pellets]\n closestPellet = min(pelletDistance)\n else:\n closestPellet = 0\n\n minDist = 9999\n total = 0\n n = 0\n for x in range(food.width):\n for y in range(food.height):\n if food[x][y]:\n dist = util.manhattanDistance((x,y), pos)\n total += dist\n n += 1\n if dist < minDist: minDist = dist # returns distance to closest food, want to min\n if n != 0:\n avgDist = total/n\n else:\n avgDist = 0\n\n newGhostStates = currentGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n newGhostPositions = currentGameState.getGhostPositions() # list of ghost positions\n disList = [util.manhattanDistance((x,y), pos) for x,y in newGhostPositions]\n ghostHeuristic = min(disList) # returns distance from closest ghost, want to max\n\n score = currentGameState.getScore()\n numMoves = len(legalMoves) # number of available moves\n if currentGameState.getNumFood() == 0:\n return 999 + score\n if ghostHeuristic == 0:\n return -999\n if newScaredTimes[0] > ghostHeuristic + 1: # if ate pellet, chase ghost\n return score + newScaredTimes[0] - ghostHeuristic\n if ghostHeuristic > 4: # if far away from ghost\n return score + avgFood*20 + numMoves + numFood - minDist*4 - closestPellet/2 # 10/(minDist)\n return score + ghostHeuristic**2 + numMoves/2 + avgFood*10 + numFood/2 - minDist - closestPellet/4",
"def get_heuristic(self, state):\n\n def get_manhattan_distance(coord_a, coord_b):\n \"\"\"Returns the manhattan distance between coord_a and coord_b.\"\"\"\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)\n\n \n def get_num_obstacles(coord_a, coord_b):\n \"\"\"Returns the number of obstacles (wriggler segments or walls) between\n coord_a and coord_b.\n \n This function assumes that coord_b is larger (in either/both x and y)\n than coord_a.\n \"\"\"\n obstacle_count = 0\n \n for x in range(coord_a.x, coord_b.x + 1):\n for y in range(coord_a.y, coord_b.y + 1):\n coord = Coordinate(x, y)\n if coord in self.wall_coords or coord in state:\n obstacle_count += 1\n \n return obstacle_count\n\n\n head_coord = state.wriggler_list[0].get_head()\n tail_coord = state.wriggler_list[0].get_tail()\n \n head_manhattan_distance = get_manhattan_distance(head_coord, self.goal_coord)\n tail_manhattan_distance = get_manhattan_distance(tail_coord, self.goal_coord)\n \n # Calculate and return heuristic value depending on which heuristic to use\n if self.heuristic == Heuristic.MANHATTAN_DIST:\n # Return the shortest Manhattan distance of wriggler0's tail or head to the goal\n return min(head_manhattan_distance, tail_manhattan_distance)\n \n else: # self.heuristic == Heuristic.NUM_OBSTACLES:\n # Return the number of obstacles between wriggler0's tail/head to the goal\n # The tail/head is selected based on which is closer to the goal\n if head_manhattan_distance <= tail_manhattan_distance:\n # The head is closer or the same distance away\n return get_num_obstacles(head_coord, self.goal_coord)\n \n else:\n # The tail is closer\n return get_num_obstacles(tail_coord, self.goal_coord)",
"def heuristic(state, puzzle):\n h = 0\n for i in range(puzzle.dimension):\n for j in range(puzzle.dimension):\n # (0, 0) -> 1 as value, (0, 2) -> 3 as value, etc\n value = i * puzzle.dimension + j + 1\n if value == puzzle.dimension ** 2: # value is ' '\n value = ' '\n current_position = puzzle.get_coordinates(state, value)\n goal_position = (i, j)\n h += util.manhattanDistance(current_position, goal_position)\n h /= 2\n return h",
"def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n # Useful information you can extract from a GameState (pacman.py)\n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n \n \n \"*** YOUR CODE HERE ***\"\n #the number of food in successorGameState\n numFood = newFood.count()\n #print([newGhostStates[i].configuration.pos for i in range(len(newGhostStates))])\n #the effect of the distance between pacman and normal ghost, scared ghost\n minPacGhost = newFood.height + newFood.width\n eatScaredChance_Max = 0\n eatableGhost = None\n for i in range(len(newGhostStates)):\n pacGhostDis = manhattanDistance(newPos,newGhostStates[i].configuration.pos)\n eatScaredChance = max([0,newGhostStates[i].scaredTimer-pacGhostDis])\n if pacGhostDis < minPacGhost and newGhostStates[i].scaredTimer==0:\n minPacGhost = pacGhostDis\n if eatScaredChance > eatScaredChance_Max:\n eatScaredChance_Max= eatScaredChance\n eatableGhost = newGhostStates[i].configuration.pos\n \n #print((eatScaredChance_Max,newPos,eatableGhost))\n #the secure distance is 3, after both pacman and ghost make move, pacman is still safe\n #find the most dangerous distance\n pac_Ghost_Distance = min([minPacGhost,3])\n #find the closet food to pacman\n minDistance = newFood.height * newFood.width\n manhattan_PriorityQueue = util.PriorityQueue()\n for y in range(newFood.height):\n for x in range(newFood.width):\n if newFood[x][y] == True:\n manhattan_PriorityQueue.push((x,y),manhattanDistance(newPos,(x,y)))\n for i in range(5):\n if manhattan_PriorityQueue.isEmpty():\n break\n else:\n maze = mazeDistance(newPos,manhattan_PriorityQueue.pop(),currentGameState)\n if maze < minDistance:\n minDistance = maze\n #calculate the evaluation value\n evaluation = currentGameState.getScore() + 200 * eatScaredChance_Max + 500 * pac_Ghost_Distance + (1/(minDistance))\\\n / max([numFood,1])\n #print(evaluation)\n return evaluation",
"def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n GhostLocs = successorGameState.getGhostPositions()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n capsuleLocations = successorGameState.getCapsules()\n\n \"*** YOUR CODE HERE ***\"\n \"\"\" factors: proximity to food, proximity to ghosts \n \"\"\" \n if successorGameState.isWin():\n return 10000\n if successorGameState.isLose():\n return -10000\n\n FoodDistances = []\n foodLocations = newFood.asList()\n for food in foodLocations:\n FoodDistances.append(manhattanDistance(newPos,food))\n closestFood = min(FoodDistances)\n closestFoodLocation = foodLocations[FoodDistances.index(closestFood)]\n\n\n GhostsToMe = []\n GhostsToFood = []\n for ghost in GhostLocs:\n GhostsToMe.append(manhattanDistance(newPos,ghost))\n GhostsToFood.append(manhattanDistance(closestFoodLocation,ghost))\n closestGhostToMe = min(GhostsToMe)\n closestGhostToClosestFood = min(GhostsToFood)\n closestGhostLocation = GhostLocs[GhostsToMe.index(closestGhostToMe)] \n Hueristic = 0.0\n if closestGhostToClosestFood < closestFood:\n if closestGhostToMe > 5:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*10 - (1/closestGhostToMe)*5\n else:\n Hueristic = (-1/closestGhostToMe)*10000\n #Ghost is closer to me than nearest food so avoid ghost\n else:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*10 - (1/closestGhostToMe)*5\n return Hueristic",
"def heuristic(state, problem):\n # It would take a while for Flat Earther's to get accustomed to this paradigm\n # but hang in there.\n current = problem.G.node[state.state]\n final = problem.G.node[problem.end_node]\n clon = (current['x'], 0, 0)\n clat = (current['y'], 0, 0)\n flon = (final['x'], 0, 0)\n flat = (final['y'], 0, 0)\n hn = util.points2distance((clon, clat), (flon, flat))\n return hn\n # util.raiseNotDefined()",
"def compute_heuristic(self, state):\n if self._shape_reward_mode == \"optimal\":\n problem = self.problems[self._problem_idx]\n\n # Add action literals to state to enable planning\n state_lits = set(state.literals)\n action_lits = set(\n self.action_space.all_ground_literals(state, valid_only=False))\n state_lits |= action_lits\n\n problem_path = \"\"\n try:\n # generate a temporary file to hand over to the external planner\n fd, problem_path = tempfile.mkstemp(dir=TMP_PDDL_DIR, text=True)\n with os.fdopen(fd, \"w\") as f:\n problem.write(f, initial_state=state_lits, fast_downward_order=True)\n\n return get_fd_optimal_plan_cost(\n self.domain.domain_fname, problem_path)\n finally:\n try:\n os.remove(problem_path)\n except FileNotFoundError:\n pass\n else:\n return self._heuristic(state)",
"def aStarSearch(problem, heuristic=myHeuristic):\n\n #frontier = util.PriorityQueue()\n #startState = problem.getStartState()\n #startNode = (startState, ['East'], 0)\n #frontier.push(startNode, 0)\n\n #currentState, actions, currentCost = frontier.pop()\n #return ['West','West', 'West','West','South','South','East', 'South','South','West','West']\n\n fronteira = util.PriorityQueue()\n\n nohExplorado = [] #(state, cost)\n\n startState = problem.getStartState()\n nohInicial = (startState, [], 0) #(state, action, cost)\n\n fronteira.push(nohInicial, 0)\n\n while not fronteira.isEmpty():\n\n #pega o Noh de menor \"custo\" na fila\n curEstado, todasAcoes, curCusto = fronteira.pop()\n\n #Coloca Noh atual na lista de explorados\n nohAtual = (curEstado, curCusto)\n nohExplorado.append((curEstado, curCusto))\n\n if problem.isGoalState(curEstado):\n #print(todasAcoes)\n return todasAcoes\n\n else:\n #Lista de Sucessores (successor, action, stepCost) e examina cada um\n sucessores = problem.getSuccessors(curEstado)\n for sucEstado, sucAcao, sucCusto in sucessores:\n novaAcao = todasAcoes + [sucAcao]\n novoCusto = problem.getCostOfActions(novaAcao)\n novoNoh = (sucEstado, novaAcao, novoCusto)\n\n #Checa se o sucessor jah foi visitado\n jah_foi_explorado = False\n for explorado in nohExplorado:\n exEstado, exCusto = explorado\n if (sucEstado == exEstado) and (novoCusto >= exCusto):\n jah_foi_explorado = True\n\n #Se nao foi explorado, coloca na fronteira\n if not jah_foi_explorado:\n fronteira.push(novoNoh, novoCusto + heuristic(sucEstado, problem))\n\n\n return todasAcoes",
"def spreadOutAndFindDot(self, gameState):\n # Here are some useful elements of the startState\n currentPosition = gameState.getPacmanPosition(self.index)\n foodList = gameState.getFood().asList()\n walls = gameState.getWalls()\n randomFood = []\n problem = []\n\n #problem = AnyFoodSearchProblem(gameState, self.index)\n\n # if min(manhattan(currentPosition, foodPosition) for foodPosition in food.asList()) > 10:\n # return [Directions.STOP]\n #print(\"self.targets = \", self.targets)\n if self.index == 0:\n TargetFood = ClosestFood(currentPosition, foodList)\n #self.targets.append(TargetFood)\n problem = PositionSearchProblem(gameState, 0, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n if self.index == 1:\n TargetFood = ClosestFood(currentPosition, foodList)\n \"\"\"\n want to find a way to avoid both agents coming up with the same target. But the below doesn't work because\n each agent has their own self.targets. How to keep a common list of targets?\n \"\"\"\n # if TargetFood in self.targets:\n # tempFoodList = foodList.copy()\n # tempFoodList.pop(tempFoodList.index(TargetFood))\n # TargetFood = ClosestFood(currentPosition, tempFoodList)\n # self.targets.append(TargetFood)\n # else:\n # self.targets.append(TargetFood)\n problem = PositionSearchProblem(gameState, 1, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n if self.index == 2:\n TargetFood = RandomFood(currentPosition, foodList)\n problem = PositionSearchProblem(gameState, 2, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n if self.index == 3:\n TargetFood = RandomFood(currentPosition, foodList)\n problem = PositionSearchProblem(gameState, 3, goal=TargetFood, start=currentPosition, warn=False, visualize=False)\n return search.aStarSearch(problem, manhattanHeuristic)\n #return search.bfs(problem)\n\n #util.raiseNotDefined()",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n e = Directions.EAST\n\n result = []\n qu = util.PriorityQueue()\n visited = set([])\n current = (problem.getStartState(), \"\", 0)\n qu.update(current, 0)\n costs = {}\n parents = {}\n parents[problem.getStartState()] = (problem.getStartState(), \"\")\n\n while not qu.isEmpty():\n cost, current = qu.pop()\n visited.add(current[0])\n\n if problem.isGoalState(current[0]):\n result = current[0]\n break\n\n for each in problem.getSuccessors(current[0]):\n if each[0] not in visited:\n qu.update(each, cost + each[2] + heuristic(each[0], problem))\n if each[0] not in costs:\n costs[each[0]] = cost + each[2]\n parents[each[0]] = (current[0], each[1])\n elif costs[each[0]] > cost + each[2] + heuristic(each[0], problem):\n costs[each[0]] = cost + each[2] + heuristic(each[0], problem)\n parents[each[0]] = (current[0], each[1])\n\n path = []\n while parents[result][0] != result:\n path.append(parents[result][1])\n result = parents[result][0]\n\n path.reverse()\n result = []\n for each in path:\n if each == \"South\":\n result.append(s)\n elif each == \"West\":\n result.append(w)\n elif each == \"North\":\n result.append(n)\n elif each == \"East\":\n result.append(e)\n\n return result\n util.raiseNotDefined()\n\n util.raiseNotDefined()",
"def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n # Useful information you can extract from a GameState (pacman.py)\n pos = currentGameState.getPacmanPosition()\n ghostState = currentGameState.getGhostStates()\n scaredTimes = [gs.scaredTimer for gs in ghostState]\n\n # get all the positions\n ghostPositions = []\n for ghost in ghostState:\n ghostPositions.append(ghost.getPosition())\n foodPosition = currentGameState.getFood()\n wallPosition = currentGameState.getWalls()\n capsulePosition = currentGameState.getCapsules()\n width = wallPosition.width\n length = wallPosition.height\n\n if currentGameState.isWin():\n return float('inf')\n if currentGameState.isLose():\n return -float('inf') \n \n # score for the over all grid\n score = currentGameState.getScore()\n\n # use bfs for food to help avoid the block that bothers the manhattan distance\n # depth limit at 5\n food_depth = -1\n max_depth = 3\n ghost_dis = -1\n # cur_depth\n frontier = []\n frontier.append((pos[0], pos[1], 0)) # depth of the pos is 0\n while (len(frontier) > 0 and frontier[0][2] < max_depth):\n if food_depth >= 0 and ghost >= 0: # if already have find food and ghost, break the loop\n break\n cur = frontier.pop(0)\n cur_x = cur[0]\n cur_y = cur[1]\n cur_depth = cur[2]\n if cur_y != 0 and wallPosition[cur_x][cur_y - 1] == False: # up\n if ghost_dis < 0 and (cur_x, cur_y - 1) in ghostPositions and scaredTimes[ghostPositions.index((cur_x, cur_y - 1))] == 0: # ghost still not found, is ghost, ghost is not scared\n ghost_dis = cur_depth\n elif food_depth < 0 and (cur_x, cur_y - 1) in foodPosition: # never met food, has food\n food_depth = cur_depth\n else:\n frontier.append((cur_x, cur_y - 1, cur_depth + 1))\n if cur_y != length - 1 and wallPosition[cur_x][cur_y + 1] == False: # down\n if ghost_dis < 0 and (cur_x, cur_y + 1) in ghostPositions and scaredTimes[ghostPositions.index((cur_x, cur_y + 1))] == 0: # ghost still not found, is ghost, ghost is not scared\n ghost_dis = cur_depth\n elif food_depth < 0 and (cur_x, cur_y + 1) in foodPosition: # never met food, has food\n food_depth = cur_depth\n else:\n frontier.append((cur_x, cur_y + 1, cur_depth + 1))\n if cur_x != 0 and wallPosition[cur_x - 1][cur_y] == False: # left\n if ghost_dis < 0 and (cur_x - 1, cur_y) in ghostPositions and scaredTimes[ghostPositions.index((cur_x - 1, cur_y))] == 0: # ghost still not found, is ghost, ghost is not scared\n ghost_dis = cur_depth\n elif food_depth < 0 and (cur_x - 1, cur_y) in foodPosition: # never met food, has food\n food_depth = cur_depth\n else:\n frontier.append((cur_x - 1, cur_y, cur_depth + 1))\n if cur_x != width - 1 and wallPosition[cur_x + 1][cur_y] == False: # left\n if ghost_dis < 0 and (cur_x + 1, cur_y) in ghostPositions and scaredTimes[ghostPositions.index((cur_x + 1, cur_y))] == 0: # ghost still not found, is ghost, ghost is not scared\n ghost_dis = cur_depth\n elif food_depth < 0 and (cur_x + 1, cur_y) in foodPosition: # never met food, has food\n food_depth = cur_depth\n else:\n frontier.append((cur_x + 1, cur_y, cur_depth + 1))\n\n for cp in capsulePosition:\n cp_dis = manhattanDistance(cp, pos)\n score -= cp_dis\n\n if food_depth < 0: # does not find food with bfs\n all_heu = []\n for food in foodPosition.asList():\n all_heu.append(manhattanDistance(pos, food))\n food_depth = min(all_heu)\n \n # if ghost_dis < 0 and ghost_dis == 0:\n # return 1 / food_depth\n # print('food_depth', food_depth)\n # print('ghost_dis', ghost_dis)\n # if ghost_dis == 0:\n # return float('inf')\n # return ((1 
/ food_depth) - (1 / ghost_dis))\n\n return (score - food_depth + ghost_dis)",
"def __init__(self, startingGameState):\n self.walls = startingGameState.getWalls()\n self.startingPosition = startingGameState.getPacmanPosition()\n top, right = self.walls.height - 2, self.walls.width - 2\n self.corners = ((1, 1), (1, top), (right, 1), (right, top))\n for corner in self.corners:\n if not startingGameState.hasFood(*corner):\n print('Warning: no food in corner ' + str(corner))\n self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded\n # Please add any code here which you would like to use\n # in initializing the problem\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Mi espacio de estados consistirá en que cada estado será una tupla del tipo (pos, grid), donde:\n * pos es la posición en coordenadas (x,y) (como antes)\n * grid contendrá una grid 2x2 con la información relevante de la comida en las esquinas. Esto es:\n - En cada item de la grid habrá un true o un false, en función de si en esa esquina hay o no comida.\n - Por ejemplo, si la grid es:\n | True False |\n | True True |\n entonces significa que ya habremos comido la comida de la esquina (right,top)\n \"\"\"\n self.startingFood = startingGameState.getFood()\n self.cornersFood = game.Grid(2, 2) # Defino la matriz tipo grid de dimensión 2x2\n self.cornersFood[0][0] = self.startingFood[1][top] # Asigno manualmente cada valor a la grid\n self.cornersFood[0][1] = self.startingFood[right][top] # El problema es que yo enumero diferente la matriz\n self.cornersFood[1][0] = self.startingFood[1][1] # Es decir, a[0][0] es la esquina superior izquierda\n self.cornersFood[1][1] = self.startingFood[right][1]\n self.startFoodPosition = (self.startingPosition, self.cornersFood)",
"def findHeuristic(self, _, __):\n popSize = 100\n retain = 0.25\n random_select = 0.1\n mutate = 0.1\n\n popList = self.populationList(popSize)\n\n solved = False\n count = 0\n while not solved:\n # evolves current\n popList = (self.evolve(popList, retain, random_select, mutate))\n# print(popList) # for troubleshooting\n for i in popList:\n if (self.fitness(i) == 0):\n print(\"solution: \", i)\n solved = True\n break\n # if plateus at a local minima, then end after 50 generations\n if count >= 50:\n if (self.fitness(i) <= 10):\n print(\"solution: \", i)\n solved = True\n break\n if solved is True:\n break\n print(\"-----------------\")\n\n # will modify mutation, random_select and retain values to help leave a\n # local minima. More randomness the longer it takes up to specific points\n if count % 3 == 0:\n if mutate < 0.2:\n mutate += 0.01\n if random_select < 0.3:\n random_select += 0.01\n count += 1\n\n return exit(0)",
"def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n En este ejercicio me he dado cuenta de un problema de mi definición del espacio de estados:\n - El espacio de estados consiste en tuplas ((x,y), grid), donde (x,y) es la posición en coordenadas\n y grid es la tabla de true/false.\n - El problema es que yo he pensado la tabla grid en forma de matriz matemática, de manera que los índices\n no van de acuerdo con la posición de las esquinas, sinó con los índices de una matriz.\n Para solucionar este problema sin tener que modificar todo lo anterior (dado que no me queda tiempo) lo que he\n tenido que hacer es crear una lista y añadir de forma ordenada los valores true/false, para que se corresponda\n cada uno con su esquina.\n \n Mi heurística consiste en lo siguiente:\n * Calculo la distancia desde la posición en la que me sitúo hasta todos los corners no visitados (los que aún\n tienen comida) y me quedo con la mínima de estas distancias, y con el corner que me de esa mínima.\n * Calculo la distancia desde ese corner (el mínimo de antes) hasta todos los otros posibles corners no visitados\n y de nuevo me quedo con la mínima distancia y con el corner que me da esa mínima.\n * Repito este proceso hasta que no queden corners.\n Entonces lo que hago es definir una nueva lista de corners, newListOfCorners que irá extrayendo los corners a medida\n que su distanca sea calculada. Por ejemplo, si tengo los cuatro corners con comida y estoy en una posición \n aleatoria, la lista newListOfCorners estará llena. Se calculará la distancia a cada corner y el corner que de la \n mínima será extraído de newListOfCorners. Entonces se calculará la distancia desde este corner hasta los restantes\n tres corners de newListOfCorners y el corner de esos tres que me de la mínima será extraído de la lista. Etc...\n \"\"\"\n\n # Ordenamos la lista de True's y False's para que vaya acorde con el orden de la lista corners:\n visitedCorners = []\n visitedCorners.append(state[1][1][0])\n visitedCorners.append(state[1][0][0])\n visitedCorners.append(state[1][1][1])\n visitedCorners.append(state[1][0][1])\n corners = list(corners) # De aquí saco una lista que contenga los corners ordenados.\n # Ahora los corners y la lista de visitedCorners contendrán la información de forma ordenada y coherente\n minimum = 9999999999999999 # Defino un mínimo muy grande para asegurarme que nunca sea superado\n total = 0 # Inicializo el total a cero\n newListOfCorners = [] # Creo una nueva lista para añadir los corners no estudiados\n for corner in corners: # Primero vamos a llenar la lista de corners con los que me interesen: los que tienen comida\n if visitedCorners[corners.index(corner)]: # Miramos que el corner tenga comida, sino pasamos\n newListOfCorners.append(corner) # Si tiene comida, lo añadimos\n minimCorner = corners[0] # Inicializo el minimCorner a un corner aleatorio para que no me de problemas más tarde\n actualState = state[0] # Lo mismo\n\n while not len(newListOfCorners) == 0: # Mientras la lista no esté vacía...\n for corner in newListOfCorners: # Cogemos un corner de la lista\n distanceToCorner = manhattanHeuristicToCorners(actualState, corner) # Calculamos dist. 
a corner\n if distanceToCorner < minimum: # Calculamos el mínimo\n minimum = distanceToCorner\n minimCorner = corner\n total += minimum # Y lo añadimos al total\n actualState = minimCorner # Reactualizamos cada variable para volver a empezar el bucle\n minimum = 9999999999999999999999999999999\n newListOfCorners.remove(minimCorner)\n return total",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Initialize data structures\n parent_node = {}\n path_to_node = {}\n priority_queue = util.PriorityQueue()\n\n p_c = 0.5\n h_c = 1 - p_c\n\n # Get the start node\n start_node = problem.getStartState()\n parent_node[start_node] = None\n path_to_node[start_node] = []\n priority_queue.update(start_node, 0)\n\n #goal_found = False\n\n while not priority_queue.isEmpty():\n # Get the next node\n node_to_expand = priority_queue.pop()\n # Check if goal state is reached\n if problem.isGoalState(node_to_expand):\n break\n next_nodes = problem.getSuccessors(node_to_expand)\n path_to_parent = path_to_node[node_to_expand]\n\n for one_node in next_nodes:\n point, move, cost = one_node\n curr_path = path_to_node[node_to_expand] + [move]\n curr_cost = problem.getCostOfActions(curr_path)\n heuristic_cost = heuristic(point, problem)\n # Check if current node already exists in the previously visited nodes\n if point in path_to_node:\n prev_cost = problem.getCostOfActions(path_to_node[point])\n if prev_cost > curr_cost:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n else:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n # current_cost = problem.getCostOfActions(point) * p_c + heuristic(point, problem) * h_c\n\n print(node_to_expand) \n return path_to_node[node_to_expand]\n \n# nodes_to_expand = set()\n# # get max value node in the fringe node\n# min_val = float(\"inf\")\n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost < min_val:\n# min_val = total_cost\n# \n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost == min_val:\n# nodes_to_expand.add(one_node)\n# fringe_node.remove(one_node)\n#\n# # Expand the fringe node \n# for one_node in nodes_to_expand:\n# path_to_parent = path_to_point[one_node]\n# for nxt_node in problem.getSuccessors(one_node):\n# pos = nxt_node[0]\n# mv = nxt_node[1]\n# # check if point already present in path to point\n# prev_cost = float(\"inf\")\n# if pos in cost_to_point:\n# prev_cost = cost_to_point[pos]\n# new_path = path_to_parent + [mv]\n# if prev_cost > problem.getCostOfActions(new_path):\n# path_to_point[pos] = new_path\n# cost_to_point[pos] = problem.getCostOfActions(new_path)\n# fringe_node.append(pos)\n#\n# # Check if destination is reached in the fringe node\n# for one_node in fringe_node:\n# if problem.isGoalState(one_node):\n# final_node = one_node\n# goal_found = True\n# break\n# \n# #print(len(fringe_node))\n# print(final_node)\n# print(path_to_point[final_node])\n# return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def betterEvaluationFunction(gameState):\n\n score = gameState.getScore()\n food_grid = gameState.getFood()\n longest_road = food_grid.height + food_grid.width # longest manheten distance.\n score -= longest_road * len(gameState.getCapsules()) # giving the number of pills left some values.\n num_food = gameState.getNumFood()\n\n #Calculating the closest capsule distance\n capsules_distances = [util.manhattanDistance(gameState.getPacmanPosition(), capsule) for capsule in\n gameState.getCapsules()]\n closest_capsule_dist = 1\n if len(capsules_distances) > 0:\n closest_capsule_dist = min(capsules_distances)\n capsules = gameState.getCapsules()\n capsule_value = closest_capsule_dist\n\n #Calculating ghosts distances and if we are chasing them or they're chasing us\n scared_value = 0\n ghost_distance = 0\n num_of_ghosts = len(gameState.getGhostStates())\n for ghost_state in gameState.getGhostStates():\n if ghost_state.scaredTimer > 0:\n scared_value = util.manhattanDistance(gameState.getPacmanPosition(), ghost_state.configuration.pos)\n else:\n curr_ghost_distance = util.manhattanDistance(gameState.getPacmanPosition(), ghost_state.configuration.pos)\n if curr_ghost_distance <= 1:\n return -100000000\n ghost_distance += curr_ghost_distance\n if num_of_ghosts == 0:\n ghost_distance /= 1\n\n #Calculating the distances to all food.\n food_distances = []\n food_grid = gameState.getFood()\n for x in range(food_grid.width):\n for y in range(food_grid.height):\n if food_grid[x][y] is True:\n food_distances.append(util.manhattanDistance(gameState.getPacmanPosition(), (x, y)))\n\n #Calcukating the closest food distance(top 3 if available)\n closest_food_list = []\n closest_food_value = 0\n total_food_dist = 0\n if (num_food > 0):\n if num_food <= 2:\n closest_food_value = min(food_distances)\n else:\n for _ in range(3):\n if len(food_distances) != 0:\n closest_food_list.append(min(food_distances))\n food_distances.remove(closest_food_list[-1])\n closest_food_value = random.choice(closest_food_list)\n total_food_dist = sum(food_distances) / num_food\n\n #Giving more and less value to some of the parameters\n N_score = 1000000\n N_scared = 50\n if (num_food >= 0.3 * food_grid.width * food_grid.height):\n N_capsules = 5 # if food is more than 30%+- then chase capsules more\n else:\n N_capsules = 20\n N_closest_food = 12\n N_total_food = 5\n N_ghosts = 1\n return N_score * (score) ** 3 - N_capsules * capsule_value - N_scared * (scared_value) ** 2 - N_closest_food * (\n closest_food_value) ** 2 - N_total_food * (total_food_dist) + N_ghosts * (ghost_distance) ** 2",
"def GeneralHeuristic(self, gameState, state):\n heuristic = 0\n ghost, disToGhost = self.getGhostGoal(gameState)\n if ghost is not None:\n enemies = [gameState.getAgentState(i) for i in self.getOpponents(gameState)]\n ghosts = [a for a in enemies if not a.isPacman and a.scaredTimer < 2 and a.getPosition() is not None]\n if ghosts != None and len(ghosts) > 0:\n ghostpositions = [ghost.getPosition() for ghost in ghosts]\n ghostDists = [self.getMazeDistance(state, ghostposition) for ghostposition in ghostpositions]\n ghostDist = min(ghostDists)\n if ghostDist < 2:\n heuristic = pow((5 - ghostDist), 5)\n return heuristic",
"def heuristic(state, problem):\n # It would take a while for Flat Earther's to get accustomed to this paradigm\n # but hang in there.\n node1 = problem.G.node[state]\n node2 = problem.G.node[problem.end_node]\n xy1 = ((node1['x'],0,0), (node1['y'],0,0))\n xy2 = ((node2['x'],0,0), (node2['y'],0,0))\n return util.points2distance(xy1, xy2)\n # util.raiseNotDefined()",
"def heuristic(self, state: ODState) -> int:\n h = 0\n if self.assigned_goals is None:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, agent.color)\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, state.agents[j].color)\n else:\n for agent in state.new_agents:\n h += self.grid.get_heuristic(agent.coords, self.assigned_goals[agent.id])\n for j in range(len(state.new_agents), len(state.agents)):\n h += self.grid.get_heuristic(state.agents[j].coords, self.assigned_goals[state.agents[j].id])\n return h",
"def aStarSearch(problem, heuristic=nullHeuristic):\n visited = []\n solution = []\n intialCost = 0\n priorityQueue = util.PriorityQueue()\n priorityQueue.push((problem.getStartState(),solution,intialCost),intialCost)\n \n while not priorityQueue.isEmpty():\n coord, solution, totalStep = priorityQueue.pop()\n if problem.isGoalState(coord):\n return solution\n if not coord in visited:\n visited+=[coord]\n for position, direction, step in problem.getSuccessors(coord):\n newSolution = solution+[direction]\n g = totalStep + step\n newTotalCost = g + heuristic(position, problem)\n priorityQueue.push((position, newSolution, g), newTotalCost)",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from game import Actions\n\n waiting_list = util.PriorityQueue()\n COSTS = {}\n start_state = problem.getStartState()\n COSTS[start_state] = 0\n waiting_list.push(start_state,0)\n parents = {}\n \n while not waiting_list.isEmpty():\n q_state = waiting_list.pop()\n if problem.isGoalState(q_state):\n target_state = q_state\n break\n for child in problem.getSuccessors(q_state):\n n_cost = COSTS[q_state] + child[2]\n \n if child[0] not in COSTS or n_cost < COSTS[q_state]:\n COSTS[child[0]] = n_cost\n prior = n_cost + heuristic(child[0], problem)\n waiting_list.push(child[0], prior)\n parents[child[0]] = q_state\n\n sequence = []\n prev_state = target_state\n while target_state in parents.keys():\n target_state = parents[target_state]\n direction = Actions.vectorToDirection([prev_state[0] - target_state[0], prev_state[1] - target_state[1]])\n prev_state = target_state\n sequence.append(direction)\n \n return sequence[::-1]",
"def a_star_search(problem, heuristic=null_heuristic):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n \"\"\"\r\n it does worse in corners problems, to work better needs heavy huristic, not worth in\r\n in corners problem expandend nodes grow expo\r\n all others are better\r\n counter = 0 # in some situation it helps, in some it doesnt\r\n #print(stat[0].pieces)\r\n for x in stat[0].pieces[0]:\r\n if x:\r\n counter += 1\r\n \"\"\"\r\n counter = 0\r\n fringe.push(stat[0], stat[2] + counter + heuristic(stat[0], problem)) # problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n if state == problem.get_start_state():\r\n break\r\n\r\n final.reverse()\r\n\r\n return final",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (priority queue y set)\n openNodes = util.PriorityQueue()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = Node(problem.getStartState(), '', 0, None)\n\n #Calculamos funcion heuristica y el coste acumulado para sacar la funcion de evaluacion del nodo inicial\n fn = problem.getCostOfActions(node.path) + heuristic(node.name, problem);\n\n #Lo metemos en la cola con su funcion de evaluacion como prioridad\n openNodes.push(node, fn)\n\n #Iteramos para cada nodo\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #sacamos el nodo de arriba de la cola\n node = openNodes.pop()\n if problem.isGoalState(node.name): #Comprobamos si el nodo es Goal. Si lo es terminamos.\n break\n else: #Expandimos los nodos sucesores del nodo si no estan en closed\n if nodeIsClosed(node, closedNodes) is False:\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n fn = problem.getCostOfActions(findPath(succNode)) + heuristic(succNode.name, problem);\n openNodes.push(succNode, fn)\n #Metemos el nodo en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)",
"def getAction(self, state):\n \"*** YOUR CODE HERE ***\"\n x, y = state.getPacmanPosition(self.index)\n numPacmen = state.getNumPacmanAgents()\n if not MyAgent.customFood:\n MyAgent.customFood = state.getFood()\n MyAgent.foodLeft = len(MyAgent.customFood.asList())\n\n #if not self.foodIsThere(x, y):\n # self.path = None\n #trueLen = len(state.getFood().asList())\n #if not self.path and self.index < trueLen and trueLen < numPacmen:\n # problem = MySearchProblem(state, self.index, 1, state.getFood())\n # self.path = search.bfs(problem)\n if self.path and self.path[0] == 'place':\n if sum(MyAgent.finding) == 1:\n MyAgent.specialWalls[(x, y)] = self.path[1]\n self.path = None\n\n if not self.path and MyAgent.foodLeft > 0:\n problem = MySearchProblem(state, self.index, min(foodCount, MyAgent.foodLeft), MyAgent.customFood, MyAgent.specialWalls, MyAgent.finding)\n\n self.path = cbfs(problem)\n\n \n\n nx, ny = x, y\n if not self.path:\n return state.getLegalActions(self.index)[0]\n for i in range(len(self.path)):\n action = self.path[i]\n if action == 'place':\n MyAgent.finding[self.index] = False\n break\n MyAgent.finding[self.index] = True\n dx, dy = Actions.directionToVector(action)\n nx, ny = int(nx + dx), int(ny + dy)\n check = MyAgent.customFood[nx][ny]\n if check:\n MyAgent.foodLeft -= 1\n MyAgent.customFood[nx][ny] = False\n\n if not self.path:\n return state.getLegalActions(self.index)[0]\n dir = self.path.pop(0)\n return dir",
"def astar(grid, heuristic):\r\n evaluatedMap = {}\r\n unevaluatedMap = {}\r\n start = grid.getStart()\r\n goal = grid.getGoals()[0]\r\n startG = 0\r\n startH = heuristic(start,goal)\r\n currentNode = Node(start,startH,startG)\r\n unevaluatedMap[currentNode.coord] = currentNode\r\n \r\n while len(unevaluatedMap) > 0:\r\n # I tried using a PriorityQueue but because a node could end up with \r\n # an updated priority it really didn't make sense to use one and\r\n # instead had to just serach the dictionary each time for the smallest\r\n # priority which is the sum of g and h\r\n currentNode = min(unevaluatedMap.values(),key=lambda x:x.g + x.h)\r\n \r\n # if the current node is the goal then create the path by iterating backwards\r\n # and pushing the current node to the front of the path and then moving to the\r\n # parent node\r\n if currentNode.coord == goal:\r\n path = []\r\n while currentNode.parentNode:\r\n path.insert(0,currentNode.coord)\r\n currentNode = currentNode.parentNode\r\n path.insert(0,currentNode.coord)\r\n grid.setPath(path)\r\n return\r\n \r\n # Move the current node to the evaluated map and delete it from\r\n # the unevaluated map\r\n evaluatedMap[currentNode.coord] = currentNode\r\n del unevaluatedMap[currentNode.coord]\r\n \r\n # Mark the current node as having been visited\r\n grid.addVisited(currentNode.coord)\r\n \r\n # Get the neighbors of the current node\r\n neighbors = grid.getNeighbors(currentNode.coord)\r\n\r\n # For each neighbor check if that neighbor has alread been evaluated\r\n # if it has then skip that neighbor. If it hasn't and it isn't in the\r\n # unevaluated map add it with a high cost and heuristic.\r\n # Get the neighbor from the unevaluated map and calculate the current\r\n # cost. If the current cost is less than what existed update the neighbor\r\n # and add it back to the list otherwise skip to next neighbor\r\n for neighbor in neighbors:\r\n ncoord = (neighbor[0])\r\n if (ncoord) in evaluatedMap:\r\n continue\r\n if (ncoord) not in unevaluatedMap:\r\n node = Node(ncoord,float('inf'),float('inf'))\r\n unevaluatedMap[ncoord] = node\r\n \r\n node = unevaluatedMap[ncoord]\r\n calc_cost = currentNode.g + neighbor[1]\r\n if calc_cost >= node.g:\r\n continue\r\n \r\n node.parentNode = currentNode\r\n node.g = calc_cost\r\n node.h = heuristic(ncoord,goal)",
"def uniform_cost_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n fringe.push(stat[0], stat[1].piece.get_num_tiles()) #problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final"
]
| [
"0.83897704",
"0.830523",
"0.8223044",
"0.6986859",
"0.69107854",
"0.6910324",
"0.6817668",
"0.67753005",
"0.6737241",
"0.67104596",
"0.66949224",
"0.66923463",
"0.6692185",
"0.66603684",
"0.66597563",
"0.66365445",
"0.66059625",
"0.6605135",
"0.65819836",
"0.6560537",
"0.65250653",
"0.6522871",
"0.65228426",
"0.65186626",
"0.6508927",
"0.64982593",
"0.6481689",
"0.6480773",
"0.64656276",
"0.64630616"
]
| 0.85888267 | 0 |
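The foodHeuristic record above scores a state by the maximum maze distance from Pac-Man to any remaining pellet, which is admissible because the farthest pellet must eventually be reached and the maze distance to it never overestimates the remaining path. Below is a minimal self-contained sketch of that idea outside the Berkeley Pac-Man API; the grid encoding and the bfs_distance / food_heuristic names are assumptions for illustration.

```python
from collections import deque

def bfs_distance(walls, start, goal):
    """True maze distance between two cells via breadth-first search.
    walls[r][c] is True where movement is blocked; the grid border is assumed to be all walls."""
    frontier = deque([(start, 0)])
    seen = {start}
    while frontier:
        (r, c), dist = frontier.popleft()
        if (r, c) == goal:
            return dist
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (r + dr, c + dc)
            if not walls[nxt[0]][nxt[1]] and nxt not in seen:
                seen.add(nxt)
                frontier.append((nxt, dist + 1))
    return float("inf")  # goal unreachable

def food_heuristic(position, food_cells, walls):
    """Admissible estimate: the maximum maze distance to any remaining pellet."""
    if not food_cells:
        return 0
    return max(bfs_distance(walls, position, food) for food in food_cells)

if __name__ == "__main__":
    W, o = True, False  # W = wall, o = open floor
    walls = [
        [W, W, W, W, W],
        [W, o, o, o, W],
        [W, o, W, o, W],
        [W, o, o, o, W],
        [W, W, W, W, W],
    ]
    print(food_heuristic((1, 1), [(1, 3), (3, 3)], walls))  # prints 4
```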
Returns a path (a list of actions) to the closest dot, starting from gameState. | def findPathToClosestDot(self, gameState):
    # Here are some useful elements of the startState
    startPosition = gameState.getPacmanPosition()
    food = gameState.getFood()
    walls = gameState.getWalls()
    problem = AnyFoodSearchProblem(gameState)
    "*** YOUR CODE HERE ***"
    return breadthFirstSearch(problem)
    # util.raiseNotDefined() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n\n explored = []\n actions = []\n initial = problem.getStartState()\n frontier = util.Queue()\n\n frontier.push((initial, actions))\n\n while not frontier.isEmpty():\n node, actions = frontier.pop()\n if node in explored:\n continue\n explored.append(node)\n if problem.isGoalState(node):\n return actions\n for successor, action, cost in problem.getSuccessors(node):\n frontier.push((successor, actions + [action]))",
"def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n\n \"*** YOUR CODE HERE ***\"\n return search.bfs(problem)",
"def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n return search.breadthFirstSearch(problem)\n util.raiseNotDefined()",
"def findPathToClosestDot(self, gameState):\n\n # Here are some useful elements of the startState\n # startPosition = gameState.getPacmanPosition()\n # food = gameState.getFood()\n # walls = gameState.getWalls()\n # problem = AnyFoodSearchProblem(gameState)\n\n # *** Your Code Here ***\n problem = AnyFoodSearchProblem(gameState)\n return search.uniformCostSearch(problem)",
"def findPathToClosestDot(self, gameState):\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n return search.bfs(problem)\n util.raiseNotDefined()",
"def solution_path(self) -> list[State]:",
"def action(self):\n\n # assume the smart opponent can always choose the best step\n # Depth First Search\n steps = 2\n stack = [(self.game_in_head, (), 0)]\n maxmin = None\n good_paths = []\n\n while len(stack) > 0:\n parent_node, path, score = stack.pop(-1)\n if len(path) >= steps*2:\n \n # leaf node in the search tree\n if maxmin is None:\n maxmin = score\n good_paths.append(path)\n elif maxmin == score:\n good_paths.append(path)\n elif maxmin < score:\n maxmin = score\n good_paths.clear()\n good_paths.append(path)\n else:\n # root node, find its leaves\n children_nodes = self.one_step_infe(parent_node, path, score)\n stack += children_nodes\n\n path_dec = random.choice(good_paths) \n if self.colour == 'upper':\n return path_dec[0] \n elif self.colour == 'lower':\n return path_dec[1]",
"def findPathToClosestDot(self, gameState, sector=None):\n\n startPosition = gameState.getPacmanPosition(self.index)\n problem = AnyFoodSearchProblem(gameState, self.index)\n foodList = gameState.getFood().asList()\n\n # Find closest food in sector\n searchSpace = [food for food in foodList if food[0] in sector] if sector else foodList\n\n if searchSpace:\n _, closestDot = min([(util.manhattanDistance(startPosition, food), food) for food in searchSpace])\n else:\n return False\n\n # A* Search for path to specified food using Manhattan Heuristic\n heuristic = util.manhattanDistance\n explored = []\n frontier = util.PriorityQueue()\n frontier.push((problem.getStartState(), []), heuristic(problem.getStartState(), closestDot))\n while(not frontier.isEmpty()):\n currentNode, actions = frontier.pop()\n if(currentNode in explored):\n continue\n explored.append(currentNode)\n if(currentNode == closestDot):\n return actions\n for successor, action, successorCost in problem.getSuccessors(currentNode):\n newActions = actions + [action]\n frontier.push((successor, newActions), problem.getCostOfActions(newActions) + heuristic(successor, closestDot))\n return []",
"def find_closest_path(self):\n\t\tclosest_distance = sys.maxint\n\t\tclosest_path = 0\n\t\tbike_position = (self.map_model.bike.xB, self.map_model.bike.yB)\n\t\tfor path_index in range(len(self.map_model.paths)):\n\t\t\tnearest_point = geometry.nearest_point_on_path(self.map_model.paths[path_index], bike_position)\n\t\t\tdistance_to_bike = geometry.distance(nearest_point, bike_position)\n\t\t\tif (closest_distance > distance_to_bike):\n\t\t\t\tclosest_distance = distance_to_bike\n\t\t\t\tclosest_path = path_index \n\t\tdisp_next = self.displacement_to_turn(target_path = (closest_path+1)%len(self.map_model.paths))\n\t\ttarget_path = (closest_path+1)%len(self.map_model.paths)\n\t\tdistance_next = geometry.distance_from_path(bike_position, self.map_model.paths[target_path])\n\t\tif disp_next - np.abs(distance_next)>-0.01:\n\t\t\tclosest_path = np.mod(closest_path + 1,len(self.map_model.paths))\n\t\treturn closest_path",
"def path_between_states(self):\n\n start_given = (self.row_before, self.col_before) # row, col before state transition\n finish_given = (self.row_after, self.col_after) # row, col after state transition\n\n # find_path based on a* algorithm\n path = find_path(Customer.GRID, start_given, finish_given, Customer.POSSIBLE_MOVES)\n\n # if empty path fillin values to enable next step interpolation into 1s resolution\n if start_given == finish_given:\n path = [(self.row_before, self.col_before), (self.row_after, self.col_after)]\n\n self.path = path",
"def calculate_path(self):\n\n mid_states = []\n\n # Add in between states\n for i in range(Constants.NUMBER_LAPS):\n mid_states = mid_states + Constants.LAP_STATES\n\n # Concatenate beginning, middle and end states to obtain full path of states\n self.path_states = Constants.BEGINNING_STATES + mid_states + Constants.END_STATES\n\n # Determine the amount of times that the smallbot will drive forward during the path\n self.times_driven_forward = self.path_states.count('CREEP_FORWARD')\n\n print(\"Calculated path: \", self.path_states)",
"def getAction(self, state):\n if 'actionIndex' not in dir(self): self.actionIndex = 0\n\n if self.actionIndex == 0:\n self.actions = self.spreadOutAndFindDot(state)\n if len(self.actions) == 1:\n return self.actions[0]\n else:\n self.actionIndex += 1\n return self.actions[0]\n else:\n i = self.actionIndex\n self.actionIndex += 1\n if i < len(self.actions):\n return self.actions[i]\n else:\n self.actionIndex = 0\n return Directions.STOP\n # self.actions = self.spreadOutAndFindDot(state)\n # if len(self.actions) == 1:\n # return self.actions[0]\n # else:\n # self.actionIndex += 1\n # return self.actions[0]\n\n #raise NotImplementedError()",
"def transition_path(self):\n node, path_back = self, []\n while node:\n path_back.append(node.action)\n node = node.parent\n return list(reversed(path_back))",
"def getPath(self):\r\n\t\treturn self.pathToGoal",
"def get_action_path(self, node):\n action_path = []\n \n while node.parent_node:\n # Add this action to the action path\n action_path.append(node.action)\n \n # Reset node's value to the node's parent to save the parent's action\n # in the next iteration\n node = node.parent_node\n \n # Reverse the order of action_path so that actions appear in the order of \n # initial state to goal state\n return action_path[::-1]",
"def replanning_path(self):\n start_state = self.extract_start_state()\n goal_state = self.extract_goal_state()",
"def getAction(self, state):\n width = state.getWidth()\n numPacmen = state.getNumPacmanAgents()\n\n # Give each agent a sector of the map\n sectorStart = int(width*(self.index) / numPacmen)\n sectorEnd = int(width*(self.index + 1) / numPacmen)\n sector = range(sectorStart, sectorEnd)\n\n # Generate path\n if not self.path:\n self.path = self.findPathToClosestDot(state) if self.sectorClean else self.findPathToClosestDot(state, sector)\n # Agent has cached path, execute it\n if self.path:\n return self.path.pop(0)\n else:\n self.sectorClean = True\n return self.getAction(state)\n return 'Stop'",
"def get_closest_distance_to_path(self, path):\n min_distance_to_line = float(\"inf\")\n for p in path:\n game_path = p[:]\n\n game_path.sort(key = lambda coord: calculate_distance(self, coord))\n point_A = game_path[0] # Closest point out of all the points on the path to to the tower\n\n try:\n point_after_A = p[p.index(point_A) + 1]\n point_before_A = p[p.index(point_A) - 1]\n closest_to_A = min(point_after_A, point_before_A, key = lambda point: calculate_distance(point_A, point))\n except:\n if p.index(point_A) == 0:\n closest_to_A = p[p.index(point_A) + 1]\n \n elif p.index(point_A) == len(p) - 1:\n closest_to_A = p[p.index(point_A) - 1]\n finally:\n if closest_to_A[0] != point_A[0]:\n m = (closest_to_A[1] - point_A[1]) / (closest_to_A[0] - point_A[0])\n else:\n m = 2\n\n b = point_A[1] - m * point_A[0]\n\n closest_distance = abs(-m * self.x + self.y - b) / math.sqrt((-m) ** 2 + 1)\n min_distance_to_line = min(closest_distance, min_distance_to_line)\n \n return min_distance_to_line",
"def backtracking(goal):\n path = []\n current = goal\n while current.came_from:\n path.insert(0, current.move)\n current = current.came_from\n return ''.join(path)",
"def solution(self):\n return [node.move for node in self.path()[1:]]",
"def get_one_path(commands):\n path = []\n last_position = (0, 0)\n for command in commands:\n path += list(apply_one_command(last_position, command))\n last_position = path[-1]\n return path",
"def calculate_target_path(self):\n self.path = self.game.find_path(self, self.target)\n if not self.path:\n print(f\"{self.name} can't path to {self.target.name} {self.target.x}, {self.target.y}\")\n self.broken_target(self.target)\n self.target = None",
"def get_action(self, state):\n if self.globSeq == []:\n self.globSeq = self.graphSearch(state)\n if not(self.globSeq):\n return Directions.STOP\n return self.globSeq.pop(0)",
"def get_action(self, state):\n if self.globSeq == []:\n self.globSeq = self.graphSearch(state)\n if not(self.globSeq):\n return Directions.STOP\n return self.globSeq.pop(0)",
"def getPath(self) -> List['StateNode']:\n rest = []\n if self.previous is not None:\n rest = self.previous.getPath()\n return rest + [self]",
"def movee(self):\n\n #return the initial state if he cant move and he's in the initial state\n if not self.move and self.index == 0:\n return self.path[self.index]\n\n #return the goal state if he's at the goal state\n if self.index == len(self.path):\n return self.path[-1]\n\n #return the next move and increments the index attribute\n nextMove = self.path[self.index]\n self.index += 1\n\n return nextMove",
"def get_action(self, state):\n\n if self.path:\n return self.path.pop(0)\n else:\n self.path = self.get_path(state)\n return self.path.pop(0)",
"def path(self, target):\n return self.get_paths(target, use_edges=False, downwards=True)[0]",
"def get_action(self, game_state):\n self.observation_history.append(game_state)\n\n my_state = game_state.get_agent_state(self.index)\n my_pos = my_state.get_position()\n if my_pos != nearest_point(my_pos):\n # We're halfway from one position to the next\n return game_state.get_legal_actions(self.index)[0]\n else:\n return self.choose_action(game_state)",
"def path_to_command_thymio(path):\n\n current_x = path[0][0]\n current_y = path[1][0]\n\n next_x = path[0][1]\n next_y = path[1][1]\n\n # next-prev\n delta_x = path[0][1] - path[0][0]\n delta_y = path[1][1] - path[1][0]\n\n # delat_x = 0 and delta_y = -/+ 1 (or delat_x = -/+ 1 and delta_y = 0): go straight\n turn = STRAIGHT\n\n # delat_x = -1 and delta_y = 1 (or delat_x = 1 and delta_y = -1): turn to the right\n if delta_x * delta_y < 0:\n turn = RIGHT\n\n # delat_x = -1 and delta_y = -1 (or delat_x = 1 and delta_y = 1): turn to the left\n if delta_x * delta_y == 1:\n turn = LEFT\n\n new_path = np.array([path[0][1:], path[1][1:]])\n\n return turn, new_path"
]
| [
"0.7966998",
"0.715944",
"0.7031334",
"0.7026767",
"0.68767905",
"0.6658364",
"0.64656806",
"0.6376818",
"0.6200842",
"0.6165224",
"0.6043223",
"0.60112387",
"0.59972686",
"0.59689665",
"0.5964047",
"0.5963518",
"0.58828336",
"0.5865455",
"0.58330965",
"0.58273715",
"0.5808226",
"0.5799647",
"0.57847744",
"0.57847744",
"0.5754906",
"0.57149184",
"0.57080346",
"0.56964964",
"0.5690157",
"0.56890976"
]
| 0.7227898 | 1 |
Returns the maze distance between any two points, using the search functions you have already built. The gameState can be any game state; Pacman's position in that state is ignored. | def mazeDistance(point1, point2, gameState):
x1, y1 = point1
x2, y2 = point2
walls = gameState.getWalls()
assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False)
return len(search.bfs(prob)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mazeDistance(point1, point2, gameState):\n x1, y1 = int(point1[0]),int(point1[1])\n x2, y2 = int(point2[0]),int(point2[1])\n walls = gameState.getWalls()\n \n assert not walls[x1][y1], 'point1 is a wall: ' + point1\n assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)\n prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False)\n return len(breadthFirstSearch(prob))",
"def distanceFromGhost(self, state):\n pacPos = state.getPacmanPosition()\n ghoPos = state.getGhostPositions()\n d = abs(pacPos[1]-ghoPos[0][1]) + abs(pacPos[0]-ghoPos[0][0])\n return d",
"def getMazeDistanceDefense(self, p1, p2):\n try:\n return self.distancerDefense.getDistance(p1, p2)\n except Exception:\n return self.getMazeDistance(p1, p2)",
"def get_maze_distance(self, pos1, pos2):\n d = self.distancer.get_distance(pos1, pos2)\n return d",
"def __call__(self, state: Grid2D.State):\n if self.problem.goals:\n pos = state.agent_position\n return min([manhattan_distance_2d(pos, g) for g in self.problem.goals])\n return INFINITY",
"def getMazeDistance(self, pos1, pos2):\n\n return self.distancer.getDistance(pos1, pos2)",
"def distance_from_goal(self, state):\n empty_finder_regex = re.compile('\\{}'.format(state.empty_token))\n possible_goal = ['*'] * len(empty_finder_regex.findall(state.text))\n possible_goal += self.desired_arrangement\n possible_goal += ['*'] * (len(state.text) - len(possible_goal))\n\n distance = 0\n for index, bin in enumerate(state.configuration):\n if bin != possible_goal[index]:\n distance += 1\n\n return distance",
"def heuristic(state, problem):\n # It would take a while for Flat Earther's to get accustomed to this paradigm\n # but hang in there.\n node1 = problem.G.node[state]\n node2 = problem.G.node[problem.end_node]\n xy1 = ((node1['x'],0,0), (node1['y'],0,0))\n xy2 = ((node2['x'],0,0), (node2['y'],0,0))\n return util.points2distance(xy1, xy2)\n # util.raiseNotDefined()",
"def findPathToClosestDot(self, gameState):\n\n # Here are some useful elements of the startState\n # startPosition = gameState.getPacmanPosition()\n # food = gameState.getFood()\n # walls = gameState.getWalls()\n # problem = AnyFoodSearchProblem(gameState)\n\n # *** Your Code Here ***\n problem = AnyFoodSearchProblem(gameState)\n return search.uniformCostSearch(problem)",
"def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)",
"def getGhostGoal(self, gameState):\n enemies = [gameState.getAgentState(i) for i in self.getOpponents(gameState)]\n ghost = [a for a in enemies if not a.isPacman and a.getPosition() != None]\n myPos = self.getCurrentObservation().getAgentState(self.index).getPosition()\n if len(ghost) > 0:\n dis = 9999\n nearestPacman = ghost[0]\n for p in ghost:\n temp = self.getMazeDistance(myPos, p.getPosition())\n if temp < dis:\n dis = temp\n nearestPacman = p\n return nearestPacman.getPosition(), dis\n else:\n return None, None",
"def manhatam_distance(self) -> int:\n return abs(self.position[0]) + abs(self.position[1])",
"def __call__(self, state: Grid2D.State):\n if self.problem.goals:\n pos = state.agent_position\n return max(\n min([abs(pos[0] - g[0]) for g in self.problem.goals]),\n min([abs(pos[1] - g[1]) for g in self.problem.goals]),\n )\n return INFINITY",
"def __manhattan(self, x_state, y_state, x_goal, y_goal):\n distance = (abs(x_state - x_goal) + abs(y_state - y_goal))\n return distance",
"def goal_distance(self, goal_state, current_state):\n relative_goal = self.relative_goal(goal_state, current_state)\n\n goal_distance = {\n \"cube_pos\": 0.0,\n \"cube_quat\": 0.0,\n \"cube_face_angle\": np.linalg.norm(relative_goal[\"cube_face_angle\"]),\n }\n\n return goal_distance",
"def findDistance(self, p1, p2, img, draw=True):\n\n if self.results.multi_hand_landmarks:\n x1, y1 = self.lmList[p1][0], self.lmList[p1][1]\n x2, y2 = self.lmList[p2][0], self.lmList[p2][1]\n cx, cy = (x1 + x2) // 2, (y1 + y2) // 2\n\n if draw:\n cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)\n cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)\n cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)\n\n length = math.hypot(x2 - x1, y2 - y1)\n return length, img, [x1, y1, x2, y2, cx, cy]",
"def foodHeuristic(state, problem):\n import itertools\n\n\n\n def manhattan(startPosition, targetPosition):\n xy1 = startPosition\n xy2 = targetPosition\n return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n\n position, foodGrid = state\n\n return len(foodGrid.asList())\n #\n # \"\"\"\n # The below algorithm is from:\n # https://stackoverflow.com/questions/9994913/pacman-what-kinds-of-heuristics-are-mainly-used\n #\n # Find real/manhattan distance between two currently furthest fruits in labyrinth - let's call that x.\n # Find real/manhattan distance from current Pacman position to the closer of previous two fruits - let's call that y.\n # Then, answer is just: x + y.\n # The interpretation of this x + y formula could be something like this:\n #\n # x - either way, you will have to travel this distance, at least at the end\n # y - while you are at the some of the two furthest fruits, it's better to collect\n # the food that is near to it so you don't have to go back\n # \"\"\"\n # maxFoodPairDistance = 0\n #\n # if len(foodGrid.asList()) >= 2:\n #\n # #calculate manhattan/real distance between each pair of food (all permutations in foodGrid) and find the maximum of them, and\n # #store the pair with max distance in maxFoodPair\n # for foodPair in itertools.permutations(foodGrid.asList(),2):\n # #foodPairDistance = mazeDistance(foodPair[0], foodPair[1], problem.startingGameState)\n # foodPairDistance = manhattan(foodPair[0], foodPair[1])\n # if foodPairDistance >= maxFoodPairDistance:\n # maxFoodPairDistance = foodPairDistance\n # maxFoodPair = foodPair\n #\n # #get the real distance between pacman and nearest food among the max distance food pair we get above. Using real distance instead\n # #of manhattan distance here just to \"reduce\" the number of nodes expand to get additional point. But that's a bit of a cheating\n # #because the mazeDistance function use of breadth First search - which itself is a search with nodes expansion not counted here\n # #minPacmanToFoodDistance = min([mazeDistance(position, foodPosition, problem.startingGameState) for foodPosition in maxFoodPair])\n # minPacmanToFoodDistance = min([manhattan(position, foodPosition) for foodPosition in maxFoodPair])\n #\n # #When only one food left, just return the real distance between pacman and food\n # elif len(foodGrid.asList()) == 1:\n # foodPosition = foodGrid.asList()[0]\n # #minPacmanToFoodDistance = mazeDistance(position, foodPosition, problem.startingGameState)\n # minPacmanToFoodDistance = manhattan(position, foodPosition)\n # else:\n # minPacmanToFoodDistance = 0\n #\n # return minPacmanToFoodDistance + maxFoodPairDistance",
"def manhattan_distance(state, goal):\r\n hval = 0\r\n for index, value in enumerate(state):\r\n if value == 0: # Underestimate by excluding calculation of the blank tile\r\n continue\r\n abs_x = abs((co_ords[index])[0] - (co_ords[goal.index(value)])[0])\r\n abs_y = abs((co_ords[index])[1] - (co_ords[goal.index(value)])[1])\r\n hval += abs_x + abs_y\r\n return hval",
"def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n\n \"*** YOUR CODE HERE ***\"\n return search.bfs(problem)",
"def manhattenDistance(self, position, goal):\n\n\t\treturn sum(abs(a-b) for a,b in zip(position,goal))",
"def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition()\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState)\n\n \"*** YOUR CODE HERE ***\"\n return breadthFirstSearch(problem)\n # util.raiseNotDefined()",
"def distance(self, other_room):\n return self.p[0].distanceSquare(other_room.p[0])",
"def solve(instructions: Iterator[Instruction], state: StateProtocol) -> int:\n for instruction in instructions:\n state.apply(instruction)\n\n return state.manhatam_distance",
"def SimpleHeuristic(self, gameState, myPos, goal):\n return self.getMazeDistance(myPos, goal)",
"def heuristic_manhattan_distance(self):\n distance = 0\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n i1, j1 = self._get_coordinates(self.position[i][j], self.PUZZLE_END_POSITION)\n distance += abs(i - i1) + abs(j - j1)\n\n return distance",
"def heuristic(current, goal):\r\n\r\n return Vector2.fromCell(current).distanceTo(Vector2.fromCell(goal))",
"def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])",
"def player_goal_distance(self) -> float:\n route = self.best_route\n return sum(route.values())",
"def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)",
"def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n # Useful information you can extract from a GameState (pacman.py)\n \"\"\n foodPos = currentGameState.getFood().asList() \n foodDist = [] \n ghostStates = currentGameState.getGhostStates() \n capPos = currentGameState.getCapsules() \n currentPos = list(currentGameState.getPacmanPosition()) \n \n for food in foodPos:\n food2pacmanDist = manhattanDistance(food, currentPos)\n foodDist.append(-1*food2pacmanDist)\n \n if not foodDist:\n foodDist.append(0)\n\n return max(foodDist) + currentGameState.getScore()"
]
| [
"0.7405596",
"0.62337524",
"0.6202874",
"0.61033416",
"0.60770017",
"0.5981286",
"0.5948442",
"0.5916513",
"0.58608997",
"0.58241653",
"0.5822506",
"0.5700954",
"0.5659414",
"0.5651273",
"0.56431204",
"0.56091815",
"0.5609093",
"0.56061876",
"0.5589039",
"0.5576174",
"0.5566773",
"0.55030054",
"0.5494721",
"0.54876333",
"0.54545903",
"0.54526603",
"0.54477984",
"0.5438109",
"0.5428188",
"0.5400387"
]
| 0.7542628 | 1 |
(file open for reading) -> query dictionary Read query_file and return information in the query dictionary format. | def process_query(query_file):
query_data = query_file.readlines()
query_dict = {}
x = 1
search_dict = {}
search_dict['username'] = query_data[x].strip('\n')
x += 1
operation_list = []
while query_data[x] != 'FILTER\n':
operation_list.append(query_data[x].strip('\n'))
x += 1
search_dict['operations'] = operation_list
query_dict['search'] = search_dict
x += 1
filter_dict = {}
filter_format(filter_dict, query_data, 'name-includes', x)
filter_format(filter_dict, query_data, 'location-includes', x)
filter_format(filter_dict, query_data, 'follower', x)
filter_format(filter_dict, query_data, 'following', x)
query_dict['filter'] = filter_dict
present_dict = {}
sort_by = query_data[-2].strip('sort-by ')
present_dict['sort-by'] = sort_by.strip('\n')
format_type = query_data[-1].lstrip('format ')
present_dict['format'] = format_type
query_dict['present'] = present_dict
return query_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_query (file):\n\n # initialize all the dictionaries and lists we will be using\n query_data = {}\n query_data ['search'] = {'operations':[]}\n query_data ['filter'] = {}\n query_data ['present'] = {}\n\n temp = ''\n\n file.readline() # for when the file says SEARCH\n\n query_data ['search']['username'] = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'FILTER': # go until the the filter section\n query_data ['search']['operations'].append (temp)\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'PRESENT': # go until the present section\n # we make the key everything from the beginning to the first space\n # then the value is everything after the first space\n query_data ['filter'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != '': # go until the end of the file\n # same process as the previous while loop\n query_data ['present'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n return query_data",
"def process_query(file: TextIO) -> 'NGOMatch':\n query_dict = {}\n query_dict['skills'] = {}\n query_dict['interest'] = []\n line = file.readline().strip()\n \n query_dict['skills']['technical'] = []\n query_dict['skills']['interpersonal'] = []\n \n line = file.readline().strip()\n line = file.readline().strip()\n while line != 'Interpersonal':\n query_dict['skills']['technical'].append(line)\n line = file.readline().strip() \n \n line = file.readline().strip()\n while line != 'INTEREST':\n query_dict['skills']['interpersonal'].append(line)\n line = file.readline().strip()\n \n line = file.readline().strip() \n while line != 'NUMBER':\n query_dict['interest'].append(line)\n line = file.readline().strip()\n \n line = file.readline().strip()\n while line != 'SORT':\n query_dict['number'] = line\n line = file.readline().strip()\n \n line = file.readline().strip()\n while line != '':\n if line[:5] == 'skill':\n query_dict['sort-by']['skill'] = line[5:].strip()\n if line [:8] == 'interest':\n query_dict['sort-by']['interest'] = line[8:].strip()\n line = file.readline().strip()\n \n return query_dict",
"def load_query(query_filename):\n with open(query_filename) as f:\n return f.read()",
"def fetch_dictionary(query):\n\n dictionary = {}\n\n query = re.sub(r'[0-9]*\\(', '', query)\n query = re.sub(r'\\)', '', query)\n query_words = [pre_process(term) for term in query.split()] # Terms are pre-processed before fetched\n\n pattern_word = re.compile('^[a-z0-9]*')\n pattern_list = re.compile('^[a-z0-9]*: ')\n\n with open(DICTIONARY_FILE, \"r\") as file:\n for line in file:\n word_in_dictionary = re.findall(pattern_word, line)[0]\n\n if any(word_in_dictionary == word for word in query_words):\n result = re.split(pattern_list, line)\n\n # ast library allows the text to be converted directly to dictionary to appropriate data type\n dictionary[word_in_dictionary] = ast.literal_eval(result[1])\n\n file.close()\n\n return dictionary",
"def get_data_query(file_name):\n with open(file_name, 'r') as graphql_query:\n return graphql_query.read()",
"def readQueryFromFile(location):\n try:\n queryFileHandler = open(location, \"r\")\n except EnvironmentError as e:\n errMsg = (\"Error reading the SMT query from the file \"\n \"located at %s: %s\" % (location, e))\n raise GameTimeError(errMsg)\n else:\n with queryFileHandler:\n return Query(queryFileHandler.read())",
"def readQrelsDict(fileName):\n result = {}\n for e in readQrels(fileName):\n result.setdefault(e.queryId, {})[e.docId] = int(e.relGrade)\n return result",
"def read_sql_file(file_path: str, query_format: QueryFormat = QueryFormat.CROSS_PRODUCT) \\\n -> Tuple[Dict, str, str, str]:\n\n if not file_path or file_path.split(\".\")[-1] != \"sql\":\n raise ValueError(\"The given file-path doesn't point to a .sql file. Please correct this!\")\n\n if not os.path.isfile(file_path):\n raise ValueError(\"The given path does not point to an existing file!\")\n\n if query_format != QueryFormat.CROSS_PRODUCT and query_format != QueryFormat.JOIN_ON:\n raise ValueError(\"Incorrect QueryFormat given!\")\n\n with open(file_path) as file:\n sql_file = file.read()\n\n sql_commands = sql_file.split(\";\\n\")\n\n command_dict = {}\n\n for command in sql_commands:\n command = re.sub(re.escape(\"SELECT COUNT(*) FROM\"), \"\", command, flags=re.IGNORECASE)\n command = re.split(\"WHERE\", command, flags=re.IGNORECASE)\n if len(command) > 1 and command[0] and command[1]:\n if query_format == QueryFormat.CROSS_PRODUCT:\n tables = command[0].strip().split(\",\")\n elif query_format == QueryFormat.JOIN_ON:\n join_atts = re.findall(r\"\\((.*?)\\)\", command[0])\n tables = re.sub(r\"\\((.*?)\\)\", \"\", command[0])\n tables = re.sub(re.escape(\" INNER JOIN \"), \",\", tables, flags=re.IGNORECASE)\n tables = re.sub(re.escape(\" ON \"), \"\", tables, flags=re.IGNORECASE)\n tables = [tab.strip() for tab in tables.split(\",\")]\n tables.sort()\n command[0] = \",\".join(tables)\n command[1] = command[1].strip()\n if command[0] not in command_dict:\n command_dict[command[0]] = []\n\n if query_format == QueryFormat.JOIN_ON:\n command[1] = \" AND \".join(join_atts) + \" AND \" + command[1]\n\n command_dict[command[0]].append(command[1])\n\n return command_dict, \"sql\", \",\", \"\"",
"def parse_database(dict_file):\n database = {}\n with open(dict_file) as input_file:\n for line in (line for line in input_file if line[0] != '#'):\n key, value = line.strip().split(maxsplit=1)\n database[key] = value\n return database",
"def load_queries(self, file):\n queries = []\n with open(file, 'r') as f:\n for line in f:\n reg_match = re.match(r'^(\\d+).(.*)', line)\n tokens = self.es_helper.get_tokens(reg_match.group(2).strip())\n queries.append(Query(reg_match.group(1).strip(), self.es_helper, tokens))\n self.queries = queries",
"def read_data(db_name, query_file):\r\n con = sqlite3.connect(db_name)\r\n cursor = con.cursor()\r\n\r\n sql = open(query_file,'r')\r\n query = sql.read()\r\n sql.close()\r\n\r\n data = pd.read_sql_query(query, con=con)\r\n data.drop_duplicates(subset=['Title'], inplace=True)\r\n data = data[data['Type']=='movie']\r\n data.set_index('imdbID', inplace=True)\r\n\r\n con.commit()\r\n con.close()\r\n\r\n return data",
"def access(self, queryName):\n self.fileHandle = open(self.fileName, 'r+b')\n self.mm = mmap.mmap(self.fileHandle.fileno(), 0)\n self.mm.seek(self.records[queryName])\n row = self.mm.readline().decode('utf-8').rstrip().split(\"\\t\")\n self.fileHandle.close()\n\n return self.pretty(row)",
"def run_search(dict_file, postings_file, queries_file, results_file):\n print('running search on the queries...')\n\n with open(dict_file, mode=\"rb\") as dictionary_file,\\\n open(postings_file, mode=\"rb\") as posting_file,\\\n open(queries_file, encoding=\"utf8\") as q_in,\\\n open(results_file, mode=\"w\", encoding=\"utf8\") as q_out:\n\n ''' load dictionary and postings '''\n # dict(k,v) -> token, Entry(frequency, offset, size)\n # postings -> the dict containing the entries and metadata of the postings file\n # skiplist -> list of all doc IDs\n dictionary = pickle.load(dictionary_file)\n postings = Posting(dictionary, posting_file)\n file_list = postings['__all__']\n\n ''' process query, and write the query result to result file '''\n for query in q_in:\n query = preprocess_query(query)\n algebra = boolean.BooleanAlgebra()\n # Simplify query, e.g. tautology\n expression = algebra.parse(query, simplify=True)\n # special cases after simplification\n if str(expression) == \"0\":\n print(\"\", end='\\n', file=q_out)\n continue\n elif str(expression) == \"1\":\n print(\" \".join(map(str, file_list)), end='\\n', file=q_out)\n continue\n\n print(\" \".join(map(str, shunting(get_input(str(expression))).eval(\n postings, file_list).list)), end='\\n', file=q_out)\n\n # add posting skiplist and list of all docIDs to corresponding symbol\n # for sym in expression.symbols:\n # if normalize(sym) == \"IGNORE\":\n # norm_sym = str(normalize(sym))\n # setattr(sym, \"obj\", norm_sym)\n # setattr(sym, \"skiplist\", postings[norm_sym])\n # setattr(sym, \"list\", postings[norm_sym].list)\n # setattr(sym, \"file_list\", file_list.list)\n\n # evaluate the query\n # args[]: list of sub-terms\n # For symbols and base elements this tuple is empty,\n # for boolean functions it contains one or more symbols, elements or sub-expressions.\n # print(\" \".join(map(str, expression.evaluate_query(expression.args).list)),\n # end='\\n', file=q_out)",
"def sdf_reader(cls, filename, dbIdentifier = \"LM_ID\"):\n res_dict = {}\n with open(filename) as fp:\n line = fp.readline()\n line_id = \"\"\n line_dict = {}\n while line:\n if line.startswith(\">\"):\n if dbIdentifier in line:\n if line_id:\n res_dict[line_id] = line_dict\n line_dict = {}\n line_id = \"\"\n line_id = fp.readline().rstrip()\n else:\n key = line.split(\"<\")[1].split(\">\")[0]\n line_dict[key] = fp.readline().rstrip()\n line = fp.readline()\n\n fp.close()\n return res_dict",
"def lookup(collated_file,query_file):\r\n x=open(query_file,\"r\")\r\n query=[]\r\n for i in x:\r\n i=i.replace(\"\\n\",\"\")\r\n query.append(i)\r\n y=open(collated_file,\"r\")\r\n collection=[]\r\n for i in y :\r\n i=i.replace(\"\\n\",\"\")\r\n i=i.split(\":\")\r\n collection.append(i)\r\n answer=[]\r\n for i in range(len(query)):\r\n answer.append(BinarySearch(collection,0,len(collection)-1,query[i]))\r\n y = open(\"song_ids.txt\", \"w\")\r\n for i in range(len(answer)):\r\n y.write(str(answer[i]) + \"\\n\")",
"def read_qvalues(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n nif = db['nif']\n qvalue = db['qvalue']\n year = db['year']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, nif, year, qvalue, methodvalues",
"def _read_query(self):\n try:\n # Open Google Drive and read the sql file\n self.query = GDrive().read_drive_file(self.input_source_id)\n except Exception as e:\n raise e",
"def read_query_files(**kwargs):\n # Read query files contents and write to query_list\n query_list = {\"query_group\": \"\", \"queries\": []}\n query_group = kwargs[\"queries_dir\"].split(\"/\")[-1]\n query_list.update(query_group=query_group)\n logging.debug(\"Queries dir: \" + kwargs[\"queries_dir\"])\n try:\n for query_filename in sorted(os.listdir(kwargs[\"queries_dir\"])):\n logging.debug(\"Validating query filename: \" + query_filename)\n if validate_query_file(query_filename=query_filename):\n with open(\n kwargs[\"queries_dir\"] + \"/\" + query_filename, \"r\"\n ) as query_filepath:\n logging.debug(\n \"Reading query with filename: \" + query_filename\n )\n query_mapdql = query_filepath.read().replace(\"\\n\", \" \")\n query_mapdql = query_mapdql.replace(\n \"##TAB##\", kwargs[\"source_table\"]\n )\n query_list[\"queries\"].append(\n {\"name\": query_filename, \"mapdql\": query_mapdql}\n )\n logging.info(\"Read all query files\")\n return query_list\n except FileNotFoundError:\n logging.exception(\"Could not find queries directory.\")\n return False",
"def read_file(file_path):\n\n output_dict = dict()\n try:\n if os.path.exists(file_path):\n with open(file_path) as fd:\n output = fd.readlines()\n for idx in range(len(output)):\n key_info = output[idx].split('=')[0].strip()\n value_info = output[idx].split('=')[1].strip()\n output_dict[key_info] = value_info\n return output_dict\n except Exception as e:\n SysTools.logger.warning(\"Read file:%s failed, reason:%s\" % (file_path, str(e)))",
"def read(self):\n dictionary = {}\n with open(self.path) as file:\n key_header = \"\"\n for line in file:\n entry = line.strip().split()\n if len(entry) == 0:\n continue\n if len(entry) == 1:\n key_header = entry[0]+\"_\"\n else:\n key = entry[0].strip()\n value = reduce(lambda x1, y1: x1+\" \" + y1, entry[1:])\n dictionary[key_header+key] = value\n return dictionary",
"def run_search(dict_file, postings_file, queries_file, results_file):\n print('running search on the queries...')\n\n with open(dict_file, mode=\"rb\") as dictionary_file,\\\n open(postings_file, mode=\"rb\") as posting_file,\\\n open(queries_file, encoding=\"utf8\") as q_in,\\\n open(results_file, mode=\"w\", encoding=\"utf8\") as q_out:\n\n ''' \n load dictionary and postings \n - num_of_doc -> The number of the documents indexed\n - dict(k,v) -> token, Enftry(frequency, offset, size)\n - postings -> list of tuples (doc ID, token frequency)\n '''\n num_of_doc = pickle.load(dictionary_file)\n dictionary = pickle.load(dictionary_file)\n postings = Posting(dictionary, posting_file)\n\n ''' \n process query, and write the query result (i.e., the 10 \n most relevant doc IDs) to the result file \n '''\n for query in q_in:\n print(*find_10_most_relevant(query, dictionary,\n postings, num_of_doc), end='\\n', file=q_out)",
"def init_query():\n file_path = Config.config['init_files']['query_file']\n with open(file_path) as file:\n for line in file:\n # ignore empty line\n if line == '\\n':\n continue\n yield Query(line)",
"def run(self, file_path: str, save_file_path: str, inner_separator: str = None, outer_separator: str = None,\n query_format: QueryFormat = QueryFormat.CROSS_PRODUCT) \\\n -> Dict[int, Dict[str, List[str or Tuple[str, str]]]]:\n\n if query_format is None: query_format = QueryFormat.CROSS_PRODUCT\n\n command_dict, file_type, inner_separator, outer_separator = self.read_file(file_path=file_path,\n inner_separator=inner_separator,\n outer_separator=outer_separator,\n query_format=query_format)\n solution_dict = self.create_solution_dict(command_dict=command_dict, file_type=file_type,\n inner_separator=inner_separator)\n self.save_solution_dict(solution_dict=solution_dict, save_file_path=save_file_path)\n\n return solution_dict",
"def read(file_):\n if not os.path.isfile(file_):\n raise AssertionError()\n\n dict_ = {}\n for line in open(file_).readlines():\n\n list_ = shlex.split(line)\n\n is_empty = (list_ == [])\n\n if not is_empty:\n is_keyword = list_[0].isupper()\n else:\n is_keyword = False\n\n if is_empty:\n continue\n\n if is_keyword:\n keyword = list_[0]\n dict_[keyword] = {}\n continue\n\n process(list_, dict_, keyword)\n\n dict_ = auxiliary(dict_)\n\n # We perform some basic consistency checks regarding the user's request.\n check_initialization_dict(dict_)\n\n return dict_",
"def load_map_from_sql_stdout(self, in_file, skip_header = False):\n data = dict()\n with open(in_file) as pairs_file:\n for line in pairs_file:\n if skip_header:\n skip_header = False\n continue\n (key, val) = line.strip().split(\"\\t\")\n data[key] = val\n return data",
"def _read_input(self, in_file):\n result = {}\n with open(in_file, \"r\") as f:\n reader = csv.DictReader(f, delimiter=str(\"\\t\"))\n for row in reader:\n result[row[\"accession\"]] = {\n \"transcript_sequence\": row[\"transcript_sequence\"],\n \"cds_start_i\": int(row[\"cds_start_i\"]),\n \"cds_end_i\": int(row[\"cds_end_i\"]),\n }\n\n return result",
"def file_read(filename):\n f = open(filename, 'r') \n d_str = f.read() \n f.close()\n\n d = dict(eval(d_str))\n return d",
"def load_query_distribution(filename):\n\n query_distribution = []\n with open(filename, 'r') as f_in:\n for line in f_in:\n q = json.loads(line)\n query_distribution.append((q['qid'], q['frequency']))\n return query_distribution",
"def read_data(path: str):\n documents = {}\n queries = {}\n relevance = {}\n for doc in json.load(open(path + 'cranfield_data.json')):\n title = re.sub(r'\\s+', ' ', doc['title'])\n body = re.sub(r'\\s+', ' ', doc['body'][len(doc['title']):])\n documents[doc['id']] = Article(title=title, body=body)\n \n for query in json.load(open(path + 'cran.qry.json')):\n queries[query['query number']] = query['query']\n for rel in json.load(open(path + 'cranqrel.json')):\n query_id = int(rel['query_num'])\n doc_id = int(rel['id'])\n if query_id in relevance:\n relevance[query_id].append((doc_id, rel['position']))\n else:\n relevance[query_id] = [(doc_id, rel['position'])]\n return documents, queries, relevance",
"def run_queries_file(self, file_path, semantic_flag, city_choice,stem, result_path=\"\"):\n api = datamuse.Datamuse()\n\n with open(file_path , \"r\") as q:\n queries = dict()\n queries_list = q.read().split(\"</top>\")\n for query in queries_list:\n if query == \"\":\n continue\n tmp = query.split(\"<title>\")\n query_number= tmp[0].split(':')[1].replace('\\n','').replace(' ','')\n tmp= tmp[1].split(\"<desc>\")\n query_content =tmp[0].replace('\\n',' ')\n semantic_words = \"\"\n # ask from api the synonyms of each word on query\n if semantic_flag:\n for word in query_content.split():\n synonyms = api.suggest(s=word, max=1)\n for item in synonyms:\n if item[\"word\"] != word.lower():\n if item[\"word\"].split()[0] == word.lower():\n item[\"word\"]=item[\"word\"].split()[1]\n semantic_words += \" \" + item[\"word\"]\n #add the synonyms into query content\n query_content += semantic_words\n #add the description into query content\n queries[query_number] = self.remove_stop_words(query_content+tmp[1].split(\"<narr>\")[0][12:].replace('\\n',' '))\n\n p = Parse.Parser(self.stop_words)\n searcher = Searcher(queries, self.term_dictionary if not stem else self.term_dictionary_with_stemming, self.documents, self.avgl, self.posting_and_dictionary_path,p)\n\n results = searcher.run(city_choice,stem)\n #write the results to disk\n if result_path != \"\":\n self.write_results_to_disk(result_path,results)\n return results"
]
| [
"0.7843726",
"0.7040409",
"0.6731658",
"0.6676415",
"0.65146685",
"0.62667567",
"0.62590575",
"0.6256823",
"0.6215728",
"0.62133837",
"0.61976427",
"0.6147864",
"0.61316043",
"0.61125714",
"0.60755527",
"0.6060231",
"0.6012341",
"0.6004686",
"0.59304434",
"0.5879766",
"0.5842372",
"0.5842293",
"0.5822576",
"0.5779766",
"0.57570463",
"0.57526577",
"0.5737886",
"0.57362527",
"0.5721251",
"0.57022786"
]
| 0.80009 | 0 |
(dict, list, str, int) -> dict Return a dict with key filter_type of query_data given the index. | def filter_format(filter_dict, query_data, filter_type, index):
filter_list = ''
count = 0
while query_data[index] != 'PRESENT\n':
if filter_type in query_data[index]:
count += 1
filter_keyword = query_data[index].strip(filter_type)
filter_list = (filter_keyword.strip('\n'))
index += 1
if count > 0:
filter_dict[filter_type] = filter_list
return filter_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __index_data_body(index, doc_type, doc_id, source):\n\n index_data = {\n \"_index\": index,\n \"_type\": doc_type,\n \"_id\": doc_id,\n \"_source\": source\n }\n\n return index_data",
"def fetch_querydict(self):\n query = dict()\n query[\"filtered\"] = dict()\n if self.q_dict and isinstance(self.q_dict, dict):\n query_list, filter_list = self.build_query_structure()\n if query_list:\n query[\"filtered\"][\"query\"] = {\"bool\": {\"must\": query_list}}\n if filter_list:\n query[\"filtered\"][\"filter\"] = {\"bool\": {\"must\": filter_list}}\n return query",
"def format_data(\n start: Optional[Union[datetime.datetime, int]] = None,\n filters: Optional[List[str]] = None,\n) -> dict:\n data: dict = {}\n\n if filters:\n data[\"filter\"] = {\"type\": filters}\n if isinstance(start, int):\n data[\"idFrom\"] = start\n elif start:\n data[\"dateFrom\"] = start\n\n return data",
"def _convertFilterToSQLite(self, mongofilter: t.Mapping[t.Text, t.Any]\n ) -> t.Mapping[t.Text, t.Any]:\n if any(field[0] == \"$\" for field in mongofilter):\n invalid_fields = [f for f in mongofilter if f[0] == \"$\"]\n warnings.warn(RuntimeWarning(\n f\"You have a top level mongo query operator {invalid_fields} \"\n \"in your filter. This should work, as long as you don't try \"\n \"querying any non-sqlite primitive types, ie array/object.\"))\n return mongofilter\n\n if not any(op[0] == \"$\" for val in mongofilter.values()\n if isinstance(val, dict) # val might be just a str\n for op in val\n ):\n # check if any column filters have a mongodb query operator\n # ie $eq, $lte, etc.\n # if they don't, we can easily convert the given row to sql\n sql_filter, = self._convertDataToSQLite((mongofilter, ))\n return sql_filter\n\n sql_filter = dict()\n prim_types = {\n TDX_TYPE.BOOLEAN, TDX_TYPE.DATE, TDX_TYPE.STRING, TDX_TYPE.NUMBER}\n banned_types = {TDX_TYPE.NDARRAY}\n\n # convert ugly TDX Data Schema to Map of Column to Type.\n # might need to be changed in the future for new TDX dataschema schema\n dataschema: t.Dict[t.Text, TDX_TYPE] = {}\n for column, column_type in self.tdx_data_schema.items():\n if isinstance(column_type, collections.abc.Mapping):\n dataschema[column] = TDX_TYPE(\n column_type.get(\"__tdxType\", [TDX_TYPE.OBJECT])[0])\n elif isinstance(column_type, collections.abc.Sequence):\n dataschema[column] = TDX_TYPE.ARRAY\n\n for field, val in mongofilter.items():\n tdx_type = dataschema[field]\n if tdx_type in prim_types: # no need for conversion\n sql_filter[field] = val\n continue\n\n if tdx_type in banned_types: # cannot query\n raise TypeError(\n f\"All queries are banned on tdx_type {tdx_type}. \"\n \"Given item was {field}.\")\n\n if not isinstance(val, dict) or all(op[0] != \"$\" for op in val):\n # val is array/or dict with NO mongo query ops\n # can convert normally\n con_row, = self._convertDataToSQLite([{field: val}])\n sql_filter[field] = con_row[field]\n continue\n\n raise TypeError(\n \"MongoDB Style Queries are only supported on items \"\n f\"with TDX Type values of {prim_types}. Given \"\n \"item was {field} with type {tdx_type}. \"\n f\"Mongo Op given was {next(op for op in val if op[0] == '$')}\")\n return sql_filter",
"def build_query_structure(self):\n query_list = list()\n filter_list = list()\n for key, val in self.q_dict.items():\n if key in self.es_query_keys:\n query_list.append(\n {\"match\": {\".\".join(key.split(\"_\")): val[0]}})\n elif key in self.es_date_keys:\n filter_list.append(\n {\"range\": {\".\".join(key.split(\"_\")): val}})\n elif \":\" in val[0]:\n #for handling queries like dd_dct=gte:1\n range_val = val[0].split(\":\")\n filter_list.append({\"range\": {\".\".join(key.split(\"_\")): {\n range_val[0]: int(range_val[1])}}})\n else:\n filter_list.append(\n {\"terms\": {\".\".join(key.split(\"_\")): val}})\n return query_list, filter_list",
"def perform_query(index_node, query_type):\n\n when = now()\n\n resp = do_query(index_node.name,\n [filter.value for filter in query_type.filters.all()],\n [facet.name for facet in query_type.facets.all()])\n\n client, _ = Host.objects.get_or_create(name=gethostname())\n \n response = Response.objects.create(\n index_node=index_node,\n status_code=resp['status_code'],\n datetime=when,\n client=client,\n query_type=query_type)\n\n if response.status_code == 200:\n \n response_data = ResponseData.objects.create(\n num_found=resp['num_found']\n )\n for facet_name, counts in resp['facet_counts'].items():\n for value, count in counts.items():\n facet, _ = Facet.objects.get_or_create(name=facet_name)\n facet_value, _ = \\\n FacetValue.objects.get_or_create(facet=facet,\n value=value)\n facet_value_count = \\\n FacetValueCount.objects.create(count=count,\n facet_value=facet_value,\n response_data=response_data)\n response.data = response_data\n response.save()",
"def indexRecords(self,indexTypes):\n indexed = self.indexed = {}\n for type in indexTypes:\n indexed[type] = {}\n for record in self.records:\n type = record.name\n if type in indexTypes:\n indexed[type][record.getId().lower()] = record",
"def create_or_update_filter(column, value, filter_type='eq', _filter=None):\n if _filter is None:\n _filter = {}\n\n _filter[column] = {filter_type: value}\n\n return _filter",
"def _extract_filter_type_and_value(data):\n if data.startswith(\"in:\"):\n value = list(six.text_type(data[3:]).split(\",\"))\n filter_type = 'in'\n elif data.startswith(\"nin:\"):\n value = list(six.text_type(data[4:]).split(\",\"))\n filter_type = 'nin'\n elif data.startswith(\"neq:\"):\n value = six.text_type(data[4:])\n filter_type = 'neq'\n elif data.startswith(\"gt:\"):\n value = six.text_type(data[3:])\n filter_type = 'gt'\n elif data.startswith(\"gte:\"):\n value = six.text_type(data[4:])\n filter_type = 'gte'\n elif data.startswith(\"lt:\"):\n value = six.text_type(data[3:])\n filter_type = 'lt'\n elif data.startswith(\"lte:\"):\n value = six.text_type(data[4:])\n filter_type = 'lte'\n elif data.startswith(\"eq:\"):\n value = six.text_type(data[3:])\n filter_type = 'eq'\n elif data.startswith(\"has:\"):\n value = six.text_type(data[4:])\n filter_type = 'has'\n else:\n value = data\n filter_type = 'eq'\n\n return filter_type, value",
"def filter_query_result(self, result, varenv):\n if isinstance(result, list):\n filter_result = []\n for elem in result:\n # need this pointer to get index results properly sorted.\n elem.list = result\n filter_result.append(self.filter_query_result(elem, varenv))\n elif isinstance(result, dict):\n filter_result = {}\n for key, asked in result.query.original_query.iteritems():\n if key[0] in '@:':\n basekey = key[1:]\n if basekey == 'id':\n filter_result[key] = asked\n # horrible hack to collect up the guids we care about...\n if asked is None:\n varenv.guid_list.append(result[key[0] + 'guid'])\n elif (basekey in QueryPrimitive.directives or\n basekey in QueryPrimitive.special):\n # should we output these?\n filter_result[key] = asked\n elif key[0] == '@' and result.query.get(\n '@optional') and key not in result:\n # XXX here we actually will give you an empty result\n # we could give you nothing at all\n filter_result[key] = None\n elif basekey == 'guid':\n filter_result[key] = result[key]\n elif basekey == 'value':\n # sanitize results.\n filter_result[key] = self.sanitize_value(\n result[key], result[key[0] + 'datatype'], varenv)\n elif basekey in QueryPrimitive.values:\n # this better be what you said!!!\n filter_result[key] = result[key]\n elif basekey == 'index':\n filter_result[key] = self.generate_index_read_result(result)\n elif basekey in QueryPrimitive.pointers:\n # might be direct sub-query or constraint, or query\n if isinstance(asked, dict):\n # sub-query, return it\n filter_result[key] = self.filter_query_result(result[key], varenv)\n else:\n if asked is None:\n # we'll be asking for the id of this thing, not just the guid.\n varenv.lookup_manager.guid_list.append(result[key])\n\n # just give back the guid\n filter_result[key] = result[key]\n elif valid_relname(key):\n # skip optional results we didn't get a value for.\n if result.query.get('@optional') and key not in result:\n # XXX should we give you None as a result rather than leaving it out completely?\n pass\n else:\n # is this a ResultError or an InternalError?\n if key not in result:\n raise MQLInternalError(\n result.query, \"No return result for '%(key)s'\", key=key)\n else:\n filter_result[key] = self.filter_query_result(result[key], varenv)\n\n elif key[0] == '?':\n # it's possible that we didn't find any order information, so give back null in that case\n filter_result[key] = result.get(key, None)\n else:\n raise MQLInternalError(\n result.query,\n \"Didn't expect to see %(key)s in original query while filtering\",\n key=key)\n\n result.filter = filter_result\n elif result is None:\n # there's no result here even though we expected one.\n filter_result = result\n else:\n raise MQLInternalError(\n result.query, \"Didn't understand result\", result=result)\n\n return filter_result",
"def data(\n self,\n index: Optional[Union[int, slice, str]] = None,\n ) -> Union[Dict, List[Dict]]:\n self._retrieve_data()\n if index is None:\n return self._result_data.copy()\n if isinstance(index, (int, slice)):\n return self._result_data[index]\n if isinstance(index, str):\n return [data for data in self._result_data if data.get(\"job_id\") == index]\n raise TypeError(f\"Invalid index type {type(index)}.\")",
"def get_default_filters_dict(class_of_filters,measure,**filters):\n\tif \"datadrop__in\" in filters:\n\t\tfilters.pop(\"datadrop__in\")\n\tif class_of_filters==\"short_student\":\n\t\treturnDict= {'All':{},\n\t\t\t'Male':{'upn__gender':\"M\"},\n\t\t\t'Female':{'upn__gender':\"F\"},\n\t\t\t'PP':{'upn__pp':True},\n\t\t\t'NPP':{'upn__pp':False},\n\t\t\t'EAL':{'upn__eal':True},\n\t\t\t'LAC':{'upn__lac':True},\n\t\t\t'FSM Ever':{'upn__fsm_ever':True},\n\t\t\t'NSEN':{'upn__sen':\"N\"},\n\t\t\t'KSEN':{'upn__sen':\"K\"},\n\t\t\t'EHCP':{'upn__sen':\"E\"},\n\t\t\t'All Lower':{'upn__wide_banding':\"L\"},\n\t\t\t'All Middle':{'upn__wide_banding':\"M\"},\n\t\t\t'All Higher':{'upn__wide_banding':\"H\"},\n\t\t\t'No Band':{'upn__wide_banding':\"N\"}\n\t\t\t}\n\telif class_of_filters==\"student\":\n\t\treturnDict= {'All':{},\n\t\t\t'Male':{'upn__gender':\"M\"},\n\t\t\t'Female':{'upn__gender':\"F\"},\n\t\t\t'PP':{'upn__pp':True},\n\t\t\t'NPP':{'upn__pp':False},\n\t\t\t'EAL':{'upn__eal':True},\n\t\t\t'LAC':{'upn__lac':True},\n\t\t\t'FSM Ever':{'upn__fsm_ever':True},\n\t\t\t'NSEN':{'upn__sen':\"N\"},\n\t\t\t'KSEN':{'upn__sen':\"K\"},\n\t\t\t'EHCP':{'upn__sen':\"E\"},\n\t\t\t'Lower Extreme':{'upn__narrow_banding':\"Lx\"},\n\t\t\t'Lower':{'upn__narrow_banding':\"L\"},\n\t\t\t'Middle':{'upn__narrow_banding':\"M\"},\n\t\t\t'Middle (Lower)':{'upn__narrow_banding':\"Ml\"},\n\t\t\t'Middle (Higher)':{'upn__narrow_banding':\"Mh\"},\n\t\t\t'Higher':{'upn__narrow_banding':\"H\"},\n\t\t\t'Higher Extreme':{'upn__narrow_banding':\"Hx\"},\n\t\t\t'No Band':{'upn__wide_banding':\"N\"},\n\t\t\t'Low Boys':{'upn__wide_banding':\"L\",'upn__gender':\"M\"},\n\t\t\t'Middle Boys':{'upn__wide_banding':\"M\",'upn__gender':\"M\"},\n\t\t\t'High Boys':{'upn__wide_banding':\"H\",'upn__gender':\"M\"},\n\t\t\t'Low Girls':{'upn__wide_banding':\"L\",'upn__gender':\"F\"},\n\t\t\t'Middle Girls':{'upn__wide_banding':\"M\",'upn__gender':\"F\"},\n\t\t\t'High Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\"},\n\t\t\t'High Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\"},\n\t\t\t'Low PP Boys':{'upn__wide_banding':\"L\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'Middle PP Boys':{'upn__wide_banding':\"M\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'High PP Boys':{'upn__wide_banding':\"H\",'upn__gender':\"M\",'upn__pp':True},\n\t\t\t'Low PP Girls':{'upn__wide_banding':\"L\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t'Middle PP Girls':{'upn__wide_banding':\"M\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t'High PP Girls':{'upn__wide_banding':\"H\",'upn__gender':\"F\",'upn__pp':True},\n\t\t\t}\n\telif class_of_filters==\"att8bucket\":\n\t\treturnDict= {'All':{},\n\t\t\t'Maths':{'subject__attainment8bucket':'ma'},\n\t\t\t'English':{'subject__attainment8bucket':'en'},\n\t\t\t'EBacc':{'subject__attainment8bucket':'eb'},\n\t\t\t'Open':{'subject__attainment8bucket':'op'},\n\t\t\t}\n\telif class_of_filters==\"banding\":\n\t\treturnDict= {'All':{},\n\t\t\t'All Lower':{'upn__wide_banding':'L'},\n\t\t\t'Lower Extreme':{'upn__narrow_banding':'Lx'},\n\t\t\t'Lower':{'upn__narrow_banding':'L'},\n\t\t\t'All Middle':{'upn__wide_banding':'M'},\n\t\t\t'Middle (Lower)':{'upn__narrow_banding':'Ml'},\n\t\t\t'Middle (Higher)':{'upn__narrow_banding':'Mh'},\n\t\t\t'All Higher':{'upn__wide_banding':'H'},\n\t\t\t'Higher':{'upn__narrow_banding':'H'},\n\t\t\t'Higher Extreme':{'upn__narrow_banding':'Hx'},\n\t\t\t'No Banding':{'upn__wide_banding':'N'},\n\t\t\t}\n\telif class_of_filters==\"subject_blocks\":\n\t\treturnDict= 
{'All':{},\n\t\t\t'Core':{'subject__option_subject':False},\n\t\t\t'Option':{'subject__option_subject':True},\n\t\t\t'EBacc':{'subject__ebacc_subject':True},\n\t\t\t'Non-EBacc':{'subject__ebacc_subject':False},\n\t\t\t}\n\telif \"staff\" in class_of_filters:\n\t\tfilters.pop('datadrop',None)\n\t\tfilters.pop('datadrop__name',None)\n\t\tif \"classgroup\" in filters:\n\t\t\tfilters['class_code']=filters['classgroup'].class_code\n\t\t\tfilters.pop('classgroup',None)\n\t\treturnDict={'All':{}}\n\t\tstaff_set=set(classgroup.objects.filter(**filters).exclude(staff=\"---\")\n\t\t\t.values_list('staff').distinct())\n\t\tstaff_list=[]\n\t\tfor st in staff_set:\n\t\t\tfor s in st:\n\t\t\t\tstaff_list.append(s)\n\t\tstaff_list.sort()\n\t\tfor code in staff_list:\n\t\t\tclasses=classgroup.objects.filter(staff=code,**filters).distinct()\n\t\t\tif \"short\" not in class_of_filters:\n\t\t\t\tfor cl in classes:\n\t\t\t\t\treturnDict[code+\" \"+cl.class_code]={\"classgroup\":cl}\n\t\t\treturnDict['All ' +code]={\"classgroup__in\":classes}\n\telse:\n\t\t\"\"\"if not a fixed set of filters, populate from objects in db based on\n\t\tclass, code specific to each class removes invalid filters and replaces\n\t\tthem with valid ones where possible\"\"\"\n\t\tif class_of_filters==\"classgroup\" :\n\t\t\tfilters.pop('datadrop',None)\n\t\t\tfilters.pop('datadrop__name',None)\n\t\t\tif \"classgroup\" in filters:\n\t\t\t\tfilters['class_code']=filters['classgroup'].class_code\n\t\t\t\tfilters.pop('classgroup',None)\n\n\t\telif class_of_filters==\"subject\" or class_of_filters==\"faculty\":\n\t\t\tif \"subject\" in filters:\n\t\t\t\tfilters['name']=filters['subject'].name\n\t\t\t\tfilters.pop('subject',None)\n\t\t\telif \"subject__name\" in filters:\n\t\t\t\tfilters['name']=filters['subject__name']\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tfilters.pop('datadrop',None)\n\t\t\tfilters.pop('datadrop__name',None)\n\n\t\telif class_of_filters==\"datadrop\":\n\t\t\tif \t\"datadrop__name\" in filters:\n\t\t\t\tfilters['name']=filters['datadrop__name']\n\t\t\t\tfilters.pop('datadrop__name',None)\n\t\t\tif \"datadrop\" in filters:\n\t\t\t\tfilters['id']=filters['datadrop'].id\n\t\t\t\tfilters.pop('datadrop',None)\n\t\t\tif \"subject\" in filters or \"faculty\" in filters:\n\t\t\t\tfilters['cohort__in']=yeargroup.objects.filter(\n\t\t\t\t\tsubject=filters['subject'])\n\t\t\t\tfilters.pop('subject',None)\n\t\t\telif \"subject__name\" in filters:\n\t\t\t\tfilters['cohort__in']=yeargroup.objects.filter(\n\t\t\t\t\tsubject__name__contains=filters['subject__name'])\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tif \"classgroup\" in filters:\n\t\t\t\tfilters['cohort']=filters['classgroup'].cohort\n\t\t\t\tfilters.pop('classgroup',None)\n\n\t\telif class_of_filters==\"yeargroup\" :\n\t\t\tif \"subject__name\" in filters and measure==\"progress\":\n\t\t\t\tfilters['subject__in']=subject.objects.filter(\n\t\t\t\t\tname__contains=filters['subject__name'])\n\t\t\t\tfilters.pop('subject__name',None)\n\t\t\tif \"cohort\" in filters and measure==\"progress\":\n\t\t\t\tfilters['cohort']=filters['cohort'].cohort\n\t\t\tfilters.pop('subject',None)\n\n\t\t#get queryset or set of objects from db based on filters\n\t\tif class_of_filters in ['yeargroup','datadrop','subject',\n\t\t'classgroup']:\n\t\t\tqset=apps.get_model('analysis',class_of_filters).\\\n\t\t\t\tobjects.filter(**filters)\n\t\telif class_of_filters==\"faculty\":\n\t\t\tqset=['Maths','English','Science','Humanities','MFL',\n\t\t\t\t'Arts','Technology','IT',None]\n\t\t\tfor 
sub in subject.objects.filter(**filters):\n\t\t\t\tif sub.faculty not in qset:\n\t\t\t\t\tqset.add(sub.faculty)\n\n\t\t#sorting set for each class\n\t\tif class_of_filters==\"yeargroup\":\n\t\t\tclass_of_filters=\"subject__cohort\"\n\t\t\tqset=qset.order_by('cohort')\n\t\telif class_of_filters==\"datadrop\":\n\t\t\tqset=qset.order_by('cohort','-date')\n\t\telif class_of_filters==\"subject\":\n\t\t\tqset=qset.order_by('name','faculty')\n\t\telif class_of_filters==\"classgroup\":\n\t\t\tqset=qset.order_by('class_code')\n\t\telif class_of_filters==\"faculty\":\n\t\t\tclass_of_filters=\"subject__faculty\"\n\t\t#populate returning dictionary with set/queryset\n\t\treturnDict={}\n\t\treturnDict['All']={}\n\t\tif class_of_filters==\"subject\":\n\t\t\tfor q in qset:\n\t\t\t\treturnDict[q.name]={'subject__name':q.name}\n\t\telse:\n\t\t\tfor q in qset:\n\t\t\t\tif q is None and \"faculty\" in class_of_filters:\n\t\t\t\t\treturnDict[\"Other\"]={class_of_filters:q}\n\t\t\t\telse:\n\t\t\t\t\treturnDict[q.__str__()]={class_of_filters:q}\n\tif measure in avg_headline_measures or measure in pct_headline_measures:\n\t\tfor outerkey,dict in returnDict.items():\n\t\t\tdict=clean_filters(dict)\n\treturn returnDict",
"def dict_value_filter(key, data, dfilter, logger):\n\n logger.info(u'dict_value_filter:{l}'.format(l=locals()))\n newdata = {}\n if isinstance(data, dict):\n for nextkey, nextdata in data.items():\n returned_data = dict_value_filter(nextkey, nextdata, dfilter,\n logger)\n if bool(returned_data):\n newdata[nextkey] = returned_data\n elif isinstance(data, list):\n logger.info('Processing List:{}'.format(data))\n\n for item in data:\n logger.info(u'Process list:{}'.format(data))\n if isinstance(item, dict):\n logger.info('Found a dictionary:{}'.format(item))\n logger.info('Calling dict_value_filter:{k},{d},{f}'\n ''.format(k=key,d=item, f=dfilter))\n returned_data = dict_value_filter(key, item, dfilter, logger)\n if bool(returned_data):\n newdata = returned_data\n elif dfilter in unicode(data):\n newdata = data\n else:\n logger.info(u'Skipping data entry:{d}'.format(d=data))\n\n return newdata",
"def _filter_results(self, result):\n out_result = {}\n for change_type in result:\n temp_dict = {}\n for key in result[change_type]:\n log.debug(\"change_type = %s\", change_type)\n if self.ignore_added and (change_type == \"+++\"):\n continue\n log.debug(\"result[change_type] = %s, key = %s\",\n unicode(result[change_type]), key)\n log.debug(\"self._is_incex_key = %s\",\n self._is_incex_key(\n key,\n result[change_type][key]))\n if not self._is_incex_key(key, result[change_type][key]):\n temp_dict[key] = result[change_type][key]\n if len(temp_dict) > 0:\n out_result[change_type] = temp_dict\n\n return out_result",
"def grab_filt(self, filt, analyte=None):\n if isinstance(filt, str):\n try:\n ind = self.make_fromkey(filt)\n except ValueError:\n print((\"\\n\\n***Filter key invalid. Please consult \"\n \"manual and try again.\"))\n elif isinstance(filt, dict):\n try:\n ind = self.make_fromkey(filt[analyte])\n except ValueError:\n print((\"\\n\\n***Filter key invalid. Please consult manual \"\n \"and try again.\\nOR\\nAnalyte missing from filter \"\n \"key dict.\"))\n elif filt:\n ind = self.make(analyte)\n else:\n ind = ~np.zeros(self.size, dtype=bool)\n return ind",
"def _get_config(self):\n config_dict = self.CONFIG_REGISTRY.get(self._data_type)\n\n # If there is no config for this data_type, use default config and set\n # the query based on the data_type.\n if not config_dict:\n config_dict = self.DEFAULT_CONFIG\n config_dict['query'] = 'data_type:\"{0}\"'.format(self._data_type)\n\n config_dict['index'] = self._index\n config_dict['data_type'] = self._data_type\n return config_dict",
"def get_query_info():\n all_rt_heat_metric_list = get_rt_query_count()\n # 有查询的数据\n query_dataset_dict = {}\n for each_rt in all_rt_heat_metric_list:\n query_dataset_dict[each_rt[\"key\"]] = each_rt[\"doc_count\"]\n return query_dataset_dict",
"def getFilter(self, type: int) -> int:\n ...",
"def create_filter_query(self, collection_name: str, field: str, filter_type: str, filter_values: Union[List[str], str]=None):\n if filter_type == 'contains':\n # return [{'field' : field, 'filter_type' : 'contains', \"condition\":\"==\", \"condition_value\": filter_values}]\n return [{'field': field, 'filter_type': 'regexp', 'condition': '==', 'condition_value': '.*' + str(filter_values) + '.*'}]\n if filter_type == 'exact_match':\n return [{'field' : field, 'filter_type' : 'exact_match', \"condition\":\"==\", \"condition_value\": filter_values}]\n if filter_type == 'categories':\n return [{'field' : field, 'filter_type' : 'categories', \"condition\":\"==\", \"condition_value\": filter_values}]\n if filter_type == 'exists':\n if filter_values is None or filter_values == '==':\n return [{'field' : field, 'filter_type' : 'exists', \"condition\":\"==\", \"condition_value\":\" \"}]\n elif filter_values == '!=':\n return [{'field' : field, 'filter_type' : 'exists', \"condition\":\"!=\", \"condition_value\":\" \"}]\n if filter_type == '<=' or filter_type == '>=' or filter_type == '>' or filter_type == '<' or filter_type == '==':\n if self.collection_schema(collection_name)[field] == 'date':\n return [{'field' : field, 'filter_type' : 'date', \"condition\":filter_type, \"condition_value\": filter_values}]\n elif self.collection_schema(collection_name)[field] == 'numeric':\n return [{'field' : field, 'filter_type' : 'numeric', \"condition\":filter_type, \"condition_value\":filter_values}]\n else:\n raise ValueError(f\"{filter_type} has not been defined. Please choose one of contains/exact_match/exists/categories/>=/<=/>/<.\")",
"def retrieve(self, data_only_filter=\"all\", return_type=\"python\"):\n if return_type == \"python\":\n if data_only_filter == \"all\":\n return dict(dict_data=self.data_dict, list_data=self.data_list)\n elif data_only_filter == \"list\":\n return self.data_list\n elif data_only_filter == \"dict\":\n return self.data_dict\n else:\n print(\">>>> Data filter only: {'all', 'list', 'dict'}, your: %s\" % data_only_filter)\n exit(1)\n elif return_type == \"model\":\n if data_only_filter == \"all\":\n return dict(dict_data=DictModel(name=\"obj_dict\", raw_data=self.data_dict),\n list_data=DictModel(name=\"obj_list\", raw_data=self.data_list))\n elif data_only_filter == \"list\":\n return DictModel(name=\"obj_dict\", raw_data=self.data_dict)\n elif data_only_filter == \"dict\":\n return DictModel(name=\"obj_list\", raw_data=self.data_list)\n else:\n print(\">>>> Data filter only: {'all', 'list', 'dict'}, your: %s\" % data_only_filter)\n exit(1)\n else:\n print(\">>>> Return type only: {'python', 'model'}, your: %s\" % return_type)\n exit(1)",
"def index(self):\n return dict(data='index')",
"def get_query_from_data(data):\n query_params = {}\n for key, value in data.items():\n if key == 'title':\n Product._set_query_params(query_params, key, value)\n elif isinstance(value, str):\n param_key = f'params.{str(key)}'\n Product._set_query_params(query_params, param_key, value)\n return query_params",
"def listtypeindexes(self):\n\n indexes = {}\n for dtype,value in sorted(self._allowed_patterns.items()):\n if value.has_key('index'):\n indexes[dtype] = value['index']\n\n return indexes",
"def type_index(context, request):\n\n return {'types': db.DBSession.query(db.Type).order_by(db.Type.id).all()}",
"def createOrUpdate ( server, searchType, data, filters, parent_key=None, print_status=True ):\n object = server.query ( searchType, filters=filters )\n if print_status:\n print ( \"----- Found \" + str(len(object)) + \" objects for \" + searchType + \" \" + str(filters) )\n if len(object) == 0:\n return server.insert ( searchType, data, parent_key=parent_key )\n elif len(object) == 1:\n searchKey = server.build_search_key ( searchType, object[0]['code'] )\n return server.update ( searchKey, data=data, parent_key=parent_key )\n else:\n raise Exception ( \"Found multiple rows in \" + searchType + \" matching filters: \" + str(filters) )",
"def _dict_with_specs(inst_type_query, specs_key='extra_specs'):\n inst_type_dict = dict(inst_type_query)\n specs = {x['key']: x['value'] for x in inst_type_query[specs_key]}\n inst_type_dict[specs_key] = specs\n return inst_type_dict",
"def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}",
"def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}",
"def query(self, form):\n query = {}\n index = self.data.get('index', '')\n index = index.encode('utf-8', 'replace')\n\n if not self.operator_visible:\n operator = self.operator\n else:\n operator = form.get(self.data.getId() + '-operator', self.operator)\n\n operator = operator.encode('utf-8', 'replace')\n\n if not index:\n return query\n\n if self.hidden:\n value = self.default\n else:\n value = form.get(self.data.getId(), '')\n\n if not value:\n return query\n\n catalog = getToolByName(self.context, 'portal_catalog')\n if index in catalog.Indexes:\n if catalog.Indexes[index].meta_type == 'BooleanIndex':\n if value == 'False':\n value = False\n elif value == 'True':\n value = True\n\n query[index] = {'query': value, 'operator': operator}\n return query",
"def _init_prepare_types(self):\n # len(db)-1 wouldn't work here because there could be missing\n # index due to generic filtering\n self.types = {\n key: fit_integer_type(np.max(db.index.values), is_signed=False)\n for key, db in iteritems(self.by_dbs)}"
]
| [
"0.5705551",
"0.5667574",
"0.5598227",
"0.55603284",
"0.5518409",
"0.5467773",
"0.54619396",
"0.54030484",
"0.53398377",
"0.532923",
"0.5268471",
"0.5182582",
"0.5160404",
"0.51402986",
"0.51049006",
"0.508951",
"0.5088857",
"0.5058746",
"0.50552565",
"0.50269693",
"0.5026853",
"0.50258404",
"0.50198257",
"0.5009867",
"0.49656203",
"0.49607807",
"0.49605793",
"0.49605793",
"0.4959921",
"0.49548167"
]
| 0.65318596 | 0 |
(Twitterverse dictionary, str) -> list of str Return a list of all users following twitter_name in twitter_dict. >>> twitter_file = open('data.txt', 'r') >>> twitter_dictionary = process_data(twitter_file) >>> all_followers(twitter_dictionary, 'NicoleKidman') ['PerezHilton', 'q', 'p', 'tomCruise'] >>> twitter_file = open('data.txt', 'r') >>> twitter_dictionary = process_data(twitter_file) >>> all_followers(twitter_dictionary, 'katieH') ['PerezHilton', 'tomCruise'] | def all_followers(twitter_dict, twitter_name): 
following_list = []
for user in twitter_dict:
f_list = twitter_dict[user]['following']
if twitter_name in f_list:
following_list.append(user)
return following_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_followers (twitter_data, username):\n\n # initialize\n followers = []\n\n for key in twitter_data: # go through every username in twitter_data\n if username in twitter_data [key]['following']: # check each 'following'\n followers.append (key)\n\n followers.sort() # sort the list alphabetically for testing purposes\n return followers",
"def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]",
"def search_helper(name_list, operation, twitter_dict): \r\n return_list = []\r\n \r\n for name in name_list:\r\n if operation == 'following':\r\n search_specified_list = twitter_dict[name]['following']\r\n for following_names in search_specified_list: \r\n if following_names not in return_list: \r\n return_list.append(following_names) \r\n \r\n elif operation == 'followers':\r\n followers = all_followers(twitter_dict, name)\r\n for followers_name in followers: \r\n if followers_name not in return_list: \r\n return_list.append(followers_name) \r\n \r\n return return_list",
"def followers(self, handles):\n print(handles)\n followers_list = {}\n for handle in handles:\n followers = self.twitter_client.followers_ids(screen_name=handle)\n\n r = []\n for page in self.paginate(followers, 100):\n results = self.twitter_client.lookup_users(user_ids=page)\n for result in results:\n r.append(result.screen_name)\n followers_list[handle] = r\n return followers_list",
"def get_filter_results(twitter_dict, username_list, filter_dict):\r\n twitter_handles = username_list \r\n name_filtered_list = []\r\n upper_user = []\r\n \r\n if 'name_includes' in filter_dict: \r\n for user in twitter_handles: \r\n user = user.upper()\r\n upper_user.append(user)\r\n name = filter_dict['name_includes']\r\n \r\n for uName in username_list:\r\n if name.upper() == uName.upper():\r\n name_filtered_list.append(name) \r\n \r\n twitter_handles = name_filtered_list \r\n \r\n location_filtered_list = []\r\n if 'location_includes' in filter_dict: \r\n for user in twitter_handles: \r\n location = filter_dict['location_includes']\r\n if location.upper() == twitter_dict[user]['location'].upper(): \r\n location_filtered_list.append(user) \r\n twitter_handles = location_filtered_list\r\n \r\n follower_filtered_list = []\r\n if 'follower' in filter_dict:\r\n for user in twitter_handles:\r\n for follower in twitter_dict[user]['following']:\r\n if follower == filter_dict['follower']:\r\n #if follower in twitter_handles:\r\n follower_filtered_list.append(user)\r\n \r\n twitter_handles = follower_filtered_list \r\n \r\n following_filtered_list = []\r\n if 'following' in filter_dict:\r\n for user in twitter_handles: \r\n following_list = all_followers(twitter_dict, user)\r\n for follower in following_list: \r\n if follower in twitter_handles: \r\n following_filtered_list.append(follower) \r\n twitter_handles = following_filtered_list \r\n \r\n return twitter_handles",
"def get_user_followers(self, text: str) -> List[str]:\n # Get all follower information\n followers: List[_InstagramUser] = self._user_follower_info()\n # Convert each folllower to just their name\n names: List[str] = list([x.username for x in followers])\n return names",
"def users_being_followed_tweets():\n username = request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200",
"def get_followers(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)",
"def followed_by_hillary_and_donald(users, twitter):\n\n str = ''\n set1 = set()\n set2 = set()\n for u_dict in users:\n \tif u_dict['screen_name'] == 'HillaryClinton':\n \t\tset1 = set(u_dict['friends'])\n \telif u_dict['screen_name'] == 'realDonaldTrump':\n \t\tset2 = set(u_dict['friends'])\n \t\t\n common = set.intersection(set1, set2)\n request = robust_request(twitter, 'users/lookup', {'user_id': common}, max_tries=5)\n for user in request:\n \tstr = user['screen_name']\t\n return str",
"def get_all_followers(self):\n return get_all_(self.get_followers)",
"def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json",
"def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json",
"def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json",
"def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json",
"def followers(self):\r\n request = http.Request('GET', '{0}/followers/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json",
"def followers(self):\r\n request = http.Request('GET', '{0}/followers/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json",
"def followers(self):\r\n request = http.Request('GET', '{0}/followers/'.format(self.get_url()))\r\n\r\n return request, parsers.parse_json",
"def followers(self):\n return self.data.get(\"followers\")",
"def followed_by_hillary_and_donald(users, twitter):\n ###TODO-- Completed\n for user in users:\n if user['screen_name'] == 'HillaryClinton':\n friends_Hillary = user['friends']\n #print(len(friends_Hillary))\n elif user['screen_name'] == 'realDonaldTrump':\n friends_donald = user['friends']\n #print(len(friends_donald))\n\n common_followed_id = list(set(friends_Hillary) & set(friends_donald))\n\n commn_followed_user = robust_request(twitter,'users/lookup',{'user_id':common_followed_id}).json()\n #print(commn_followed_user[0]['screen_name'])#['screen_name'])\n return commn_followed_user[0]['screen_name']\n #pass",
"def get_followers1(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api1.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api1.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)",
"def follower(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for a in account:\n a = Account(a, morphene_instance=stm)\n print(\"\\nFollowers statistics for @%s (please wait...)\" % a.name)\n followers = a.get_followers(False)\n followers.print_summarize_table(tag_type=\"Followers\")",
"def getFollowerIDs(self, screen_name):\n follower_ids = []\n for follower_id in tweepy.Cursor(self.api.followers_ids,id=screen_name).items():\n print follower_id\n follower_ids.append(follower_id)\n return follower_ids",
"def get_search_results (twitter_data, search_data):\n\n search_list = [search_data['username']] # start with the first username\n temp = [] # initialize\n\n for operation in search_data['operations']: # go through every operation\n for username in search_list:\n if operation == 'following':\n for name in twitter_data[username]['following']:\n if not name in temp:\n temp.append (name)\n\n elif operation == 'followers':\n for name in all_followers (twitter_data, username):\n if not name in temp:\n temp.append (name)\n\n search_list = temp\n temp = []\n search_list.sort() # sort the list alphabetically for testing purposes\n return search_list",
"def tweets_following_users(username):\n user_profile = query_db('select * from user where username = ?',\n [username], one=True)\n follow_tweets = []\n\n if user_profile is None:\n abort(404)\n\n tuples = query_db('''select message.* from message, follower where\n follower.whom_id = message.author_id and follower.who_id = ?\n order by message.pub_date desc limit ?''', [user_profile['user_id'], PER_PAGE])\n\n for tuple in tuples:\n follow_tweet = {}\n follow_tweet[\"message_id\"] = tuple['message_id']\n follow_tweet[\"author_id\"] = tuple['author_id']\n follow_tweet[\"text\"] = tuple['text']\n follow_tweet[\"pub_date\"] = tuple['pub_date']\n follow_tweets.append(follow_tweet)\n\n return jsonify({'follow_tweets': follow_tweets}), 200",
"def getFollowers():\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = GetInstagramAnswer.igApi.getUserFollowers(GetInstagramAnswer.igApi.username_id, maxid=next_max_id)\n followers.extend(GetInstagramAnswer.igApi.LastJson.get('users',[]))\n next_max_id = GetInstagramAnswer.igApi.LastJson.get('next_max_id','')\n return \"You have currently \"+str(len(followers))+\" Followers on Instagram.\"",
"def get_followers(twitter,screen_name,filename,count):\n url = 'https://api.twitter.com/1.1/followers/ids.json?&screen_name=@'+screen_name+'&skip_status=true&include_user_entities=false&count='+str(count) \n consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)\n access = oauth.Token(key=access_token, secret=access_token_secret)\n client = oauth.Client(consumer, access)\n try:\n response,data = client.request(url)\n dataStr = data.decode('utf-8') \n if('Rate limit exceeded' in dataStr ):\n print('rate limit exceeded error.. sleep for 15 min')\n time.sleep(61 * 15)\n response,data = client.request(url)\n \n jsonid = json.loads(dataStr)\n li = list(jsonid['ids'])\n output = open(filename, 'wb')\n pickle.dump(li, output)\n output.close()\n except:\n pass\n \n return li",
"def process_data (file):\n\n twitter_data = {}\n\n username = file.readline().strip()\n\n while username != '': # leave when all lines of the file have been read\n # initialize everything we need for entering data\n twitter_data [username] = {}\n twitter_data [username] ['bio'] = ''\n twitter_data [username] ['following'] = []\n\n twitter_data [username] ['name'] = file.readline().strip()\n twitter_data [username] ['location'] = file.readline().strip()\n twitter_data [username] ['web'] = file.readline().strip()\n\n bio = file.readline() # get the first line of the bio\n while bio.strip() != 'ENDBIO': # go until we get the line 'ENDBIO'\n twitter_data [username] ['bio'] += bio.strip() + '\\n'\n bio = file.readline()\n # we dont want the final '\\n' in the bio so we splice it\n twitter_data [username] ['bio'] = twitter_data [username] ['bio'] [:-1]\n\n following = file.readline() # get the first element for the list\n while following.strip() != 'END': # go until we get the line 'END'\n twitter_data [username] ['following'].append (following.strip())\n following = file.readline()\n\n username = file.readline().strip()\n return twitter_data",
"def get_followers(self):\n rsp = self.session.get(self.url + \"/followers\")\n soup = self.getSoup(rsp.content)\n followers = soup.find_all('div', class_ = 'zm-person-item')\n if not followers:\n return\n i, follower = 0, None\n for follower in followers:\n i += 1\n yield follower.find('a', recursive = False)['href']\n while not i % Page_Items_Num:\n data = {\n 'offset' : i,\n 'start' : follower['id'].split('-')[-1],\n '_xsrf' : self.session.getCookie()['_xsrf']\n }\n rsp = self.session.post(self.url + \"/followers\", data = data)\n if rsp.json()['r'] == 0:\n followers = self.getSoup(rsp.json()['msg'][1]).find_all('div', class_ = 'zm-person-item')\n for follower in followers:\n i += 1\n yield follower.find('a', recursive = False)['href']\n else:\n return",
"def _user_follower_info(self, uid: int = 0) -> List[_InstagramUser]:\n # If no uid was specified, use the authenticated user's uid\n if uid == 0:\n uid = self.uid\n\n followers: List[Dict[str, Any]] = self.api.getTotalFollowers(uid)\n user_followers = list([_InstagramUser(x) for x in followers])\n return user_followers",
"def user_followers(username, max: int = None):\n for user_dict in client.user_relationships(username, max=max, type=\"followers\"):\n print(json.dumps(user_dict))"
]
| [
"0.83895",
"0.7141314",
"0.680105",
"0.6771905",
"0.6646324",
"0.66039145",
"0.6573563",
"0.6453545",
"0.644731",
"0.63955975",
"0.62907255",
"0.62907255",
"0.62907255",
"0.62907255",
"0.6255389",
"0.6255389",
"0.6255389",
"0.62464976",
"0.6245325",
"0.620537",
"0.6181806",
"0.6159641",
"0.6138146",
"0.61297315",
"0.6077491",
"0.60320157",
"0.6021054",
"0.5915483",
"0.5904801",
"0.58885926"
]
| 0.8723968 | 0 |
(Twitterverse dictionary, search specification dictionary) -> list of str Return a list of users from twitter_dict that fit the specification declared by search_dict. >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query3.txt', 'r') >>> query_dict = process_query(query_file) >>> search_dict = query_dict['search'] >>> get_search_results(twitter_dict, search_dict) ['PerezHilton', 'tomCruise', 'q', 'p'] >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query2.txt', 'r') >>> query_dict = process_query(query_file) >>> search_dict = query_dict['search'] >>> get_search_results(twitter_dict, search_dict) ['a', 'b'] | def get_search_results(twitter_dict, search_dict):
search_list = [search_dict['username']]
search_specified_list = []
for user in search_list:
search_users_list = [user]
for operation in search_dict['operations']:
search_users_list = search_helper(search_users_list, operation,\
twitter_dict)
search_specified_list += search_users_list
return search_specified_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_filter_results(twitter_dict, username_list, filter_dict):\r\n twitter_handles = username_list \r\n name_filtered_list = []\r\n upper_user = []\r\n \r\n if 'name_includes' in filter_dict: \r\n for user in twitter_handles: \r\n user = user.upper()\r\n upper_user.append(user)\r\n name = filter_dict['name_includes']\r\n \r\n for uName in username_list:\r\n if name.upper() == uName.upper():\r\n name_filtered_list.append(name) \r\n \r\n twitter_handles = name_filtered_list \r\n \r\n location_filtered_list = []\r\n if 'location_includes' in filter_dict: \r\n for user in twitter_handles: \r\n location = filter_dict['location_includes']\r\n if location.upper() == twitter_dict[user]['location'].upper(): \r\n location_filtered_list.append(user) \r\n twitter_handles = location_filtered_list\r\n \r\n follower_filtered_list = []\r\n if 'follower' in filter_dict:\r\n for user in twitter_handles:\r\n for follower in twitter_dict[user]['following']:\r\n if follower == filter_dict['follower']:\r\n #if follower in twitter_handles:\r\n follower_filtered_list.append(user)\r\n \r\n twitter_handles = follower_filtered_list \r\n \r\n following_filtered_list = []\r\n if 'following' in filter_dict:\r\n for user in twitter_handles: \r\n following_list = all_followers(twitter_dict, user)\r\n for follower in following_list: \r\n if follower in twitter_handles: \r\n following_filtered_list.append(follower) \r\n twitter_handles = following_filtered_list \r\n \r\n return twitter_handles",
"def get_search_results (twitter_data, search_data):\n\n search_list = [search_data['username']] # start with the first username\n temp = [] # initialize\n\n for operation in search_data['operations']: # go through every operation\n for username in search_list:\n if operation == 'following':\n for name in twitter_data[username]['following']:\n if not name in temp:\n temp.append (name)\n\n elif operation == 'followers':\n for name in all_followers (twitter_data, username):\n if not name in temp:\n temp.append (name)\n\n search_list = temp\n temp = []\n search_list.sort() # sort the list alphabetically for testing purposes\n return search_list",
"def get_filter_results (twitter_data, search_list, filter_data):\n\n #initialize\n filter_list = []\n\n for operation in filter_data:\n if operation == 'name-includes':\n for username in search_list:\n # since case doesnt matter, eveything is made uppercase and\n # then is checked\n if filter_data [operation].upper() in \\\n twitter_data [username]['name'].upper():\n filter_list.append (username)\n\n elif operation == 'location-includes':\n for username in search_list:\n # same case as above\n if filter_data [operation].upper() in \\\n twitter_data [username]['location'].upper():\n filter_list.append (username)\n\n elif operation == 'follower':\n for username in search_list:\n if username in \\\n twitter_data[filter_data [operation]]['following']:\n filter_list.append (username)\n\n elif operation == 'following':\n for username in search_list:\n if username in all_followers(twitter_data, filter_data[operation]):\n filter_list.append (username)\n\n search_list = filter_list\n filter_list = []\n\n filter_list = search_list\n filter_list.sort() # sort the list alphabetically for testing purposes\n\n return filter_list",
"def search(phrase):\n return {\n 'products': search_products(phrase),\n 'orders': search_orders(phrase),\n 'users': search_users(phrase)}",
"def get_search_results(query):\n global index, doc_names\n result = ranked = list()\n doc_list = set(doc_names.keys())\n flag = 0\n for word in query:\n if word in index:\n flag = 1\n doc_list = doc_list.intersection(index[word].keys())\n else:\n return []\n\n if flag != 0:\n for doc_id in doc_list:\n positions = list()\n for word in query:\n positions.append(index[word][doc_id])\n doc_result = [(doc_id, x) for x in position_merge(positions)]\n result += doc_result\n ranked = sorted(result, key=lambda x: (x[0], x[1]))\n return ranked",
"def search_helper(name_list, operation, twitter_dict): \r\n return_list = []\r\n \r\n for name in name_list:\r\n if operation == 'following':\r\n search_specified_list = twitter_dict[name]['following']\r\n for following_names in search_specified_list: \r\n if following_names not in return_list: \r\n return_list.append(following_names) \r\n \r\n elif operation == 'followers':\r\n followers = all_followers(twitter_dict, name)\r\n for followers_name in followers: \r\n if followers_name not in return_list: \r\n return_list.append(followers_name) \r\n \r\n return return_list",
"def run_search(dict_file, postings_file, queries_file, results_file):\n print('running search on the queries...')\n\n with open(dict_file, mode=\"rb\") as dictionary_file,\\\n open(postings_file, mode=\"rb\") as posting_file,\\\n open(queries_file, encoding=\"utf8\") as q_in,\\\n open(results_file, mode=\"w\", encoding=\"utf8\") as q_out:\n\n ''' \n load dictionary and postings \n - num_of_doc -> The number of the documents indexed\n - dict(k,v) -> token, Enftry(frequency, offset, size)\n - postings -> list of tuples (doc ID, token frequency)\n '''\n num_of_doc = pickle.load(dictionary_file)\n dictionary = pickle.load(dictionary_file)\n postings = Posting(dictionary, posting_file)\n\n ''' \n process query, and write the query result (i.e., the 10 \n most relevant doc IDs) to the result file \n '''\n for query in q_in:\n print(*find_10_most_relevant(query, dictionary,\n postings, num_of_doc), end='\\n', file=q_out)",
"def search_users(request, usernames_only=True):\n BUFFER_LEN = getattr(settings, 'RESULTS_BUFFER_LEN', 500)\n # SR_LIMIT = getattr(settings, 'SR_LIMIT', 50)\n # SR_MIN_SUBS = getattr(settings, 'SR_MIN_SUBS', 100)\n # SR_MAX_SUBS = getattr(settings, 'SR_MAX_SUBS', 5000000)\n\n # f_ignore_sr_li\n # f_ignore_sr_max\n # f_exclude_sr_li\n\n # fetch users and number of same subscribed subreddits for all users\n # that are subscribed to the same subreddits than auth user; fetch a max\n # of BUFFER_LEN items. This query only touches the dtr5app_subscribed\n # table and does not requore any join with other tables.\n query_params = []\n query_string = ''\n\n # part 1\n query_params += []\n query_string += '''\n SELECT au.id, au.username, COUNT(r1.user_id) AS sr_count\n FROM dtr5app_subscribed r1\n\n INNER JOIN dtr5app_subscribed r2\n ON r1.sr_id = r2.sr_id AND r1.user_id <> r2.user_id\n\n INNER JOIN auth_user au\n ON r2.user_id = au.id\n\n INNER JOIN dtr5app_sr sr\n ON r1.sr_id = sr.id\n\n WHERE au.is_active IS TRUE AND last_login IS NOT NULL '''\n\n # part 1.1\n # if the user has set a maximum size for subreddits to be considered\n # in search. this can be used to filter all the huge default subs that\n # most redditors belong to.\n if request.user.profile.f_ignore_sr_max:\n query_params += [request.user.profile.f_ignore_sr_max]\n query_string += ''' AND sr.subscribers < %s '''\n\n # part 1.2\n # a list of Sr.display_name values. these subreddits should NOT be\n # considered when producing matches.\n # Subreddit names should appear as case insensitive! The f_ignore_sr_li\n # list of subreddit names is supposed to be \"cleaned up\" already, with\n # the appropriate lettercase of a subreddit's name.\n if request.user.profile.f_ignore_sr_li:\n query_params += request.user.profile.f_ignore_sr_li\n x = ', '.join(['%s'] * len(request.user.profile.f_ignore_sr_li))\n query_string += ''' AND sr.id NOT IN (\n SELECT id FROM dtr5app_sr sr2\n WHERE sr2.display_name IN (''' + x + ' )) '\n\n # part 1.9\n query_params += [request.user.id]\n query_string += ''' AND r1.user_id = %s AND au.id IN (\n SELECT id FROM auth_user u\n INNER JOIN dtr5app_profile p\n ON u.id = p.user_id\n WHERE 1=1 '''\n\n # part 2: sex --> TODO: search by gender!\n # li = li.filter(profile__sex=request.user.profile.f_sex)\n if request.user.profile.f_sex > 0:\n query_params += [request.user.profile.f_sex]\n query_string += ''' AND p.sex = %s '''\n\n # part 3: date of birth\n dob_earliest, dob_latest = get_dob_range(request.user.profile.f_minage,\n request.user.profile.f_maxage)\n query_params += [dob_earliest, dob_latest]\n query_string += ''' AND p.dob >= %s AND p.dob <= %s '''\n\n # part 4: lat/lng\n # li = li.filter(profile__lat__gte=lat_min, profile__lat__lte=lat_max,\n # profile__lng__gte=lng_min, profile__lng__lte=lng_max)\n #\n # Values too close are inaccurate because of location fuzzying. Also,\n # f_distance must be at least 1, so that the signup flow doesn't intercept\n # it because it has no value set! 
Leave this to only search distances\n # above 5 km or so, and return \"worldwide\" for any value below 5 km.\n #\n if request.user.profile.f_distance > 5:\n lat_min, lng_min, lat_max, lng_max = get_latlng_bounderies(\n request.user.profile.lat,\n request.user.profile.lng,\n request.user.profile.f_distance)\n query_params += [lat_max, lng_min, lat_min, lng_max]\n query_string += ''' AND p.lat <= %s AND p.lng >= %s\n AND p.lat >= %s AND p.lng <= %s '''\n\n # part 5: exclude auth user themself\n # li = li.exclude(pk=request.user.pk)\n query_params += [request.user.id]\n query_string += ''' AND NOT (u.id = %s) '''\n\n # part 6: exclude users who already have a like/nope flag from auth user\n # li = li.exclude(flags_received__sender=request.user)\n query_params += [request.user.id]\n query_string += ''' AND NOT (u.id IN (SELECT U1.receiver_id AS Col1\n FROM dtr5app_flag U1 WHERE U1.sender_id = %s)) '''\n\n # part 7: exclude globally blocked usernames\n # li = li.exclude(username__in=get_blocked_usernames_list())\n # --> TODO: currently empty.\n pass\n\n # part 8: have at least one picture URL in the JSON string\n # li = li.exclude(profile___pics='[]')\n # TODO: for now, allow no-picture profiles, to make testing easier\n # query_params += []\n # query_string += ''' AND NOT (p._pics = '[]') '''\n\n # finish up\n query_params += [BUFFER_LEN]\n query_string += ''' ) GROUP BY r1.user_id, au.id\n ORDER BY sr_count DESC LIMIT %s '''\n\n # execute the query with the collected params\n users = User.objects.raw(query_string, query_params)\n\n # print('='*50)\n # print(repr(users))\n # print('='*50)\n # for u in users:\n # print('share {} subs with {}:{}'.format(u.sr_count, u.id, u.username))\n # print('='*50)\n\n # default return a list of only usernames\n if usernames_only:\n return [x.username for x in users]\n else:\n return users",
"def search(self, query_string):\n terms = query_string.lower().split()\n result = set(self.wordDict[terms[0]])\n if len(result) == 0:\n return list()\n else:\n for t in terms[2:]:\n records_containing_t = self.wordDict[t]\n result = result.intersection(records_containing_t)\n return [self.get_record_dict(id).getTuple() for id in result]",
"def search_users(phrase):\n sv = (SearchVector('email', weight='A') +\n SearchVector('first_name', weight='B') +\n SearchVector('last_name', weight='B') +\n SearchVector('default_billing_address__first_name', weight='B') +\n SearchVector('default_billing_address__last_name', weight='B'))\n rank = SearchRank(sv, SearchQuery(phrase))\n return User.objects.annotate(rank=rank).filter(\n rank__gte=0.2).order_by('-rank')",
"def query_twitter(session, provided_ioc):\n ioc_dicts = []\n\n if provided_ioc.startswith(\"@\"):\n ioc_dicts.append({\"invalid\": \"{} <-- Monitoring users is prohibited!\".format(provided_ioc)})\n return ioc_dicts\n\n encoded_ioc = urllib.quote_plus(provided_ioc)\n search_tweets = session.search(encoded_ioc, rpp=100, lang=\"en\")\n\n for tweet in search_tweets:\n if tweet._json[\"user\"][\"name\"] == provided_ioc.replace(\"#\", \"\") or \\\n tweet._json[\"user\"][\"screen_name\"] == provided_ioc.replace(\"#\", \"\"):\n ioc_dicts.append({\"invalid\": \"{} <-- Monitoring users is prohibited!\".format(provided_ioc)})\n return ioc_dicts\n\n if \"retweeted_status\" in tweet._json.keys():\n if tweet._json[\"retweeted_status\"][\"user\"][\"name\"] == provided_ioc.replace(\"#\", \"\") or \\\n tweet._json[\"retweeted_status\"][\"user\"][\"screen_name\"] == provided_ioc.replace(\"#\", \"\"):\n ioc_dicts.append({\"invalid\": \"{} <-- Monitoring users is prohibited!\".format(provided_ioc)})\n return ioc_dicts\n\n urls = []\n for x in tweet._json[\"entities\"][\"urls\"]:\n if not x[\"expanded_url\"].startswith(\"https://twitter.com/i/web/status/\"):\n urls.append(x[\"expanded_url\"])\n\n hashtags = []\n for x in tweet._json[\"entities\"][\"hashtags\"]:\n hashtags.append(\"#{}\".format(x[\"text\"]))\n\n ioc_dict = {}\n ioc_dict[\"search_term\"] = provided_ioc\n ioc_dict[\"url\"] = \"\\n\".join(urls)\n ioc_dict[\"hashtags\"] = \"\\n\".join(hashtags)\n ioc_dict[\"timestamp\"] = tweet._json[\"created_at\"]\n ioc_dict[\"tweet\"] = tweet._json[\"text\"]\n\n if \"retweeted_status\" in tweet._json.keys():\n ioc_dict[\"timestamp\"] = tweet._json[\"retweeted_status\"][\"created_at\"]\n ioc_dict[\"tweet\"] = tweet._json[\"retweeted_status\"][\"text\"]\n\n ioc_dicts.append(ioc_dict)\n return ioc_dicts",
"def get_results_for(t_client, search_q):\n results = t_client.search(q=\"#\"+search_q)\n\n # This can be refactored\n return [\n {\n \"author\": \"@%s\" % t.from_user,\n \"text\": t.text,\n \"id\": t.id,\n \"date_h\": t.created_at.strftime(\"%H:%M:%S %d/%m/%Y\"),\n \"date\": time.mktime(t.created_at.timetuple()),\n } for t in results\n ]",
"def run_search(dict_file, postings_file, queries_file, results_file):\n print('running search on the queries...')\n\n with open(dict_file, mode=\"rb\") as dictionary_file,\\\n open(postings_file, mode=\"rb\") as posting_file,\\\n open(queries_file, encoding=\"utf8\") as q_in,\\\n open(results_file, mode=\"w\", encoding=\"utf8\") as q_out:\n\n ''' load dictionary and postings '''\n # dict(k,v) -> token, Entry(frequency, offset, size)\n # postings -> the dict containing the entries and metadata of the postings file\n # skiplist -> list of all doc IDs\n dictionary = pickle.load(dictionary_file)\n postings = Posting(dictionary, posting_file)\n file_list = postings['__all__']\n\n ''' process query, and write the query result to result file '''\n for query in q_in:\n query = preprocess_query(query)\n algebra = boolean.BooleanAlgebra()\n # Simplify query, e.g. tautology\n expression = algebra.parse(query, simplify=True)\n # special cases after simplification\n if str(expression) == \"0\":\n print(\"\", end='\\n', file=q_out)\n continue\n elif str(expression) == \"1\":\n print(\" \".join(map(str, file_list)), end='\\n', file=q_out)\n continue\n\n print(\" \".join(map(str, shunting(get_input(str(expression))).eval(\n postings, file_list).list)), end='\\n', file=q_out)\n\n # add posting skiplist and list of all docIDs to corresponding symbol\n # for sym in expression.symbols:\n # if normalize(sym) == \"IGNORE\":\n # norm_sym = str(normalize(sym))\n # setattr(sym, \"obj\", norm_sym)\n # setattr(sym, \"skiplist\", postings[norm_sym])\n # setattr(sym, \"list\", postings[norm_sym].list)\n # setattr(sym, \"file_list\", file_list.list)\n\n # evaluate the query\n # args[]: list of sub-terms\n # For symbols and base elements this tuple is empty,\n # for boolean functions it contains one or more symbols, elements or sub-expressions.\n # print(\" \".join(map(str, expression.evaluate_query(expression.args).list)),\n # end='\\n', file=q_out)",
"def search_results(self):\r\n route_name = self.request.matched_route.name\r\n mdict = self.matchdict\r\n rdict = self.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n # Always search the fulltext content\r\n with_content = True\r\n\r\n conn_str = self.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n params = self.params\r\n page = params.get('page', 0)\r\n count = params.get('count', 50)\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif self.request.user and self.request.user.username:\r\n username = self.request.user.username\r\n\r\n res_list = searcher.search(\r\n phrase,\r\n content=with_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page,\r\n )\r\n\r\n # if the route name is search_ajax we want a json response\r\n # else we just want to return the payload data to the mako template\r\n if 'ajax' in route_name or 'api' in route_name:\r\n return {\r\n 'success': True,\r\n 'message': \"\",\r\n 'payload': {\r\n 'search_results': [dict(res) for res in res_list],\r\n 'result_count': len(res_list),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }\r\n }\r\n else:\r\n return {\r\n 'search_results': res_list,\r\n 'count': len(res_list),\r\n 'max_count': 50,\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'username': username,\r\n }",
"def search_results(request):\r\n mdict = request.matchdict\r\n rdict = request.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif request.user and request.user.username:\r\n username = request.user.username\r\n\r\n # with content is always in the get string\r\n search_content = asbool(rdict.get('with_content', False))\r\n\r\n conn_str = request.registry.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n page = rdict.get('page', 0)\r\n count = rdict.get('count', 10)\r\n\r\n try:\r\n res_list = searcher.search(\r\n phrase,\r\n content=search_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page\r\n )\r\n except ValueError:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bad Request: Page number out of bound\"}\r\n return _api_response(request, ret)\r\n\r\n constructed_results = []\r\n for res in res_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # the hashed object is there as well, we need to pull the url and\r\n # clicks from it as total_clicks\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n constructed_results.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'search_results': constructed_results,\r\n 'result_count': len(constructed_results),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'with_content': search_content,\r\n 'username': username,\r\n })",
"def search_all_user(search_dict,cur):\n record=None\n if search_dict!={}:\n psql_where=\"\"\n \n for (key,value) in search_dict.items() :\n psql_where= f\"\"\"{key}='{value}' and \"\"\"+psql_where\n psql_base=f\"\"\" select distinct id,last_name,name,email,tel,user_name,user_type,\n (select count(*) from users where {psql_where[:-4]}) as nb\n from users \n where \"\"\"\n\n psql=psql_base+psql_where[:-4]\n record=cur.fetchall()\n cur.execute(psql)\n record=cur.fetchall()\n \n else:\n record=None\n\n \n return record",
"async def get_search_results(search_string: str):\n database = get_db()\n result = []\n search_string = search_string.lower()\n search_strings = search_utils.preprocess_search_string(\n search_string[:150]\n )\n query_search = database.AQLQuery(\n query=search_queries.QUERY_SEARCH,\n bindVars={\n \"search_string_tib\": search_strings['tib'],\n \"search_string_chn\": search_strings['chn'],\n \"search_string_skt\": search_strings['skt'],\n \"search_string_pli\": search_strings['pli'],\n \"search_string_skt_fuzzy\": search_strings['skt_fuzzy']\n },\n batchSize=300,\n rawResults=True,\n )\n query_result = query_search.result[0]\n result = search_utils.postprocess_results(search_string, query_result)\n return {\"searchResults\": result}",
"def get_users(twitter, screen_names):\n ###TODO-- Completed\n\n #create a request for Twitter to fetch data, using robust_request function, limiting to 200\n #get the requests for every screen_name and store it in a list\n requests = [robust_request(twitter,'users/lookup',{'screen_name':screen_name, 'count':200}).json()[0] for screen_name in screen_names]\n\n #for request in requests:\n # print(request)\n\n return requests",
"def search_user(message, search):\n found = []\n search = search.lower()\n users = hf.get_users()\n for user in users:\n if search in user['name'].lower():\n found.append('{} ({})'.format(user['name'], user[\"id\"]))\n if len(found) == 0:\n message.reply('No user found by that key: {}.'.format(search))\n return\n message.reply('Users found: {}'.format(', '.join(found)))",
"def get_users(twitter, screen_names):\n request = robust_request(twitter, 'users/lookup', {'screen_name': screen_names}, max_tries=5)\n user_info = []\n for user in request:\n \tuser_info.append(user)\n return user_info",
"def do_search(queries):\n global documents, list_document\n results = {}\n query = tokenize(queries)\n if query == []:\n sys.exit()\n # find document ids containing all query terms. Works by\n # intersecting the posting lists for all query terms.\n relevant_document_ids = intersection(\n [set(postings[term].keys()) for term in query])\n if not relevant_document_ids:\n documents.clear()\n list_document[:] = []\n flash('empty')\n else:\n scores = sorted([(id,similarity(query,id))\n for id in relevant_document_ids],\n key=lambda x: x[1],\n reverse=True)\n print \"Score: filename\"\n global total_document_found\n total_document_found = 0\n for (id,score) in scores:\n print str(score)+\": \"+document_filenames[id]\n results[document_filenames[id]] = score\n total_document_found += 1\n flash(\"Total document found : \" + str(total_document_found) + \" of \" + str(N))\n return results",
"def search_user(search_dict,cur):\n if search_dict!={}:\n\n psql_base=\"\"\" select distinct * \n from users \n where \"\"\"\n\n psql_where=\"\"\n \n for (key,value) in search_dict.items() :\n psql_where= f\"\"\"{key}='{value}' and \"\"\"+psql_where\n\n psql=psql_base+psql_where[:-4]+\";\"\n cur.execute(psql)\n record=cur.fetchall()\n\n else:\n record=None\n columns_names=['name','last_name','email','tel','user_name','password','user_type']\n #['prenom','nom','email','tel','Nom utilisateur','type utilisateur','nombre de resultat']\n \n return columns_names,record",
"def perform_query(tweets_dict, index, tf, idf, rt, likes, score, get_input=True, query=None):\n print(\"Insert your query:\\n\")\n if get_input:\n query = input()\n ranked_docs = search(query, index, idf, tf, rt, likes, score) \n return query, ranked_docs",
"def search_user_entries(entry_by_user):\n\n ranks=Counter(dict())\n entry_by_user=entry_by_user.split()\n #complete_file_set=set()\n for entry in entry_by_user:\n availability_info=search_hash(entry,hashtable)\n if availability_info:\n ranks+=ranking(availability_info,fileID_to_names)\n else:\n sorted_display(None)\n #call ranking, pass availability_info\n #print ranks\n sorted_display(ranks)",
"def configurations(corpus, search, **kwargs):\n\n from corpkit.dictionaries.wordlists import wordlists\n from corpkit.dictionaries.roles import roles\n from corpkit.interrogation import Interrodict\n from corpkit.interrogator import interrogator\n from collections import OrderedDict\n\n if search.get('l') and search.get('w'):\n raise ValueError('Search only for a word or a lemma, not both.')\n\n # are we searching words or lemmata?\n if search.get('l'):\n dep_word_or_lemma = 'dl'\n gov_word_or_lemma = 'gl'\n word_or_token = search.get('l')\n else:\n if search.get('w'):\n dep_word_or_lemma = 'd'\n gov_word_or_lemma = 'g'\n word_or_token = search.get('w')\n\n # make nested query dicts for each semantic role\n queries = {'participant': \n\n {'left_participant_in': \n {dep_word_or_lemma: word_or_token,\n 'df': roles.participant1,\n 'f': roles.event},\n\n 'right_participant_in':\n {dep_word_or_lemma: word_or_token,\n 'df': roles.participant2,\n 'f': roles.event},\n\n 'premodified':\n {'f': roles.premodifier, \n gov_word_or_lemma: word_or_token},\n\n 'postmodified':\n {'f': roles.postmodifier, \n gov_word_or_lemma: word_or_token},\n\n 'and_or':\n {'f': 'conj:(?:and|or)',\n 'gf': roles.participant,\n gov_word_or_lemma: word_or_token},\n },\n\n 'process':\n\n {'has_subject':\n {'f': roles.participant1,\n gov_word_or_lemma: word_or_token},\n\n 'has_object':\n {'f': roles.participant2,\n gov_word_or_lemma: word_or_token},\n\n 'modalised_by':\n {'f': r'aux',\n 'w': wordlists.modals,\n gov_word_or_lemma: word_or_token},\n\n 'modulated_by':\n {'f': 'advmod',\n 'gf': roles.event,\n gov_word_or_lemma: word_or_token},\n\n 'and_or':\n {'f': 'conj:(?:and|or)',\n 'gf': roles.event, \n gov_word_or_lemma: word_or_token},\n \n },\n\n 'modifier':\n\n {'modifies':\n {'df': roles.modifier,\n dep_word_or_lemma: word_or_token},\n\n 'modulated_by':\n {'f': 'advmod',\n 'gf': roles.modifier,\n gov_word_or_lemma: word_or_token},\n\n 'and_or':\n {'f': 'conj:(?:and|or)',\n 'gf': roles.modifier,\n gov_word_or_lemma: word_or_token},\n\n }\n }\n\n # allow passing in of single function\n if search.get('f'):\n if search.get('f').lower().startswith('part'):\n queries = queries['participant']\n elif search.get('f').lower().startswith('proc'):\n queries = queries['process']\n elif search.get('f').lower().startswith('mod'):\n queries = queries['modifier']\n else:\n newqueries = {}\n for k, v in queries.items():\n for name, pattern in v.items():\n newqueries[name] = pattern\n queries = newqueries\n queries['and_or'] = {'f': 'conj:(?:and|or)', gov_word_or_lemma: word_or_token}\n\n # count all queries to be done\n # total_queries = 0\n # for k, v in queries.items():\n # total_queries += len(v)\n \n kwargs['search'] = queries\n \n # do interrogation\n data = corpus.interrogate(**kwargs)\n \n # remove result itself\n # not ideal, but it's much more impressive this way.\n if isinstance(data, Interrodict):\n for k, v in data.items():\n v.results = v.results.drop(word_or_token, axis=1, errors='ignore')\n v.totals = v.results.sum(axis=1)\n data[k] = v\n return Interrodict(data)\n else:\n return data",
"async def twitter_search(self, query, limit=5):\n try:\n results = await self.bot.loop.run_in_executor(None, self.api.search_users, query, limit)\n except tweepy.TweepError as e:\n log.error(str(e))\n raise TwitterError('Unknown error from the Twitter API, this has been logged.') from e\n if not results:\n raise TwitterError('No result.')\n\n embed = discord.Embed(colour=0x738bd7)\n for user in results:\n name = '{} - @{}'.format(user.name, user.screen_name)\n description = textwrap.shorten(user.description, 1024) if user.description else 'No description.'\n embed.add_field(name=name, value=description, inline=False)\n await self.bot.say(embed=embed)",
"def getUsersLookup(self, **kwargs):\n screen_name = handleShouldBeList(kwargs.get('screen_name', None))\n user_id = handleShouldBeList(kwargs.get('user_id', None))\n\n params = {\n 'include_entities': kwargs.get('include_entities', None),\n 'tweet_mode': kwargs.get('tweet_mode', None)\n }\n\n if screen_name:\n params['screen_name'] = ','.join(screen_name)\n\n if user_id:\n params['user_id'] = ','.join(str(uid) for uid in user_id)\n \n query = createQuery(params)\n uri = self.api_url + '/users/lookup.json'\n\n response = self.session.post(uri + query).json()\n return response",
"def search(query):\n has_some = []\n # list of all users\n users = database_controller.get_all_users()\n # has_all starts off with every user, removes those who don't have one of the required skills\n has_all = users.copy()\n # iterate through every user and look up their skills\n for user in users:\n for skillpath, min_level in query.items():\n # get the skill_id from the database\n skill_id = database_controller.get_skill(skillpath).id\n # get the association of the user with current skill and according minimum level\n skill_assoc = database_controller.get_assocs(users_id=user.id,\n skill_id=skill_id,\n level=min_level,\n type=\"first\")\n # skill_assoc will be None if the user does not have the skill on the desired level or higher\n if skill_assoc is not None:\n # adds user to has_some, in case he does not have other skills\n if user not in has_some:\n has_some.append(user)\n # if the user does not have the current skill at the required level, he gets removed from has_all\n else:\n if user in has_all:\n has_all.remove(user)\n # remove intersection of has_all and has_some\n for user in has_all:\n if user in has_some:\n has_some.remove(user)\n # extract ProfileModels from the results and return them in a dictionary\n has_all = database_controller.get_profile_models(has_all)\n has_some = database_controller.get_profile_models(has_some)\n # sort the results by descending order of sum of queried skills\n has_all.sort(key=lambda p: database_controller.sum_relevant_skills(p, list(query.keys())), reverse=True)\n has_some.sort(key=lambda p: database_controller.sum_relevant_skills(p, list(query.keys())), reverse=True)\n return dict(has_all=has_all, has_some=has_some)",
"def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list",
"def get_data(inp):\n movies = __get_movies(inp)\n series = __get_series(inp)\n\n exist_title(movies, series)\n is_response_larger_than_max_results(movies, series)\n\n search_dict = {}\n\n if movies['Response'] != 'False':\n for movie in movies['Search']:\n search_dict.update({'movie': __get_title_info(movie['imdbID'])})\n\n if series['Response'] != 'False':\n for show in series['Search']:\n search_dict.update({'series': __get_title_info(show['imdbID'])})\n\n return search_dict"
]
| [
"0.67444634",
"0.63622165",
"0.61439055",
"0.5823485",
"0.57738215",
"0.56694967",
"0.56202865",
"0.561638",
"0.5604377",
"0.55924684",
"0.557445",
"0.552582",
"0.5494284",
"0.5484312",
"0.5452842",
"0.53725946",
"0.5361354",
"0.5322175",
"0.5318818",
"0.5298553",
"0.5283197",
"0.5275534",
"0.5272872",
"0.5261899",
"0.52271116",
"0.5226018",
"0.52242327",
"0.52104056",
"0.52031106",
"0.5201065"
]
| 0.7901854 | 0 |
(list of str, str, twitterverse dictionary) -> list of str Return the list of users that result from operation having applied to name_list from the twitter_dict. >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query3.txt', 'r') >>> query_dict = process_query(query_file) >>> search_dict = query_dict['search'] >>> search_list = [search_dict['username']] >>> operation = 'following' >>> search_helper(search_list, operation, twitter_dict) ['katieH', 'NicoleKidman'] >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query2.txt', 'r') >>> query_dict = process_query(query_file) >>> search_dict = query_dict['search'] >>> search_list = [search_dict['username']] >>> operation = 'followers' >>> search_helper(search_list, operation, twitter_dict) ['PerezHilton', 'tomfan'] | def search_helper(name_list, operation, twitter_dict): 
return_list = []
for name in name_list:
if operation == 'following':
search_specified_list = twitter_dict[name]['following']
for following_names in search_specified_list:
if following_names not in return_list:
return_list.append(following_names)
elif operation == 'followers':
followers = all_followers(twitter_dict, name)
for followers_name in followers:
if followers_name not in return_list:
return_list.append(followers_name)
return return_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_search_results(twitter_dict, search_dict):\r\n\r\n search_list = [search_dict['username']] \r\n search_specified_list = []\r\n\r\n for user in search_list:\r\n search_users_list = [user]\r\n \r\n for operation in search_dict['operations']:\r\n search_users_list = search_helper(search_users_list, operation,\\\r\n twitter_dict)\r\n \r\n search_specified_list += search_users_list\r\n \r\n return search_specified_list",
"def get_filter_results(twitter_dict, username_list, filter_dict):\r\n twitter_handles = username_list \r\n name_filtered_list = []\r\n upper_user = []\r\n \r\n if 'name_includes' in filter_dict: \r\n for user in twitter_handles: \r\n user = user.upper()\r\n upper_user.append(user)\r\n name = filter_dict['name_includes']\r\n \r\n for uName in username_list:\r\n if name.upper() == uName.upper():\r\n name_filtered_list.append(name) \r\n \r\n twitter_handles = name_filtered_list \r\n \r\n location_filtered_list = []\r\n if 'location_includes' in filter_dict: \r\n for user in twitter_handles: \r\n location = filter_dict['location_includes']\r\n if location.upper() == twitter_dict[user]['location'].upper(): \r\n location_filtered_list.append(user) \r\n twitter_handles = location_filtered_list\r\n \r\n follower_filtered_list = []\r\n if 'follower' in filter_dict:\r\n for user in twitter_handles:\r\n for follower in twitter_dict[user]['following']:\r\n if follower == filter_dict['follower']:\r\n #if follower in twitter_handles:\r\n follower_filtered_list.append(user)\r\n \r\n twitter_handles = follower_filtered_list \r\n \r\n following_filtered_list = []\r\n if 'following' in filter_dict:\r\n for user in twitter_handles: \r\n following_list = all_followers(twitter_dict, user)\r\n for follower in following_list: \r\n if follower in twitter_handles: \r\n following_filtered_list.append(follower) \r\n twitter_handles = following_filtered_list \r\n \r\n return twitter_handles",
"def get_search_results (twitter_data, search_data):\n\n search_list = [search_data['username']] # start with the first username\n temp = [] # initialize\n\n for operation in search_data['operations']: # go through every operation\n for username in search_list:\n if operation == 'following':\n for name in twitter_data[username]['following']:\n if not name in temp:\n temp.append (name)\n\n elif operation == 'followers':\n for name in all_followers (twitter_data, username):\n if not name in temp:\n temp.append (name)\n\n search_list = temp\n temp = []\n search_list.sort() # sort the list alphabetically for testing purposes\n return search_list",
"def get_filter_results (twitter_data, search_list, filter_data):\n\n #initialize\n filter_list = []\n\n for operation in filter_data:\n if operation == 'name-includes':\n for username in search_list:\n # since case doesnt matter, eveything is made uppercase and\n # then is checked\n if filter_data [operation].upper() in \\\n twitter_data [username]['name'].upper():\n filter_list.append (username)\n\n elif operation == 'location-includes':\n for username in search_list:\n # same case as above\n if filter_data [operation].upper() in \\\n twitter_data [username]['location'].upper():\n filter_list.append (username)\n\n elif operation == 'follower':\n for username in search_list:\n if username in \\\n twitter_data[filter_data [operation]]['following']:\n filter_list.append (username)\n\n elif operation == 'following':\n for username in search_list:\n if username in all_followers(twitter_data, filter_data[operation]):\n filter_list.append (username)\n\n search_list = filter_list\n filter_list = []\n\n filter_list = search_list\n filter_list.sort() # sort the list alphabetically for testing purposes\n\n return filter_list",
"def userNames(lst, url, tableName):\n n = len(lst)\n # https://docs.python.org/3/library/itertools.html#itertools.product\n # https://stackoverflow.com/questions/3034014/how-to-apply-itertools-product-to-elements-of-a-list-of-lists\n lst2 = list(itertools.product(*lst))\n lst3 = list(map(\"\".join, lst2))\n #\n # Maybe use checkUsernameSequences here,\n # then add a check to reduce the amount of possibilities before building lst?\n #\n\n seq = checkUsernameSequences(n, lst, url, tableName, minLen = 2, maxLen = 2)\n # does not include the single characters since minLen > 1\n\n lst4 = filt(seq, lst3)\n \"\"\"# next time:\n find matching strings. That should (hopefully) reduce the space to search. \n REMEMBER, this filtering will miss all single character usernames!!!\n\n https://docs.python.org/3/library/re.html#regular-expression-syntax\n https://stackoverflow.com/questions/3640359/regular-expressions-search-in-list\n https://stackoverflow.com/questions/3040716/python-elegant-way-to-check-if-at-least-one-regex-in-list-matches-a-string\n https://stackoverflow.com/questions/19300020/python-match-a-string-with-regex\n https://stackoverflow.com/questions/37974047/if-any-strings-in-a-list-match-regex\n\"\"\"\n\n lst5 = [x for x in lst4 if checkUsername(x, url, tableName)]\n # lst = list(map(checkUsername, lst2))\n return lst5",
"def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list",
"def search_users(request, usernames_only=True):\n BUFFER_LEN = getattr(settings, 'RESULTS_BUFFER_LEN', 500)\n # SR_LIMIT = getattr(settings, 'SR_LIMIT', 50)\n # SR_MIN_SUBS = getattr(settings, 'SR_MIN_SUBS', 100)\n # SR_MAX_SUBS = getattr(settings, 'SR_MAX_SUBS', 5000000)\n\n # f_ignore_sr_li\n # f_ignore_sr_max\n # f_exclude_sr_li\n\n # fetch users and number of same subscribed subreddits for all users\n # that are subscribed to the same subreddits than auth user; fetch a max\n # of BUFFER_LEN items. This query only touches the dtr5app_subscribed\n # table and does not requore any join with other tables.\n query_params = []\n query_string = ''\n\n # part 1\n query_params += []\n query_string += '''\n SELECT au.id, au.username, COUNT(r1.user_id) AS sr_count\n FROM dtr5app_subscribed r1\n\n INNER JOIN dtr5app_subscribed r2\n ON r1.sr_id = r2.sr_id AND r1.user_id <> r2.user_id\n\n INNER JOIN auth_user au\n ON r2.user_id = au.id\n\n INNER JOIN dtr5app_sr sr\n ON r1.sr_id = sr.id\n\n WHERE au.is_active IS TRUE AND last_login IS NOT NULL '''\n\n # part 1.1\n # if the user has set a maximum size for subreddits to be considered\n # in search. this can be used to filter all the huge default subs that\n # most redditors belong to.\n if request.user.profile.f_ignore_sr_max:\n query_params += [request.user.profile.f_ignore_sr_max]\n query_string += ''' AND sr.subscribers < %s '''\n\n # part 1.2\n # a list of Sr.display_name values. these subreddits should NOT be\n # considered when producing matches.\n # Subreddit names should appear as case insensitive! The f_ignore_sr_li\n # list of subreddit names is supposed to be \"cleaned up\" already, with\n # the appropriate lettercase of a subreddit's name.\n if request.user.profile.f_ignore_sr_li:\n query_params += request.user.profile.f_ignore_sr_li\n x = ', '.join(['%s'] * len(request.user.profile.f_ignore_sr_li))\n query_string += ''' AND sr.id NOT IN (\n SELECT id FROM dtr5app_sr sr2\n WHERE sr2.display_name IN (''' + x + ' )) '\n\n # part 1.9\n query_params += [request.user.id]\n query_string += ''' AND r1.user_id = %s AND au.id IN (\n SELECT id FROM auth_user u\n INNER JOIN dtr5app_profile p\n ON u.id = p.user_id\n WHERE 1=1 '''\n\n # part 2: sex --> TODO: search by gender!\n # li = li.filter(profile__sex=request.user.profile.f_sex)\n if request.user.profile.f_sex > 0:\n query_params += [request.user.profile.f_sex]\n query_string += ''' AND p.sex = %s '''\n\n # part 3: date of birth\n dob_earliest, dob_latest = get_dob_range(request.user.profile.f_minage,\n request.user.profile.f_maxage)\n query_params += [dob_earliest, dob_latest]\n query_string += ''' AND p.dob >= %s AND p.dob <= %s '''\n\n # part 4: lat/lng\n # li = li.filter(profile__lat__gte=lat_min, profile__lat__lte=lat_max,\n # profile__lng__gte=lng_min, profile__lng__lte=lng_max)\n #\n # Values too close are inaccurate because of location fuzzying. Also,\n # f_distance must be at least 1, so that the signup flow doesn't intercept\n # it because it has no value set! 
Leave this to only search distances\n # above 5 km or so, and return \"worldwide\" for any value below 5 km.\n #\n if request.user.profile.f_distance > 5:\n lat_min, lng_min, lat_max, lng_max = get_latlng_bounderies(\n request.user.profile.lat,\n request.user.profile.lng,\n request.user.profile.f_distance)\n query_params += [lat_max, lng_min, lat_min, lng_max]\n query_string += ''' AND p.lat <= %s AND p.lng >= %s\n AND p.lat >= %s AND p.lng <= %s '''\n\n # part 5: exclude auth user themself\n # li = li.exclude(pk=request.user.pk)\n query_params += [request.user.id]\n query_string += ''' AND NOT (u.id = %s) '''\n\n # part 6: exclude users who already have a like/nope flag from auth user\n # li = li.exclude(flags_received__sender=request.user)\n query_params += [request.user.id]\n query_string += ''' AND NOT (u.id IN (SELECT U1.receiver_id AS Col1\n FROM dtr5app_flag U1 WHERE U1.sender_id = %s)) '''\n\n # part 7: exclude globally blocked usernames\n # li = li.exclude(username__in=get_blocked_usernames_list())\n # --> TODO: currently empty.\n pass\n\n # part 8: have at least one picture URL in the JSON string\n # li = li.exclude(profile___pics='[]')\n # TODO: for now, allow no-picture profiles, to make testing easier\n # query_params += []\n # query_string += ''' AND NOT (p._pics = '[]') '''\n\n # finish up\n query_params += [BUFFER_LEN]\n query_string += ''' ) GROUP BY r1.user_id, au.id\n ORDER BY sr_count DESC LIMIT %s '''\n\n # execute the query with the collected params\n users = User.objects.raw(query_string, query_params)\n\n # print('='*50)\n # print(repr(users))\n # print('='*50)\n # for u in users:\n # print('share {} subs with {}:{}'.format(u.sr_count, u.id, u.username))\n # print('='*50)\n\n # default return a list of only usernames\n if usernames_only:\n return [x.username for x in users]\n else:\n return users",
"def followed_by_hillary_and_donald(users, twitter):\n\n str = ''\n set1 = set()\n set2 = set()\n for u_dict in users:\n \tif u_dict['screen_name'] == 'HillaryClinton':\n \t\tset1 = set(u_dict['friends'])\n \telif u_dict['screen_name'] == 'realDonaldTrump':\n \t\tset2 = set(u_dict['friends'])\n \t\t\n common = set.intersection(set1, set2)\n request = robust_request(twitter, 'users/lookup', {'user_id': common}, max_tries=5)\n for user in request:\n \tstr = user['screen_name']\t\n return str",
"def search_user_entries(entry_by_user):\n\n ranks=Counter(dict())\n entry_by_user=entry_by_user.split()\n #complete_file_set=set()\n for entry in entry_by_user:\n availability_info=search_hash(entry,hashtable)\n if availability_info:\n ranks+=ranking(availability_info,fileID_to_names)\n else:\n sorted_display(None)\n #call ranking, pass availability_info\n #print ranks\n sorted_display(ranks)",
"def followed_by_hillary_and_donald(users, twitter):\n ###TODO-- Completed\n for user in users:\n if user['screen_name'] == 'HillaryClinton':\n friends_Hillary = user['friends']\n #print(len(friends_Hillary))\n elif user['screen_name'] == 'realDonaldTrump':\n friends_donald = user['friends']\n #print(len(friends_donald))\n\n common_followed_id = list(set(friends_Hillary) & set(friends_donald))\n\n commn_followed_user = robust_request(twitter,'users/lookup',{'user_id':common_followed_id}).json()\n #print(commn_followed_user[0]['screen_name'])#['screen_name'])\n return commn_followed_user[0]['screen_name']\n #pass",
"def get_users(twitter, screen_names):\n ###TODO-- Completed\n\n #create a request for Twitter to fetch data, using robust_request function, limiting to 200\n #get the requests for every screen_name and store it in a list\n requests = [robust_request(twitter,'users/lookup',{'screen_name':screen_name, 'count':200}).json()[0] for screen_name in screen_names]\n\n #for request in requests:\n # print(request)\n\n return requests",
"async def team_search(self, ctx: commands.Context, username: str):\n all_usernames = {team_id: team.username for team_id, team in self.teams.items()\n if team is not None}\n suggestions = []\n log.info(repr(fuzzywuzzy.process.extract(\n username, all_usernames, limit=5)))\n for fuzz_username, rating, fuzz_id in fuzzywuzzy.process.extract(\n username, all_usernames, limit=5):\n if rating < 50:\n break\n fuzz_team = self.teams[fuzz_id]\n suggestions.append(\n f'(ID: **{fuzz_team.team_id}**) **{fuzz_team.display_name[:40]}**'\n f' -- {len(fuzz_team.users)} registered members')\n if suggestions:\n await ctx.send('\\n'.join(suggestions))\n else:\n await ctx.send(f\"Couldn't find any teams whose usernames resembled `{username}`\")",
"def get_data_user(twitter, screen_names):\n\n data_user = []\n for name in screen_names:\n request = robust_request(twitter, 'users/lookup', {'screen_name': name}, max_tries=5)\n user = [val for val in request]\n friends = []\n request = robust_request(twitter, 'friends/ids', {'screen_name': name, 'count': 5000}, max_tries=5)\n friends = sorted([str(val) for val in request])\n fr = {'screen_name': user[0]['screen_name'],\n 'id': str(user[0]['id']),\n 'friends_id': friends}\n data_user.append(fr)\n \n return data_user",
"def getTwitterUsers(users,credentials=False):\n userList = ','.join(users)\n chain(twitterCall.s('lookup_user',{'screen_name':userList},credentials), pushTwitterUsers.s())()",
"def get_users(twitter, screen_names):\n request = robust_request(twitter, 'users/lookup', {'screen_name': screen_names}, max_tries=5)\n user_info = []\n for user in request:\n \tuser_info.append(user)\n return user_info",
"def test_filter_2(self):\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'name-includes': 'Ken', 'location-includes': 'Spadina',\n 'following': 'Kinder', 'follower': 'Alan'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Ken']\n self.assertEqual(actual, expected)",
"def user_list(request):\r\n query = request.GET.get('q', '')\r\n # if query has 2 or more characters\r\n if len(query) >= 2:\r\n names = query.split(' ')\r\n # if query has a first and last name\r\n if len(names) == 2:\r\n first, last = names\r\n # if first and last name have 2 or more letters\r\n if len(first) >= 2 and len(last) >= 2:\r\n results = User.objects.filter(Q(\r\n first_name__icontains=first, \r\n last_name__icontains=last) | Q(first_name__icontains=last, \r\n last_name__icontains=first)).exclude(pk=request.user.pk)\r\n # if first name has 2 or more letters\r\n elif len(first) >= 2:\r\n results = User.objects.filter(Q(\r\n first_name__icontains=first) | Q(\r\n last_name__icontains=first)).exclude(pk=request.user.pk)\r\n # if last name has 2 or more letters\r\n elif len(last) >= 2:\r\n results = User.objects.filter(Q(\r\n first_name__icontains=last) | Q(\r\n last_name__icontains=last)).exclude(pk=request.user.pk)\r\n # if first and last name have less than 2 letters\r\n else:\r\n results = []\r\n # if query only has one name\r\n else:\r\n results = User.objects.filter(Q(\r\n username__icontains=query)).exclude(pk=request.user.pk)\r\n # if query has less than 2 letters\r\n else:\r\n results = []\r\n d = {\r\n 'results': results,\r\n }\r\n t = loader.get_template('usermessages/results.html')\r\n context = Context(d)\r\n data = {\r\n 'results': t.render(context),\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')",
"def search(word, current_directory, search_result_list=search_list):\n if search_result_list:\n for counter in range(len(search_result_list)):\n search_result_list.pop()\n if current_directory:\n searcher_object = CompleteSearch(current_directory, word)\n searcher_object.start()\n searcher_object.join()\n return remove_equals(search_result_list)\n\n else:\n for cleaner in range(len(search_result_list)):\n search_result_list.pop()\n for driver in drivers():\n searcher_object = CompleteSearch(driver, word)\n searcher_object.start()\n return remove_equals(search_result_list)",
"def query_twitter(session, provided_ioc):\n ioc_dicts = []\n\n if provided_ioc.startswith(\"@\"):\n ioc_dicts.append({\"invalid\": \"{} <-- Monitoring users is prohibited!\".format(provided_ioc)})\n return ioc_dicts\n\n encoded_ioc = urllib.quote_plus(provided_ioc)\n search_tweets = session.search(encoded_ioc, rpp=100, lang=\"en\")\n\n for tweet in search_tweets:\n if tweet._json[\"user\"][\"name\"] == provided_ioc.replace(\"#\", \"\") or \\\n tweet._json[\"user\"][\"screen_name\"] == provided_ioc.replace(\"#\", \"\"):\n ioc_dicts.append({\"invalid\": \"{} <-- Monitoring users is prohibited!\".format(provided_ioc)})\n return ioc_dicts\n\n if \"retweeted_status\" in tweet._json.keys():\n if tweet._json[\"retweeted_status\"][\"user\"][\"name\"] == provided_ioc.replace(\"#\", \"\") or \\\n tweet._json[\"retweeted_status\"][\"user\"][\"screen_name\"] == provided_ioc.replace(\"#\", \"\"):\n ioc_dicts.append({\"invalid\": \"{} <-- Monitoring users is prohibited!\".format(provided_ioc)})\n return ioc_dicts\n\n urls = []\n for x in tweet._json[\"entities\"][\"urls\"]:\n if not x[\"expanded_url\"].startswith(\"https://twitter.com/i/web/status/\"):\n urls.append(x[\"expanded_url\"])\n\n hashtags = []\n for x in tweet._json[\"entities\"][\"hashtags\"]:\n hashtags.append(\"#{}\".format(x[\"text\"]))\n\n ioc_dict = {}\n ioc_dict[\"search_term\"] = provided_ioc\n ioc_dict[\"url\"] = \"\\n\".join(urls)\n ioc_dict[\"hashtags\"] = \"\\n\".join(hashtags)\n ioc_dict[\"timestamp\"] = tweet._json[\"created_at\"]\n ioc_dict[\"tweet\"] = tweet._json[\"text\"]\n\n if \"retweeted_status\" in tweet._json.keys():\n ioc_dict[\"timestamp\"] = tweet._json[\"retweeted_status\"][\"created_at\"]\n ioc_dict[\"tweet\"] = tweet._json[\"retweeted_status\"][\"text\"]\n\n ioc_dicts.append(ioc_dict)\n return ioc_dicts",
"def userSuggestions(database):\n firstname=str(input(\"who do you want to have follow suggestions for :\"))\n usr,find=getByName(database,firstname)\n if not find:\n print(\"the User could not be found\")\n return\n else:\n following=[]\n followers=[]\n for folower in usr.folowed:\n followers.append(folower)\n for folowed in usr.folow:\n following.append(folowed)\n results=[]\n print(\"On what do you want your suggestions to be based on?\\n1. Mutual Interests\\n2. Mutual Connections\\n3. Both\")\n choice=int(input(\"Your choice :\"))\n for key ,usrs in database.items():\n if key not in following: \n correspondant=0\n if choice == 1 or choice == 3:\n for interest in usr.interest:\n if interest in usrs.interest:\n correspondant+=1\n if choice == 2 or choice == 3:\n for folower in followers:\n for folows in usrs.folowed:\n if key == folows:\n correspondant+=1\n results.append([key,correspondant])\n for i in range(len(results)):\n for j in range(0, len(results)-i-1):\n if results[j][1] > results[j+1][1] :\n results[j], results[j+1] = results[j+1], results[j]\n for k in range(5):\n print(results[k][0])",
"def test_filter_3(self):\n\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'name-includes': 'Ken', 'location-includes': 'Spadina',\n 'following': 'Kinder', 'follower': 'Tracy'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = []\n self.assertEqual(actual, expected)",
"def get_users_by_name(query):\n\n user_list = None\n if query == None:\n user_list = User.objects.filter(Q(user_profile__isnull=False))\n else:\n user_list = User.objects.filter(Q(first_name__icontains=query) | Q(last_name__icontains=query)).distinct()\n return user_list",
"def all_followers (twitter_data, username):\n\n # initialize\n followers = []\n\n for key in twitter_data: # go through every username in twitter_data\n if username in twitter_data [key]['following']: # check each 'following'\n followers.append (key)\n\n followers.sort() # sort the list alphabetically for testing purposes\n return followers",
"def __searchUser(self, args = []):\n\n try:\n if len(args) == 0:\n self.__cm.send(p.T_QUERY, '')\n else:\n self.__cm.send(p.T_QUERY, args)\n\n reply = self.__cm.receive()\n\n if (reply is not None and reply.type == p.T_RESULT):\n [ self.__parseUserRecord(r) for r in reply.payload.split() ] \n self.__agent.printList(self.__userList)\n else:\n raise Exception, \"An error occured while fetching user data! The user list is outdated.\"\n \n except Exception, e:\n self.__handleError('List', e)",
"def getName(sentence): #Jasper, Suraj\n userWords = sentence.lower()\n userWords = userWords.split()\n \n # ways of introduction:\n # \"Hello, my name is ___\"\n # \"Hi, I'm ____\"\n # \"Howdy, I'm called ____\"\n # Order: Greeting -> pronoun -> Name -> question (optional)\n # eg. \"Hello, I'm Jasper. How are you?\"\n\n if (userWords[0] in greetings): #the added code that stops iam from being added into the name if 2 greeting are added\n userWords.pop(0) #pop and not .remove because\n \n \n if (userWords[0] == \"i\" and len(userWords) > 1):\n if (userWords[1] in [\"m\",\"am\"]):\n userWords.insert(0, \" \".join(userWords[0:2]))\n userWords.pop(2)\n userWords.pop(1)\n \n userName = \"\"\n for userWord in userWords: #iterate throught the user's words\n foundWord = False #sets True when there's a similar word in the other list\n for word in greetings: #iterates and compares the chosen word from the user's list of words to the words list\n if userWord == word and foundWord == False:\n foundWord = True\n if foundWord == False:\n userName = userName + userWord + \" \"\n return userName #this is the found name",
"def getUsersLookup(self, **kwargs):\n screen_name = handleShouldBeList(kwargs.get('screen_name', None))\n user_id = handleShouldBeList(kwargs.get('user_id', None))\n\n params = {\n 'include_entities': kwargs.get('include_entities', None),\n 'tweet_mode': kwargs.get('tweet_mode', None)\n }\n\n if screen_name:\n params['screen_name'] = ','.join(screen_name)\n\n if user_id:\n params['user_id'] = ','.join(str(uid) for uid in user_id)\n \n query = createQuery(params)\n uri = self.api_url + '/users/lookup.json'\n\n response = self.session.post(uri + query).json()\n return response",
"def _readUsers(directory):\n\tfiles = os.listdir(directory)\n\tthese_users = []\n\tfor text_file in files:\n\t\tcurrent_file = directory + '/' + text_file\n\t\tf = open(current_file, 'r')\n\t\ttweet_list = f.readlines()\n\t\tsplitted = [string.replace('\"', '').strip().split(',') for string in tweet_list]\n\t\tfor user in splitted:\n\t\t\tif user[1] not in these_users:\n\t\t\t\tthese_users.append(user[1])\n\t\t\telse:\n\t\t\t\tcontinue\n\treturn these_users",
"def test_get_filter_results_name_includes(self):\r\n\r\n data_file = open('data.txt', 'r')\r\n twitter_dict = tf.process_data(data_file)\r\n data_file.close()\r\n\r\n actual = tf.get_filter_results(twitter_dict, ['tomCruise', \\\r\n 'PerezHilton'], {'name_includes': 'tomCruise'})\r\n expected = ['tomCruise']\r\n self.assertEqual(actual, expected)",
"def test_filter_4(self):\n\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'location-includes': 'i',\n 'following': 'K', 'follower': 'Ken'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = []\n self.assertEqual(actual, expected)",
"def match_words_to_search(chunks, searchresult, compare_func, join=True):\n wordlist = [hebstrip(w)[1] for w in word_bound.split(searchresult)]\n wordset = set(wordlist)\n genlist = [\n m\n for m in [\n match_one(rlist, wordset)\n for rlist in chunks.linked_heb\n if rlist.data\n ]\n if m\n ]\n ours = [i[0] for i in genlist]\n theirs = [i[1] for i in genlist]\n if join:\n return compare_func(\" \".join(ours), \" \".join(wordlist)), theirs\n else:\n return compare_func(ours, wordlist), theirs"
]
| [
"0.7890037",
"0.7527938",
"0.7475859",
"0.7460602",
"0.6427471",
"0.5894421",
"0.5808291",
"0.57747966",
"0.576515",
"0.5687546",
"0.56544125",
"0.5608119",
"0.55629736",
"0.55307204",
"0.54812115",
"0.5464305",
"0.5451584",
"0.5441889",
"0.54186994",
"0.5417747",
"0.540404",
"0.5392619",
"0.5381299",
"0.53804344",
"0.53689915",
"0.534864",
"0.5346302",
"0.53322095",
"0.532098",
"0.53145593"
]
| 0.83645403 | 0 |
(Twitterverse dictionary, list of str, filter specification dictionary) -> list of str >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query2.txt', 'r') >>> query_dict = process_query(query_file) >>> search_dict = query_dict['search'] >>> username_list = get_search_results(twitter_dict, search_dict) >>> filter_dict = query_dict['filter'] >>> get_filter_results(twitter_dict, username_list, filter_dict) | def get_filter_results(twitter_dict, username_list, filter_dict):
twitter_handles = username_list
name_filtered_list = []
upper_user = []
if 'name_includes' in filter_dict:
for user in twitter_handles:
user = user.upper()
upper_user.append(user)
name = filter_dict['name_includes']
for uName in username_list:
if name.upper() == uName.upper():
name_filtered_list.append(name)
twitter_handles = name_filtered_list
location_filtered_list = []
if 'location_includes' in filter_dict:
for user in twitter_handles:
location = filter_dict['location_includes']
if location.upper() == twitter_dict[user]['location'].upper():
location_filtered_list.append(user)
twitter_handles = location_filtered_list
follower_filtered_list = []
if 'follower' in filter_dict:
for user in twitter_handles:
for follower in twitter_dict[user]['following']:
if follower == filter_dict['follower']:
#if follower in twitter_handles:
follower_filtered_list.append(user)
twitter_handles = follower_filtered_list
following_filtered_list = []
if 'following' in filter_dict:
for user in twitter_handles:
following_list = all_followers(twitter_dict, user)
for follower in following_list:
if follower in twitter_handles:
following_filtered_list.append(follower)
twitter_handles = following_filtered_list
return twitter_handles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_filter_results (twitter_data, search_list, filter_data):\n\n #initialize\n filter_list = []\n\n for operation in filter_data:\n if operation == 'name-includes':\n for username in search_list:\n # since case doesnt matter, eveything is made uppercase and\n # then is checked\n if filter_data [operation].upper() in \\\n twitter_data [username]['name'].upper():\n filter_list.append (username)\n\n elif operation == 'location-includes':\n for username in search_list:\n # same case as above\n if filter_data [operation].upper() in \\\n twitter_data [username]['location'].upper():\n filter_list.append (username)\n\n elif operation == 'follower':\n for username in search_list:\n if username in \\\n twitter_data[filter_data [operation]]['following']:\n filter_list.append (username)\n\n elif operation == 'following':\n for username in search_list:\n if username in all_followers(twitter_data, filter_data[operation]):\n filter_list.append (username)\n\n search_list = filter_list\n filter_list = []\n\n filter_list = search_list\n filter_list.sort() # sort the list alphabetically for testing purposes\n\n return filter_list",
"def get_search_results(twitter_dict, search_dict):\r\n\r\n search_list = [search_dict['username']] \r\n search_specified_list = []\r\n\r\n for user in search_list:\r\n search_users_list = [user]\r\n \r\n for operation in search_dict['operations']:\r\n search_users_list = search_helper(search_users_list, operation,\\\r\n twitter_dict)\r\n \r\n search_specified_list += search_users_list\r\n \r\n return search_specified_list",
"def test_filter_2(self):\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'name-includes': 'Ken', 'location-includes': 'Spadina',\n 'following': 'Kinder', 'follower': 'Alan'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Ken']\n self.assertEqual(actual, expected)",
"def _filter_data(analyzed_tweet_data: list, start_date, end_date, hashtags, mentions, urls):\n # filter by dates\n filtered_data = get_tweets_in_daterange(\n analyzed_tweet_data, start_date, end_date)\n print(\"Done filtering on date...\")\n if hashtags:\n filtered_data = _filter_search_values(\n 'hashtags', hashtags, filtered_data)\n print(f'Done filtering on hashtags: {hashtags}')\n if mentions:\n filtered_data = _filter_search_values(\n 'mentions', mentions, filtered_data)\n print(f'Done filtering on mentions: {mentions}')\n if urls:\n filtered_data = _filter_search_values(\n 'tweet_urls', urls, filtered_data)\n print(f'Done filtering on urls: {urls}')\n\n return filtered_data",
"def test_filter_3(self):\n\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'name-includes': 'Ken', 'location-includes': 'Spadina',\n 'following': 'Kinder', 'follower': 'Tracy'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = []\n self.assertEqual(actual, expected)",
"def test_filter_4(self):\n\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'location-includes': 'i',\n 'following': 'K', 'follower': 'Ken'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = []\n self.assertEqual(actual, expected)",
"def test_filter_1(self):\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Kinder', 'Ken', 'Alan', 'Tracy']\n self.assertEqual(actual, expected)",
"def test_filter_7(self):\n\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'following': 'Kinder'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Ken', 'Alan', 'Tracy']\n self.assertEqual(actual, expected)",
"def process_query (file):\n\n # initialize all the dictionaries and lists we will be using\n query_data = {}\n query_data ['search'] = {'operations':[]}\n query_data ['filter'] = {}\n query_data ['present'] = {}\n\n temp = ''\n\n file.readline() # for when the file says SEARCH\n\n query_data ['search']['username'] = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'FILTER': # go until the the filter section\n query_data ['search']['operations'].append (temp)\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'PRESENT': # go until the present section\n # we make the key everything from the beginning to the first space\n # then the value is everything after the first space\n query_data ['filter'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != '': # go until the end of the file\n # same process as the previous while loop\n query_data ['present'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n return query_data",
"def apply_search_filters():\n params = {'api_key': API_KEY}\n for k in demisto.args():\n if demisto.getArg(k):\n params['term'] = k\n params['query'] = demisto.getArg(k)\n break\n return params",
"def test_filter_6(self):\n\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'location-includes': 'Spadina'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Kinder', 'Ken', 'Alan']\n self.assertEqual(actual, expected)",
"def get_search_results (twitter_data, search_data):\n\n search_list = [search_data['username']] # start with the first username\n temp = [] # initialize\n\n for operation in search_data['operations']: # go through every operation\n for username in search_list:\n if operation == 'following':\n for name in twitter_data[username]['following']:\n if not name in temp:\n temp.append (name)\n\n elif operation == 'followers':\n for name in all_followers (twitter_data, username):\n if not name in temp:\n temp.append (name)\n\n search_list = temp\n temp = []\n search_list.sort() # sort the list alphabetically for testing purposes\n return search_list",
"def test_filter(self):\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'follower': 'Alan'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Kinder', 'Ken', 'Tracy']\n self.assertEqual(actual, expected)",
"def test_filter_5(self):\n\n usernames = ['Kinder', 'Ken', 'Alan', 'Tracy']\n filter_dict = {'location-includes': 'Wilson'}\n\n actual = tf.get_filter_results(twitter_dict, usernames, filter_dict)\n expected = ['Tracy']\n self.assertEqual(actual, expected)",
"def filter_format(filter_dict, query_data, filter_type, index): \r\n \r\n filter_list = ''\r\n count = 0 \r\n \r\n while query_data[index] != 'PRESENT\\n': \r\n if filter_type in query_data[index]: \r\n count += 1\r\n filter_keyword = query_data[index].strip(filter_type)\r\n filter_list = (filter_keyword.strip('\\n'))\r\n \r\n index += 1 \r\n \r\n if count > 0: \r\n filter_dict[filter_type] = filter_list\r\n return filter_dict",
"def filter(self):\n\t\tparameters = {}\n\n\t\tif self.keywords:\n\t\t\tparameters['track'] = ','.join(self.keywords)\n\n\t\tif self.locations:\n\t\t\tparameters['locations'] = ','.join([','.join([str(latlong) for latlong in loc]) for loc in self.locations])\n\n\t\tif self.usernames:\n\t\t\tparameters['follow'] = ','.join([str(u) for u in self.usernames])\n\n\t\tself.launch('statuses/filter.json', parameters)",
"def search_helper(name_list, operation, twitter_dict): \r\n return_list = []\r\n \r\n for name in name_list:\r\n if operation == 'following':\r\n search_specified_list = twitter_dict[name]['following']\r\n for following_names in search_specified_list: \r\n if following_names not in return_list: \r\n return_list.append(following_names) \r\n \r\n elif operation == 'followers':\r\n followers = all_followers(twitter_dict, name)\r\n for followers_name in followers: \r\n if followers_name not in return_list: \r\n return_list.append(followers_name) \r\n \r\n return return_list",
"def process_query(query_file):\r\n query_data = query_file.readlines()\r\n query_dict = {}\r\n x = 1 \r\n search_dict = {}\r\n search_dict['username'] = query_data[x].strip('\\n')\r\n x += 1\r\n operation_list = []\r\n \r\n while query_data[x] != 'FILTER\\n': \r\n operation_list.append(query_data[x].strip('\\n'))\r\n x += 1\r\n \r\n search_dict['operations'] = operation_list \r\n query_dict['search'] = search_dict \r\n x += 1\r\n \r\n filter_dict = {}\r\n filter_format(filter_dict, query_data, 'name-includes', x)\r\n filter_format(filter_dict, query_data, 'location-includes', x)\r\n filter_format(filter_dict, query_data, 'follower', x)\r\n filter_format(filter_dict, query_data, 'following', x)\r\n query_dict['filter'] = filter_dict\r\n \r\n present_dict = {}\r\n sort_by = query_data[-2].strip('sort-by ')\r\n present_dict['sort-by'] = sort_by.strip('\\n')\r\n \r\n format_type = query_data[-1].lstrip('format ')\r\n present_dict['format'] = format_type\r\n query_dict['present'] = present_dict\r\n \r\n return query_dict",
"def test_get_filter_results_name_includes(self):\r\n\r\n data_file = open('data.txt', 'r')\r\n twitter_dict = tf.process_data(data_file)\r\n data_file.close()\r\n\r\n actual = tf.get_filter_results(twitter_dict, ['tomCruise', \\\r\n 'PerezHilton'], {'name_includes': 'tomCruise'})\r\n expected = ['tomCruise']\r\n self.assertEqual(actual, expected)",
"def process(self, filter_words, count=1):\n user = self.__api.get_user(self.__username)\n\n # print user.screen_name\n # print user.followers_count\n if self.__appMode == 1 and self.__TimeLineMode == 1:\n self.get_timeline(filter_words)\n else:\n if self.__friendMode:\n print(\"Getting all Twitter Friends \\n\")\n for friend in user.friends():\n self.get_tweet(friend.screen_name, filter_words, count)\n else:\n for screen_name in self.__priorityCoin:\n self.get_tweet(screen_name, filter_words, count)\n print('Twitter Data Extraction done!!')",
"def get_searchdata(user, filter=None):\n # note to Jonas, useful python debuging tool. \n #import pdb\n maps = get_maps(user)\n searchdata = []\n for the_map in maps.values():\n topic = the_map.main_topic\n for subtopic in the_map.subtopics.values():\n \n #pdb.set_trace()\n for url in subtopic.urls.values():\n searchdata.append([topic, subtopic.text, url])\n return searchdata",
"def doFiltering(self, searchfunc, filters=None):\n F=[]\n for f in self.filters:\n F.append(f.getFilter())\n #print F\n sets = []\n for f in F:\n col, val, op, boolean = f\n names = searchfunc(col, val, op)\n sets.append((set(names), boolean))\n names = sets[0][0]\n for s in sets[1:]:\n b=s[1]\n if b == 'AND':\n names = names & s[0]\n elif b == 'OR':\n names = names | s[0]\n elif b == 'NOT':\n names = names - s[0]\n names = list(names)\n self.updateResults(len(names))\n return names",
"def test_get_filter_results_location_includes(self):\r\n\r\n data_file = open('data.txt', 'r')\r\n twitter_dict = tf.process_data(data_file)\r\n data_file.close()\r\n\r\n actual = tf.get_filter_results(twitter_dict, ['tomCruise', \\\r\n 'PerezHilton', 'tomfan'], {'follower': 'katieH'})\r\n expected = ['tomCruise', 'PerezHilton']\r\n self.assertEqual(actual, expected)",
"def split(args):\n tweet_iter = None\n if args.query:\n tweet_iter = search_twitter(args)\n else:\n with io.open(args.json, 'r', encoding='utf-8') as f:\n tweet_iter = [json.loads(f.read())]\n return process_tweets(tweet_iter)",
"def get_data( filepath_query, filepath_results ):\n with open( filepath_query, 'r' ) as query_file:\n query = json.load( query_file )\n \n query_text = query['query']['multi_match']['query']\n query_scores = query['nlp_scores']\n query_data = {\n 'query_text' : query_text,\n 'bias_score' : query_scores['bias_score'],\n 'vocab_richness' : query_scores['stylo_scores']['vocab_richness'],\n 'hapax_legomena' : query_scores['stylo_scores']['hepax_legomena'],\n 'wordlength' : query_scores['stylo_scores']['readability_measures']['average_wordlength'],\n 'sentlength' : query_scores['stylo_scores']['readability_measures']['average_sentlength'],\n 'spelling_errors' : query_scores['stylo_scores']['spelling_errors'],\n 'topics' : query_scores['topics']\n }\n\n with open( filepath_results ) as results_file:\n results = json.load( results_file )\n \n results_data = []\n for doc in results:\n argID = doc['_source']['argsMeID']\n premise = doc['_source']['premise']\n average_wordlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_wordlength']\n average_sentlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_sentlength']\n bias_score = doc['nlp_scores']['bias_score']\n bias_distance = doc['bias_distance']\n stylo_distance = doc['stylo_distance']\n topic_match_count = doc['topic_match_count']\n old_score = doc['old_score']\n new_score = doc['new_score']\n scoring_distance = doc['scoring_distance']\n old_rank = doc['old_rank']\n new_rank = doc['new_rank']\n \n doc_data = {\n 'argID' : argID,\n 'premise' : premise,\n 'wordlength' : average_wordlength,\n 'sentlength' : average_sentlength,\n 'bias_score' : bias_score,\n 'bias_distance' : bias_distance,\n 'stylo_distance' : stylo_distance,\n 'topic_match_count' : topic_match_count,\n 'old_score' : old_score,\n 'new_score' : new_score,\n 'scoring_distance' : scoring_distance,\n 'old_rank' : old_rank,\n 'new_rank' : new_rank\n }\n results_data.append( doc_data )\n\n data_tuple = ( query_data, results_data )\n return data_tuple",
"def get_filters():\n filters = {}\n print('Hello Friend! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n try:\n city = input (\"Which city's data would you like to look at? (Chicago, New York City or Washington)\")\n break\n except (ValueError, KeyboardInterrpt, TypeError):\n print(\"Please enter either Chicago, New York City or Washington\")\n finally:\n filters['city_name'] = city\n print(\"Looking at data for {}\".format(city))\n\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = input (\"Which month's (January, February, March, April, May, June or all) data would you like to look at?\")\n break\n except (ValueError, KeyboardInterrpt, TypeError):\n print(\"Please enter either all, January, February, March, April, May or June.\")\n finally:\n filters['month_name'] = month\n print(\"Looking at data for {}\".format(month))\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n try:\n day = input (\"Which day of the week would you like to look at data for?\")\n break\n except (ValueError, KeyboardInterrpt, TypeError):\n print(\"Please enter either Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday or ALL.\")\n finally:\n filters['day_name'] = day\n print(\"Looking at data for {}\".format(day))\n\n print('-'*40)\n return city, month, day",
"def search(self, filter: str = None) -> dict:\n r = requests.get(self.url, headers=self.headers)\n\n if filter:\n data = r.json()\n return filter_list(data=data, filter_by=filter)\n\n return r.json()",
"def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n\t try:\n\t city = input('Input city name (chicago, new york city, washington): ').lower()\n\t if city in cities:\n break\n\t except ValueError:\n\t print('That\\'s not a valid String!')\n\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n\t try:\n\t month = input('Input month name (all, january, february, ... , june): ').lower()\n\t if month in months:\n break\n\t except ValueError:\n\t print('That\\'s not a valid String!')\n\n # TO DO: get user input for day of week (all, monday, tuesday, ..., sunday)\n while True:\n\t try:\n\t day = input('Input day of week (all, monday, tuesday, ..., sunday): ').lower()\n\t if day in days:\n break\n\t except ValueError:\n\t print('That\\'s not a valid String!')\n\n print('-'*40)\n return city, month, day",
"def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # Get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = input('Would you like to see data for Chicago, New York City or Washington?')\n if city.lower() in CITY_DATA:\n break\n print('ERROR: City does not match. Please try again.')\n\n # Get user input for month (all, january, february, ... , june)\n while True:\n month = input(\"Type month (January, February, March, April, May or June) to filter by or type 'all' for no filter\")\n if month.lower() in MONTH_LIST or month.lower() == 'all':\n break\n print(\"ERROR: Input was not a month from January to June nor all. Please try again.\")\n\n # Get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n day = input(\"Type day of the week to filter or type 'all' for no filter\")\n if day.lower() in DAY_LIST or day.lower() == 'all':\n break\n print(\"ERROR: Input was not a day of the week nor all.\")\n\n print('-'*40)\n return city, month, day",
"def test_get_filter_results_location_includes(self):\r\n\r\n data_file = open('data.txt', 'r')\r\n twitter_dict = tf.process_data(data_file)\r\n data_file.close()\r\n\r\n actual = tf.get_filter_results(twitter_dict, ['tomCruise', \\\r\n 'PerezHilton'], {'location_includes': 'Hollywood, California'})\r\n expected = ['PerezHilton']\r\n self.assertEqual(actual, expected)"
]
| [
"0.7434036",
"0.68169296",
"0.63130534",
"0.63128114",
"0.6289486",
"0.6159073",
"0.61330956",
"0.60395426",
"0.6004611",
"0.600197",
"0.5982023",
"0.59766793",
"0.59600365",
"0.59467745",
"0.582324",
"0.58030814",
"0.5769613",
"0.576816",
"0.5756492",
"0.5753108",
"0.57031924",
"0.56816345",
"0.5677645",
"0.56766015",
"0.5668696",
"0.55842614",
"0.5540817",
"0.551837",
"0.5515601",
"0.5508014"
]
| 0.76796806 | 0 |
(Twitterverse dictionary, list of str, presentation specification dictionary) -> str Return final_list of users from twitter_dict in the order and format as indicated by present_dict. >>> data_file = open('data.txt', 'r') >>> twitter_dict = process_data(data_file) >>> query_file = open('query2.txt', 'r') >>> query_dict = process_query(query_file) >>> search_dict = query_dict['search'] >>> username_list = get_search_results(twitter_dict, search_dict) >>> filter_dict = query_dict['filter'] >>> final_list = get_filter_results(twitter_dict, username_list, filter_dict) >>> present_dict = query_dict['present'] >>> get_present_string(twitter_dict, final_list, present_dict) | def get_present_string(twitter_dict, final_list, present_dict):
if present_dict['sort-by'] == 'username':
tweet_sort(twitter_dict, final_list, username_first)
if present_dict['sort-by'] == 'name':
tweet_sort(twitter_dict, final_list, name_first)
if present_dict['sort-by'] == 'popularity':
tweet_sort(twitter_dict, final_list, more_popular)
present_output = ''
format_long = ''
format_short = []
if 'long' in present_dict['format']:
for user in final_list:
website = twitter_dict[user]["web"]
bio = '\n' + twitter_dict[user]["bio"]
if("web" in twitter_dict[user]):
website = twitter_dict[user]["web"]
format_long = ('----------\n' + user + '\n' + 'name: ' +\
twitter_dict[user]['name'] + '\n' + 'location: ' +\
twitter_dict[user]['location'] + '\n' + 'website: '\
+ website + '\n' + 'bio:' + bio + '\n' +\
'following: '+\
str(twitter_dict[user]['following']) + '\n')
present_output += format_long
present_output += '----------'
elif 'short' in present_dict['format']:
present_output = str(final_list)
return present_output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_present_string (twitter_data, filter_list, present_data):\n\n #initialize\n present_string = ''\n present_list = filter_list\n\n if present_data ['sort-by'] == 'username':\n tweet_sort (twitter_data, present_list, username_first)\n\n elif present_data ['sort-by'] == 'name':\n tweet_sort (twitter_data, present_list, name_first)\n\n elif present_data ['sort-by'] == 'popularity':\n tweet_sort (twitter_data, present_list, more_popular)\n\n if present_data ['format'] == 'long':\n present_string += '----------'\n if len(present_list) >= 1:\n for username in present_list:\n present_string += '\\n' + \\\n username + '\\n' + \\\n 'name: ' + twitter_data [username]['name'] + '\\n' + \\\n 'location: ' + twitter_data [username]['location'] + '\\n' + \\\n 'website: ' + twitter_data [username]['web'] + '\\n' + \\\n 'bio:\\n' + twitter_data [username]['bio'] + '\\n' + \\\n 'following: ' + \\\n str(twitter_data [username]['following']) + '\\n' + \\\n '----------'\n else:\n present_string += '\\n----------'\n\n present_string += '\\n'\n\n else:\n present_string = str(present_list)\n\n return present_string",
"def get_filter_results(twitter_dict, username_list, filter_dict):\r\n twitter_handles = username_list \r\n name_filtered_list = []\r\n upper_user = []\r\n \r\n if 'name_includes' in filter_dict: \r\n for user in twitter_handles: \r\n user = user.upper()\r\n upper_user.append(user)\r\n name = filter_dict['name_includes']\r\n \r\n for uName in username_list:\r\n if name.upper() == uName.upper():\r\n name_filtered_list.append(name) \r\n \r\n twitter_handles = name_filtered_list \r\n \r\n location_filtered_list = []\r\n if 'location_includes' in filter_dict: \r\n for user in twitter_handles: \r\n location = filter_dict['location_includes']\r\n if location.upper() == twitter_dict[user]['location'].upper(): \r\n location_filtered_list.append(user) \r\n twitter_handles = location_filtered_list\r\n \r\n follower_filtered_list = []\r\n if 'follower' in filter_dict:\r\n for user in twitter_handles:\r\n for follower in twitter_dict[user]['following']:\r\n if follower == filter_dict['follower']:\r\n #if follower in twitter_handles:\r\n follower_filtered_list.append(user)\r\n \r\n twitter_handles = follower_filtered_list \r\n \r\n following_filtered_list = []\r\n if 'following' in filter_dict:\r\n for user in twitter_handles: \r\n following_list = all_followers(twitter_dict, user)\r\n for follower in following_list: \r\n if follower in twitter_handles: \r\n following_filtered_list.append(follower) \r\n twitter_handles = following_filtered_list \r\n \r\n return twitter_handles",
"def get_search_results(twitter_dict, search_dict):\r\n\r\n search_list = [search_dict['username']] \r\n search_specified_list = []\r\n\r\n for user in search_list:\r\n search_users_list = [user]\r\n \r\n for operation in search_dict['operations']:\r\n search_users_list = search_helper(search_users_list, operation,\\\r\n twitter_dict)\r\n \r\n search_specified_list += search_users_list\r\n \r\n return search_specified_list",
"def followed_by_hillary_and_donald(users, twitter):\n\n str = ''\n set1 = set()\n set2 = set()\n for u_dict in users:\n \tif u_dict['screen_name'] == 'HillaryClinton':\n \t\tset1 = set(u_dict['friends'])\n \telif u_dict['screen_name'] == 'realDonaldTrump':\n \t\tset2 = set(u_dict['friends'])\n \t\t\n common = set.intersection(set1, set2)\n request = robust_request(twitter, 'users/lookup', {'user_id': common}, max_tries=5)\n for user in request:\n \tstr = user['screen_name']\t\n return str",
"def search_helper(name_list, operation, twitter_dict): \r\n return_list = []\r\n \r\n for name in name_list:\r\n if operation == 'following':\r\n search_specified_list = twitter_dict[name]['following']\r\n for following_names in search_specified_list: \r\n if following_names not in return_list: \r\n return_list.append(following_names) \r\n \r\n elif operation == 'followers':\r\n followers = all_followers(twitter_dict, name)\r\n for followers_name in followers: \r\n if followers_name not in return_list: \r\n return_list.append(followers_name) \r\n \r\n return return_list",
"def get_search_results (twitter_data, search_data):\n\n search_list = [search_data['username']] # start with the first username\n temp = [] # initialize\n\n for operation in search_data['operations']: # go through every operation\n for username in search_list:\n if operation == 'following':\n for name in twitter_data[username]['following']:\n if not name in temp:\n temp.append (name)\n\n elif operation == 'followers':\n for name in all_followers (twitter_data, username):\n if not name in temp:\n temp.append (name)\n\n search_list = temp\n temp = []\n search_list.sort() # sort the list alphabetically for testing purposes\n return search_list",
"def query_twitter(session, provided_ioc):\n ioc_dicts = []\n\n if provided_ioc.startswith(\"@\"):\n ioc_dicts.append({\"invalid\": \"{} <-- Monitoring users is prohibited!\".format(provided_ioc)})\n return ioc_dicts\n\n encoded_ioc = urllib.quote_plus(provided_ioc)\n search_tweets = session.search(encoded_ioc, rpp=100, lang=\"en\")\n\n for tweet in search_tweets:\n if tweet._json[\"user\"][\"name\"] == provided_ioc.replace(\"#\", \"\") or \\\n tweet._json[\"user\"][\"screen_name\"] == provided_ioc.replace(\"#\", \"\"):\n ioc_dicts.append({\"invalid\": \"{} <-- Monitoring users is prohibited!\".format(provided_ioc)})\n return ioc_dicts\n\n if \"retweeted_status\" in tweet._json.keys():\n if tweet._json[\"retweeted_status\"][\"user\"][\"name\"] == provided_ioc.replace(\"#\", \"\") or \\\n tweet._json[\"retweeted_status\"][\"user\"][\"screen_name\"] == provided_ioc.replace(\"#\", \"\"):\n ioc_dicts.append({\"invalid\": \"{} <-- Monitoring users is prohibited!\".format(provided_ioc)})\n return ioc_dicts\n\n urls = []\n for x in tweet._json[\"entities\"][\"urls\"]:\n if not x[\"expanded_url\"].startswith(\"https://twitter.com/i/web/status/\"):\n urls.append(x[\"expanded_url\"])\n\n hashtags = []\n for x in tweet._json[\"entities\"][\"hashtags\"]:\n hashtags.append(\"#{}\".format(x[\"text\"]))\n\n ioc_dict = {}\n ioc_dict[\"search_term\"] = provided_ioc\n ioc_dict[\"url\"] = \"\\n\".join(urls)\n ioc_dict[\"hashtags\"] = \"\\n\".join(hashtags)\n ioc_dict[\"timestamp\"] = tweet._json[\"created_at\"]\n ioc_dict[\"tweet\"] = tweet._json[\"text\"]\n\n if \"retweeted_status\" in tweet._json.keys():\n ioc_dict[\"timestamp\"] = tweet._json[\"retweeted_status\"][\"created_at\"]\n ioc_dict[\"tweet\"] = tweet._json[\"retweeted_status\"][\"text\"]\n\n ioc_dicts.append(ioc_dict)\n return ioc_dicts",
"def get_filter_results (twitter_data, search_list, filter_data):\n\n #initialize\n filter_list = []\n\n for operation in filter_data:\n if operation == 'name-includes':\n for username in search_list:\n # since case doesnt matter, eveything is made uppercase and\n # then is checked\n if filter_data [operation].upper() in \\\n twitter_data [username]['name'].upper():\n filter_list.append (username)\n\n elif operation == 'location-includes':\n for username in search_list:\n # same case as above\n if filter_data [operation].upper() in \\\n twitter_data [username]['location'].upper():\n filter_list.append (username)\n\n elif operation == 'follower':\n for username in search_list:\n if username in \\\n twitter_data[filter_data [operation]]['following']:\n filter_list.append (username)\n\n elif operation == 'following':\n for username in search_list:\n if username in all_followers(twitter_data, filter_data[operation]):\n filter_list.append (username)\n\n search_list = filter_list\n filter_list = []\n\n filter_list = search_list\n filter_list.sort() # sort the list alphabetically for testing purposes\n\n return filter_list",
"def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list",
"def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]",
"def display_phrasewise_list(prob_dict):\n print(\"***********Phrase pairs and their ranks*****************\")\n for f_phrase in prob_dict:\n e_phrases = prob_dict[f_phrase]\n s = [(phrase, e_phrases[phrase]) for phrase in sorted(e_phrases, key=e_phrases.get, reverse=True)]\n print(f_phrase ,\"->\",s)\n print(\"----------------------------------------------------------------------\")",
"def followed_by_hillary_and_donald(users, twitter):\n ###TODO-- Completed\n for user in users:\n if user['screen_name'] == 'HillaryClinton':\n friends_Hillary = user['friends']\n #print(len(friends_Hillary))\n elif user['screen_name'] == 'realDonaldTrump':\n friends_donald = user['friends']\n #print(len(friends_donald))\n\n common_followed_id = list(set(friends_Hillary) & set(friends_donald))\n\n commn_followed_user = robust_request(twitter,'users/lookup',{'user_id':common_followed_id}).json()\n #print(commn_followed_user[0]['screen_name'])#['screen_name'])\n return commn_followed_user[0]['screen_name']\n #pass",
"def process(self, filter_words, count=1):\n user = self.__api.get_user(self.__username)\n\n # print user.screen_name\n # print user.followers_count\n if self.__appMode == 1 and self.__TimeLineMode == 1:\n self.get_timeline(filter_words)\n else:\n if self.__friendMode:\n print(\"Getting all Twitter Friends \\n\")\n for friend in user.friends():\n self.get_tweet(friend.screen_name, filter_words, count)\n else:\n for screen_name in self.__priorityCoin:\n self.get_tweet(screen_name, filter_words, count)\n print('Twitter Data Extraction done!!')",
"def print_query_results(top, ranked_docs, tweets_dict):\n print(\"\\n======================\\nTop {} results out of {} for the seached query:\\n\".format(top, len(ranked_docs)))\n for tweet_id in ranked_docs[:top]:\n tweet_object = tweets_dict[tweet_id]\n txt = tweet_object[\"text\"]\n usr = tweet_object[\"user\"][\"name\"]\n date = tweet_object[\"created_at\"]\n hashtags = tweet_object[\"entities\"][\"hashtags\"]\n favs = tweet_object[\"favorite_count\"]\n rt = tweet_object[\"retweet_count\"]\n urls = tweet_object[\"entities\"][\"urls\"]\n print(\"\\n==================================================================\\n\")\n print(\"Username %s | Tweet: %s\\n Date %s\\n Likes %s| Retweets %s\"%(usr, txt, date, favs, rt))\n if hashtags:\n print(\"Hashtags: \")\n for hashtag in hashtags:\n print(hashtag)\n if urls:\n print(\"URLs: \")\n for url in urls:\n print(url[\"url\"])",
"def detect_author(user_to_tweets: Dict[str, List[tuple]], tweet_text: str) -> \\\n str:\n acc = []\n \n for keys in user_to_tweets:\n author_hashes = hashtag_seperator(user_to_tweets[keys])\n text_hashes = extract_hashtags(tweet_text)\n if set(text_hashes).issubset(author_hashes):\n acc.append(keys)\n if len(acc) == 1:\n return acc[0]\n return 'unknown'",
"def all_followers (twitter_data, username):\n\n # initialize\n followers = []\n\n for key in twitter_data: # go through every username in twitter_data\n if username in twitter_data [key]['following']: # check each 'following'\n followers.append (key)\n\n followers.sort() # sort the list alphabetically for testing purposes\n return followers",
"def parse_prediction(self, predictions):\n\t\tusers = list()\n\t\tprint(predictions)\n\t\tfor prediction in predictions:\n\t\t\tfor email in prediction:\n\t\t\t\tusers.append(email)\n\t\t\t\t\n\t\treturn users",
"def extract_relevant(self):\n item_extraction = self.data\n my_dict = {'tweeted_time': item_extraction['created_at'],\n 'tweet_id': item_extraction['id'],\n # If the time comes when the below becomes more significant, it will be no trouble at all to make an\n # additional column for it, but delimiting it with a ` creates less clutter in the Database\n 'in_reply_to':\n \"NAME/\" + str(item_extraction['in_reply_to_screen_name']) + \"`\" +\n \"STATUSID/\" + str(item_extraction['in_reply_to_status_id_str']) + \"`\" +\n \"USERID/\" + str(item_extraction['in_reply_to_user_id_str']),\n 'lang': item_extraction['lang'],\n 'place': item_extraction['place'], 'source': item_extraction['source']}\n if item_extraction['place'] is not None:\n my_dict['place'] = item_extraction['place']['full_name']\n if 'retweeted_status' in item_extraction.keys():\n my_dict['original_author_id'] = item_extraction['retweeted_status']['user']['id']\n my_dict['original_author_handle'] = item_extraction['retweeted_status']['user']['screen_name']\n tester = item_extraction['retweeted_status']['text']\n cleaned = ' '.join(re.sub(\"(RT : )|(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", tester).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n # This final text will make it a lot easier to run NLP\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n else:\n my_dict['original_author_id'] = item_extraction['user']['id']\n my_dict['original_author_handle'] = item_extraction['user']['screen_name']\n cleaned = ' '.join(re.sub(\"(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", item_extraction['text']).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n return my_dict",
"def twitter_text(\n self,\n text: str,\n urls: List[Dict[str, str]],\n user_mentions: List[Dict[str, Any]],\n media: List[Dict[str, Any]],\n ) -> Element:\n\n to_process: List[Dict[str, Any]] = []\n # Build dicts for URLs\n for url_data in urls:\n to_process.extend(\n {\n \"type\": \"url\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"url\": url_data[\"url\"],\n \"text\": url_data[\"expanded_url\"],\n }\n for match in re.finditer(re.escape(url_data[\"url\"]), text, re.IGNORECASE)\n )\n # Build dicts for mentions\n for user_mention in user_mentions:\n screen_name = user_mention[\"screen_name\"]\n mention_string = \"@\" + screen_name\n to_process.extend(\n {\n \"type\": \"mention\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"url\": \"https://twitter.com/\" + urllib.parse.quote(screen_name),\n \"text\": mention_string,\n }\n for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE)\n )\n # Build dicts for media\n for media_item in media:\n short_url = media_item[\"url\"]\n expanded_url = media_item[\"expanded_url\"]\n to_process.extend(\n {\n \"type\": \"media\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"url\": short_url,\n \"text\": expanded_url,\n }\n for match in re.finditer(re.escape(short_url), text, re.IGNORECASE)\n )\n # Build dicts for emojis\n for match in POSSIBLE_EMOJI_RE.finditer(text):\n orig_syntax = match.group(\"syntax\")\n codepoint = emoji_to_hex_codepoint(unqualify_emoji(orig_syntax))\n if codepoint in codepoint_to_name:\n display_string = \":\" + codepoint_to_name[codepoint] + \":\"\n to_process.append(\n {\n \"type\": \"emoji\",\n \"start\": match.start(),\n \"end\": match.end(),\n \"codepoint\": codepoint,\n \"title\": display_string,\n }\n )\n\n to_process.sort(key=lambda x: x[\"start\"])\n p = current_node = Element(\"p\")\n\n def set_text(text: str) -> None:\n \"\"\"\n Helper to set the text or the tail of the current_node\n \"\"\"\n if current_node == p:\n current_node.text = text\n else:\n current_node.tail = text\n\n db_data: Optional[DbData] = self.zmd.zulip_db_data\n current_index = 0\n for item in to_process:\n # The text we want to link starts in already linked text skip it\n if item[\"start\"] < current_index:\n continue\n # Add text from the end of last link to the start of the current\n # link\n set_text(text[current_index : item[\"start\"]])\n current_index = item[\"end\"]\n if item[\"type\"] != \"emoji\":\n elem = url_to_a(db_data, item[\"url\"], item[\"text\"])\n assert isinstance(elem, Element)\n else:\n elem = make_emoji(item[\"codepoint\"], item[\"title\"])\n current_node = elem\n p.append(elem)\n\n # Add any unused text\n set_text(text[current_index:])\n return p",
"def twitter_data(filename, dictionary):\r\n new_data = []\r\n with codecs.open(filename, 'r', 'utf8') as f:\r\n for line in f:\r\n new_line = []\r\n stuff = [x for x in line.lower().split() if\r\n ((has_letter(x) or len(x) >= 1) and keep_word(x, num_words, count_dict))]\r\n for word in stuff:\r\n new_line.append(dictionary.get(word, 1))\r\n if len(new_line) > 0:\r\n new_data.append(new_line)\r\n return new_data",
"def extract_important(tweet_objects_list):\n # This section extracts important information such as most common hashtags\n hashtag_dictionary = {}\n for tweet in tweet_objects_list:\n if \"hashtags\" in tweet:\n for individual_hashtag in tweet[\"hashtags\"]:\n if not individual_hashtag[\"text\"].lower() in hashtag_dictionary:\n hashtag_dictionary[individual_hashtag[\"text\"].lower()] = 1\n else:\n hashtag_dictionary[individual_hashtag[\"text\"].lower()] += 1\n frequency = Counter(hashtag_dictionary)\n most_frequent_hashtags = frequency.most_common(50)\n\n user_dictionary = {}\n for tweet in tweet_objects_list:\n if \"user_mentions\" in tweet:\n for individual_user in tweet[\"user_mentions\"]:\n if not individual_user[\"screen_name\"] in user_dictionary:\n user_dictionary[individual_user[\"screen_name\"].lower()] = 1\n else:\n user_dictionary[individual_user[\"screen_name\"].lower()] += 1\n frequency = Counter(user_dictionary)\n most_frequent_users = frequency.most_common(50)\n symbol_dictionary = {}\n for tweet in tweet_objects_list:\n if \"symbols\" in tweet:\n for individual_symbol in tweet[\"symbols\"]:\n if not individual_symbol[\"text\"] in symbol_dictionary:\n symbol_dictionary[individual_symbol[\"text\"]] = 1\n else:\n symbol_dictionary[individual_symbol[\"text\"]] += 1\n frequency = Counter(symbol_dictionary)\n most_frequent_symbols = frequency.most_common(50)\n return most_frequent_hashtags, most_frequent_users, most_frequent_symbols",
"def userNames(lst, url, tableName):\n n = len(lst)\n # https://docs.python.org/3/library/itertools.html#itertools.product\n # https://stackoverflow.com/questions/3034014/how-to-apply-itertools-product-to-elements-of-a-list-of-lists\n lst2 = list(itertools.product(*lst))\n lst3 = list(map(\"\".join, lst2))\n #\n # Maybe use checkUsernameSequences here,\n # then add a check to reduce the amount of possibilities before building lst?\n #\n\n seq = checkUsernameSequences(n, lst, url, tableName, minLen = 2, maxLen = 2)\n # does not include the single characters since minLen > 1\n\n lst4 = filt(seq, lst3)\n \"\"\"# next time:\n find matching strings. That should (hopefully) reduce the space to search. \n REMEMBER, this filtering will miss all single character usernames!!!\n\n https://docs.python.org/3/library/re.html#regular-expression-syntax\n https://stackoverflow.com/questions/3640359/regular-expressions-search-in-list\n https://stackoverflow.com/questions/3040716/python-elegant-way-to-check-if-at-least-one-regex-in-list-matches-a-string\n https://stackoverflow.com/questions/19300020/python-match-a-string-with-regex\n https://stackoverflow.com/questions/37974047/if-any-strings-in-a-list-match-regex\n\"\"\"\n\n lst5 = [x for x in lst4 if checkUsername(x, url, tableName)]\n # lst = list(map(checkUsername, lst2))\n return lst5",
"def get_person_text(self, uid):\n words = \"\"\n\n query = \"\"\"\nSELECT ?overview ?researchO ?label\nWHERE\n{\n <%s> <http://vivoweb.org/ontology/core#overview> ?overview .\n <%s> <http://vivoweb.org/ontology/core#researchOverview> ?researchO .\n <%s> <http://www.w3.org/2000/01/rdf-schema#label> ?label .\n}\n \"\"\" % (uid, uid, uid)\n self.setQuery(query)\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n words = \"%s %s %s\" % (g['results']['bindings'][0]['overview']['value'], g['results']['bindings'][0]['researchO']['value'], g['results']['bindings'][0]['label']['value'])\n except:\n print \"Select failed: %s\" % query\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\nSELECT ?name\nWHERE\n{\n ?auth vivo:relates <%s> .\n ?auth rdf:type vivo:Authorship .\n ?auth vivo:relates ?art .\n filter (?art!=<%s>) .\n ?art <http://vivoweb.org/ontology/core#dateTimeValue> ?date .\n ?date <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?art rdfs:label ?name .\n}\nLIMIT 20\n\"\"\" % (uid, uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n\nSELECT ?name\nWHERE\n{\n ?grant vivo:relates <%s> .\n ?grant rdf:type vivo:Grant .\n ?grant <http://vivoweb.org/ontology/core#dateTimeInterval> ?date .\n ?date <http://vivoweb.org/ontology/core#end> ?end .\n ?end <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?grant rdfs:label ?name .\n}\n\n \"\"\" % (uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n\n\n\n return words",
"def userSuggestions(database):\n firstname=str(input(\"who do you want to have follow suggestions for :\"))\n usr,find=getByName(database,firstname)\n if not find:\n print(\"the User could not be found\")\n return\n else:\n following=[]\n followers=[]\n for folower in usr.folowed:\n followers.append(folower)\n for folowed in usr.folow:\n following.append(folowed)\n results=[]\n print(\"On what do you want your suggestions to be based on?\\n1. Mutual Interests\\n2. Mutual Connections\\n3. Both\")\n choice=int(input(\"Your choice :\"))\n for key ,usrs in database.items():\n if key not in following: \n correspondant=0\n if choice == 1 or choice == 3:\n for interest in usr.interest:\n if interest in usrs.interest:\n correspondant+=1\n if choice == 2 or choice == 3:\n for folower in followers:\n for folows in usrs.folowed:\n if key == folows:\n correspondant+=1\n results.append([key,correspondant])\n for i in range(len(results)):\n for j in range(0, len(results)-i-1):\n if results[j][1] > results[j+1][1] :\n results[j], results[j+1] = results[j+1], results[j]\n for k in range(5):\n print(results[k][0])",
"def process_query (file):\n\n # initialize all the dictionaries and lists we will be using\n query_data = {}\n query_data ['search'] = {'operations':[]}\n query_data ['filter'] = {}\n query_data ['present'] = {}\n\n temp = ''\n\n file.readline() # for when the file says SEARCH\n\n query_data ['search']['username'] = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'FILTER': # go until the the filter section\n query_data ['search']['operations'].append (temp)\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'PRESENT': # go until the present section\n # we make the key everything from the beginning to the first space\n # then the value is everything after the first space\n query_data ['filter'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != '': # go until the end of the file\n # same process as the previous while loop\n query_data ['present'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n return query_data",
"def merge_utterance_lines(utt_dict):\n new_utterances = {}\n for uid, utt in utt_dict.items():\n merged = False\n if utt.reply_to is not None and utt.speaker is not None:\n u0 = utt_dict[utt.reply_to]\n if u0.root == utt.root and u0.speaker == utt.speaker:\n new_utterances[u0.id].text += \" \" + utt.text\n merged = True\n if not merged:\n new_utterances[utt.id] = utt\n return new_utterances",
"def show_result(self, person):\n myopps = {}\n for entry in self.result.keys():\n if person in entry:\n parts = entry.split(':')\n if person == parts[0]:\n opp = parts[1]\n else:\n opp = parts[0]\n myopps[opp] = self.result[entry]\n slist = list(myopps.items())\n return sorted(slist, key=lambda x: x[1][1], reverse=True)",
"def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list",
"def getDetailedSpeakers(self, speakers):\n\n new_speakers = []\n try:\n for speaker in speakers:\n for user in self.users_data:\n if speaker[\"username\"] == user[\"username\"]:\n new_speakers.append(user)\n return new_speakers\n except KeyError as e:\n print(e)\n return \"Invalid\"",
"def search_user_entries(entry_by_user):\n\n ranks=Counter(dict())\n entry_by_user=entry_by_user.split()\n #complete_file_set=set()\n for entry in entry_by_user:\n availability_info=search_hash(entry,hashtable)\n if availability_info:\n ranks+=ranking(availability_info,fileID_to_names)\n else:\n sorted_display(None)\n #call ranking, pass availability_info\n #print ranks\n sorted_display(ranks)"
]
| [
"0.7329098",
"0.6643551",
"0.6021003",
"0.5802761",
"0.56354165",
"0.5613348",
"0.5397948",
"0.5390319",
"0.538293",
"0.53705984",
"0.5061108",
"0.5039742",
"0.50106114",
"0.49880728",
"0.4931404",
"0.4930936",
"0.49084416",
"0.48675862",
"0.48386815",
"0.48338753",
"0.48333615",
"0.4810279",
"0.4786728",
"0.47850448",
"0.47648543",
"0.47636887",
"0.4756631",
"0.47025767",
"0.46887863",
"0.4685363"
]
| 0.77881116 | 0 |
(Twitterverse dictionary, list of str, function) -> NoneType Sort the results list using the comparison function cmp and the data in twitter_data. >>> twitter_data = {\ | def tweet_sort(twitter_data, results, cmp):
# Insertion sort
for i in range(1, len(results)):
current = results[i]
position = i
while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:
results[position] = results[position - 1]
position = position - 1
results[position] = current | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current",
"def sort_tweets(tweets):\n tweets.sort(key=lambda x: x.get_date())\n return tweets",
"def sort_results(boxes):\n return sorted(results[k], key=lambda x : x['score'], reverse=True)",
"def sorted_data():\n stock_data = scrape_data()\n filtered_data = list(filter(sort_func, stock_data))\n return filtered_data",
"def sort_results(self):\n pass",
"def order_scores(doctors):\n\n # return doctors.sort(key=operator.methodcaller('get_review_score'))\n # print doctors\n print\n print\n ret_docs = sorted(doctors, key=operator.itemgetter('review_score'), reverse=True)\n # ret_docs = doctors.sort(key=lambda k: k['review_score'])\n # print ret_docs\n return ret_docs",
"def sort_data(data):\n data.sort(key=itemgetter(3,2))\n return data",
"def FilterAndSort(entries, domain):\n result = filter(\n lambda x: ((domain is None or x.main_page_url == domain)\n and x.confidence > .7\n and x.number_of_hits >= 2), entries)\n return sorted(result, key=operator.attrgetter('score'), reverse=True)",
"def sort_by_popularity(tweets: list) -> list:\n tweets_by_popularity = sorted(tweets, key=lambda x: (x.retweets, -x.time), reverse=True) # Use lambda functions when an anonymous function is required for a short period of time.\n return tweets_by_popularity",
"def _sort_results(self, results: dict) -> List:\n return [results[url][\"display_name\"] for url in self.urls_list]",
"def data_for_sorting() -> NoReturn:\n raise NotImplementedError",
"def data_for_sorting() -> NoReturn:\n raise NotImplementedError",
"def get_search_results (twitter_data, search_data):\n\n search_list = [search_data['username']] # start with the first username\n temp = [] # initialize\n\n for operation in search_data['operations']: # go through every operation\n for username in search_list:\n if operation == 'following':\n for name in twitter_data[username]['following']:\n if not name in temp:\n temp.append (name)\n\n elif operation == 'followers':\n for name in all_followers (twitter_data, username):\n if not name in temp:\n temp.append (name)\n\n search_list = temp\n temp = []\n search_list.sort() # sort the list alphabetically for testing purposes\n return search_list",
"def get_filter_results (twitter_data, search_list, filter_data):\n\n #initialize\n filter_list = []\n\n for operation in filter_data:\n if operation == 'name-includes':\n for username in search_list:\n # since case doesnt matter, eveything is made uppercase and\n # then is checked\n if filter_data [operation].upper() in \\\n twitter_data [username]['name'].upper():\n filter_list.append (username)\n\n elif operation == 'location-includes':\n for username in search_list:\n # same case as above\n if filter_data [operation].upper() in \\\n twitter_data [username]['location'].upper():\n filter_list.append (username)\n\n elif operation == 'follower':\n for username in search_list:\n if username in \\\n twitter_data[filter_data [operation]]['following']:\n filter_list.append (username)\n\n elif operation == 'following':\n for username in search_list:\n if username in all_followers(twitter_data, filter_data[operation]):\n filter_list.append (username)\n\n search_list = filter_list\n filter_list = []\n\n filter_list = search_list\n filter_list.sort() # sort the list alphabetically for testing purposes\n\n return filter_list",
"def _sort_results(\n self, results: Dict[tuple, List[dict]]\n ) -> List[List[tuple[int, float, float]]]:\n\n sorted_weather = [results[url] for url in self.urls_list]\n all_results = []\n\n for num, location in enumerate(self.locations):\n city_weather_list = sorted_weather[num * 6 : (num + 1) * 6]\n city_result = [\n (\n city_weather_list[0][\"daily\"][day][\"dt\"],\n city_weather_list[0][\"daily\"][day][\"temp\"][\"min\"],\n city_weather_list[0][\"daily\"][day][\"temp\"][\"max\"],\n )\n for day in range(6)\n ]\n\n for day in range(1, 6):\n weather = city_weather_list[day]\n temp = [w[\"temp\"] for w in weather[\"hourly\"]]\n city_result.append((weather[\"current\"][\"dt\"], min(temp), max(temp)))\n\n all_results.append(sorted(city_result, key=lambda x: x[0]))\n\n return all_results",
"def _sort_by_price(self, data):\n # Separate the data by currency\n alch = []\n fusing = []\n chaos = []\n exalted = []\n \n for item in data:\n price = item['price']\n if \"alchemy\" in price:\n alch.append(item)\n elif \"fusing\" in price:\n fusing.append(item)\n elif \"chaos\" in price:\n chaos.append(item)\n elif \"exalted\" in price:\n exalted.append(item)\n \n alch = natsorted(alch, key=lambda item: item['price'])\n fusing = natsorted(fusing, key=lambda item: item['price'])\n chaos = natsorted(chaos, key=lambda item: item['price'])\n exalted = natsorted(exalted, key=lambda item: item['price'])\n \n result = []\n result.extend(alch)\n result.extend(fusing)\n result.extend(chaos)\n result.extend(exalted)\n return result",
"def _toposort_with_ordered_mech_tuples(self, data):\n result = []\n for dependency_set in toposort(data):\n d_iter = iter(dependency_set)\n result.extend(sorted(dependency_set, key=lambda item : next(d_iter).mechanism.name))\n return result",
"def score_tweets(objects):\n scores = {}\n for tweet in objects:\n data = tweet._json\n rt = data['retweet_count']\n fave = data['favorite_count']\n fol = data['user']['followers_count']\n weight = 1.5\n score = ((weight * rt + fave) / (fol / 2)) * 1000\n scores[score] = data['id']\n embeds = []\n for item in sorted(scores.items(), reverse=True)[:13]: #sorted returns tuple\n embed = twitter.get_oembed(id=item[1],align='center')\n embeds.append(embed['html'])\n return embeds",
"def sort_results(results_list, sorting_type):\n if sorting_type == \"Oldest\":\n return sort_results_by_date(results_list, False)\n elif sorting_type == \"Description A-Z\":\n return sort_results_alphabetically_on_description(results_list, False)\n elif sorting_type == \"Description Z-A\":\n return sort_results_alphabetically_on_description(results_list, True)\n else:\n return sort_results_by_date(results_list, True)",
"def sortby(self):\n ...",
"def sort_object_info(results, sortkey):\n\n if sortkey == \"unsorted\":\n return results\n elif sortkey == \"name\":\n return sorted(results, key=lambda r: r[\"name\"])\n elif sortkey == \"ext\":\n def _get_ext(n):\n # Get extension for sorting\n if n[\"type\"] == \"dataobject\":\n return n[\"name\"].split(\".\")[-1]\n else:\n # Use name for sorting collections\n return n[\"name\"]\n\n return sorted(results, key=_get_ext)\n elif sortkey == \"size\":\n return sorted(results, key=lambda k: k.get(\"size\", 0))\n elif sortkey == \"date\":\n return sorted(results, key=lambda k: k.get(\"modify_time\", 0))\n else:\n exit_with_error(\"Sort option {} not supported.\".format(sortkey))",
"def test_sort_data_by_time():\n data = race.read_file_to_list()\n sorted_data = race.sort_data_by_time(data)\n assert data != sorted_data\n assert len(data) == len(sorted_data)\n assert type(sorted_data) == list\n for lines in sorted_data:\n assert type(lines) == dict",
"def get_filter_results(twitter_dict, username_list, filter_dict):\r\n twitter_handles = username_list \r\n name_filtered_list = []\r\n upper_user = []\r\n \r\n if 'name_includes' in filter_dict: \r\n for user in twitter_handles: \r\n user = user.upper()\r\n upper_user.append(user)\r\n name = filter_dict['name_includes']\r\n \r\n for uName in username_list:\r\n if name.upper() == uName.upper():\r\n name_filtered_list.append(name) \r\n \r\n twitter_handles = name_filtered_list \r\n \r\n location_filtered_list = []\r\n if 'location_includes' in filter_dict: \r\n for user in twitter_handles: \r\n location = filter_dict['location_includes']\r\n if location.upper() == twitter_dict[user]['location'].upper(): \r\n location_filtered_list.append(user) \r\n twitter_handles = location_filtered_list\r\n \r\n follower_filtered_list = []\r\n if 'follower' in filter_dict:\r\n for user in twitter_handles:\r\n for follower in twitter_dict[user]['following']:\r\n if follower == filter_dict['follower']:\r\n #if follower in twitter_handles:\r\n follower_filtered_list.append(user)\r\n \r\n twitter_handles = follower_filtered_list \r\n \r\n following_filtered_list = []\r\n if 'following' in filter_dict:\r\n for user in twitter_handles: \r\n following_list = all_followers(twitter_dict, user)\r\n for follower in following_list: \r\n if follower in twitter_handles: \r\n following_filtered_list.append(follower) \r\n twitter_handles = following_filtered_list \r\n \r\n return twitter_handles",
"def _sort_torrents(ctx, torrent_list, sort_type):\n\n if sort_type == 'seeders':\n return sorted(torrent_list, key=lambda t: t['seeders'], reverse=True)",
"def compare(first, second):\n for i in data:\n if(i['name'] == first ):\n first_num = i['follower_count']\n if(i['name'] == second):\n second_num = i['follower_count']\n if first_num > second_num:\n return 'a'\n else:\n return 'b'",
"def sort_results(metric_results):\n\n means, stds, params_list = metric_results\n dtype = [('index', int), ('params_list', object), ('std', float), ('mean', float)]\n\n #Sort will fail when attempting to rank based on the\n #dictionary 'params_list' when encountering identical mean and\n #standard deviations. To avoid this, use a list of distinct\n #integers to break the tie.\n values = zip(range(len(means)), params_list, stds, means)\n\n a = np.sort(np.array(list(values), dtype=dtype),\n kind='mergesort', order=['mean', 'std', 'index'])\n return np.flip(a, axis=-1)",
"def main(str_text):\n\n frequencies = count_value(str_text)\n sorted_data = sort_dict(frequencies)\n\n return sorted_data",
"def get_sorted_results(self):\n results = self.results.values()\n return sorted(results, key=lambda r: r.rank(), reverse=True)",
"def sort_rain_dictionary(raindata_dict_from_urlstream):\n\traindata_from_dictionary_to_sort = raindata_dict_from_urlstream.items() #[ [key, value] for key, value in raindata_dict.items() ] # .items() already gives the unpacking of the dictionary items\n\tsorted_dictionary_raindata = sorted(raindata_from_dictionary_to_sort, reverse=True) # Sort accept a second arguement kyword reverse=True Do not leave space on KEY assignment here only\n\treturn sorted_dictionary_raindata",
"def pubs_by_articles_published( data ) :\n # let's be Pythonic and use counter\n result = [ (k,v) for k,v in Counter([x['SO'] for x in data]).iteritems() ]\n # now sort\n result.sort( lambda a,b : cmp(b[1],a[1]) )\n return result"
]
| [
"0.7567489",
"0.650274",
"0.6290785",
"0.62436324",
"0.623184",
"0.61069995",
"0.60515857",
"0.59873897",
"0.5878488",
"0.5872183",
"0.58214015",
"0.58214015",
"0.57846797",
"0.5771403",
"0.5755637",
"0.57172424",
"0.5692742",
"0.56895584",
"0.56837285",
"0.56682867",
"0.5616115",
"0.56026036",
"0.55884874",
"0.55380154",
"0.5509384",
"0.5489393",
"0.54822695",
"0.5438575",
"0.54144394",
"0.54113036"
]
| 0.76030976 | 0 |
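Aside (not part of the dataset): the record above pairs an insertion sort with a three-way comparator cmp(twitter_data, a, b). The sketch below is a minimal, self-contained illustration of that pattern; the name_first-style comparator and the sample twitter_data dict are assumptions made only for this example.

# Illustrative sketch only -- the sample data below is hypothetical.
def tweet_sort(twitter_data, results, cmp):
    # Insertion sort driven by a three-way comparator cmp(twitter_data, a, b).
    for i in range(1, len(results)):
        current = results[i]
        position = i
        while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:
            results[position] = results[position - 1]
            position = position - 1
        results[position] = current

def by_name(twitter_data, a, b):
    # Compare by display name, falling back to username order on ties.
    a_name, b_name = twitter_data[a]["name"], twitter_data[b]["name"]
    if a_name != b_name:
        return -1 if a_name < b_name else 1
    return -1 if a < b else (1 if a > b else 0)

twitter_data = {
    "tomCruise": {"name": "Tom Cruise"},
    "katieH": {"name": "Katie Holmes"},
    "NicoleKidman": {"name": "Nicole Kidman"},
}
results = ["tomCruise", "NicoleKidman", "katieH"]
tweet_sort(twitter_data, results, by_name)
print(results)  # ['katieH', 'NicoleKidman', 'tomCruise'] -- ordered by display name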
(Twitterverse dictionary, str, str) -> int Return -1 if user a has more followers than user b, 1 if fewer followers, and the result of sorting by username if they have the same, based on the data in twitter_data. >>> twitter_data = {\ | def more_popular(twitter_data, a, b):
a_popularity = len(all_followers(twitter_data, a))
b_popularity = len(all_followers(twitter_data, b))
if a_popularity > b_popularity:
return -1
if a_popularity < b_popularity:
return 1
return username_first(twitter_data, a, b) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def more_popular(twitter_data, a, b):\n\n a_popularity = len(all_followers(twitter_data, a))\n b_popularity = len(all_followers(twitter_data, b))\n if a_popularity > b_popularity:\n return -1\n if a_popularity < b_popularity:\n return 1\n return username_first(twitter_data, a, b)",
"def compare(first, second):\n for i in data:\n if(i['name'] == first ):\n first_num = i['follower_count']\n if(i['name'] == second):\n second_num = i['follower_count']\n if first_num > second_num:\n return 'a'\n else:\n return 'b'",
"def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]",
"def all_followers (twitter_data, username):\n\n # initialize\n followers = []\n\n for key in twitter_data: # go through every username in twitter_data\n if username in twitter_data [key]['following']: # check each 'following'\n followers.append (key)\n\n followers.sort() # sort the list alphabetically for testing purposes\n return followers",
"def users_being_followed_tweets():\n username = request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200",
"def username_first(twitter_data, a, b):\n\n if a < b:\n return -1\n if a > b:\n return 1\n return 0",
"def username_first(twitter_data, a, b):\r\n \r\n if a < b:\r\n return -1\r\n if a > b:\r\n return 1\r\n return 0",
"def follow_closely(api_, follow_username):\n big_list = True\n max_id = ''\n following = []\n\n while big_list:\n api_.getSelfUsersFollowing(maxid=max_id)\n followers_ = api_.LastJson\n for f in followers_['users']:\n following.append(f)\n big_list = followers_['big_list']\n if not big_list:\n break\n # this key only exists if there is more pages\n max_id = followers_['next_max_id']\n\n for f in following:\n if f['username'] == follow_username:\n return True, f",
"def followed_by_hillary_and_donald(users, twitter):\n ###TODO-- Completed\n for user in users:\n if user['screen_name'] == 'HillaryClinton':\n friends_Hillary = user['friends']\n #print(len(friends_Hillary))\n elif user['screen_name'] == 'realDonaldTrump':\n friends_donald = user['friends']\n #print(len(friends_donald))\n\n common_followed_id = list(set(friends_Hillary) & set(friends_donald))\n\n commn_followed_user = robust_request(twitter,'users/lookup',{'user_id':common_followed_id}).json()\n #print(commn_followed_user[0]['screen_name'])#['screen_name'])\n return commn_followed_user[0]['screen_name']\n #pass",
"def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list",
"def get_followers(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)",
"def get_followers1(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api1.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api1.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)",
"def name_first(twitter_data, a, b):\n\n a_name = twitter_data[a][\"name\"]\n b_name = twitter_data[b][\"name\"]\n if a_name < b_name:\n return -1\n if a_name > b_name:\n return 1\n return username_first(twitter_data, a, b)",
"def name_first(twitter_data, a, b):\r\n \r\n a_name = twitter_data[a][\"name\"]\r\n b_name = twitter_data[b][\"name\"]\r\n if a_name < b_name:\r\n return -1\r\n if a_name > b_name:\r\n return 1\r\n return username_first(twitter_data, a, b)",
"def most_popular(user_to_tweet: Dict[str, List[tuple]], date1: int, date2: int)\\\n -> str:\n user_to_pop = {}\n most_popular_user = ''\n count = 0\n \n \n for user in user_to_tweet:\n popularity = 0\n for i in range(len(user_to_tweet[user])):\n if user_to_tweet[user][i][TWEET_DATE_INDEX] >= date1 and\\\n user_to_tweet[user][i][TWEET_DATE_INDEX] <= date2:\n popularity = popularity + \\\n user_to_tweet[user][i][TWEET_FAVOURITE_INDEX] +\\\n user_to_tweet[user][i][TWEET_RETWEET_INDEX]\n user_to_pop[user] = popularity\n \n for users in user_to_pop:\n if user_to_pop[users] == \\\n max(user_to_pop.values()):\n most_popular_user = most_popular_user + users\n count = count + 1\n \n if count > 1:\n return 'tie'\n return most_popular_user",
"def followed_by_hillary_and_donald(users, twitter):\n\n str = ''\n set1 = set()\n set2 = set()\n for u_dict in users:\n \tif u_dict['screen_name'] == 'HillaryClinton':\n \t\tset1 = set(u_dict['friends'])\n \telif u_dict['screen_name'] == 'realDonaldTrump':\n \t\tset2 = set(u_dict['friends'])\n \t\t\n common = set.intersection(set1, set2)\n request = robust_request(twitter, 'users/lookup', {'user_id': common}, max_tries=5)\n for user in request:\n \tstr = user['screen_name']\t\n return str",
"def proximity(user_a: TwitscanUser, user_b: TwitscanUser) -> tuple[float, ...]:\n global cache\n for user in (user_a, user_b):\n assert check_user_id(user.user_id) is not None, f\"User {user} not in db\"\n\n if cache.get(user_a.user_id) is None:\n entourage_a = set(map(lambda ent: ent.friend_follower_id, user_a.entourage))\n hashtags_a = hashtags_used(user_a)\n cache[user_a.user_id] = CacheRecord(entourage=entourage_a, hashtags=hashtags_a)\n else:\n cr = cache[user_a.user_id]\n entourage_a = cr[\"entourage\"]\n hashtags_a = cr[\"hashtags\"]\n\n a_mentions_b, a_mentions_counter = n_mentions(user_a, user_b.user_id)\n a_favs_b, a_rt_b, a_cmt_b = n_interactions(user_a, user_b.user_id)\n\n entourage_b = set([ent.friend_follower_id for ent in user_b.entourage])\n hashtags_b = hashtags_used(user_b)\n b_mentions_a, b_mentions_counter = n_mentions(user_b, user_a.user_id)\n b_favs_a, b_rt_a, b_cmt_a = n_interactions(user_b, user_a.user_id)\n\n ent_a_len = len(entourage_a)\n ent_b_len = len(entourage_b)\n ent_len = ent_b_len + ent_a_len\n\n hash_a_len = len(hashtags_a)\n hash_b_len = len(hashtags_b)\n hash_len = hash_b_len + hash_a_len\n # weigh common entourage / hashtags by number of entourage acquired / hashtags used\n common_entourage = len(entourage_a.intersection(entourage_b)) / ent_len if ent_len != 0 else 0\n common_hashtags = len(hashtags_a.intersection(hashtags_b)) / hash_len if hash_len != 0 else 0\n\n total_mentions = a_mentions_b + b_mentions_a\n total_favs = a_favs_b + b_favs_a\n total_rts = a_rt_b + b_rt_a\n total_cmts = a_cmt_b + b_cmt_a\n\n return (\n a_mentions_b,\n b_mentions_a,\n a_favs_b,\n b_favs_a,\n a_rt_b,\n b_rt_a,\n a_cmt_b,\n b_cmt_a,\n ent_a_len,\n ent_len,\n hash_a_len,\n hash_b_len,\n hash_len,\n common_entourage,\n len(entourage_a),\n len(entourage_b),\n common_hashtags,\n len(hashtags_a),\n len(hashtags_b),\n a_mentions_b,\n b_mentions_a,\n a_mentions_counter,\n b_mentions_counter,\n a_favs_b,\n b_favs_a,\n user_a.favorites_count,\n user_b.favorites_count,\n a_rt_b,\n b_rt_a,\n a_cmt_b,\n b_cmt_a,\n )",
"def most_likes(data):\r\n max_likes = 0\r\n for key in data:\r\n num_likes = len(data[key])\r\n if num_likes >= max_likes:\r\n max_likes = num_likes\r\n most_likes_users = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n num_likes = len(data[key])\r\n if num_likes == max_likes:\r\n most_likes_users += [key]\r\n return most_likes_users",
"def tweets_following_users(username):\n user_profile = query_db('select * from user where username = ?',\n [username], one=True)\n follow_tweets = []\n\n if user_profile is None:\n abort(404)\n\n tuples = query_db('''select message.* from message, follower where\n follower.whom_id = message.author_id and follower.who_id = ?\n order by message.pub_date desc limit ?''', [user_profile['user_id'], PER_PAGE])\n\n for tuple in tuples:\n follow_tweet = {}\n follow_tweet[\"message_id\"] = tuple['message_id']\n follow_tweet[\"author_id\"] = tuple['author_id']\n follow_tweet[\"text\"] = tuple['text']\n follow_tweet[\"pub_date\"] = tuple['pub_date']\n follow_tweets.append(follow_tweet)\n\n return jsonify({'follow_tweets': follow_tweets}), 200",
"def follower_followee_count(user_id):\n profile = instaloader.Profile.from_username(context(), user_id)\n return {'follower_cnt': profile.followers,\n 'followee_cnt': profile.followees}",
"def follows_target_check(twitter,top_followers_list):\n yes_follow_list = []\n not_follow_list = []\n following_dict = {}\n target = 'HillaryClinton'\n \n for user in top_followers_list:\n params = {'source_id':user, 'target_screen_name':target}\n response = twitter.request('friendships/show', params)\n data = response.json()\n #print(\"DATAAA::\",data)\n if response.status_code == 200:\n #print(\"IN BIGG IFFFFF:::\")\n following_dict = data['relationship']['source']\n #print(\"following_dict::\",following_dict)\n check = following_dict['following']\n #print(\"check::\",check)\n if check:\n #print(\"IN IFFFFF:::\")\n yes_follow_list.append(user)\n \n else:\n #print(\"IN ELSEEEE:::\")\n not_follow_list.append(user)\n \n else:\n print('Got error %s \\nsleeping for 15 minutes.' % response.text)\n sys.stderr.flush()\n time.sleep(61 * 15)\n \n print(\"YES_LIST:::\",yes_follow_list) \n print(\"NO_LIST:::\",not_follow_list) \n return not_follow_list",
"def get_filter_results(twitter_dict, username_list, filter_dict):\r\n twitter_handles = username_list \r\n name_filtered_list = []\r\n upper_user = []\r\n \r\n if 'name_includes' in filter_dict: \r\n for user in twitter_handles: \r\n user = user.upper()\r\n upper_user.append(user)\r\n name = filter_dict['name_includes']\r\n \r\n for uName in username_list:\r\n if name.upper() == uName.upper():\r\n name_filtered_list.append(name) \r\n \r\n twitter_handles = name_filtered_list \r\n \r\n location_filtered_list = []\r\n if 'location_includes' in filter_dict: \r\n for user in twitter_handles: \r\n location = filter_dict['location_includes']\r\n if location.upper() == twitter_dict[user]['location'].upper(): \r\n location_filtered_list.append(user) \r\n twitter_handles = location_filtered_list\r\n \r\n follower_filtered_list = []\r\n if 'follower' in filter_dict:\r\n for user in twitter_handles:\r\n for follower in twitter_dict[user]['following']:\r\n if follower == filter_dict['follower']:\r\n #if follower in twitter_handles:\r\n follower_filtered_list.append(user)\r\n \r\n twitter_handles = follower_filtered_list \r\n \r\n following_filtered_list = []\r\n if 'following' in filter_dict:\r\n for user in twitter_handles: \r\n following_list = all_followers(twitter_dict, user)\r\n for follower in following_list: \r\n if follower in twitter_handles: \r\n following_filtered_list.append(follower) \r\n twitter_handles = following_filtered_list \r\n \r\n return twitter_handles",
"def generate_tweet_scores(data):\n max_rt = 0\n max_likes = 0\n rt = {}\n likes = {}\n for i in data:\n max_rt = max(data[i][\"retweet_count\"], max_rt)\n max_likes = max(data[i][\"favorite_count\"], max_likes)\n rt[i] = data[i][\"retweet_count\"]\n likes[i] = data[i][\"favorite_count\"]\n for i in data:\n if max_rt > 0:\n rt[i] = rt[i]/max_rt\n if max_likes > 0:\n likes[i] = likes[i]/max_likes\n return rt, likes",
"def follows(self, other):\n\t\treturn self.followed.filter(followers.c.followed_by == other.id).count() > 0",
"def getFollowers():\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = GetInstagramAnswer.igApi.getUserFollowers(GetInstagramAnswer.igApi.username_id, maxid=next_max_id)\n followers.extend(GetInstagramAnswer.igApi.LastJson.get('users',[]))\n next_max_id = GetInstagramAnswer.igApi.LastJson.get('next_max_id','')\n return \"You have currently \"+str(len(followers))+\" Followers on Instagram.\"",
"def prepare_process_like_and_follow(self):\n follow = []\n media = []\n unfollow = []\n\n coef = self.users_to_follow / self.limits_per_hour.get('follow', 1)\n media_to_like = round(coef*self.limits_per_hour.get('like'))\n num_to_unfollow = round(coef*self.limits_per_hour.get('unfollow'))\n\n feed_likes = media_to_like // 2\n feed_likes_list = []\n following_likes = round((media_to_like // 2) * 3 / 4)\n following_likes_list = []\n followers_likes = media_to_like - feed_likes - following_likes\n\n monitored_ids = [i[\"user\"] for i in self.monitored_users]\n\n for posts in self.hashtag_feed_list(self.search_hashtags):\n if len(follow) < self.users_to_follow:\n for m in posts:\n if self.check_if_suit(m):\n user_id, username = self.get_user_from_post(m)\n if user_id and user_id not in [i[\"user\"] for i in follow] \\\n and user_id not in monitored_ids:\n follow.append({'user': user_id, 'username': username})\n following_likes_list.append(m)\n\n if len(follow) >= self.users_to_follow:\n break\n\n for p in following_likes_list:\n if p in posts:\n posts.remove(p)\n\n if feed_likes > 0:\n if len(posts) > feed_likes:\n feed_likes_list.extend([i['id'] for i in (random.choice(posts) for _ in range(feed_likes))\n if i['id'] not in feed_likes_list])\n else:\n feed_likes_list.extend([i['id'] for i in posts[:feed_likes] if i['id'] not in feed_likes_list])\n feed_likes -= len(feed_likes_list)\n if feed_likes <= 0:\n if len(follow) >= self.users_to_follow:\n break\n if len(follow) >= self.users_to_follow and feed_likes <= 0:\n break\n\n media.extend(feed_likes_list)\n\n if len(following_likes_list) < following_likes:\n followings = []\n get_n_followings = following_likes - len(following_likes_list)\n if following_likes_list:\n following_likes_list = [self.get_media_id_from_post(i) for i in following_likes_list]\n following_likes_list.extend([i for i in self.get_following_likes(followings, get_n_followings)\n if i and i not in media])\n media.extend(following_likes_list)\n else:\n media.extend([self.get_media_id_from_post(i) for i in following_likes_list[:following_likes]])\n\n media.extend([i for i in self.get_followers_likes(followers_likes) if i and i not in media])\n\n unfollow = self.get_to_unfollow(unfollow, num_to_unfollow)\n\n return follow, media, unfollow",
"def update_insta_followers_info(self):\n\n cur_following = self.GSpread.sheet_to_df('kontstats',\n 'INSTA_FOLLOWERS')\n new_following = self.Instagram.get_followers_df()\n\n cur_followers = set(cur_following.username.values)\n new_followers = set(new_following.username.values)\n\n who_left = list(cur_followers.difference(new_followers))\n who_joined = list(new_followers.difference(cur_followers))\n\n if len(who_left) > 0:\n self.GSpread.write_raw_log('INSTAGRAM',\n '',\n 'FOLLOWERS_LEFT',\n ', '.join(who_left))\n\n if len(who_joined) > 0:\n self.GSpread.write_raw_log('INSTAGRAM',\n '',\n 'FOLLOWERS_JOINED',\n ', '.join(who_joined))\n\n if (len(who_left) > 0) or (len(who_joined) > 0):\n self.GSpread.df_to_sheet('kontstats',\n 'INSTA_FOLLOWERS',\n new_following)",
"def totFavandRetweets(congressDict, twitterAPI):\n FandRDict = {}\n for name in congressDict:\n FandRDict[name] = [0, 0] # Assign a beginning value for each congress member.\n for status in twitterAPI.user_timeline(screen_name=congressDict[name], count = 10): # Parse through each tweet's detais.\n FandRDict[name] = [FandRDict[name][0] + status._json[\"favorite_count\"] # Add the current tweets fav. and rt's to the current value.\n ,FandRDict[name][1] + status._json[\"retweet_count\"]]\n return FandRDict",
"def assign_popularity_to_tweet(self, influencer, tweet):\n twNoLike = self.userTweetsStat[influencer][0][tweet]['like']\n twNoRt = self.userTweetsStat[influencer][0][tweet]['RT']\n twNoFlwr = self.userTweetsStat[influencer][0][tweet]['follower']\n twPopularity = (twNoLike + 2*twNoRt)/twNoFlwr\n \n return twPopularity",
"def check_the_guess(guess, a_followers, b_followers):\n if a_followers > b_followers:\n return guess == \"a\"\n else:\n return guess == \"b\""
]
| [
"0.7528208",
"0.69588375",
"0.66971797",
"0.65298504",
"0.6497349",
"0.6357412",
"0.63211817",
"0.6200824",
"0.61589175",
"0.61370516",
"0.61224115",
"0.61210734",
"0.60854435",
"0.6016705",
"0.60084724",
"0.59806305",
"0.5883615",
"0.58259416",
"0.57227236",
"0.5703999",
"0.57023674",
"0.5682743",
"0.5658896",
"0.564864",
"0.5547252",
"0.55116946",
"0.5498972",
"0.5492998",
"0.54826486",
"0.54809535"
]
| 0.7497807 | 1 |
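Aside (not part of the dataset): the popularity comparator in the record above returns -1 exactly when the first user has strictly more followers, which is what lets it plug into the insertion sort shown earlier. The all_followers helper and the sample data in this sketch are assumptions based on the surrounding records, not dataset content.

# Illustrative sketch only -- hypothetical data; 'following' lists are the only field used here.
def all_followers(twitter_data, username):
    # Everyone whose 'following' list contains username is a follower of username.
    return sorted(u for u in twitter_data if username in twitter_data[u]["following"])

def more_popular(twitter_data, a, b):
    a_pop = len(all_followers(twitter_data, a))
    b_pop = len(all_followers(twitter_data, b))
    if a_pop != b_pop:
        return -1 if a_pop > b_pop else 1
    return -1 if a < b else (1 if a > b else 0)  # tie: fall back to username order

twitter_data = {
    "a": {"following": ["b"]},
    "b": {"following": []},
    "c": {"following": ["b", "a"]},
}
print(more_popular(twitter_data, "b", "a"))  # -1: 'b' has two followers, 'a' has one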
(Twitterverse dictionary, str, str) -> int Return 1 if user a has a username that comes after user b's username alphabetically, -1 if user a's username comes before user b's username, and 0 if a tie, based on the data in twitter_data. >>> twitter_data = {\ | def username_first(twitter_data, a, b):
if a < b:
return -1
if a > b:
return 1
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def username_first(twitter_data, a, b):\n\n if a < b:\n return -1\n if a > b:\n return 1\n return 0",
"def name_first(twitter_data, a, b):\n\n a_name = twitter_data[a][\"name\"]\n b_name = twitter_data[b][\"name\"]\n if a_name < b_name:\n return -1\n if a_name > b_name:\n return 1\n return username_first(twitter_data, a, b)",
"def name_first(twitter_data, a, b):\r\n \r\n a_name = twitter_data[a][\"name\"]\r\n b_name = twitter_data[b][\"name\"]\r\n if a_name < b_name:\r\n return -1\r\n if a_name > b_name:\r\n return 1\r\n return username_first(twitter_data, a, b)",
"def more_popular(twitter_data, a, b):\n\n a_popularity = len(all_followers(twitter_data, a))\n b_popularity = len(all_followers(twitter_data, b))\n if a_popularity > b_popularity:\n return -1\n if a_popularity < b_popularity:\n return 1\n return username_first(twitter_data, a, b)",
"def more_popular(twitter_data, a, b):\r\n \r\n a_popularity = len(all_followers(twitter_data, a)) \r\n b_popularity = len(all_followers(twitter_data, b))\r\n if a_popularity > b_popularity:\r\n return -1\r\n if a_popularity < b_popularity:\r\n return 1\r\n return username_first(twitter_data, a, b)",
"def compare(first, second):\n for i in data:\n if(i['name'] == first ):\n first_num = i['follower_count']\n if(i['name'] == second):\n second_num = i['follower_count']\n if first_num > second_num:\n return 'a'\n else:\n return 'b'",
"def followed_by_hillary_and_donald(users, twitter):\n\n str = ''\n set1 = set()\n set2 = set()\n for u_dict in users:\n \tif u_dict['screen_name'] == 'HillaryClinton':\n \t\tset1 = set(u_dict['friends'])\n \telif u_dict['screen_name'] == 'realDonaldTrump':\n \t\tset2 = set(u_dict['friends'])\n \t\t\n common = set.intersection(set1, set2)\n request = robust_request(twitter, 'users/lookup', {'user_id': common}, max_tries=5)\n for user in request:\n \tstr = user['screen_name']\t\n return str",
"def all_followers (twitter_data, username):\n\n # initialize\n followers = []\n\n for key in twitter_data: # go through every username in twitter_data\n if username in twitter_data [key]['following']: # check each 'following'\n followers.append (key)\n\n followers.sort() # sort the list alphabetically for testing purposes\n return followers",
"def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current",
"def followed_by_hillary_and_donald(users, twitter):\n ###TODO-- Completed\n for user in users:\n if user['screen_name'] == 'HillaryClinton':\n friends_Hillary = user['friends']\n #print(len(friends_Hillary))\n elif user['screen_name'] == 'realDonaldTrump':\n friends_donald = user['friends']\n #print(len(friends_donald))\n\n common_followed_id = list(set(friends_Hillary) & set(friends_donald))\n\n commn_followed_user = robust_request(twitter,'users/lookup',{'user_id':common_followed_id}).json()\n #print(commn_followed_user[0]['screen_name'])#['screen_name'])\n return commn_followed_user[0]['screen_name']\n #pass",
"def detect_author(user_to_tweets: Dict[str, List[tuple]], tweet_text: str) -> \\\n str:\n acc = []\n \n for keys in user_to_tweets:\n author_hashes = hashtag_seperator(user_to_tweets[keys])\n text_hashes = extract_hashtags(tweet_text)\n if set(text_hashes).issubset(author_hashes):\n acc.append(keys)\n if len(acc) == 1:\n return acc[0]\n return 'unknown'",
"def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current",
"def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]",
"def get_filter_results(twitter_dict, username_list, filter_dict):\r\n twitter_handles = username_list \r\n name_filtered_list = []\r\n upper_user = []\r\n \r\n if 'name_includes' in filter_dict: \r\n for user in twitter_handles: \r\n user = user.upper()\r\n upper_user.append(user)\r\n name = filter_dict['name_includes']\r\n \r\n for uName in username_list:\r\n if name.upper() == uName.upper():\r\n name_filtered_list.append(name) \r\n \r\n twitter_handles = name_filtered_list \r\n \r\n location_filtered_list = []\r\n if 'location_includes' in filter_dict: \r\n for user in twitter_handles: \r\n location = filter_dict['location_includes']\r\n if location.upper() == twitter_dict[user]['location'].upper(): \r\n location_filtered_list.append(user) \r\n twitter_handles = location_filtered_list\r\n \r\n follower_filtered_list = []\r\n if 'follower' in filter_dict:\r\n for user in twitter_handles:\r\n for follower in twitter_dict[user]['following']:\r\n if follower == filter_dict['follower']:\r\n #if follower in twitter_handles:\r\n follower_filtered_list.append(user)\r\n \r\n twitter_handles = follower_filtered_list \r\n \r\n following_filtered_list = []\r\n if 'following' in filter_dict:\r\n for user in twitter_handles: \r\n following_list = all_followers(twitter_dict, user)\r\n for follower in following_list: \r\n if follower in twitter_handles: \r\n following_filtered_list.append(follower) \r\n twitter_handles = following_filtered_list \r\n \r\n return twitter_handles",
"def get_search_results (twitter_data, search_data):\n\n search_list = [search_data['username']] # start with the first username\n temp = [] # initialize\n\n for operation in search_data['operations']: # go through every operation\n for username in search_list:\n if operation == 'following':\n for name in twitter_data[username]['following']:\n if not name in temp:\n temp.append (name)\n\n elif operation == 'followers':\n for name in all_followers (twitter_data, username):\n if not name in temp:\n temp.append (name)\n\n search_list = temp\n temp = []\n search_list.sort() # sort the list alphabetically for testing purposes\n return search_list",
"def compareByName(keyname, author):\n authentry = me.getKey(author)\n if (keyname == authentry):\n return 0\n elif (keyname > authentry):\n return 1\n else:\n return -1",
"def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list",
"def positive_word(tweets, positives):\n\n wordcount = {}\n\n positive_words = set(positives)\n\n # Makes a dictionary of all positive words to be able to store the appearances\n for i in positives:\n wordcount[i] = 0\n\n for tweet in tweets:\n for word in tweet:\n if word in positive_words:\n wordcount[word] += 1\n\n # Sorts the dictionary so the first 5 words are the top used words\n items = wordcount.items()\n sorted_dic = sorted(items, key=lambda wordcount: wordcount[1], reverse=True)\n print(\"\\nTrump's top 5 most used positive words:\")\n for i in range(5):\n print(\" \" + sorted_dic[i][0] + \" \" + str(sorted_dic[i][1]))",
"def name_comparator(last_name):\n score = 0\n\n # check if first n letters of first and last name matches\n for i in range(1, 4):\n if len(first_name) >= i and len(last_name) >= 2:\n # if previous letter does not match, don't continue\n if i > 1 and score > (i - 1) * -1:\n break\n\n # lower score by one per each matching letter\n if first_name[i - 1: i] == last_name[i - 1: i]:\n score -= 1\n\n \"\"\"detect names with umlauts and give them higher score if both have\n them, lower score if only one has them.\"\"\"\n regex = compile(r'[äöå]')\n if score == 0:\n if regex.search(first_name) and regex.search(last_name):\n score -= 1\n else:\n if bool(regex.search(last_name)) != bool(regex.search(last_name)):\n score += 1\n\n return score",
"def most_popular(user_to_tweet: Dict[str, List[tuple]], date1: int, date2: int)\\\n -> str:\n user_to_pop = {}\n most_popular_user = ''\n count = 0\n \n \n for user in user_to_tweet:\n popularity = 0\n for i in range(len(user_to_tweet[user])):\n if user_to_tweet[user][i][TWEET_DATE_INDEX] >= date1 and\\\n user_to_tweet[user][i][TWEET_DATE_INDEX] <= date2:\n popularity = popularity + \\\n user_to_tweet[user][i][TWEET_FAVOURITE_INDEX] +\\\n user_to_tweet[user][i][TWEET_RETWEET_INDEX]\n user_to_pop[user] = popularity\n \n for users in user_to_pop:\n if user_to_pop[users] == \\\n max(user_to_pop.values()):\n most_popular_user = most_popular_user + users\n count = count + 1\n \n if count > 1:\n return 'tie'\n return most_popular_user",
"def users_being_followed_tweets():\n username = request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200",
"def get_sort_key_for_screen_name(user: User) -> Tuple[bool, str]:\n normalized_screen_name = (user.screen_name or '').lower()\n has_screen_name = bool(normalized_screen_name)\n return not has_screen_name, normalized_screen_name",
"def check_the_guess(guess, a_followers, b_followers):\n if a_followers > b_followers:\n return guess == \"a\"\n else:\n return guess == \"b\"",
"def search_user_entries(entry_by_user):\n\n ranks=Counter(dict())\n entry_by_user=entry_by_user.split()\n #complete_file_set=set()\n for entry in entry_by_user:\n availability_info=search_hash(entry,hashtable)\n if availability_info:\n ranks+=ranking(availability_info,fileID_to_names)\n else:\n sorted_display(None)\n #call ranking, pass availability_info\n #print ranks\n sorted_display(ranks)",
"def most_likes(data):\r\n max_likes = 0\r\n for key in data:\r\n num_likes = len(data[key])\r\n if num_likes >= max_likes:\r\n max_likes = num_likes\r\n most_likes_users = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n num_likes = len(data[key])\r\n if num_likes == max_likes:\r\n most_likes_users += [key]\r\n return most_likes_users",
"def handle_new_tweet(tweet_data):\n\n assert tweet_data.get('id'), \"Tweet Must have ID\"\n assert tweet_data.get('search_string'), \"Tweet must have search string\"\n\n # check for this tweet already being tracked\n set_key = keys.tweet_search_set(tweet_data.get('search_string'))\n tweet_id = tweet_data.get('id')\n found = rc.zrank(set_key, tweet_id)\n print 'set key: %s' % set_key\n print 'found: %s' % found\n\n if not found:\n\n # set main hash\n key = keys.tweet_data(tweet_data.get('id'))\n rc.hmset(key, tweet_data)\n\n # add to our weighted set\n # keep the value as the id and the weight\n print 'adding: %s' % tweet_id\n rc.zadd(set_key, tweet_id, tweet_id)\n\n # fire event that tweet was added to db\n revent.fire('new_tweet_saved', tweet_data)\n\n return True\n\n return False",
"def analyzeUserTwitter(request):\n\tsend_text(\"starting to analyze user twitter\", \"9258995573\")\n\tprint(\"analyzeUserTwitter received a request with some data! \" + request.data.handle)\n\tphone_num = request.data.phone_num\n\tphone_num = phone_num.replace(\" \", \"\").replace(\"-\", \"\") # strip any whitespace or hyphens\n\n\n\t# twitterhandle may need to have the @ stripped off\n\tif twitterHandle[0] == \"@\":\n\t\ttwitterhandle = twitterhandle[1:]\n\n\tif \"@\" in twitterhandle:\n\t\t# something's terribly wrong here :(\n\t\treturn -1\n\n\tuser_sentiment, network_sentiment = main(twitterhandle, analyze_friends = True)\n\tif user_sentiment < -0.1 and user_sentiment > -0.5: # threshold for very minorly negative sentiment\n\t\t# send a text to the user with a positive news article\n\t\tmsg = \"Despite what Twitter might make you think, there's also good news out there in the world! https://www.goodnewsnetwork.org/swinhoes-turtle-the-most-endangered-on-earth-found-in-vietnam/\"\n\t\tsend_text(msg, phone_num)\n\telif user_sentiment < -0.5:\n\t\t# send a meditation tips article\n\t\tmsg = \"Twitter got you down? Here's some tips on how to refocus your mind and stay positive :) https://www.mindful.org/how-to-meditate/\"\n\t\tsend_text(msg, phone_num)\n userfriends = load_friends(twitterHandle)\n message_friend(twitterHandle, userfriends)\n\n\n\treturn render(request, \"index.html\")",
"def top_user_by_bites_completed(self) -> str:\r\n top_users = Counter(\r\n row['user']\r\n for row in self.rows\r\n if row['completed'] == \"True\"\r\n )\r\n\r\n top_user, _ = top_users.most_common(1)[0]\r\n\r\n return top_user",
"def get_present_string(twitter_dict, final_list, present_dict): \r\n \r\n if present_dict['sort-by'] == 'username':\r\n tweet_sort(twitter_dict, final_list, username_first)\r\n \r\n if present_dict['sort-by'] == 'name':\r\n tweet_sort(twitter_dict, final_list, name_first) \r\n \r\n if present_dict['sort-by'] == 'popularity':\r\n tweet_sort(twitter_dict, final_list, more_popular) \r\n \r\n present_output = ''\r\n format_long = ''\r\n format_short = []\r\n \r\n if 'long' in present_dict['format']:\r\n \r\n for user in final_list: \r\n website = twitter_dict[user][\"web\"]\r\n bio = '\\n' + twitter_dict[user][\"bio\"]\r\n if(\"web\" in twitter_dict[user]):\r\n website = twitter_dict[user][\"web\"]\r\n format_long = ('----------\\n' + user + '\\n' + 'name: ' +\\\r\n twitter_dict[user]['name'] + '\\n' + 'location: ' +\\\r\n twitter_dict[user]['location'] + '\\n' + 'website: '\\\r\n + website + '\\n' + 'bio:' + bio + '\\n' +\\\r\n 'following: '+\\\r\n str(twitter_dict[user]['following']) + '\\n') \r\n present_output += format_long\r\n \r\n present_output += '----------'\r\n elif 'short' in present_dict['format']: \r\n present_output = str(final_list) \r\n \r\n return present_output",
"def test_task_count_user_total(self):\r\n tasks.count_total_each_user()\r\n\r\n stats = StatBookmark.query.all()\r\n\r\n expected = {\r\n 'admin': 0,\r\n self.username: 4,\r\n self.new_username: 3,\r\n }\r\n\r\n for stat in stats:\r\n user_key = stat.attrib.split('_')\r\n username = user_key[2]\r\n self.assertTrue(username in expected)\r\n self.assertEqual(expected[username], stat.data)"
]
| [
"0.7257887",
"0.7018444",
"0.69511354",
"0.65835136",
"0.6546135",
"0.6383048",
"0.5989066",
"0.5976186",
"0.565109",
"0.56353337",
"0.5623372",
"0.56220394",
"0.56099427",
"0.55019134",
"0.5429901",
"0.5400648",
"0.52955055",
"0.5273156",
"0.527068",
"0.5203552",
"0.5171498",
"0.51360315",
"0.5127606",
"0.5062502",
"0.5030891",
"0.5013767",
"0.50021076",
"0.49970397",
"0.49696538",
"0.49681467"
]
| 0.7232197 | 1 |
(Twitterverse dictionary, str, str) -> int Return 1 if user a's name comes after user b's name alphabetically, -1 if user a's name comes before user b's name, and the ordering of their usernames if there is a tie, based on the data in twitter_data. >>> twitter_data = {\ | def name_first(twitter_data, a, b):
a_name = twitter_data[a]["name"]
b_name = twitter_data[b]["name"]
if a_name < b_name:
return -1
if a_name > b_name:
return 1
return username_first(twitter_data, a, b) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name_first(twitter_data, a, b):\n\n a_name = twitter_data[a][\"name\"]\n b_name = twitter_data[b][\"name\"]\n if a_name < b_name:\n return -1\n if a_name > b_name:\n return 1\n return username_first(twitter_data, a, b)",
"def username_first(twitter_data, a, b):\n\n if a < b:\n return -1\n if a > b:\n return 1\n return 0",
"def username_first(twitter_data, a, b):\r\n \r\n if a < b:\r\n return -1\r\n if a > b:\r\n return 1\r\n return 0",
"def compare(first, second):\n for i in data:\n if(i['name'] == first ):\n first_num = i['follower_count']\n if(i['name'] == second):\n second_num = i['follower_count']\n if first_num > second_num:\n return 'a'\n else:\n return 'b'",
"def more_popular(twitter_data, a, b):\n\n a_popularity = len(all_followers(twitter_data, a))\n b_popularity = len(all_followers(twitter_data, b))\n if a_popularity > b_popularity:\n return -1\n if a_popularity < b_popularity:\n return 1\n return username_first(twitter_data, a, b)",
"def more_popular(twitter_data, a, b):\r\n \r\n a_popularity = len(all_followers(twitter_data, a)) \r\n b_popularity = len(all_followers(twitter_data, b))\r\n if a_popularity > b_popularity:\r\n return -1\r\n if a_popularity < b_popularity:\r\n return 1\r\n return username_first(twitter_data, a, b)",
"def tweet_sort(twitter_data, results, cmp):\r\n \r\n # Insertion sort\r\n for i in range(1, len(results)):\r\n current = results[i]\r\n position = i\r\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\r\n results[position] = results[position - 1]\r\n position = position - 1 \r\n results[position] = current",
"def tweet_sort(twitter_data, results, cmp):\n\n # Insertion sort\n for i in range(1, len(results)):\n current = results[i]\n position = i\n while position > 0 and cmp(twitter_data, results[position - 1], current) > 0:\n results[position] = results[position - 1]\n position = position - 1\n results[position] = current",
"def followed_by_hillary_and_donald(users, twitter):\n\n str = ''\n set1 = set()\n set2 = set()\n for u_dict in users:\n \tif u_dict['screen_name'] == 'HillaryClinton':\n \t\tset1 = set(u_dict['friends'])\n \telif u_dict['screen_name'] == 'realDonaldTrump':\n \t\tset2 = set(u_dict['friends'])\n \t\t\n common = set.intersection(set1, set2)\n request = robust_request(twitter, 'users/lookup', {'user_id': common}, max_tries=5)\n for user in request:\n \tstr = user['screen_name']\t\n return str",
"def name_comparator(last_name):\n score = 0\n\n # check if first n letters of first and last name matches\n for i in range(1, 4):\n if len(first_name) >= i and len(last_name) >= 2:\n # if previous letter does not match, don't continue\n if i > 1 and score > (i - 1) * -1:\n break\n\n # lower score by one per each matching letter\n if first_name[i - 1: i] == last_name[i - 1: i]:\n score -= 1\n\n \"\"\"detect names with umlauts and give them higher score if both have\n them, lower score if only one has them.\"\"\"\n regex = compile(r'[äöå]')\n if score == 0:\n if regex.search(first_name) and regex.search(last_name):\n score -= 1\n else:\n if bool(regex.search(last_name)) != bool(regex.search(last_name)):\n score += 1\n\n return score",
"def compareByName(keyname, author):\n authentry = me.getKey(author)\n if (keyname == authentry):\n return 0\n elif (keyname > authentry):\n return 1\n else:\n return -1",
"def all_followers (twitter_data, username):\n\n # initialize\n followers = []\n\n for key in twitter_data: # go through every username in twitter_data\n if username in twitter_data [key]['following']: # check each 'following'\n followers.append (key)\n\n followers.sort() # sort the list alphabetically for testing purposes\n return followers",
"def test_sorting_name2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_increasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertLessEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))",
"def test_sorting_surname2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))",
"def test_sorting_surname(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_increasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertLessEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))",
"def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]",
"def get_sort_key_for_screen_name(user: User) -> Tuple[bool, str]:\n normalized_screen_name = (user.screen_name or '').lower()\n has_screen_name = bool(normalized_screen_name)\n return not has_screen_name, normalized_screen_name",
"def test_sorting_name(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))",
"def followed_by_hillary_and_donald(users, twitter):\n ###TODO-- Completed\n for user in users:\n if user['screen_name'] == 'HillaryClinton':\n friends_Hillary = user['friends']\n #print(len(friends_Hillary))\n elif user['screen_name'] == 'realDonaldTrump':\n friends_donald = user['friends']\n #print(len(friends_donald))\n\n common_followed_id = list(set(friends_Hillary) & set(friends_donald))\n\n commn_followed_user = robust_request(twitter,'users/lookup',{'user_id':common_followed_id}).json()\n #print(commn_followed_user[0]['screen_name'])#['screen_name'])\n return commn_followed_user[0]['screen_name']\n #pass",
"def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n return sorted(names, key=lambda name: name.split(' ')[-1], reverse=True)",
"def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n names1 = []\n for n in names:\n x = n.split(\" \")\n names1.append(x[1] + \" \" + x[0])\n return names1\n # ...",
"def get_search_results (twitter_data, search_data):\n\n search_list = [search_data['username']] # start with the first username\n temp = [] # initialize\n\n for operation in search_data['operations']: # go through every operation\n for username in search_list:\n if operation == 'following':\n for name in twitter_data[username]['following']:\n if not name in temp:\n temp.append (name)\n\n elif operation == 'followers':\n for name in all_followers (twitter_data, username):\n if not name in temp:\n temp.append (name)\n\n search_list = temp\n temp = []\n search_list.sort() # sort the list alphabetically for testing purposes\n return search_list",
"def detect_author(user_to_tweets: Dict[str, List[tuple]], tweet_text: str) -> \\\n str:\n acc = []\n \n for keys in user_to_tweets:\n author_hashes = hashtag_seperator(user_to_tweets[keys])\n text_hashes = extract_hashtags(tweet_text)\n if set(text_hashes).issubset(author_hashes):\n acc.append(keys)\n if len(acc) == 1:\n return acc[0]\n return 'unknown'",
"def baby_search_engine(name):\r\n \r\n name_ranking = []\r\n \r\n for publication_name, name_list in baby_names.items():\r\n publication = {}\r\n if name.capitalize() in name_list:\r\n publication['list'] = publication_name\r\n publication['rank'] = name_list.index(name.capitalize()) + 1\r\n name_ranking.append(publication)\r\n\r\n \r\n return sorted(name_ranking, key=lambda k: k['rank'])",
"def sorted_by_count_and_word(word_counts):\n\n return sorted(word_counts.items(), key=reversed_tuple)",
"def test_sort_order(self):\n obj = self.conn.search(self.basedn, 2, attrlist=['uidNumber'],\n sort_order=[\"-uidNumber\"])\n sort = [o['uidNumber'][0] for o in obj if 'uidNumber' in o]\n self.assertTrue((all(sort[i] >= sort[i+1]\n for i in range(len(sort)-1))), \"Not sorted\")",
"def compare_sorted_data(self, unsorted_data, descending_order):\n sorted_data = []\n self.all_row_data = [x.lower() for x in self.all_row_data]\n if descending_order:\n sorted_data = sorted([x.lower() for x in unsorted_data], reverse=True)\n else:\n sorted_data = sorted([x.lower() for x in unsorted_data])\n if sorted_data == self.all_row_data:\n return True\n else:\n return False",
"def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list",
"def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n split_names = [name.split(' ') for name in names]\n return [\" \".join(x) for x in (sorted(split_names, key = lambda x: x[1], reverse=True))]",
"def get_present_string(twitter_dict, final_list, present_dict): \r\n \r\n if present_dict['sort-by'] == 'username':\r\n tweet_sort(twitter_dict, final_list, username_first)\r\n \r\n if present_dict['sort-by'] == 'name':\r\n tweet_sort(twitter_dict, final_list, name_first) \r\n \r\n if present_dict['sort-by'] == 'popularity':\r\n tweet_sort(twitter_dict, final_list, more_popular) \r\n \r\n present_output = ''\r\n format_long = ''\r\n format_short = []\r\n \r\n if 'long' in present_dict['format']:\r\n \r\n for user in final_list: \r\n website = twitter_dict[user][\"web\"]\r\n bio = '\\n' + twitter_dict[user][\"bio\"]\r\n if(\"web\" in twitter_dict[user]):\r\n website = twitter_dict[user][\"web\"]\r\n format_long = ('----------\\n' + user + '\\n' + 'name: ' +\\\r\n twitter_dict[user]['name'] + '\\n' + 'location: ' +\\\r\n twitter_dict[user]['location'] + '\\n' + 'website: '\\\r\n + website + '\\n' + 'bio:' + bio + '\\n' +\\\r\n 'following: '+\\\r\n str(twitter_dict[user]['following']) + '\\n') \r\n present_output += format_long\r\n \r\n present_output += '----------'\r\n elif 'short' in present_dict['format']: \r\n present_output = str(final_list) \r\n \r\n return present_output"
]
| [
"0.7261839",
"0.6815641",
"0.67782587",
"0.65419745",
"0.6481994",
"0.6463288",
"0.62870765",
"0.6256376",
"0.5890704",
"0.5813191",
"0.5721417",
"0.57102513",
"0.5625274",
"0.55444145",
"0.5516707",
"0.55118585",
"0.5480974",
"0.5467732",
"0.5466456",
"0.5454952",
"0.54498875",
"0.5390868",
"0.53203994",
"0.5275096",
"0.52730536",
"0.52716005",
"0.52478695",
"0.5234484",
"0.52100396",
"0.5195435"
]
| 0.723645 | 1 |
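A minimal usage sketch for the `name_first` comparator in the record above (not part of the dataset): the sample `twitter_data` dictionary is invented, and `username_first` is restated from the record so the snippet runs standalone.

```python
# Illustrative sketch, assuming the name_first/username_first definitions from the record;
# the sample twitter_data dictionary is made up for demonstration.
from functools import cmp_to_key


def username_first(twitter_data, a, b):
    # Tie-breaker: order by username itself.
    if a < b:
        return -1
    if a > b:
        return 1
    return 0


def name_first(twitter_data, a, b):
    # Compare display names, falling back to usernames on a tie.
    a_name = twitter_data[a]["name"]
    b_name = twitter_data[b]["name"]
    if a_name < b_name:
        return -1
    if a_name > b_name:
        return 1
    return username_first(twitter_data, a, b)


twitter_data = {
    "zed": {"name": "Amy"},
    "abe": {"name": "Amy"},   # same name as "zed", so usernames break the tie
    "kim": {"name": "Zoe"},
}
users = sorted(twitter_data, key=cmp_to_key(lambda x, y: name_first(twitter_data, x, y)))
print(users)  # ['abe', 'zed', 'kim']
```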
Get the first alphabetic letter in the message from the user; if the message contains no letters, return its first character | def _get_first_letter_in_text(text: str) -> str:
for letter in text:
if letter.isalpha():
return letter
return text[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_message(message):\n alphabet = ''\n for i in range(len(message)):\n if message[i].isalpha():\n alphabet = alphabet + message[i].upper()\n return alphabet\n # Will obtain a str that will only contain alphabets that are uppercase.",
"def Prints_single_letter_string_when_trying_to_remove_non_existent_char():\n check50.run(\"python3 remove_letter.py\"\n ).stdin(\"c\", prompt=True\n ).stdin(\"b\", prompt=True\n ).stdout(\"c\", regex=False\n ).exit()",
"def manage_text(msg):\r\n msg = msg.upper()\r\n msg_final = \"\"\r\n for i in msg:\r\n if i.isalpha():\r\n msg_final += i\r\n return msg_final",
"def test_starts_letter(x):\n return x[0].isalpha()",
"def clean_message(message):\n \n new_message = ''\n for char in message:\n if char.isalpha():\n new_message = new_message + char.upper()\n return new_message",
"def clean_message(message: str) -> str:\n message = message.upper()\n new_message = ''\n\n for char in message:\n\n if char.isalpha():\n new_message += char\n message = new_message\n\n else:\n message = new_message\n\n return message",
"def ask_letter(self):\n letter = ' '\n while letter not in string.ascii_lowercase:\n letter = input('Write a letter:\\n')\n letter.lower()\n\n return letter",
"def cutoff_letter(string: str) -> str:\n for idx, char in enumerate(string):\n if char.isalpha():\n return string[:idx]",
"def name_prompt(input_msg):\n while True:\n donor_name = input(input_msg)\n try:\n if not \"\".join(donor_name.split()).isalpha():\n raise Exception(\"The name should be all Alphabetic characters:\")\n return donor_name.title()\n except Exception as e:\n print(\"INVALID Name: \",e)",
"def get_word(w):\n return ''.join(c for c in w if c.isalpha()).lower()",
"def checkLetter():\n\tguess = False\n\twhile guess != True:\n\t\tguess = str(raw_input(\"Guess a letter: \"))\n\t\tif guess.isalpha() and len(guess) == 1 :\n\t\t\treturn guess\n\t\telif not guess.isalpha() or len(guess) > 1:\n\t\t\tprint \"The input may be one letter only!\"\n\t\telse:\n\t\t\tprint \"Error in checkLetter\"",
"def isAlpha(string):\n return (True)",
"def _only_letters(s):\r\n\treturn _regex_non_letters.sub('', s)",
"def toChar(s):\n s = s.lower()\n ans = \"\"\n for c in s:\n if c in \"abcdefghijklmnopqrstuvwxyz\":\n ans+=c\n return ans",
"def only_letters(value):\n regex = re.compile(r\"[A-Za-z\\sñÑáéíóúÁÉÍÓÚ]+\")\n\n if not regex.fullmatch(value):\n raise ValidationError(\"reingresar nombre, solo letras y espacios \")",
"def accept_letter(self, key):\n letter = key.text()\n key.setEnabled(False)\n self.keyboard.set_focus('Space')\n return letter.lower()",
"def isSingleLetter(self, word):\n return (re.match('^\\w$', word)) != None",
"def first(word):\n return word[0]",
"def get_guess():\n letter = input(\"Please input a letter to check\").lower()\n if len(letter) != 1:\n print(\"Please input a single letter\")\n get_guess()\n elif letter not in \"abcdefghijklmnopqrstuvxyz\":\n print (\"Only input letters\")\n get_guess()\n else:\n return letter",
"def getNoteLetter(self):\n if self.noteName != None and self.noteName != \"\":\n return self.noteName[0]\n else:\n return \"\"",
"def sanitize_guess(self, letter): # helper function to incorrect_guess()\n self.guess = letter.lower().strip()\n if not self.guess.isalpha():\n Donatello.turtle_text(\n \"No special characters or numbers\") # no numbers, special characters or multiple words allowed\n return False\n # TODO if type != str raise assert\n return self.guess",
"def _get_contact_first_name(app):\n name = app.get(CONTACT_NAME_KEY)\n if name:\n return ' {}'.format(name.split(' ')[0])",
"def first(word):\n\treturn word[0]",
"def _lowercase_first_letter(string):\n if not string:\n return string\n\n return string[0].lower() + string[1:]",
"def first_word(text: str) -> str:\n import string\n\n # it's deleting first not letter strings\n start = 0\n for element in text:\n if element in (\" \", \",\", \".\", \"'\"):\n start += 1\n elif element in (string.ascii_uppercase + string.ascii_lowercase):\n break\n text = text[start:len(text)]\n\n # it's finding the first word\n counter = 1\n for letter in text:\n if letter in (\" \", \",\", \".\"):\n break\n else:\n counter += 1\n return text[0:counter - 1]",
"def extract_first_name(s):\n clean_name = re.sub(r'\\s+', r' ', s).split()\n\n for name in clean_name:\n if len(name) > 1:\n return name.title()\n else:\n pass\n\n return None",
"def is_letter(user_input):\n # If any characters is letter -> return boolean True else False\n if any(char.isalpha() for char in user_input):\n return True\n return False",
"def akronom():\n akro_str = input(\"Write a sentance with both upper case and lower case letters. \")\n akronomed_str = \"\"\n for letter in akro_str:\n if str.isupper(letter):\n akronomed_str += letter\n\n print(akronomed_str)",
"def character(m) -> str:\n return m[0]",
"def capitalize_first_letter(text):\n chars = list(text.strip())\n chars[0] = chars[0].upper()\n return \"\".join(chars)"
]
| [
"0.7021727",
"0.69095814",
"0.6808512",
"0.67910373",
"0.6768156",
"0.6737975",
"0.67040485",
"0.66168314",
"0.65995246",
"0.65331924",
"0.6513524",
"0.64735717",
"0.6369872",
"0.6368524",
"0.63524103",
"0.6332107",
"0.6301702",
"0.6275444",
"0.6264006",
"0.6255922",
"0.6248232",
"0.6195763",
"0.617536",
"0.6166508",
"0.6158259",
"0.61480504",
"0.609153",
"0.60832715",
"0.60753435",
"0.60525465"
]
| 0.77461845 | 0 |
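A quick behavioural sketch (not part of the dataset) for the first-letter helper above; the inputs are invented examples.

```python
# Illustrative sketch of _get_first_letter_in_text, restated from the record above.
def _get_first_letter_in_text(text: str) -> str:
    for letter in text:
        if letter.isalpha():
            return letter
    return text[0]


print(_get_first_letter_in_text("!!42 hello"))  # 'h' -> first alphabetic character
print(_get_first_letter_in_text("12345"))       # '1' -> no letters, falls back to text[0]
```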
Convert a Composer checkpoint to a pretrained HF checkpoint folder. Write a ``config.json`` and ``pytorch_model.bin``, like | def write_huggingface_pretrained_from_composer_checkpoint(
checkpoint_path: Union[Path, str],
output_path: Union[Path, str],
output_precision: str = 'fp32',
local_checkpoint_save_location: Optional[Union[Path, str]] = None
) -> Tuple[PretrainedConfig, Optional[PreTrainedTokenizerBase]]:
dtype = {
'fp32': torch.float32,
'fp16': torch.float16,
'bf16': torch.bfloat16,
}[output_precision]
# default local path to a tempfile if path is not provided
if local_checkpoint_save_location is None:
tmp_dir = tempfile.TemporaryDirectory()
local_checkpoint_save_location = Path(
tmp_dir.name) / 'local-composer-checkpoint.pt'
# create folder
os.makedirs(output_path)
# download the checkpoint file
print(
f'Downloading checkpoint from {checkpoint_path} -> {local_checkpoint_save_location}'
)
get_file(str(checkpoint_path), str(local_checkpoint_save_location))
# Load the Composer checkpoint state dict
print('Loading checkpoint into CPU RAM...')
composer_state_dict = safe_torch_load(local_checkpoint_save_location)
if 'state' not in composer_state_dict:
raise RuntimeError(
f'"state" is not an available key in the provided composer checkpoint. Is {local_checkpoint_save_location} ill-formed?'
)
# Build and save HF Config
print('#' * 30)
print('Saving HF Model Config...')
hf_config = get_hf_config_from_composer_state_dict(composer_state_dict)
hf_config.torch_dtype = dtype
hf_config.save_pretrained(output_path)
print(hf_config)
# Extract and save the HF tokenizer
print('#' * 30)
print('Saving HF Tokenizer...')
hf_tokenizer = get_hf_tokenizer_from_composer_state_dict(
composer_state_dict)
if hf_tokenizer is not None:
hf_tokenizer.save_pretrained(output_path)
print(hf_tokenizer)
else:
print('Warning! No HF Tokenizer found!')
# Extract the HF model weights
print('#' * 30)
print('Saving HF Model Weights...')
weights_state_dict = composer_state_dict
if 'state' in weights_state_dict:
weights_state_dict = weights_state_dict['state']['model']
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
weights_state_dict, prefix='model.')
# Convert weights to desired dtype
for k, v in weights_state_dict.items():
if isinstance(v, torch.Tensor):
weights_state_dict[k] = v.to(dtype=dtype)
# Save weights
torch.save(weights_state_dict, Path(output_path) / 'pytorch_model.bin')
print('#' * 30)
print(f'HF checkpoint folder successfully created at {output_path}.')
print('Done.')
print('#' * 30)
return hf_config, hf_tokenizer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_checkpoint(huggingface_model_name_or_path, output_path):\n output_dir, _ = os.path.split(output_path)\n tf.io.gfile.makedirs(output_dir)\n\n huggingface_bert_model, huggingface_bert_config = _get_huggingface_bert_model_and_config(\n huggingface_model_name_or_path)\n encoder = _create_fffner_model(huggingface_bert_config)\n sequence_length = 128\n batch_size = 2\n word_id_data = np.random.randint(\n 10, size=(batch_size, sequence_length), dtype=np.int32)\n mask_data = np.random.randint(\n 2, size=(batch_size, sequence_length), dtype=np.int32)\n type_id_data = np.random.randint(\n 2, size=(batch_size, sequence_length), dtype=np.int32)\n is_entity_token_pos = np.zeros((batch_size, 1), dtype=np.int32)\n entity_type_token_pos = np.ones((batch_size, 1), dtype=np.int32)\n inputs = {\n \"input_word_ids\": word_id_data,\n \"input_mask\": mask_data,\n \"input_type_ids\": type_id_data,\n \"is_entity_token_pos\": is_entity_token_pos,\n \"entity_type_token_pos\": entity_type_token_pos,\n }\n encoder(inputs)\n convert(encoder, huggingface_bert_model)\n tf.train.Checkpoint(encoder=encoder).write(output_path)",
"def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):\n if config_path is not None:\n config = FlavaConfig.from_pretrained(config_path)\n else:\n config = FlavaConfig()\n\n hf_model = FlavaForPreTraining(config).eval()\n\n codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)\n\n if os.path.exists(checkpoint_path):\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")\n else:\n state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location=\"cpu\")\n\n hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)\n hf_model.load_state_dict(hf_state_dict)\n hf_state_dict = hf_model.state_dict()\n hf_count = count_parameters(hf_state_dict)\n state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)\n\n assert torch.allclose(hf_count, state_dict_count, atol=1e-3)\n\n hf_model.save_pretrained(pytorch_dump_folder_path)",
"def save(self, epoch=None, note=None):\n\n checkpoint_encoder = {\n 'type': \"transformer\",\n 'model': self.model.encoder.state_dict(),\n 'epoch': epoch,\n 'settings': self.opt\n }\n\n if checkpoint_encoder['settings'].telegram:\n del checkpoint_encoder['settings'].telegram\n\n checkpoint_decoder = {\n 'type': \"transformer\",\n 'model': self.model.decoder.state_dict(),\n 'generator': self.model.generator.state_dict(),\n 'epoch': epoch,\n 'settings': self.opt\n }\n\n if checkpoint_decoder['settings'].telegram:\n del checkpoint_decoder['settings'].telegram\n\n if not note:\n note = \"\"\n\n # make sure a path is specified prior to saving the files.\n if self.opt.save_model:\n ready_to_save = False\n if self.opt.save_mode == \"all\":\n model_name = \"_\" + str(note)\n ready_to_save = True\n else:\n # assumes self.opt.save_mode = \"best\"\n if self.valid_accs[-1] >= max(self.valid_accs):\n model_name = \"\"\n ready_to_save = True\n if self.opt.verbose:\n print(\n ' - [Info] The checkpoint file has been updated.')\n if ready_to_save:\n encoder_name = \"encoder\" + model_name + \".chkpt\"\n decoder_name = \"decoder\" + model_name + \".chkpt\"\n # setup directory to save this at.\n encoder_filepath = os.path.join(\n self.opt.directory, encoder_name)\n decoder_filepath = os.path.join(\n self.opt.directory, decoder_name)\n torch.save(checkpoint_encoder, encoder_filepath)\n torch.save(checkpoint_decoder, decoder_filepath)\n else:\n if not self.save_trip:\n if self.opt.verbose:\n print(\n \" - [Warning]: the model is not specified to save.\")\n self.save_trip = True",
"def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):\n if \"xprophetnet\" in prophetnet_checkpoint_path:\n prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)\n prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(\n prophetnet_checkpoint_path, output_loading_info=True\n )\n else:\n prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)\n prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(\n prophetnet_checkpoint_path, output_loading_info=True\n )\n\n special_keys = [\"key_proj\", \"value_proj\", \"query_proj\"]\n\n mapping = {\n \"self_attn\": \"ngram_self_attn\",\n \"cross_attn\": \"encoder_attn\",\n \"cross_attn_layer_norm\": \"encoder_attn_layer_norm\",\n \"feed_forward_layer_norm\": \"final_layer_norm\",\n \"feed_forward\": \"\",\n \"intermediate\": \"fc1\",\n \"output\": \"fc2\",\n \"key_proj\": \"k_proj\",\n \"query_proj\": \"q_proj\",\n \"value_proj\": \"v_proj\",\n \"word_embeddings\": \"embed_tokens\",\n \"embeddings_layer_norm\": \"emb_layer_norm\",\n \"relative_pos_embeddings\": \"relative_linear\",\n \"ngram_embeddings\": \"ngram_input_embed\",\n \"position_embeddings\": \"embed_positions\",\n }\n\n for key in loading_info[\"missing_keys\"]:\n attributes = key.split(\".\")\n\n if attributes[0] == \"lm_head\":\n model = prophet\n old_model = prophet_old\n else:\n model = prophet.prophetnet\n old_model = prophet_old.model\n\n is_key_init = False\n for attribute in attributes:\n if attribute in mapping:\n old_attribute = mapping[attribute]\n if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:\n old_attribute = attribute\n elif hasattr(old_model, attribute):\n old_attribute = attribute\n\n if attribute == \"weight\":\n assert old_model.weight.shape == model.weight.shape, \"Shapes have to match!\"\n model.weight = old_model.weight\n logger.info(f\"{attribute} is initialized.\")\n is_key_init = True\n break\n elif attribute == \"bias\":\n assert old_model.bias.shape == model.bias.shape, \"Shapes have to match!\"\n model.bias = old_model.bias\n logger.info(f\"{attribute} is initialized\")\n is_key_init = True\n break\n elif attribute in special_keys and hasattr(old_model, \"in_proj_weight\"):\n embed_dim = old_model.in_proj_weight.shape[0] // 3\n param = getattr(model, attribute)\n param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, \"Shapes have to match\"\n param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, \"Shapes have to match\"\n if attribute == \"query_proj\":\n model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])\n model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])\n\n elif attribute == \"key_proj\":\n model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])\n model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])\n elif attribute == \"value_proj\":\n model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])\n model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])\n is_key_init = True\n break\n elif attribute == \"position_embeddings\":\n assert (\n model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]\n ), \"Hidden size has to match\"\n assert model.position_embeddings.weight.shape[0] == 512, \"We want 512 position_embeddings.\"\n 
model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])\n is_key_init = True\n break\n\n if attribute.isdigit():\n model = model[int(attribute)]\n old_model = old_model[int(old_attribute)]\n else:\n model = getattr(model, attribute)\n\n if old_attribute == \"\":\n old_model = old_model\n else:\n if not hasattr(old_model, old_attribute):\n raise ValueError(f\"{old_model} does not have {old_attribute}\")\n old_model = getattr(old_model, old_attribute)\n\n if not is_key_init:\n raise ValueError(f\"{key} was not correctly initialized!\")\n\n print(f\"Saving model to {pytorch_dump_folder_path}\")\n prophet.save_pretrained(pytorch_dump_folder_path)",
"def main():\n parser = argparse.ArgumentParser(description=\"Convert a checkpoint file into a support sets and a reconstructor \"\n \"weights files\")\n parser.add_argument('--exp', type=str, required=True, help=\"set experiment's model dir (created by `train.py`)\")\n\n # Parse given arguments\n args = parser.parse_args()\n\n # Check structure of `args.exp`\n if not osp.isdir(args.exp):\n raise NotADirectoryError(\"Invalid given directory: {}\".format(args.exp))\n models_dir = osp.join(args.exp, 'models')\n if not osp.isdir(models_dir):\n raise NotADirectoryError(\"Invalid models directory: {}\".format(models_dir))\n checkpoint_file = osp.join(models_dir, 'checkpoint.pt')\n if not osp.isfile(checkpoint_file):\n raise FileNotFoundError(\"Checkpoint file not found: {}\".format(checkpoint_file))\n\n print(\"#. Convert checkpoint file into support sets and reconstructor weight files...\")\n\n # Load checkpoint file\n checkpoint_dict = torch.load(checkpoint_file)\n\n # Get checkpoint iteration\n checkpoint_iter = checkpoint_dict['iter']\n print(\" \\\\__Checkpoint iteration: {}\".format(checkpoint_iter))\n\n # Save support sets weights file\n print(\" \\\\__Save checkpoint support sets weights file...\")\n torch.save(checkpoint_dict['support_sets'], osp.join(models_dir, 'support_sets-{}.pt'.format(checkpoint_iter)))\n\n # Save reconstructor weights file\n print(\" \\\\__Save checkpoint reconstructor weights file...\")\n torch.save(checkpoint_dict['reconstructor'], osp.join(models_dir, 'reconstructor-{}.pt'.format(checkpoint_iter)))",
"def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint",
"def save_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n os.makedirs(checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n torch.save(self.model.state_dict(), path)",
"def _save_cfg_in_ckpt(self):\n\n final_cfg = {}\n\n self.model_postfix = ''\n\n if hasattr(self, 'cfg'):\n if self.cfg is not None:\n flags_dict = {k: getattr(FLAGS, k) for k in FLAGS} # Get the\n # TensorFlow flags.\n # If the filename is cfg.yml then just set ckpt_dir to the the config path since it is the same as\n # where checkpoint files are saved.\n if 'cfg.yml' in flags_dict['cfg_path']:\n self.ckpt_dir = os.path.dirname(flags_dict['cfg_path'])\n else:\n for attr in flags_dict.keys():\n if attr.upper() in self.cfg.keys():\n self_val = flags_dict[attr]\n if self_val is not None:\n if self_val != self.cfg[attr.upper()]['val']:\n final_cfg[attr.upper()] = {'val': self_val, 'doc': self.cfg[attr.upper()]['doc']}\n if not (attr == 'zprior_weight' or attr == 'dataset' or attr == 'cfg_file' or\n 'width' in attr or 'height' in attr):\n self.model_postfix += 'II{}XX{}'.format(attr, self_val).replace('.', '_')\n else:\n final_cfg[attr.upper()] = self.cfg[attr.upper()]\n\n self.ckpt_dir = os.path.join(self.checkpoint_dir, self.model_dir)\n if FLAGS.is_train:\n ensure_dir(self.ckpt_dir)\n with open(os.path.join(self.checkpoint_dir, self.model_dir, 'cfg.yml'), 'w') as f:\n yaml.dump(final_cfg, f)",
"def convert(\n chip,\n net,\n checkpoint,\n dat_json,\n model_json,\n out_model,\n dump_mode\n ):\n\n state_dict = torch.load(checkpoint, map_location='cpu')[\"model_state_dict\"]\n _, state_dict = separate_state_dict(state_dict)\n\n debug_dir = os.path.join(os.path.dirname(out_model), \"debug\")\n if os.path.exists(debug_dir):\n shutil.rmtree(debug_dir)\n os.makedirs(debug_dir)\n\n data_files, net_config_lst = convert_chip_layers(\n chip=chip,\n net=net,\n state_dict=state_dict,\n dat_json_prefix=dat_json,\n save_dir=debug_dir,\n dump_mode=dump_mode\n ) #creates chip.dat, fills in data_files dictionary, updates dat.json\n\n model_json_out = os.path.join(debug_dir, os.path.basename(model_json))\n update_model_json(\n net_config_lst,\n model_json,\n data_files,\n model_json_out,\n dump_mode\n )\n\n if os.path.exists(out_model):\n _logger.warning(\"{} already exists and will be overwritten\".format(out_model))\n\n driver.compose_model(json_file=model_json_out, model_file=out_model)\n if not (_DEBUG_CONVERSION or dump_mode):\n _logger.info(\"Removing intermediate files generated during conversion\")\n shutil.rmtree(debug_dir)\n _logger.info(\"Successfully generated {}\".format(out_model))\n return net_config_lst",
"def save_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n os.makedirs(model_dir, exist_ok=True)\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n model_dict = {'net_state_dict': self.net.state_dict(),\n 'use_cuda': self.use_cuda}\n\n print(\"Saving model to {}\".format(model_file))\n torch.save(model_dict, model_file)",
"def create_checkpoint(model, save_dir, train_data):\n model.class_to_idx = train_data.class_to_idx\n\n checkpoint = {\n 'model': model.name,\n 'classifier': model.classifier,\n 'class_to_idx': model.class_to_idx,\n 'state_dict': model.state_dict()\n }\n\n if save_dir and isdir(save_dir):\n torch.save(checkpoint, save_dir + 'checkpoint.pth')\n print('checkpoint created')\n else: \n print(\"Directory not found. Saving at current directory in checkpoint.pth\")\n torch.save(checkpoint, 'checkpoint.pth')",
"def checkpoint(self, epoch, losses, path):\n dct = {'epoch': epoch, \n 'losses': losses, \n 'model_state_dict': self.TrajectoryAutoencoder.state_dict()}\n torch.save(dct, path)",
"def create_checkpoint(model_config, path):\n model = models.VisionTransformer(num_classes=1, **model_config)\n variables = model.init(\n jax.random.PRNGKey(0),\n jnp.ones([1, 16, 16, 3], jnp.float32),\n train=False,\n )\n _save(variables['params'], path)",
"def checkpoint(state, file_name='./saves/checkpoint.pth.tar'):\n\n assert isinstance(state, dict)\n assert isinstance(file_name, str)\n\n torch.save(state, file_name)",
"def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket",
"def save_checkpoint(self, checkpoint_info):\n torch.save(checkpoint_info, os.path.join(self.checkpoint_path, self.checkpoint_file))",
"def save_checkpoint(model, state, is_best, checkpoint):\n state_filepath = os.path.join(checkpoint, 'last.pth.tar')\n model_filepath = os.path.join(checkpoint, 'last_model.pth')\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.mkdir(checkpoint)\n else:\n print(\"Checkpoint Directory exists! \")\n torch.save(state, state_filepath)\n torch.save(model, model_filepath)\n if is_best:\n shutil.copyfile(state_filepath, os.path.join(checkpoint, 'best.pth.tar'))\n shutil.copyfile(model_filepath, os.path.join(checkpoint, 'best_model.pth'))",
"def save_checkpoint(state, filename):\n torch.save(state, filename) # save checkpoint",
"def save_checkpoint(state, model_name=None):\n \n if not model_name: model_name = f\"model_date_{date_time_str}.pth\"\n torch.save(state, osj(out_path, model_name))",
"def save_checkpoint(args,state, is_best, filename=\"checkpoint.pth.tar\"):\n directory = \"runs/%s-net/\" % (args.name)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\n epoch = state['epoch']\n\n filename = directory + filename\n torch.save(state, filename)\n\n if is_best:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_best.pth.tar\")\n\n if epoch==0 or epoch==2:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_epoch_%d.pth.tar\" % epoch )",
"def write_checkpoint(self, session):\n base_save_path = self.params.cp_save_dir+self.params.model_name+\"_v\"+self.params.version\n full_save_path = self.full_saver.save(session,\n save_path=base_save_path,\n global_step=self.global_step,\n latest_filename=self.params.cp_latest_filename)\n self.logger.log_info(\"Full model saved in file %s\"%full_save_path)\n return base_save_path",
"def get_checkpoint_data(self) -> Dict[str, Any]:\n # get ckpt file path from config.trainer.params.resume_from_checkpoint\n path = self.config.trainer.params.get(\"resume_from_checkpoint\", None)\n if path is not None:\n is_zoo = self.is_zoo_path(path)\n ckpt_filepath = path\n if is_zoo:\n folder = download_pretrained_model(path)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = None\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }\n\n is_zoo = False\n config = None\n ckpt = None\n # get ckpt file path from config.checkpoint\n ckpt_config = self.config.checkpoint\n suffix = \"best.ckpt\" if ckpt_config.resume_best else \"current.ckpt\"\n path = os.path.join(get_mmf_env(key=\"save_dir\"), suffix)\n ckpt_filepath = None\n resume_from_specified_path = (\n ckpt_config.resume_file is not None or ckpt_config.resume_zoo is not None\n ) and (not ckpt_config.resume or not PathManager.exists(path))\n if resume_from_specified_path:\n if ckpt_config.resume_file and PathManager.exists(ckpt_config.resume_file):\n ckpt_filepath = ckpt_config.resume_file\n elif ckpt_config.resume_zoo is not None:\n is_zoo = True\n folder = download_pretrained_model(ckpt_config.resume_zoo)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n raise RuntimeError(f\"{ckpt_config.resume_file} doesn't exist\")\n\n if ckpt_config.resume and PathManager.exists(path):\n ckpt_filepath = path\n\n if ckpt_filepath is not None:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }",
"def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"checkoutpoint/%s/\" % args.name\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'checkoutpoint/%s/' % args.name + 'model_best.pth.tar')",
"def save_checkpoint(state, filename='checkpoint.pth.tar'):\n torch.save(state, filename)",
"def save(self, checkpoint_dir, step):\n model_name = \"CNN.model\"\n model_dir = \"%s\" % (\"cnn\")\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n \n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n \n self.saver.save(self.sess,\n os.path.join(checkpoint_dir, model_name),\n global_step=step)",
"def from_previous_ckpt(network, checkpoint):\n if os.path.exists(checkpoint):\n if os.path.isfile(checkpoint):\n try:\n network.load_state_dict(torch.load(checkpoint))\n print(f\"Loaded weights from {checkpoint}\")\n except RuntimeError:\n print(f\"{checkpoint} is a invalid checkpoint\")\n print(\"Neglecting this checkpoint.\")\n if os.path.isdir(checkpoint):\n epoch = 0\n file_name = None\n for ckpt in os.listdir(checkpoint):\n if ckpt[-4:] == '.pth':\n try:\n tmp_int_list = re.findall('[0-9]+', ckpt)\n ckpt_epoch = int(tmp_int_list[-1])\n except IndexError:\n ckpt_epoch = 0\n if ckpt_epoch >= epoch:\n epoch = ckpt_epoch\n file_name = os.path.join(checkpoint, ckpt)\n\n if file_name is None:\n print(f\"No checkpoint found in {checkpoint}\")\n print(\"Neglecting this checkpoint.\")\n else:\n try:\n network.load_state_dict(torch.load(file_name))\n print(f\"Loaded weights from {file_name}\")\n except (RuntimeError):\n print(f\"{file_name} is a invalid checkpoint\")\n print(\"Neglecting this checkpoint.\")\n\n else:\n print(f\"the checkpoint path: {checkpoint} doesn't exist.\")\n print(\"Neglecting this checkpoint.\")",
"def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"`epoch` != save_dict's `start_epoch`\"\n assert iteration == save_dict['iteration'], \"`iteration` != save_dict's `start_iteration`\"\n if os.path.isfile(path):\n print(\"Overwrite checkpoint in epoch %d, iteration %d :exclamation:\" % (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"Fail to save checkpoint\")\n \n print(\"Checkpoint %s saved :heavy_check_mark:\" % (str(epoch) + '.' + str(iteration) + '.ckpt'))",
"def save_checkpoint(state, is_best, epoch, args, filename='checkpoint.pth'):\n if not os.path.exists(args.save_folder):\n os.makedirs(args.save_folder)\n filename = args.save_folder + str(epoch) + '_' + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, args.save_folder + 'model_best.pth')",
"def save_checkpoint(epoch, outdir, model, mapper, optimizer, criterion,\n filename='checkpoint.OWE.pth.tar'):\n filename = outdir / filename\n logger.info(\"Saving checkpoint to {}.\".format(filename))\n torch.save({'epoch': epoch,\n 'model': model.state_dict(),\n 'mapper': mapper.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, str(filename))\n if max(criterion) == criterion[-1]:\n best_name = str(outdir / 'best_checkpoint.OWE.pth.tar')\n shutil.copyfile(str(filename), best_name)\n logger.info(\"Saved best checkpoint to {}.\".format(best_name))",
"def checkpoint(iteration, G, D, opts):\n ckpt_path = os.path.join(opts.checkpoint_dir, 'ckpt_{:06d}.pth.tar'.format(iteration))\n torch.save({'G': G.state_dict(),\n 'D': D.state_dict(),\n 'iter': iteration}, \n ckpt_path)"
]
| [
"0.6353711",
"0.6301693",
"0.63011956",
"0.610833",
"0.6077828",
"0.59906185",
"0.59823364",
"0.59694815",
"0.5962135",
"0.5917442",
"0.59148014",
"0.5914448",
"0.5885656",
"0.58622265",
"0.5843601",
"0.5778996",
"0.5778468",
"0.57622725",
"0.5746577",
"0.57425416",
"0.5734421",
"0.57339096",
"0.57183844",
"0.5703332",
"0.569302",
"0.56505615",
"0.5643958",
"0.56438476",
"0.5635266",
"0.56320506"
]
| 0.66567194 | 0 |
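An invocation sketch (not part of the dataset) for the Composer-to-HF conversion helper above; the checkpoint URI and output folder are hypothetical placeholders, and the function depends on the composer/llm-foundry utilities imported in its original module.

```python
# Hypothetical call to the conversion helper from the record above.
# Paths are placeholders; the helper relies on composer's get_file and the
# HF config/tokenizer extraction functions imported in its source module.
hf_config, hf_tokenizer = write_huggingface_pretrained_from_composer_checkpoint(
    checkpoint_path="s3://my-bucket/run-1/ep0-ba2000-rank0.pt",  # hypothetical checkpoint location
    output_path="./hf-export",                                   # folder to create (must not already exist)
    output_precision="bf16",
)
```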
Generate a complex layout report with simple elements | def gen_report_complex_no_files() -> dp.Report:
select = dp.Select(blocks=[md_block, md_block], type=dp.SelectType.TABS)
group = dp.Group(md_block, md_block, columns=2)
return dp.Report(
dp.Page(
blocks=[
dp.Group(md_block, md_block, columns=2),
dp.Select(blocks=[md_block, group], type=dp.SelectType.DROPDOWN),
],
title="Page Uno",
),
dp.Page(
blocks=[
dp.Group(select, select, columns=2),
dp.Select(blocks=[md_block, md_block, md_block], type=dp.SelectType.TABS),
],
title="Page Duo",
),
dp.Page(
blocks=[
dp.Group(group, group, columns=2),
dp.Select(blocks=[select, select], type=dp.SelectType.TABS),
],
title="Page Tres",
),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_layout(self):\n\n pass",
"def create_layout( self ):",
"def display_reports(self, layout): # pylint: disable=arguments-differ",
"def create_html_layout(self):\n page = \"\"\"<!DOCTYPE html>\n <!doctype html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\n </head>\n </html>\n <head>\n \t<meta charset=\"UTF-8\">\n </head>\n <body>\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm\">\n <h4>eda report: Exploratory data analysis</h4>\n </div>\n <div class=\"col-sm\">\n <h3>Inspecting dataframe of size: {size}\n </div>\n </div>\n </div>\n \t<table class=\"table table-hover\" style=\".table\">\n <thead>\n <tr style=\"font-size: 15px;\">\n <th width=\"5%\" align=\"left\" scope=\"col\">Variable Name</th>\n <th width=\"12%\" align=\"left\" scope=\"col\">Data Type</th>\n <th width=\"15%\" align=\"left\" scope=\"col\">Histogram</th>\n <th width=\"11%\" align=\"left\" scope=\"col\">Stats</th>\n <th width=\"7%\" align=\"left\" scope=\"col\">Missing NA</th>\n <th width=\"5%\" align=\"left\" scope=\"col\">Outliers</th>\n </tr>\n </thead>\n <tbody>\"\"\".format(size=self.df.size)\n\n end_page = \"\"\" \n </tbody>\n </table>\n </body>\n \"\"\"\n rows_html = []\n for i, column in enumerate(self.df.columns):\n Summary = ColumnSummary(data=self.df[column])\n datatype = Summary.data_type()\n missing = Summary.missing_values()\n stats = Summary.statistic_summary()\n outliers = Summary.outliers()\n Summary.create_histogram(i)\n html = f\"\"\"\n <tr>\n <td style=\"font-size: 15px;\" width=\"10%\" align=\"left\"> {column}</td>\n <td style=\"font-size: 15px;\"width=\"10%\" align=\"left\"> {datatype}</td>\n <td><img class=\"img-fluid\" src=\"hist_images/histogram{i}.png?{random.randint(0,\n 2e9)}\" style=\"width:800px\"> </td>\n <td style=\"font-size: 15px;\">mean: {stats.mean}<br>\n mode: {stats.mode}<br><br>\n min: {stats.min}<br>\n max: {stats.max}<br><br>\n lower-bound: {stats.lower}<br>\n upper-bound: {stats.upper}<b</td>\n <td style=\"font-size: 15px;\">{missing}</td>\n <td style=\"font-size: 15px;\">{outliers}</td>\n </tr>\n \"\"\"\n rows_html.append(html)\n\n merged_html = page + \"\".join(rows_html) + end_page\n return merged_html",
"def generate(self, diagram):",
"def template1(self):\n self.indirectobject(1, 0, \"<<\\n /Type /Catalog\\n /Outlines 2 0 R\\n /Pages 3 0 R\\n>>\")\n self.indirectobject(2, 0, \"<<\\n /Type /Outlines\\n /Count 0\\n>>\")\n self.indirectobject(3, 0, \"<<\\n /Type /Pages\\n /Kids [4 0 R]\\n /Count 1\\n>>\")\n self.indirectobject(4, 0, \"<<\\n /Type /Page\\n /Parent 3 0 R\\n /MediaBox [0 0 612 792]\\n /Contents 5 0 R\\n /Resources <<\\n /ProcSet [/PDF /Text]\\n /Font << /F1 6 0 R >>\\n >>\\n>>\")\n self.indirectobject(6, 0, \"<<\\n /Type /Font\\n /Subtype /Type1\\n /Name /F1\\n /BaseFont /Helvetica\\n /Encoding /MacRomanEncoding\\n>>\")",
"def layout(self):\n pass",
"def buildPage(self):\n args = {}\n args['valueCol'] = 'value'\n args['textCol'] = 'size'\n args['y'] = 'index'\n args['x'] = 'number'\n args['orientation'] = 'h'\n args['title'] = ''\n args['x_title'] = ''\n args['y_title'] = ''\n args['height'] = 900\n args['width'] = 900\n\n self.add_basic_layout()\n layout = hpstats.quick_numbers_panel()\n dfs = hpstats.get_db_stats_data()\n plots = []\n plots.append(hpstats.plot_store_size_components(dfs, title='DB Store Size', args=args))\n plots.append(hpstats.plot_node_rel_per_label(dfs, focus='nodes', title='Nodes per Label', args=args))\n plots.append(hpstats.plot_node_rel_per_label(dfs, focus='relationships', title='Relationships per Type', args=args))\n self.extend_layout(layout)\n self.extend_layout(plots)",
"def generateHtml(self):\n # only the master processor needs to do this\n if not self.master: return\n\n for page in self.layout.pages:\n \n # build the metric dictionary\n metrics = {}\n page.models = []\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n mname = dataset.getncattr(\"name\")\n if mname != \"Benchmark\": page.models.append(mname)\n if not dataset.groups.has_key(page.name): continue\n group = dataset.groups[page.name]\n\n # if the dataset opens, we need to add the model (table row)\n metrics[mname] = {}\n \n # each model will need to have all regions\n for region in self.regions: metrics[mname][region] = {}\n \n # columns in the table will be in the scalars group\n if not group.groups.has_key(\"scalars\"): continue\n \n # we add scalars to the model/region based on the region\n # name being in the variable name. If no region is found,\n # we assume it is the global region.\n grp = group.groups[\"scalars\"]\n for vname in grp.variables.keys():\n found = False\n for region in self.regions:\n if region in vname: \n found = True\n var = grp.variables[vname]\n name = vname.replace(region,\"\")\n metrics[mname][region][name] = Variable(name = name,\n unit = var.units,\n data = var[...])\n if not found:\n var = grp.variables[vname]\n metrics[mname][\"global\"][vname] = Variable(name = vname,\n unit = var.units,\n data = var[...])\n page.setMetrics(metrics)\n \n # write the HTML page\n f = file(os.path.join(self.output_path,\"%s.html\" % (self.name)),\"w\")\n f.write(str(self.layout))\n f.close()",
"def _do_layout(self):\n return",
"def sample_report():\n # We create a sample report\n report = lwr.Report(\"Light-weight Standalone Reports\")\n\n # Description\n report.add(lwr.P(\"The <emph>lwreport</emph> library allows to create \"\n \"standalone HTML reports extremely easily. A lot of \"\n \"different elements are available!</p>\"))\n\n # Iteratively add stuff by using add shortcut\n h1 = report.add(lwr.Heading(\"Heading 1\"))\n h2 = h1.add(lwr.Heading(\"Heading 2\"))\n h3 = h2.add(lwr.Heading(\"Heading 3\"))\n h4 = h3.add(lwr.Heading(\"Heading 4\"))\n h5 = h4.add(lwr.Heading(\"Heading 5\"))\n h5.add(lwr.P(\"Any HTML code can simply be added. <b>Pretty cool!</b>\"))\n\n # Output dictionaries (use OrderedDict to control display order)\n di = OrderedDict()\n di[\"Name\"] = \"James Bond\"\n di[\"Nationality\"] = \"British\"\n di[\"Height (m)/weight (kg)\"] = [1.8, 84]\n\n h1 = report.add(lwr.Heading(\"Dictionaries\"))\n h1.add(lwr.P(\"Dictionaries are automatically rendered as tables.\"))\n h1.add(lwr.Dict(di))\n\n # Numpy arrays\n h1 = report.add(lwr.Heading(\"Numpy arrays\"))\n h1.add(lwr.P(\"2-dimensional arrays are automatically formatted as tables!\"))\n X = np.arange(900).reshape((30, 30))\n h1.add(X)\n\n # Pandas data frame\n h1 = report.add(lwr.Heading(\"Pandas data frame\"))\n h1.add(lwr.P(\"Pandas data frames are automatically formatted as tables\"))\n X = np.arange(100).reshape((10, 10))\n df = pd.DataFrame(X, [\"R%d\" % i for i in range(10)],\n [\"C%d\" % i for i in range(10)])\n h1.add(df)\n\n # Plotly plots\n h1 = report.add(lwr.Heading(\"Plotly plots\"))\n h1.add(lwr.P(\"It's ridiculously easity to add plotly plots.\"))\n x = np.linspace(0, 10)\n s = go.Scatter(x=x, y=np.sin(x), name=\"Sine\")\n c = go.Scatter(x=x, y=np.cos(x), name=\"Cosine\")\n h1.add(go.Figure(data=[s, c]))\n data = [dict(\n type='choropleth',\n locations=[\"CHE\"],\n z=[1.0],\n autocolorscale=True, )]\n\n layout = dict(\n title='Where is Switzerland?',\n geo=dict(\n scope='world',\n projection=dict(type='Mercator'),\n showlakes=True,\n lakecolor='rgb(255, 255, 255)'), )\n\n fig = go.Figure(data=data, layout=layout)\n h1.add(fig)\n\n # Matplotlib plots\n h1 = report.add(lwr.Heading(\"Matplotlib plots\"))\n h1.add(lwr.P(\"Even matplotlib plots are rendered through plotly.\"))\n fig, ax = plt.subplots()\n x, y = np.arange(100, dtype=np.float64).reshape((2, 50))\n sc = ax.scatter(x, y)\n ax.grid()\n h1.add(fig)\n\n # Easily create a Grid with elements\n h1 = report.add(lwr.Heading(\"Grids\"))\n h1.add(lwr.P(\"The report can be split into simple grids!\"))\n for n in [2, 3, 4, 6]:\n h2 = h1.add(lwr.Heading(\"Grid of %d\" % n))\n grid = h2.add(lwr.Grid(n_cols=n))\n for i in range(2 * n):\n grid.add(lwr.Heading(\"Element %d\" % i)).add(lwr.P(\"Text %d\" % i))\n return report",
"def buildSimpleXmlDeclaration(self):\n if self.info == \"\":\n raise Exception(\"Info block empty in symbol: \"+self.name)\n # buid some html to show\n dec = '<Row>\\n'\n dec += ' <Entry>\\n'\n dec += ' <Paragraph>'+self.name+'</Paragraph>\\n'\n dec += ' </Entry>\\n'\n dec += ' <Entry>\\n'\n dec += ' <Paragraph>'+self.getPrettyType()+'</Paragraph>\\n'\n dec += ' </Entry>\\n'\n dec += ' <Entry>\\n'\n dec += ' <Paragraph>'+self.info+'</Paragraph>\\n'\n dec += ' </Entry>\\n'\n dec += '</Row>\\n'\n return dec",
"def do_layout(self):\n self.define_panel_structure()\n self.layout_selection()\n self.layout_data_list()\n self.layout_batch()\n self.layout_button()",
"def test_layout(self, gen_sch: bool = True) -> None:\n\n lay_params = self._info.first_params\n dsn_name = self._info.dsn_basename + '_TEST'\n\n print('create test layout')\n sch_name_param_list = self.create_dut_layouts([(dsn_name, lay_params)])\n\n if gen_sch:\n print('create test schematic')\n self.create_dut_schematics(sch_name_param_list, gen_wrappers=False)\n print('done')",
"def draw_layout(self):\n # start off with all the component instances\n for inst in self.design.component_instances:\n comp = self.design.components.components[inst.library_id]\n for body, attr in zip(comp.footprints[inst.footprint_index].bodies,\n inst.footprint_attributes):\n # draw the appropriate body, at the position in attr\n pos = Point(attr.x, attr.y)\n self.draw_footprint(body, pos, attr.rotation, attr.flip)\n # draw in any annotations\n for ann in attr.annotations:\n if ann.visible:\n pos = self.base_xform.chain(Point(ann.x, ann.y))\n self.canvas.text((pos.x, pos.y), ann.value,\n fill=self.options.style['annot'])\n\n for trace in self.design.traces:\n self.draw_trace(trace)",
"def design_report_header(self):\n rstr = nl() + \" \" + nl() + t('table border-collapse= \"collapse\" border=\"1px solid black\" width=100%') + nl()\n rstr += t('tr') + nl()\n row = [0, '<object type= \"image/PNG\" data= \"cmpylogoSeatAngle.png\" height=60 ></object>',\n '<font face=\"Helvetica, Arial, Sans Serif\" size=\"3\">Created with</font>' \" \" \" \" \" \" \" \" \" \" '<object type= \"image/PNG\" data= \"Osdag_header.png\" height=60 '' \" \" \" \" \" \" \"></object>']\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + row[2] + t('/td') + nl()\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Company Name\", \"detail\", text_two=self.company_name, is_row=False)\n rstr += design_summary_row(0, \"Project Title\", \"detail\", text_two=self.project_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Group/Team Name\", \"detail\", text_two=self.group_team_name, is_row=False)\n rstr += design_summary_row(0, \"Subtitle\", \"detail\", text_two=self.sub_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Designer\", \"detail\", text_two=self.designer, is_row=False)\n rstr += design_summary_row(0, \"Job Number\", \"detail\", text_two=self.job_number, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Date\", \"detail\", text_two=time.strftime(\"%d /%m /%Y\"), is_row=False)\n rstr += design_summary_row(0, \"Client\", \"detail\", text_two=self.client, is_row=False)\n rstr += t('/tr')\n rstr += t('/table') + nl() + \" \" + nl()\n\n rstr += t('hr')\n rstr += t('/hr') + nl() + \" \" + nl()\n return rstr",
"def write_tree(element, stop_element, outfile, first_time = True):\n\n headings = ['=', '-', '^', '\\'', '\\\"', '+']\n\n space = (element.level - 1) * 10\n space *= \" \"\n\n if first_time:\n print(\".. Auto-generated rst file from scan of fdsn xsd\\n\", file=outfile)\n\n for role in ('blue', 'red'): #blue doesn't need to be here atm\n print(\".. role:: %s\" % role, file=outfile)\n print(\".. role:: raw-html(raw)\\n\\t:format: html\", file=outfile)\n print(\".. role:: raw-latex(raw)\\n\\t:format: latex\", file=outfile)\n print(file=outfile)\n else:\n print(\"\\n:raw-latex:`\\\\noindent\\\\rule{\\\\textwidth}{1pt}`\\n\", file=outfile)\n\n href = element.crumb[0].lower()\n for c in element.crumb[1:]:\n href = href + \"-\" + c.lower()\n\n print(\".. _%s:\\n\" % href, file=outfile)\n\n # MTH: Hack to fix: SampleRate is not required *unless* SampleRateRatio is present:\n # Polynomial not required in Stage as in choice\n elements_in_groups = ['SampleRate', 'SampleRateRatio', 'FrequencyStart', 'FrequencyEnd',\n 'FrequencyDBVariation', 'Polynomial']\n if element.name in elements_in_groups:\n element.required = False\n\n if element.isRequired():\n print(\"<%s> :red:`required`\" % (element.name), file=outfile)\n else:\n print(\"<%s>\" % element.name, file=outfile)\n\n if element.level > len(headings):\n print(f\"level > headings: {element.level} {len(headings)} {element.name} {href}\")\n print(headings[element.level - 1]*60, file=outfile)\n\n print(\".. container:: hatnote hatnote-gray\\n\", file=outfile)\n\n crumb = element.crumb[0]\n simplecrumb = f\" <{crumb}>\"\n for c in element.crumb[1:]:\n crumb = crumb + \" :raw-html:`→`:raw-latex:`$\\\\rightarrow$` \" + c\n simplecrumb += f\" <{c}>\"\n\n if not first_time:\n print(\" .. container:: crumb\\n\", file=outfile)\n print(\" %s\\n\" % crumb, file=outfile)\n #print(\" crumb:%s\\n\" % crumb, file=outfile)\n\n description = \"\"\n stored_description_default=False\n level_char = element.crumb[0][0].upper()\n\n if element.annotation:\n ann_has_level_choice = list(filter(lambda a: a.tag == \"levelDesc\" and a.get(\"LevelChoice\") == level_char , element.annotation))\n if len(ann_has_level_choice) != 0:\n ann_list = ann_has_level_choice\n else:\n ann_list = list(filter(lambda a: a.tag != \"levelDesc\" , element.annotation))\n ann_list_lines = []\n for ann in ann_list:\n lines = ann.text.strip().replace('\\t', ' ').split(\"\\n\")\n if len(lines) > 1:\n # second non-empty line often has space indent, first can be on element line\n leading_whitespace=0\n for l in lines[1:]:\n if len(l.strip()) > 0:\n leading_whitespace = len(l)-len(l.lstrip(' '))\n break\n white = \" \"*leading_whitespace\n lines[0] = white+lines[0].strip()\n for l in lines:\n if len(l.strip()) != 0:\n if l[:leading_whitespace] != white:\n raise ValueError(f\"removing more spaces than there are! {leading_whitespace} from {l}\")\n l = l[leading_whitespace:]\n else:\n l = \"\"\n ann_list_lines.append(f\" {l}\\n\")\n else:\n ann_list_lines.append(f\" {lines[0]}\\n\")\n # blank line between annotations\n ann_list_lines.append(\"\\n\")\n# description = \" \".join(map(lambda note: \" \".join(note.text.split()) , ann_list))\n description = \"\".join(ann_list_lines)\n\n num_el_attr_warnings = len(element.warning)\n for attrib in element.attributes:\n num_el_attr_warnings += len(attrib.warning)\n if num_el_attr_warnings > 0:\n with open(\"warnings.rst\", \"a\") as warnfile:\n for warning in element.warning:\n single_line_warning = \" \".join(warning.text.strip().split())\n print(\" .. 
admonition:: Warning, Future Change\\n\", file=outfile)\n print(f\" <{element.name}>: {single_line_warning}\\n\", file=outfile)\n\n print(f\"\\n\\n\", file=warnfile)\n print(f\" - {simplecrumb} : \\n\", file=warnfile)\n print(\" .. admonition:: Warning, Future Change\\n\", file=warnfile)\n print(f\" <{element.name}>: {single_line_warning}\\n\", file=warnfile)\n for attrib in element.attributes:\n for warning in attrib.warning:\n single_line_warning = \" \".join(warning.text.strip().split())\n print( \" .. admonition:: Warning, Future Change\\n\", file=outfile)\n print(f\" {attrib.name}: {single_line_warning}\\n\", file=outfile)\n\n print(f\"\\n\\n\", file=warnfile)\n print(f\" - {simplecrumb} {attrib.name} : \\n\", file=warnfile)\n print(\" .. admonition:: Warning, Future Change\\n\", file=warnfile)\n print(f\" {attrib.name}: {single_line_warning}\\n\", file=warnfile)\n\n\n if element.type:\n print(\" .. container:: type\\n\", file=outfile)\n latex_words=\"\\t\\t\\t.. only:: latex\\n\\n\"\n html_words=\"\\t\\t\\t.. only:: html\\n\\n\"\n\n latex_words+=\"\\t\\t\\t\\t\\tcontent type: :ref:`%s<type-glossary>`\" % (element.type)\n html_words+=\"\\t\\t\\t\\t\\tcontent type: `%s <appendices.html#glossary-%s>`_\" % (element.type,element.type.lower())\n if element.range_string:\n splitElements=element.range_string.split(\" \")\n\n result=[]\n for el in splitElements:\n if (el[0].isalpha() or el[0].isdigit() or el[0]==\"-\"):\n result.append(el)\n else:\n result.append(\":math:`%s`\" % el)\n result=\" \".join(result)\n\n latex_words+=\"\\n\\n\\t\\t\\t\\t\\trange: \"+result+\"\\n\"\n html_words+=\"\\n\\n\\t\\t\\t\\t\\trange: \"+result+\"\\n\"\n\n else:\n latex_words+=\"\\n\"\n html_words+=\"\\n\"\n\n print(latex_words, file=outfile)\n print(html_words, file=outfile)\n\n if len(description) > 0:\n\n # If the annotation involves multiple options depending on its location\n\n description=urlInserter(description)\n description=mathBlock(description,element.level)\n\n print(\" .. container:: description\\n\", file=outfile)\n print(\"%s\\n\" % description, file=outfile)\n\n for example in element.example:\n exampleStr = \"\"\n if isinstance(example, ElementTree.Element):\n if example.get('ElementChoice') is not None and element.parent is not None:\n if example.get('ElementChoice') != element.parent.name:\n # skip this example as wrong level\n continue\n if example.get('LevelChoice') is not None and element.parent is not None:\n if example.get('LevelChoice') != level_char:\n # skip this example as wrong level\n continue\n if example.tag == \"example\":\n for ee in example:\n exampleStr += ElementTree.tostring(ee, encoding='unicode', method='xml')\n else:\n exampleStr = ElementTree.tostring(example, encoding='unicode', method='xml')\n else:\n exampleStr = example\n exampleStr = exampleStr.strip()\n print(\" .. container:: example\\n\", file=outfile)\n if exampleStr.find('\\n') != -1:\n # multiline example\n print(\" **Example**::\\n\", file=outfile)\n lines = exampleStr.splitlines()\n for l in lines:\n print(f\" {l.rstrip()}\", file=outfile)\n else:\n if (exampleStr[-1]==\">\" or exampleStr[-1]==\".\"):\n print(\" **Example**: %s\\n\" % exampleStr, file=outfile)\n else:\n print(\" **Example**: %s.\\n\" % exampleStr, file=outfile)\n\n if element.attributes:\n print(\"\\n\\n\", file=outfile)\n print(f\" **Attributes of <{element.name}>**: \\n\", file=outfile)\n print(\" .. tabularcolumns::|l|l|l|1|1| \\n\", file=outfile)\n print(\" .. 
csv-table::\", file=outfile)\n print(\" :class: rows\", file=outfile)\n print(\" :escape: \\ \", file=outfile)\n print(' :header: \"attribute\", \"type\", \"required\", \"description\", \"example\"', file=outfile)\n print(\" :widths: auto\\n\", file=outfile)\n\n for attrib in element.attributes:\n required = \"no\"\n if attrib.required == \"required\":\n required = \":red:`yes`\"\n\n description = \"\"\n example = \"\"\n if len(attrib.example) != 0:\n # use first example unless ElementChoice is attribute of example\n DQ='\"'\n example = f\"{attrib.name}=\\{DQ}{attrib.example[0].text}\\{DQ}\"\n for ex in attrib.example:\n if ex.get(\"ElementChoice\") is not None and ex.get(\"ElementChoice\") == element.name:\n example = f\"{attrib.name}=\\{DQ}{ex.text}\\{DQ}\"\n if ex.get(\"LevelChoice\") is not None and ex.get(\"LevelChoice\") == level_char:\n example = f\"{attrib.name}=\\{DQ}{ex.text}\\{DQ}\"\n if len(attrib.annotation) != 0:\n ann_has_level_choice = list(filter(lambda a: a.tag == \"levelDesc\" and a.get(\"LevelChoice\") == level_char , attrib.annotation))\n if len(ann_has_level_choice) != 0:\n ann_list = ann_has_level_choice\n else:\n ann_list = list(filter(lambda a: a.tag != \"levelDesc\" , attrib.annotation))\n description = \" \".join(map(lambda note: \" \".join(note.text.split()) , ann_list))\n\n description = \" \".join(description.split())\n print(\" **%s**, :ref:`%s<type-glossary>`, %s, \\\"%s\\\", \\\"%s\\\" \" % (attrib.name, attrib.type,required, description, example), file=outfile)\n\n print(file=outfile)\n\n if element.children:\n print(\"\\n\\n\", file=outfile)\n print(f\" **Sub Elements of <{element.name}>**: \\n\", file=outfile)\n print(\" .. tabularcolumns::|l|l|l|l| \\n\", file=outfile)\n print(\" .. csv-table::\", file=outfile)\n print(\" :class: rows\", file=outfile)\n print(\" :escape: \\ \", file=outfile)\n print(' :header: \"element\", \"type\", \"number\"', file=outfile)\n print(\" :widths: auto\\n\", file=outfile)\n for child in element.children:\n parentRef = \"\"\n if child.name != stop_element:\n for c in element.crumb:\n parentRef += c+\"-\"\n required = \"\"\n if child.isRequired():\n required = \":red:`required`\"\n if child.max_occurs == \"unbounded\" or child.max_occurs is None:\n required = \":red:`required, many`\"\n else:\n if child.min_occurs == 0:\n if child.max_occurs == \"unbounded\" or child.max_occurs is None:\n required = \"optional, many\"\n else:\n required = \"optional\"\n elif child.max_occurs == 1:\n required = \"optional\"\n elif child.max_occurs == \"unbounded\" or child.max_occurs is None:\n required = \"many\"\n else:\n required = f\"{child.min_occurs}/{child.max_occurs}\"\n\n range = \"\"\n if child.range_string is not None:\n range = child.range_string\n type = \"\"\n if child.type is not None:\n type = child.type\n\n #print(f\" **{child.local_name}**, :ref:`{child.local_name}`, {child.type}, \\\"{child.required}\\\", \\\"{child.range_string}\\\" \", file=outfile)\n print(f\" :ref:`\\<{child.name}\\><{parentRef}{child.name}>`, {type}, \\\"{required}\\\" \", file=outfile)\n print(\"\\n\\n\", file=outfile)\n\n for child in element.children:\n if child.name != stop_element:\n write_tree(child, stop_element, outfile, first_time = False)\n\n return",
"def _print_structure(self):\n outstr = str(self._element) + '(' + str(self.get_height()) + ')['\n if self._leftchild:\n outstr = outstr + str(self._leftchild._element) + ' '\n else:\n outstr = outstr + '* '\n if self._rightchild:\n outstr = outstr + str(self._rightchild._element) + ']'\n else:\n outstr = outstr + '*]'\n if self._parent:\n outstr = outstr + ' -- ' + str(self._parent._element)\n else:\n outstr = outstr + ' -- *'\n print(outstr)\n if self._leftchild:\n self._leftchild._print_structure()\n if self._rightchild:\n self._rightchild._print_structure()",
"def generate_siaf_detector_layout():\n\n VIdlParity = -1\n layout = Table(dtype=['S100', 'S100', 'f4', 'i4', 'i4'] ,names=('InstrName', 'AperName', 'DetSciYAngle', 'DetSciParity', 'VIdlParity'))\n for instrument in 'NIRCam FGS NIRISS NIRSpec MIRI'.split():\n if instrument == 'NIRCam':\n for sca_name in 'A1 A3 A5 B2 B4'.split():\n layout.add_row([instrument.upper(), 'NRC{}_FULL'.format(sca_name), 0, -1, VIdlParity])\n for sca_name in 'A2 A4 B1 B3 B5'.split():\n layout.add_row([instrument.upper(), 'NRC{}_FULL'.format(sca_name), 180, -1, VIdlParity])\n for sca_name in ['NRCA2_FULL_WEDGE_RND','NRCA2_FULL_WEDGE_BAR','NRCA4_FULL_WEDGE_RND','NRCA4_FULL_WEDGE_BAR']:\n layout.add_row([instrument.upper(), '{}'.format(sca_name), 180, -1, VIdlParity])\n for sca_name in ['NRCA1_FULL_WEDGE_RND','NRCA1_FULL_WEDGE_BAR','NRCA3_FULL_WEDGE_RND','NRCA3_FULL_WEDGE_BAR','NRCA5_FULL_WEDGE_RND','NRCA5_FULL_WEDGE_BAR']:\n layout.add_row([instrument.upper(), '{}'.format(sca_name), 0, -1, VIdlParity])\n elif instrument == 'NIRISS':\n for sca_name in ['NIS_CEN']:\n layout.add_row([instrument, sca_name, 180, 1, VIdlParity])\n elif instrument == 'MIRI':\n for sca_name in ['MIRIM_FULL']:\n layout.add_row([instrument, sca_name, 0, 1, VIdlParity])\n elif instrument == 'NIRSpec':\n for sca_name in ['NRS1_FULL']:\n layout.add_row([instrument.upper(), sca_name, 0, 1, VIdlParity])\n for sca_name in ['NRS2_FULL']:\n layout.add_row([instrument.upper(), sca_name, 180, 1, VIdlParity])\n elif instrument == 'FGS':\n for sca_name in ['FGS1_FULL']:\n layout.add_row([instrument, sca_name, 180, 1, VIdlParity])\n for sca_name in ['FGS2_FULL']:\n layout.add_row([instrument, sca_name, 0, -1, VIdlParity])\n\n layout_file = os.path.join(JWST_SOURCE_DATA_ROOT, 'siaf_detector_layout.txt')\n\n layout.pprint()\n\n comments = []\n comments.append('SIAF detector layout definition file.'.format(instrument))\n comments.append('')\n comments.append('These apertures act as parent apertures of all other SI apertures and their parameters are thus inherited.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n layout.meta['comments'] = comments\n layout.write(layout_file, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False)",
"def _generate_report(self):\n total_duration = 0.0\n total_nb_tests = 0\n total_nb_success = 0\n nb_modules = 0\n payload = []\n\n res_table = prettytable.PrettyTable(\n padding_width=2,\n field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])\n res_table.align['Module'] = \"l\"\n res_table.align['Duration'] = \"r\"\n res_table.align['Success'] = \"r\"\n\n # for each scenario we draw a row for the table\n for item in self.summary:\n if item['task_status'] is True:\n nb_modules += 1\n total_duration += item['overall_duration']\n total_nb_tests += item['nb_tests']\n total_nb_success += item['nb_success']\n try:\n success_avg = 100 * item['nb_success'] / item['nb_tests']\n except ZeroDivisionError:\n success_avg = 0\n success_str = f\"{success_avg:0.2f}%\"\n duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(item['overall_duration']))\n res_table.add_row([item['test_name'], duration_str,\n item['nb_tests'], success_str])\n payload.append({'module': item['test_name'],\n 'details': {'duration': item['overall_duration'],\n 'nb tests': item['nb_tests'],\n 'success rate': success_str,\n 'success': item['success'],\n 'failures': item['failures']}})\n\n total_duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(total_duration))\n try:\n self.result = 100 * total_nb_success / total_nb_tests\n except ZeroDivisionError:\n self.result = 100\n success_rate = f\"{self.result:0.2f}\"\n success_rate_str = str(success_rate) + '%'\n res_table.add_row([\"\", \"\", \"\", \"\"])\n res_table.add_row([\"TOTAL:\", total_duration_str, total_nb_tests,\n success_rate_str])\n\n LOGGER.info(\"Rally Summary Report:\\n\\n%s\\n\", res_table.get_string())\n LOGGER.info(\"Rally '%s' success_rate is %s%% in %s/%s modules\",\n self.case_name, success_rate, nb_modules,\n len(self.summary))\n self.details['summary'] = {'duration': total_duration,\n 'nb tests': total_nb_tests,\n 'nb success': success_rate}\n self.details[\"modules\"] = payload",
"def layout(self):\n\n # Initialise all plots and widgets\n widgets = self.widgets(width=200)\n\n plot_width = 500\n sizing_mode = 'stretch_height'\n self.init_grid_plot()\n self.init_line_plot(width=plot_width, mode=sizing_mode)\n self.init_distribution_plot(width=plot_width, mode=sizing_mode)\n self.init_school_composition_plot(width=plot_width, mode=sizing_mode)\n self.init_neighbourhood_composition_plot(width=plot_width,\n mode=sizing_mode)\n self.init_distance_plot(width=plot_width, mode=sizing_mode)\n\n # Row with widgets\n if self.params['case'].lower() == 'lattice':\n width = 420\n split = int(len(widgets) / 2.) + 1\n widget_row = row(\n [column(widgets[:split]),\n column(widgets[split:])],\n width=width)\n else:\n width = 210\n widget_row = column(widgets, width=width)\n\n desc = Div(text=open(join(dirname(__file__),\n \"description.html\")).read(),\n margin=0)\n # Column with all the controls and description\n first_col = column(widget_row, width=width, sizing_mode='fixed')\n\n # Column with the grid/map\n second_col = column([\n desc,\n row(self.buttons(), sizing_mode='stretch_width'),\n row(self.grid, sizing_mode='stretch_width')\n ],\n sizing_mode='stretch_width')\n\n # Column with the plots\n third_col = column([\n self.plot, self.distribution_plot, self.distance_plot,\n self.school_composition_plot, self.neighbourhood_composition_plot\n ])\n\n vis_layout = gridplot([[first_col, second_col, third_col]],\n toolbar_location=None)\n\n self.doc.add_root(vis_layout)\n self.doc.title = \"COMPASS\"",
"def build_html(self, view, origin=None, drawElements=True):\n b = view.context.b\n b.addHtml(\"\"\"\n <!-- colored section -->\n <section id=\"features\" class=\"blueelement vertical-padding\">\n <div class=\"wrapper clearfix\">\n \n <h1>Some things in rows of 3 columns</h1>\n \n <div class=\"row vertical-padding\"> \n <div class=\"grid_4\"> \n <h2>Something</h2>\n <img src=\"images/pagebot_cafe_working.jpg\" />\n <h4>This is a subhead</h4>\n <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p> </div>\n \n <div class=\"grid_4\"> \n <h2>Something else</h2>\n <img src=\"images/pagebot_cafe_working.jpg\" />\n <h4>This is a subhead</h4>\n\n <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p></div>\n \n <div class=\"grid_4\"> \n <h2>Something else</h2>\n <img src=\"images/pagebot_cafe_working.jpg\" />\n <h4>This is a subhead</h4>\n\n <p>Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</p></div>\n <p><a href=\"#\" class=\"buttonlinkdiap\">Use Pagebot</a> </p>\n \n \n </div>\n </div><!-- #end div .wrapper -->\n </section><!-- #end colored section -->\n \"\"\")",
"def draw_design(self):\n if len(self.design.trace_segments) > 0:\n self.draw_layout()\n else:\n self.draw_schematic()",
"def buildFullXmlDeclaration(self, title=0, offset=0, size=None):\n # prepare some variables, what's the depth below us\n if size == None:\n size = self.getNumGenerations() + 1\n # build the name\n if self.name == \"\":\n name = \"anonymous %s\" % (self.prettyType)\n if self.inst != \"\":\n name += \" instance %s\" % (self.inst)\n else:\n name = \"%s %s\" % (self.prettyType, self.name)\n\n # if we're the title, start the chapter\n if title:\n dec = '<Chapter anchor=\"'+self.link+'.0\">\\n'\n dec += ' <!--container: %s (%d)-->\\n' % (self.name, self.getNumGenerations())\n dec += ' <Title><Code>%s</Code> %s</Title>\\n' % (self.prettyType, self.name)\n dec += ' <Description anchor=\"%s.1\">\\n' % self.link\n else:\n dec = ' <Row>\\n'\n for i in range(1, offset):\n dec += ' <Entry colname=\"col%d\" rowsep=\"0\">\\n' % i\n dec += ' <Paragraph></Paragraph>\\n </Entry>\\n'\n dec += ' <Entry colname=\"col%d\" morerows=\"%d\">\\n' % (offset, self.getNumElements()-1)\n dec += ' <Paragraph>\\n'\n dec += ' <Emphasis>%s</Emphasis>\\n' % name\n dec += ' </Paragraph>\\n'\n dec += ' </Entry>\\n'\n dec += ' <Entry namest=\"col%d\" nameend=\"col%d\">\\n' % (offset+1, size)\n\n dec += ' <Paragraph>%s</Paragraph>\\n' % self.info\n dec += ' <Paragraph>Contains the following members:</Paragraph>\\n'\n\n if title:\n dec += xmlTableStart(\"CommunicationsStructure.%s\" % self.name, size, [])\n else:\n dec += ' </Entry>\\n'\n dec += ' </Row>\\n'\n\n for mem in self.members:\n dec += mem.buildFullXmlDeclaration(offset=offset+1, size=size)\n\n if title:\n dec += xmlTableEnd(name)\n dec += ' </Description>\\n'\n dec += '</Chapter>\\n'\n return dec",
"def create_vuln_report():",
"def xlayout (self):\n\n # Generate template tokens for artifacts\n for a in self.arts.artifacts:\n if 'bldtype' not in a.info:\n a.info['bldtype'] = 'release'\n\n a.info['variant'] = '%s-%s-%s' % (a.info.get('plat'),\n a.info.get('arch'),\n a.info.get('bldtype'))\n if 'toolset' not in a.info:\n a.info['toolset'] = 'v120'\n\n nuget_layout = {\n # Build\n 'build/native/lib/${plat}/${arch}/${variant}/${toolset}/': 'static',\n 'build/native/include/librdkafka/': 'include',\n\n # Runtime\n 'runtimes/${plat}-${arch}/native/': 'dynamic',\n\n # All\n 'content/docs/': 'doc'\n }\n\n layout = self.layout(nuget_layout)\n\n errors = 0\n print(' %s layout:' % self)\n for path, afs in layout.items():\n print(' %s provided by:' % path)\n for a, f in afs:\n print(' %s from artifact %s (and %d more)' % (f, a.fname, len(afs)-1))\n break\n if len(afs) == 0:\n print(' ERROR: no artifacts found')\n errors += 1\n print('')\n\n if errors > 0:\n raise Exception('Layout not satisfied by collected artifacts: %d missing' % errors)\n\n return layout",
"def build(self):\n self.title = 'Processamento Digital de Imagens'\n self.main_layout = MainLayout()\n return self.main_layout",
"def init_report(self, report):\n super(InformedPlannerHierarchy, self).init_report(report)\n if True:\n self.cover.draw_embeddings(report.section('embeddings'))\n \n self.display_distancetree(report.section('distancetree'))",
"def model_layout(model_name):\n\n # dict to hold layouts for each section\n layouts = OrderedDict()\n\n # instantiate model from name\n model = Registry(\"models\")[model_name]\n\n model_title = html.Div(\n className='row',\n children=[\n html.H3(model.title),\n ]\n )\n\n # TODO: costly, should just construct subgraph directly?\n\n subgraph = nx.ego_graph(propnet_nx_graph, model, undirected=True)\n subgraph_data = graph_conversion(subgraph,\n show_symbol_labels=True, show_model_labels=True)\n if len(subgraph_data) < 50:\n graph_config = GRAPH_LAYOUT_CONFIG.copy()\n graph_config['maxSimulationTime'] = 1500\n else:\n graph_config = GRAPH_LAYOUT_CONFIG\n\n layouts['Graph'] = html.Div(\n Cytoscape(\n id=\"model_graph\",\n elements=subgraph_data,\n stylesheet=GRAPH_STYLESHEET,\n layout=graph_config,\n **GRAPH_SETTINGS['model_symbol_view']\n )\n )\n\n if model.categories:\n tags = html.Ul(\n className=\"tags\",\n children=[html.Li(tag_, className=\"tag\")\n for tag_ in model.categories]\n )\n layouts['Tags'] = tags\n\n if model.references:\n markdown = []\n for ref in model.references:\n try:\n markdown.append(references_to_markdown(ref))\n except ValueError as ex:\n logger.error(\"Error with reference:\\n{}\\nReference:\\n{}\".format(ex, ref))\n references = html.Div([dcc.Markdown(ref)\n for ref in markdown])\n\n layouts['References'] = references\n\n symbols = html.Div(\n children=[\n html.Div(\n className='row',\n children=[\n html.Div(\n className='two columns',\n children=[\n str(symbol)\n ]\n ),\n html.Div(\n className='ten columns',\n children=[\n dcc.Link(Registry(\"symbols\")[prop_name].display_names[0],\n href='/property/{}'.format(prop_name))\n ]\n )\n ]\n )\n for symbol, prop_name in model.variable_symbol_map.items()\n ]\n )\n\n layouts['Symbols'] = symbols\n\n layouts['Description'] = dcc.Markdown(model.description)\n\n if model.validate_from_preset_test():\n sample_data_header = html.Div(\n className='row',\n children=[\n html.Div(\n className='five columns',\n style={'text-align': 'center'},\n children=[\n html.H4('Input(s)')\n ]\n ),\n html.Div(\n className='two columns',\n style={'text-align': 'center'},\n children=[\n html.H4('->')\n ]\n ),\n html.Div(\n className='five columns',\n style={'text-align': 'center'},\n children=[\n html.H4('Output(s)')\n ]\n )\n ]\n )\n\n layouts['Sample Code'] = html.Div([\n dcc.Markdown(\"Propnet models can be called directly, with propnet acting \"\n \"as a library of tested materials science models. Sample code for this \"\n \"model is as follows:\"),\n dcc.Markdown(f\"```{model.example_code}```\")\n ])\n\n sublayouts = []\n for title, layout in layouts.items():\n sublayouts.append(html.H6(title))\n sublayouts.append(layout)\n\n return html.Div([\n model_title,\n html.Br(),\n *sublayouts,\n html.Br(),\n #dcc.Link('< Back to Models', href='/model'),\n #html.Br(),\n dcc.Link('< Back', href='/explore')\n ])",
"def _generate_report(self):\n from niworkflows.viz.utils import plot_registration\n NIWORKFLOWS_LOG.info('Generating visual report')\n\n anat = load_img(self._anat_file)\n contour_nii = load_img(self._contour) if self._contour is not None else None\n\n if self._mask_file:\n anat = unmask(apply_mask(anat, self._mask_file), self._mask_file)\n mask_nii = load_img(self._mask_file)\n else:\n mask_nii = threshold_img(anat, 1e-3)\n\n n_cuts = 7\n if not self._mask_file and contour_nii:\n cuts = cuts_from_bbox(contour_nii, cuts=n_cuts)\n else:\n cuts = cuts_from_bbox(mask_nii, cuts=n_cuts)\n\n # Call composer\n compose_view(\n plot_registration(anat, 'fixed-image',\n estimate_brightness=True,\n cuts=cuts,\n contour=contour_nii,\n compress=self.inputs.compress_report),\n [],\n out_file=self._out_report\n )"
]
| [
"0.69769275",
"0.6473278",
"0.63054246",
"0.60292476",
"0.59565306",
"0.5911801",
"0.5869038",
"0.5819134",
"0.57823735",
"0.5755476",
"0.57442385",
"0.56819475",
"0.56775093",
"0.5628919",
"0.56264186",
"0.5593363",
"0.5590153",
"0.5521933",
"0.55100393",
"0.5480658",
"0.5435169",
"0.5432787",
"0.5424062",
"0.54160154",
"0.5403081",
"0.5393842",
"0.5388043",
"0.5369219",
"0.535319",
"0.53223914"
]
| 0.67525214 | 1 |
Test TextReport API and id/naming handling | def test_textreport_gen():
s_df = gen_df()
# Simple
report = dp.TextReport("Text-3")
assert_text_report(report, 1)
# multiple blocks
report = dp.TextReport("Text-1", "Text-2", s_df)
assert_text_report(report, 3)
# empty - raise error
with pytest.raises(DPError):
report = dp.TextReport()
assert_text_report(report, 0)
# mixed naming usage
report = dp.TextReport("text-1", dp.Text("Text-4", name="test"))
assert_text_report(report, 2)
# arg/kwarg naming tests
report = dp.TextReport(
dp.Text("Text-arg-1"),
dp.Text("Text-arg-2", name="text-arg-2"),
t1="Text-1",
t2=dp.Text("Text-2"),
t3=dp.Text("Text-3", name="overwritten"),
)
assert_text_report(report, 5, ["text-1", "text-arg-2", "t1", "t2", "t3"])
# dict/list test
report = dp.TextReport(blocks=dict(t1="text-1", t2=dp.Text("Text-2"), t3=dp.Text("Text-3", name="overwritten")))
assert_text_report(report, 3, ["t1", "t2", "t3"])
report = dp.TextReport(blocks=["Text-1", dp.Text("Text-2"), dp.Text("Text-3", name="text-3")])
assert_text_report(report, 3, ["text-1", "text-2", "text-3"]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_basic_usage(self):\n self._test_reports_helper({}, [\"report.txt\"])",
"def run_test_ner():\n # This note is a fake report\n\n text = \"\"\"\nRecord date: 2063-12-13\n\n\n\n\nNAME: Doe, Jane \nMRN: 98765432\n\nThe patient is here as a walk-in. Her spouse is present.\n\nPatient says she is status post a fall this past Monday at home. Says that she seems\nto have lost her balance while opening a door at home. Denies prodromal\nsymptoms including shortness of breath, dizziness, or palpitations. There was no\nloss of consciousness. Patient was seen in the emergency room that day for trauma\nto the head. Had a negative CT of her head and had an x-ray of her pelvis, as she\ncomplained of some hip pain. Both studies were negative. Patient was then sent\nhome with Roxicet. Patient was seen in the walk-in clinic at CPI yesterday, after\nnoticing a swollen tongue and some difficulty swallowing. Patient was immediately\ntold to stop both her Roxicet and Monopril and was treated for angioedema with\nZantac and Claritin. Today patient has no further throat tightness or tongue\nswelling. She is, however, uncomfortable continuing the Effexor for unclear\nreasons and wants to stop it.\n\nPatient is also complaining of severe low back pain, which she says has persisted\nsince her fall on Monday. Did not have an x-ray of the area. Says it is very painful\nto sit. Roxicet was not helping. She is off her Ultram as well.\n\nPHYSICAL EXAM: WD/overweight female in NAD. Vital signs as per LMR,\nWNL. Weight 213 pounds. \n\nLungs - clear bilateral breath sounds. Cor - RRR, S1, S2, without murmur or S3\nnoted. Back - slightly bent gait. Tenderness over lower sacrum. No asymmetry or\nobvious deformity. \n\nX-ray of sacrum 01/19/64 - could not see fracture as per my reading.\n\nASSESSMENT AND PLAN: \n\n(1) Angioedema. Agree that most likely culprits are Roxicet since it was started\n only two days ago, and ACE inhibitors which have been associated with\n angioedema. Currently on Lopressor and Dyazide, patient's blood pressure\n is WNL. So will follow for now.\n\n(2) Low back pain. Reviewed the possibility of a coccygeal fracture for which\n treatment would not be any different than pain management and a donut\n pillow. Patient was given a prescription for the pillow. She was told to look\n out for signs of nerve compression and to go to the ER if she experienced\n any perianal numbness or incontinence. Will notify patient by phone if x-ray\n shows a fracture. \n\n(3) Depression. Did not feel I was able to speak freely in front of the patient's\n spouse. Had been put on the Effexor by Dr. Zeman. Advised patient to let\n her psychiatrist know that we are tapering her off of the Effexor. Will take\n 25 mg q.o.d. for the next week. \n\n(4) Pain control. Will switch from Vicodin to Vicodin ES 1 q. 6 as needed. 
\n Knows to go to ER if experiences any symptoms consistent with angioedema.\n\nReturn to clinic in approximately one month.\n\n\n\nSusan Ullom, M.D.\n\nSU/utz/rankin\n\nAug-10-2018\n\n15-Dev-2013\n\nJan 12 2003\n\nJan 14\n\nJanuary\n\n\"\"\"\n\n nlp = spacy.load(\"en_core_web_md\")\n # this is needed to override the tokenization and keep dash and slash words ( aka dates , togeather )\n # https://spacy.io/usage/linguistic-features#native-tokenizers\n infix_re = re.compile(r'''[-/,]''')\n nlp.tokenizer = custom_tokenizer(nlp, infix_re)\n # We use EntityRuler instead of matcher = Matcher(nlp.vocab, validate=True)\n # because it handles overlapping entities .\n # To overwrite overlapping entities, you can set overwrite_ents=True on initialization\n # https://spacy.io/usage/rule-based-matching#entityruler-usage\n # the example=\"\" in this jsonL file is not needed , but added as a placeholder for documentation on the\n # pattern being matched. - GK\n ruler = EntityRuler(nlp, overwrite_ents=True).from_disk(\"./spacy_patterns.jsonl\")\n # not needed , but good ref : DATE = nlp.vocab.strings['DATE']\n\n # See https://spacy.io/usage/rule-based-matching#entityruler-files\n # Moved to JSONL file\n # e_patterns = [\n # # MM/DD/YYYY and YYYY/MM/DD\n # {\"label\": \"DATE1\", \"pattern\": [{'IS_DIGIT': True}, {'ORTH': '/'}, {'IS_DIGIT': True}, {'ORTH': '/'}, {'IS_DIGIT': True}]},\n # # MM-DD-YYYY and YYYY-MM-DD\n # {\"label\": \"DATE1\", \"pattern\": [{'IS_DIGIT': True}, {'ORTH': '-'}, {'IS_DIGIT': True}, {'ORTH': '-'}, {'IS_DIGIT': True}]},\n # # MM/DD and YYYY/MM\n # {\"label\": \"DATE1\", \"pattern\": [{'IS_DIGIT': True}, {'ORTH': '/'}, {'IS_DIGIT': True}]},\n # ]\n # ruler.add_patterns(e_patterns)\n # ruler.to_disk(\"./spacy_patterns.jsonl\")\n nlp.add_pipe(ruler)\n doc = nlp(text)\n\n for ent in doc.ents:\n print ( f\"ENTITY: {ent.text} with label: {ent.label_} from {ent.start_char} to {ent.end_char}\" )",
"def test_test_report(self):\n self.__opener.contents = '''<Report><Doc><Summary failed=\"1\" passed=\"2\"/></Doc></Report>'''\n self.assertEqual(1, self.__uft.failed_tests('url'))\n self.assertEqual(2, self.__uft.passed_tests('url'))\n self.assertEqual(0, self.__uft.skipped_tests('url'))",
"def reports(self):\r\n actual_text, ids, eng_list = self.util.get_text_from_xml(self.string_xml, \"Reports\", \"trans-unit\",\r\n Config.selected_language.strip())\r\n text_index = 0\r\n actual_text2 = []\r\n for str1 in actual_text:\r\n if \"<br>\" in str1:\r\n str_li = str1.split(\"<br>\")\r\n for i in str_li:\r\n actual_text2.append(i)\r\n else:\r\n actual_text2.append(str1)\r\n xpath = self.util.read_xpath_list_from_xml(self.object_repo, \"Reports\", self.my_object)\r\n self.object.click(self.util.client, xpath[0]['zone'],\r\n xpath[0]['xpath'],\r\n xpath[0]['index'],\r\n xpath[0]['comment'],\r\n 1, self.logger_name)\r\n self.place_holder(xpath, 1, actual_text, text_index, ids, eng_list)\r\n text_index += 1\r\n\r\n xpath_questions = self.util.read_xpath_list_from_xml(self.object_repo, \"ReportsQues\",\r\n self.my_object)\r\n for loop_index in range(len(xpath_questions)):\r\n pixel = self.dev.p2cy(self.util.client, 15)\r\n self.click(xpath_questions, loop_index)\r\n self.dev.swipe(self.util.client, \"Down\", pixel, 300)\r\n # self.object.touch_down(self.util.client, xpath[1]['zone'], xpath[1]['xpath'], xpath[1][\r\n # 'index'])\r\n # #self.object.touch_move(self.util.client, xpath[2]['zone'], xpath[2]['xpath'],\r\n # xpath[2]['index'])\r\n # self.object.touch_up(self.util.client)\r\n string_inzone = self.object.get_text(self.util.client,\r\n \"WEB\") # this method gets all string in the zone\r\n string_list = string_inzone.splitlines()\r\n string_list = self.remove_empty_lines(\r\n string_list) # this method removes string with empty lines line from list\r\n\r\n for loop_index in range(max(len(actual_text), len(string_list))):\r\n try:\r\n if actual_text2[text_index] and string_list[loop_index]:\r\n self.logger.info(\"Testing StringID == \" + str(ids[text_index]))\r\n self.logger.info(\"English Text == \" + eng_list[text_index])\r\n self.util.text_compare2(self.common, actual_text2[text_index], string_list[loop_index],\r\n ids[text_index],\r\n self.logger_name)\r\n text_index += 1\r\n except:\r\n print \"value error\"\r\n self.click(xpath, 2)",
"def setUp(self):\n super().setUp()\n self.report = {\n \"report_uuid\": \"report_uuid\",\n \"title\": \"Report\",\n \"subjects\": {\"subject_uuid\": {\"name\": \"Subject\", \"type\": \"software\", \"metrics\": {}}},\n }",
"def test_json_report(self):\n self._test_reports_helper({\"--json-report\": \"\"}, [\"report.json\"])",
"def test_report_resource(client, app):\n with app.app_context():\n r = client.get(\"/reports/1\")\n assert r.status_code == 200\n assert \"Organization: Dunder Mifflin\" in r.get_data(as_text=True)\n\n # Test Invalid reports\n r = client.get(\"/reports/111\")\n assert r.status_code == 404",
"def setUp(self):\n self.report = dict(title=\"Report 1\", url=\"https://report1\")\n self.data_model = dict(\n metrics=dict(metric_type=dict(name=\"type\")),\n sources=dict(\n quality_time=dict(\n parameters=dict(\n status=dict(\n api_values={\n \"target met (green)\": \"target_met\",\n \"near target met (yellow)\": \"near_target_met\",\n \"target not met (red)\": \"target_not_met\",\n \"technical debt target met (grey)\": \"debt_target_met\",\n \"unknown (white)\": \"unknown\",\n }\n )\n )\n )\n ),\n )",
"def __generate_flowcell_report_text__(self,config,mockdb,report_type=\"subset_report\"):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n dictionary.update({k:str(v)})\n pdf_report = initialize_standard_doc(self.report_pdf)\n pdf_elements = []\n outlier_table = produce_outlier_table(config,mockdb,self.current_report)\n if outlier_table is None:\n template_subject = os.path.join(config.get('Common_directories','template'),config.get('Flowcell_reports_email_templates',report_type + '_subject'))\n template_body = os.path.join(config.get('Common_directories','template'),config.get('Flowcell_reports_email_templates',report_type + '_no_outliers_body'))\n else:\n outlier_table += \"\\n\"\n outlier_table_for_pdf(config,mockdb,pdf_elements,self.current_report)\n template_subject = os.path.join(config.get('Common_directories','template'),config.get('Flowcell_reports_email_templates',report_type + '_subject'))\n template_body = os.path.join(config.get('Common_directories','template'),config.get('Flowcell_reports_email_templates',report_type + '_body'))\n image_files = []\n image_files.append(self.concordance_jpeg)\n image_files.append(self.hethomratio_jpeg)\n image_files.append(self.dbsnp_jpeg)\n image_files.append(self.greater_than_10x_jpeg)\n image_files.append(self.zero_coverage_jpeg)\n pdf_elements.extend(add_square_images(image_files))\n pdf_report.build(pdf_elements)\n sample_keys = self.sample_keys.split(\";\")\n number_samples = len(sample_keys)\n dictionary.update({'number_samples': str(number_samples)})\n subject = fill_template(template_subject,dictionary)\n body = fill_template(template_body,dictionary)\n return subject, body",
"def make_report(self, report_name, id_test, x_test, y_test, country_test, frame_test):\n if not os.path.exists('Reports/' + report_name):\n os.mkdir('Reports/' + report_name)\n results = self.predict(x_test)\n\n # Generate detailied evaluation report\n header = 'Country,Child,Frame'\n for output_layer in self.get_config()['output_layers']:\n header += ',{}_Actual'.format(output_layer[0])\n for output_layer in self.get_config()['output_layers']:\n header += ',{}_Prediction'.format(output_layer[0]) \n header += '\\n'\n\n with open('Reports/{}/evaluation_report.txt'.format(report_name), 'a') as f:\n if os.stat('Reports/{}/evaluation_report.txt'.format(report_name)).st_size == 0:\n f.write(header)\n for row in range(len(results)):\n entry = ','.join([str(i) for i in country_test[row]]) + ','\n entry += ','.join([str(i) for i in id_test[row]]) + ','\n entry += ','.join([str(i) for i in frame_test[row]]) + ','\n entry += ','.join([str(i) for i in y_test[row]]) + ','\n entry += ','.join([str(i) for i in results[row]]) + '\\n'\n f.write(entry)\n\n # Generate report of summary statistics\n cultures = np.unique(country_test)\n for c in cultures:\n culture_rows = np.where(country_test == c)[0] # get row numbers for culture c \n culture_ids = id_test[culture_rows] # get ID rows for culture c \n unique_ids = np.unique(culture_ids) # get unique IDs for culture c \n\n for u in unique_ids: \n all_id_rows = np.where(id_test == u)[0]\n id_rows = np.intersect1d(all_id_rows, culture_rows) # get ID rows for child u \n\n id_icc = icc(results[id_rows], y_test[id_rows])[0] # compute ICC for child u \n id_pcc = pcc(results[id_rows], y_test[id_rows])[0][0] # compute PCC for child u \n id_ccc = ccc(results[id_rows], y_test[id_rows]) # compute CCC for child u \n id_mae = mae(results[id_rows], y_test[id_rows]) # compute MAE for child u \n\n icc_entry = '{},{},{}\\n'.format(c, u, id_icc)\n pcc_entry = '{},{},{}\\n'.format(c, u, id_pcc)\n ccc_entry = '{},{},{}\\n'.format(c, u, id_ccc)\n mae_entry = '{},{},{}\\n'.format(c, u, id_mae)\n \n with open('Reports/{}/icc_report.txt'.format(report_name), 'a') as f:\n f.write(icc_entry)\n\n with open('Reports/{}/pcc_report.txt'.format(report_name), 'a') as f:\n f.write(pcc_entry)\n\n with open('Reports/{}/ccc_report.txt'.format(report_name), 'a') as f:\n f.write(ccc_entry)\n\n with open('Reports/{}/mae_report.txt'.format(report_name), 'a') as f:\n f.write(mae_entry)\n\n return results",
"def test_get_report_file_id(self):\n vt_analyses = VirusTotalAPIAnalyses('test_api_key')\n vt_analyses.get_report('test_object_id')\n http_err = vt_analyses.get_last_http_error()\n self.assertEqual(http_err, vt_analyses.HTTP_OK)",
"def test_search_results_report(client, jwt):\n # setup\n text_data = None\n pdf_output = None\n current_app.logger.debug(f'Loading test data from file {SEARCH_RESULT_DATAFILE}')\n with open(SEARCH_RESULT_DATAFILE, 'r') as data_file:\n text_data = data_file.read()\n data_file.close()\n # print(text_data)\n json_data = json.loads(text_data)\n report = Report(json_data, 'PS12345', ReportTypes.SEARCH_DETAIL_REPORT.value, '')\n\n # test\n current_app.logger.debug('Calling report._setup_report_data')\n request_data = report._setup_report_data()\n assert request_data\n assert request_data['reportName']\n assert request_data['template']\n assert request_data['templateVars']\n with open(SEARCH_RESULT_REQUESTFILE, \"w\") as request_file:\n request_file.write(json.dumps(request_data['templateVars']))\n # request_file.write(json.dumps(request_data))\n request_file.close()\n current_app.logger.debug('Calling ReportService.create_report_from_template')\n # pdf_output = ReportService.create_report_from_template(request_data['template'], request_data['templateVars'])\n if pdf_output:\n with open(SEARCH_RESULT_PDFFILE, \"wb\") as pdf_file:\n pdf_file.write(pdf_output)\n pdf_file.close()\n current_app.logger.debug('PDF report generation completed.')",
"def test_text_classifier_get_details(self):\n pass",
"def sendMessage(self, reportText):\n print reportText",
"def get_test_report(request, **kwargs): \n\t\n #Fetching the details of the selected event\n test_list = sidecar.events.test_report(project_id=kwargs['test_id'])\n report_list = []\n\t\n #Creating the list for the report\n for tests in test_list._logs:\n\tjson_test = json.loads(tests['data'])\n\ttests['success'] = json_test['success'] \n\ttests['time'] = json_test['time']\n\ttests['test_cases'] = json_test['test_cases']\n\treport_list.append(tests)\n\n #Making the context and sending to template\n context = {\n \"page_title\": _(\"Test Results\"),\n \"tests\": report_list\n }\n return render(request, 'rally_dashboard/events/test_detail.html', context)",
"def test_title(names):",
"def print_report_text(self, stream, time_taken, out, err):\n # stream.write('<testsuite errors=\"%(e)d\" failures=\"%(f)d\" ' % \\\n # { \"e\": len(self.errors), \"f\": len(self.failures) })\n # stream.write('name=\"%(n)s\" tests=\"%(t)d\" time=\"%(time).3f\">\\n' % \\\n # {\n # \"n\": self._test_name,\n # \"t\": self.testsRun,\n # \"time\": time_taken,\n # })\n for info in self._tests:\n info.print_report_text(stream)",
"def test_display_report():\n mr.initialize_donors()\n report = mr.text_report()\n\n print(report)\n for name in mr.mailroom.database.keys():\n assert name in report\n assert f'{mr.mailroom.database[name].total_donations:,.2f}' in report\n assert f'{mr.mailroom.database[name].average_donation:,.2f}' in report",
"def generate_test_report(self, message):\n pass",
"def test_gameAddText(self):\n # this is tested graphically, it is UI\n pass",
"def test_add_txt_record(self):\n self._register_response(\n \"/1/product?service_name=domain&customer_name={domain}\".format(domain=DOMAIN),\n data=[\n {\n \"id\": 654321,\n \"account_id\": 1234,\n \"service_id\": 14,\n \"service_name\": \"domain\",\n \"customer_name\": DOMAIN,\n }\n ],\n )\n self._register_response(\"/1/domain/654321/dns/record\", \"1001234\", \"POST\")\n self.client.add_txt_record(\n DOMAIN, self.record_name, self.record_content, self.record_ttl\n )",
"def test_add_text(self):\n text = 'test'\n info = self.api.add_text(text, tags=['asd'])\n self.assertEqual(info['value'], text)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def reports_cli():",
"def getTestResults():",
"def test_text_classifier_get_details_all(self):\n pass",
"def test_basic_report(self):\n report = self.analytics.suites[testReportSuite].report\n queue = []\n queue.append(report)\n response = omniture.sync(queue)\n self.assertIsInstance(response, list)",
"def stats_text(test):\n\n stats_text_en(test) \n \n stats_text_cn(test)",
"def run(self):\n report_details = self.report_client.get(self.csv_report)\n print(\"Report Details - \", report_details)",
"def pytest_runtest_makereport(item):\n global itr\n pytest_html = item.config.pluginmanager.getplugin('html')\n outcome = yield\n report = outcome.get_result()\n extra = getattr(report, 'extra', [])\n report.description = str(item.function.__doc__)\n report.function = item.function.__doc__\n report_directory = os.path.dirname(item.config.option.htmlpath)\n\n if report.when == 'call' or report.when == \"setup\":\n extra.append(pytest_html.extras.url(MyConfigFiles.PCN_AppURL))\n xfail = hasattr(report, 'wasxfail')\n #print(\"Xfaile details::\", xfail)\n if (report.skipped and xfail) or (report.failed and not xfail): # or report.outcome:\n #print(\"Report . Node ID::\", report.nodeid)\n file_name = report.nodeid.replace(\"QA/TestCases/\", '\\\\')\n file_name = file_name.replace(\"::\", \"_\") + \".png\"\n _capture_screenshot(file_name)\n extra.append(pytest_html.extras.html('<div>Log description</div>'))\n if file_name:\n html = '<div><img src=\"%s\" alt=\"screenshot\" style=\"width:304px;height:228px;\" ' \\\n 'onclick=\"window.open(this.src)\" align=\"right\"/></div>' % file_name\n extra.append(pytest_html.extras.html(html))\n print(\"Inside IF--HTML\", file_name)\n # movfiletodir(file_name)\n report.extra = extra",
"def test_get_report_key():\n key = _get_report_key()\n assert key == 'spi-reports/SPI Report 2018-03-01 010203.csv'"
]
| [
"0.61058956",
"0.59754026",
"0.59663147",
"0.5875179",
"0.5820495",
"0.5816298",
"0.5780176",
"0.56849706",
"0.5683433",
"0.56673104",
"0.56545895",
"0.5620811",
"0.55447906",
"0.552769",
"0.5513816",
"0.55067",
"0.54996467",
"0.549786",
"0.54872155",
"0.5484214",
"0.5477892",
"0.5474617",
"0.545062",
"0.54414076",
"0.54251903",
"0.54179925",
"0.54066765",
"0.53994274",
"0.535761",
"0.5352036"
]
| 0.63549286 | 0 |
Set required and widgets for fields. | def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.fields['email'].required = True
self.fields['first_name'].required = True
self.fields['password'].widget = forms.PasswordInput()
for field in self.fields:
self.fields[field].widget.attrs.update(
{
'class': 'form-control',
}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n \"first_name\": \"First Name\",\n \"last_name\": \"Last Name\",\n \"default_phone_num\": \"Phone Number\",\n \"default_passport_num\": \"Passport Number\",\n }\n\n self.fields[\"default_phone_num\"].widget.attrs[\"autofocus\"] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs[\"placeholder\"] = placeholder\n self.fields[field].widget.attrs[\n \"class\"\n ] = \"border-black rounded-0 \\\n all-form-input\"\n self.fields[field].label = False\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Div(\n Field(\n \"first_name\",\n ),\n Field(\n \"last_name\",\n ),\n Field(\n \"default_phone_num\",\n ),\n Field(\n \"default_passport_num\",\n ),\n ),\n ButtonHolder(\n Submit(\"submit\", \"Save\", css_class=\"m-0 btn btn-outline\"),\n ),\n )",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['first_name'].required = False\n self.fields['last_name'].required = False\n self.fields['institution'].required = False\n self.fields['institution_logo'].required = False\n self.fields['allow_notifications'].required = False",
"def controls_setup(self):\n\n self.email = element.TextBox(self, dom_id='mailing-list-email', alias='E-mail Textbox')\n self.close = element.Button(self, button_type='button', css_selector='.mailing-list-confirm .btn-close',\n alias='Close Button')\n self.signup = element.Button(self, css_selector='form.slide-left button[type=submit]', alias='Subscribe Button')",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'code': 'Enter coupon code',\n }\n\n self.fields['code'].widget.attrs['autofocus'] = True\n for field in self.fields:\n placeholder = f'{placeholders[field]}'\n\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].label = False",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'group_id': 'group_id',\n 'first_name': 'first_name',\n 'starter': 'starter',\n 'main': 'main',\n 'dessert': 'dessert',\n 'special_diet': 'special_diet',\n 'requirements': 'requirements',\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'",
"def _setup_ui(self):\n from functools import partial\n\n self.setStyleSheet(\n \"\"\"\n QLabel[labelField=\"true\"] {\n font-weight: bold;\n }\n \"\"\"\n )\n\n # The main layout\n self.main_layout = QtWidgets.QVBoxLayout(self)\n self.main_layout.setContentsMargins(0, 0, 0, 0)\n\n # the form layout\n self.form_layout = QtWidgets.QFormLayout()\n self.form_layout.setLabelAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter\n )\n\n # store roles\n label_role = QtWidgets.QFormLayout.LabelRole\n field_role = QtWidgets.QFormLayout.FieldRole\n\n self.main_layout.addLayout(self.form_layout)\n\n i = -1\n\n # Reviewer\n i += 1\n reviewer_name_label = QtWidgets.QLabel(self)\n reviewer_name_label.setText(\"Reviewer\")\n self.form_layout.setWidget(i, label_role, reviewer_name_label)\n\n self.reviewer_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, self.reviewer_name_widget)\n\n # Task Name field\n i += 1\n task_name_label = QtWidgets.QLabel(self)\n task_name_label.setText(\"Task\")\n self.form_layout.setWidget(i, label_role, task_name_label)\n\n self.task_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, self.task_name_widget)\n\n # # Version Info field\n # from anima.ui.widgets.version import VersionDetailsWidget\n # self.latest_version_widget = VersionDetailsWidget(parent=self)\n # self.main_layout.insertWidget(0, self.latest_version_widget)\n\n # Review Type Field\n i += 1\n review_type_label = QtWidgets.QLabel(self)\n review_type_label.setText(\"Review Type\")\n self.form_layout.setWidget(i, label_role, review_type_label)\n\n self.review_type_widget = ReviewTypeWidget(self)\n self.review_type_widget.currentIndexChanged.connect(\n partial(self.review_type_changed_callback)\n )\n\n self.form_layout.setWidget(i, field_role, self.review_type_widget)\n\n # Timing Field\n i += 1\n effort_label = QtWidgets.QLabel(self)\n effort_label.setText(\"Timing\")\n self.form_layout.setWidget(i, label_role, effort_label)\n\n effort_layout = QtWidgets.QHBoxLayout()\n self.form_layout.setLayout(i, field_role, effort_layout)\n\n from anima.ui.widgets.timing import ScheduleTimingWidget\n from anima import defaults\n\n self.timing_widget = ScheduleTimingWidget(\n self, timing_resolution=defaults.timing_resolution\n )\n self.timing_widget.setEnabled(False)\n # set the default to 1 hour\n self.timing_widget.set_schedule_info(timing=1, unit=\"h\")\n effort_layout.addWidget(self.timing_widget)\n\n # Description Field\n i += 1\n description_label = QtWidgets.QLabel(self)\n description_label.setText(\"Description\")\n self.form_layout.setWidget(i, label_role, description_label)\n\n self.description_widget = QtWidgets.QTextEdit(self)\n self.form_layout.setWidget(i, field_role, self.description_widget)",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'business_name': 'Please enter your business name',\n 'user_type': 'Please select the type of user',\n 'phone': 'Phone Number',\n 'postcode': 'Postcode',\n 'city': 'City',\n 'street_address': 'Street Address',\n 'street_address2': 'Street Address 2',\n 'county': 'County',\n 'country': 'Country'\n }\n\n # to force cursor to start in business name field\n self.fields['business_name'].widget.attrs['autofocus'] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = False\n self.fields[field].label = placeholder\n self.fields[field].widget.attrs['class'] = 'form-control'",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'role_name': 'Role name',\n 'role_color': 'role color',\n }\n\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = False",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'company_name': 'Company name',\n 'street_address1': 'Street Address 1',\n 'street_address2': 'Street Address 2',\n 'country': 'Country or State',\n 'postcode': 'Postcode',\n 'town_or_city': 'Town or City',\n 'payment': 'Paid for number of months',\n 'setting_daystart': 'Hour when your day starts',\n 'setting_dayend': 'hour when your day ends'\n }\n\n for field in self.fields:\n if field != 'country':\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n if field == 'setting_daystart' or field == 'setting_dayend' or field == 'payment':\n self.fields[field].widget.attrs['class'] = 'width-numbers'\n else:\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = placeholder",
"def enforce_required_fields(self, attrs):\n if self.instance is not None:\n return\n # missing_items = {\n # field_name: self.missing_message\n # for field_name in self.fields\n # if field_name not in attrs\n # }\n # if missing_items:\n # raise ValidationError(missing_items, code='required')",
"def _check_required_fields(self):\n assert self.title\n assert self.format",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['cd_identifier'].required = True\n self.fields['library'].required = True",
"def controls_setup(self):\n pass",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'team_name': 'Team name',\n 'planning_deadline': 'planning_deadline',\n 'coaching_rep': 'coaching_rep',\n 'min_lunchbreak': 'min_lunchbreak',\n 'min_dinnerbreak': 'min_dinnerbreak',\n 'min_paidbreak': 'min_paidbreak'\n }\n\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = False",
"def __init__(self, *args, **kwargs):\n super(UserCreationForm, self).__init__(*args, **kwargs)\n self.fields[\"first_name\"].required = True\n self.fields[\"last_name\"].required = True\n self.fields[\"email\"].required = True",
"def controls_setup(self):\n\n self.client_name = element.Link(self, css_selector='th:nth-child(1) > a', alias=\"Admin Client Name Link\")\n self.xero_customer = element.Caption(self, css_selector='td:nth-child(2)', alias=\"Xero Customer\")\n self.send_invoices = element.Image(self, css_selector='td:nth-child(3) img', alias=\"Send Invoices Check Mark\")\n self.part_a_required = element.Image(self, css_selector='td:nth-child(4) img',\n alias=\"Part A Required Check Mark\")\n self.they_supply_pump = element.Image(self, css_selector='td:nth-child(5) img',\n alias=\"They Supply Pump Check Mark\")\n self.active_start_date = element.Link(self, css_selector='td:nth-child(6)', alias=\"Active Start Date Text\")\n self.active_end_date = element.Link(self, css_selector='td:nth-child(6)', alias=\"Active End Date Text\")",
"def controls_setup(self):\n\n self.date_received = element.Link(self, alias=\"Date Received\",\n css_selector='td:nth-child(1) > a', angular=True)\n self.job_type = element.Caption(self, alias=\"Job Type\", css_selector='td:nth-child(2)', angular=True)\n self.description = element.Caption(self, alias=\"Client Name\", css_selector='td:nth-child(3)', angular=True)\n self.address = element.Caption(self, alias=\"Address\", css_selector='td:nth-child(4)', angular=True)\n self.suburb = element.Caption(self, alias=\"Suburb\", css_selector='td:nth-child(5)', angular=True)\n self.client = element.Caption(self, alias=\"Client\", css_selector='td:nth-child(6)', angular=True)",
"def setRequiredValues(self, instance):\n for key in instance.__slots__:\n if key in instance.requiredFields:\n value = self.getTypicalValue(type(instance), key)\n setattr(instance, key, value)",
"def customize_fields(self, fields):\n\n for field in fields.values():\n\n field_type = type(field.field)\n\n if field_type is List or field_type is Set:\n field.widgetFactory = CheckBoxFieldWidget\n\n elif field_type is Choice:\n field.widgetFactory = RadioFieldWidget",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'group_id': 'Guest Group',\n 'first_name': 'First Name',\n 'last_name': 'Last Name',\n 'address_line_1': 'Address Line 1',\n 'address_line_2': 'Address Line 2',\n 'city': 'City',\n 'county': 'County',\n 'postcode': 'Post Code',\n 'country': 'Country',\n 'email': 'Email'\n }\n\n for field in self.fields:\n if field != 'country':\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'\n self.fields[field].label = False",
"def updateFields(self):\n super(AdminRulesForm, self).updateFields()\n self.fields['improved_templates'].widgetFactory = CheckBoxFieldWidget\n self.fields['iframe_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['js_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['inline_images'].widgetFactory = SingleCheckBoxFieldWidget",
"def form_RequiredStringAndCheckbox(request):\n schema = schemaish.Structure()\n schema.add('myString', schemaish.String(validator=validatish.Required()))\n schema.add('myBoolean', schemaish.Boolean())\n form = formish.Form(schema, 'form')\n form['myBoolean'].widget=formish.Checkbox()\n return form",
"def __init__(self, *args, **kwargs):\n kwargs.pop('widget_syntax')\n\n super(TemplateForm, self).__init__( *args, **kwargs)\n print self.fields",
"def __init__(self, *args, **kwargs):\n\n\t\tsuper(SchadensmeldungStatusForm, self).__init__(*args, **kwargs)\n\n\t\tfor field in self.fields:\n\t\t\tself.fields[field].widget.attrs.update({\n\t\t\t\t'class': 'form-control'\n\t\t\t\t})",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'shift_name': 'Shift name',\n 'min_agents': 'Minimum Number of Agents',\n 'shift_start': 'Start time',\n 'shift_end': 'End time',\n 'weekday_sunday': 'Sunday',\n 'weekday_monday': 'Monday',\n 'weekday_tuesday': 'Tuesday',\n 'weekday_wednesday': 'Wednesday',\n 'weekday_thursday': 'Thursday',\n 'weekday_friday': 'Friday',\n 'weekday_saturday': 'Saturday'\n }\n\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'profile-form-input'\n self.fields[field].label = False",
"def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs) \n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )",
"def __init__(self, *args, **kwargs):\n super(CustomAuthenticationForm, self).__init__(*args, **kwargs)\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )",
"def required_fields():\n module_logger.debug(\"In required_fields.\")\n return (\"comment\", \"lib_layout\", \"lib_selection\",\n \"ncbi_taxon_id\", \"prep_id\", \"sequencing_center\",\n \"sequencing_contact\", \"storage_duration\", \"tags\")",
"def set_field_attributes(fields, errors):\n for field in fields:\n field_instance = fields[field]\n widget = field_instance.widget\n if isinstance(field_instance, forms.DateField) and isinstance(widget, forms.TextInput):\n field_instance.format = '%d/%m/%Y'\n add_class_to_widget(widget, 'date')\n widget.attrs['type'] = 'text'\n elif isinstance(field_instance, forms.DateTimeField):\n field_instance.format = '%d/%m/%Y %H:%M'\n if isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'datetime')\n elif isinstance(widget, DatetimeInput):\n add_class_to_widget(widget.widgets[0], 'date')\n elif isinstance(field_instance, forms.FloatField) and isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'float')\n elif isinstance(field_instance, forms.IntegerField) and isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'int')\n elif isinstance(field_instance, forms.CharField) and isinstance(widget, forms.TextInput):\n add_class_to_widget(widget, 'char')\n\n if isinstance(widget, forms.CheckboxSelectMultiple):\n add_class_to_widget(widget, 'checkbox-multiple-select')\n\n if field in errors:\n add_class_to_widget(widget, 'with_errors')\n if 'title' not in widget.attrs:\n widget.attrs['title'] = '; '.join(errors[field])\n\n add_class_to_widget(widget, 'form-control')",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n set_fields_to_required(self, ['field_of_study'])"
]
| [
"0.6541632",
"0.6453019",
"0.6182293",
"0.613699",
"0.61324316",
"0.61182314",
"0.611023",
"0.6102259",
"0.6014309",
"0.5993374",
"0.5987304",
"0.59669083",
"0.59576404",
"0.59438926",
"0.59405667",
"0.59002674",
"0.58811474",
"0.5862535",
"0.58578044",
"0.58545446",
"0.58465064",
"0.5777598",
"0.5772909",
"0.5747142",
"0.57421744",
"0.57386833",
"0.5717068",
"0.5716443",
"0.5699363",
"0.5658944"
]
| 0.6483126 | 1 |
Create the sh script for starting unblur | def create_sh_script(
unblur_path, input_image, output_dir,
input_dir, input_suffix, options
):
strSh = ''
# To make sure it is a bash script
strSh += '#!/bin/bash\n\n'
# Export number of threads
strSh += 'export OMP_NUM_THREADS={:d}\n'.format(options.nr_threads)
# The script will abort with non-zero exit values
strSh += '# The script will abort with non-zero exit values\n'
strSh += 'set -e\n'
# Create a file list of all files
strSh += '# Create a file list of all files\n'
strSh += 'fileList=$(ls {:s})\n'.format(
input_image
)
# Create folders
strSh += '# Create folders\n'
strSh += 'mkdir -p {:s}/Doseuncorrected\n'.format(output_dir)
strSh += 'mkdir -p {:s}/Shift\n'.format(output_dir)
strSh += 'mkdir -p {:s}/Temp\n'.format(output_dir)
if options.filter_sum:
strSh += 'mkdir -p {:s}/Filtered\n'.format(output_dir)
if options.dose_filter:
strSh += 'mkdir -p {:s}/Dosecorrected\n'.format(output_dir)
if options.expert_mode:
strSh += 'mkdir -p {:s}/FRC\n\n'.format(output_dir)
# Abort script if files in Doseuncorrected already exist
strSh += '# Abort script if files in Doseuncorrected already exist\n'
strSh += 'for f in {:s}/Doseuncorrected/*\n'.format(output_dir)
strSh += 'do\n'
strSh += 'if [ -e $f ]\n'
strSh += 'then\n'
strSh += 'echo "Some files already exist, please choose another output directory"\n'
strSh += 'exit 1\n'
strSh += 'break\n'
strSh += 'fi\n'
strSh += 'done\n\n'
# Abort script if files in Shift already exist
strSh += '# Abort script if files in Shift already exist\n'
strSh += 'for f in {:s}/Shift/*\n'.format(output_dir)
strSh += 'do\n'
strSh += 'if [ -e $f ]\n'
strSh += 'then\n'
strSh += 'echo "Some files already exist, please choose another output directory"\n'
strSh += 'exit 1\n'
strSh += 'break\n'
strSh += 'fi\n'
strSh += 'done\n\n'
# Abort script if files in Dosecorrected already exist
strSh += '# Abort script if files in Dosecorrected already exist\n'
strSh += 'for f in {:s}/Dosecorrected/*\n'.format(output_dir)
strSh += 'do\n'
strSh += 'if [ -e $f ]\n'
strSh += 'then\n'
strSh += 'echo "Some files already exist, please choose another output directory"\n'
strSh += 'exit 1\n'
strSh += 'break\n'
strSh += 'fi\n'
strSh += 'done\n\n'
# Abort script if files in Filtered already exist
strSh += '# Abort script if files in Filtered already exist\n'
strSh += 'for f in {:s}/Filtered/*\n'.format(output_dir)
strSh += 'do\n'
strSh += 'if [ -e $f ]\n'
strSh += 'then\n'
strSh += 'echo "Some files already exist, please choose another output directory"\n'
strSh += 'exit 1\n'
strSh += 'break\n'
strSh += 'fi\n'
strSh += 'done\n\n'
# Abort script if files in FRC already exist
strSh += '# Abort script if files in FRC already exist\n'
strSh += 'for f in {:s}/FRC/*\n'.format(output_dir)
strSh += 'do\n'
strSh += 'if [ -e $f ]\n'
strSh += 'then\n'
strSh += 'echo "Some files already exist, please choose another output directory"\n'
strSh += 'exit 1\n'
strSh += 'break\n'
strSh += 'fi\n'
strSh += 'done\n\n'
# Loop over all files
strSh += '\nfor file in $fileList\ndo\n\n'
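# Derive the base name by stripping the input suffix and the input directory prefix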
strSh += 'baseName=${{file%{:s}}}\n'.format(input_suffix)
strSh += 'baseName=${{baseName#{:s}}}\n'.format(input_dir)
# Create a temporary file to work with to prevent format issues
strSh += '# Create a temporary file to work with to prevent format issues\n'
strSh += 'e2proc3d.py $file {:s}/Temp/${{baseName}}_temp.mrc\n\n'.format(output_dir)
# Remove some temporary files that unblur makes
strSh += '# Remove some temporary files that unblur makes\n'
strSh += 'for f in .UnBlur*\n'
strSh += 'do\n'
strSh += 'if [ -e $f ]\n'
strSh += 'then\n'
strSh += 'rm .UnBlur*\n'
strSh += 'break\n'
strSh += 'else\n'
strSh += 'true\n'
strSh += 'fi\n'
strSh += 'done\n\n'
# Start Unblur without dose correction
strSh += '{:s} << eof\n'.format(unblur_path)
# Input File
strSh += '{:s}/Temp/${{baseName}}_temp.mrc\n'.format(output_dir)
# Number of Frames
strSh += '{:d}\n'.format(options.nr_frames)
# Sum File
strSh += '{:s}/Doseuncorrected/${{baseName}}{:s}.mrc\n'.format(
output_dir,
options.sum_suffix
)
# Shift File
strSh += '{:s}/Shift/${{baseName}}{:s}.txt\n'.format(
output_dir,
options.shift_suffix
)
# Pixel Size
strSh += '{:f}\n'.format(options.pixel_size)
# Say no to Dose Filtering
strSh += 'NO\n'
if options.save_frames:
# Say yes to Save Frames
strSh += 'YES\n'
# Frames file
strSh += '{:s}/Doseuncorrected/${{baseName}}{:s}{:s}.mrc\n'.format(
output_dir,
options.sum_suffix,
options.frames_suffix
)
else:
# Say no to Save Frames
strSh += 'NO\n'
if options.expert_mode:
# Say yes to Expert Mode
strSh += 'YES\n'
# FRC File
strSh += '{:s}/FRC/${{baseName}}{:s}.txt\n'.format(
output_dir,
options.frc_suffix
)
# Minimum Shift for initial search
strSh += '{:f}\n'.format(options.shift_initial)
# Outer Radius Shift Limit
strSh += '{:f}\n'.format(options.shift_radius)
# B-Factor to Apply
strSh += '{:f}\n'.format(options.b_factor)
# Half-Width Vertical
strSh += '{:d}\n'.format(options.fourier_vertical)
# Half-Width Horizontal
strSh += '{:d}\n'.format(options.fourier_horizontal)
# Termination Shift Threshold
strSh += '{:f}\n'.format(options.shift_threshold)
# Maximum Iterations
strSh += '{:d}\n'.format(options.iterations)
# Restore Noise Power
if options.restore_noise:
# Say yes to Restore Noise Power
strSh += 'YES\n'
else:
# Say no to Restore Noise Power
strSh += 'NO\n'
# Verbose Output
if options.verbose:
# Say yes to Verbose Output
strSh += 'YES\n'
else:
# Say no to Verbose Output
strSh += 'NO\n'
else:
# Say no to Expert Mode
strSh += 'NO\n'
# End of heredoc input reached
strSh += 'eof\n\n'
# Remove some temporary files that unblur makes
strSh += 'for f in .UnBlur*\n'
strSh += 'do\n'
strSh += 'if [ -e $f ]\n'
strSh += 'then\n'
strSh += 'rm .UnBlur*\n'
strSh += 'break\n'
strSh += 'else\n'
strSh += 'true\n'
strSh += 'fi\n'
strSh += 'done\n\n'
# =========== #
if options.dose_filter:
# Start Unblur with dose correction
strSh += '{:s} << eof\n'.format(unblur_path)
# Input File
strSh += '{:s}/Temp/${{baseName}}_temp.mrc\n'.format(output_dir)
# Number of Frames
strSh += '{:d}\n'.format(options.nr_frames)
# Sum File
strSh += '{:s}/Dosecorrected/${{baseName}}{:s}.mrc\n'.format(
output_dir,
options.sum_suffix
)
# Shift File
strSh += '{:s}/Shift/${{baseName}}{:s}.txt\n'.format(
output_dir,
options.shift_suffix
)
# Pixel Size
strSh += '{:f}\n'.format(options.pixel_size)
# Say yes to Dose Filtering
strSh += 'YES\n'
# Exposure per Frame
strSh += '{:f}\n'.format(options.exposure_per_frame)
# Acceleration Voltage
strSh += '{:f}\n'.format(options.voltage)
# Pre Exposure
strSh += '{:f}\n'.format(options.pre_exposure)
if options.save_frames:
# Say yes to Save Frames
strSh += 'YES\n'
# Frames file
strSh += '{:s}/Dosecorrected/${{baseName}}{:s}{:s}.mrc\n'.format(
output_dir,
options.sum_suffix,
options.frames_suffix
)
else:
# Say no to Save Frames
strSh += 'NO\n'
if options.expert_mode:
# Say yes to Expert Mode
strSh += 'YES\n'
# FRC File
strSh += '{:s}/FRC/${{baseName}}{:s}.txt\n'.format(
output_dir,
options.frc_suffix
)
# Minimum Shift for initial search
strSh += '{:f}\n'.format(options.shift_initial)
# Outer Radius Shift Limit
strSh += '{:f}\n'.format(options.shift_radius)
# B-Factor to Apply
strSh += '{:f}\n'.format(options.b_factor)
# Half-Width Vertical
strSh += '{:d}\n'.format(options.fourier_vertical)
# Half-Width Horizontal
strSh += '{:d}\n'.format(options.fourier_horizontal)
# Termination Shift Threshold
strSh += '{:f}\n'.format(options.shift_threshold)
# Maximum Iterations
strSh += '{:d}\n'.format(options.iterations)
# Restore Noise Power
if options.restore_noise:
# Say yes to Restore Noise Power
strSh += 'YES\n'
else:
# Say no to Restore Noise Power
strSh += 'NO\n'
# Verbose Output
if options.verbose:
# Say yes to Verbose Output
strSh += 'YES\n'
else:
# Say no to Verbose Output
strSh += 'NO\n'
else:
# Say no to Expert Mode
strSh += 'NO\n'
# End of heredoc input reached
strSh += 'eof\n\n'
# Remove temporary file
strSh += 'rm {:s}/Temp/${{baseName}}_temp.mrc\n'.format(output_dir)
# Remove some temporary files that unblur makes
strSh += 'for f in .UnBlur*\n'
strSh += 'do\n'
strSh += 'if [ -e $f ]\n'
strSh += 'then\n'
strSh += 'rm .UnBlur*\n'
strSh += 'break\n'
strSh += 'else\n'
strSh += 'true\n'
strSh += 'fi\n'
strSh += 'done\n\n'
if options.filter_sum:
# Filter Images
lowpass_angstrom = options.pixel_size / options.lowpass
highpass_angstrom = options.pixel_size / options.highpass
strSh += \
'e2proc3d.py {:s}/Doseuncorrected/${{baseName}}{:s}.mrc '.format(
output_dir,
options.sum_suffix
)
strSh += '{:s}/Filtered/${{baseName}}{:s}.mrc ' \
.format(
output_dir,
options.sum_suffix
)
strSh += '--process=filter.lowpass.gauss:cutoff_freq={:f} '.format(
options.lowpass
)
strSh += '--process=filter.highpass.gauss:cutoff_freq={:f}\n\n' \
.format(
options.highpass
)
if options.remove_sum:
# Remove sum files
strSh += 'rm {:s}/Doseuncorrected/${{baseName}}{:s}.mrc\n'.format(
output_dir,
options.sum_suffix
)
# Done
strSh += 'done\n\n'
# Remove temp folder
strSh += 'rm -r {:s}/Temp\n'.format(output_dir)
strSh += 'echo "All done!"'
# Write Output
with open('{:s}/scriptUnblur.sh'.format(output_dir), 'w') as f:
f.write(strSh) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n parser = make_arg_parser()\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n args = parser.parse_args()\n deblur_transcripts(args.input, args.cds_fa, args.vblur, args.output)",
"def launchgui(image):\n from filter import launch\n launch(image)",
"def run(self):\n self.scion_sh('run', 'nobuild')",
"def __init__(self):\r\n super(Defocus, self).__init__(\"defocus\")\r\n # load blur shader\r\n self.shader = Shader(\"defocus\")",
"def hxlcut():\n run_script(hxlcut_main)",
"def hxladd():\n run_script(hxladd_main)",
"def main():\n\n # Fix crackling audio\n util.set_environment('PULSE_LATENCY_MSEC', '60')\n\n # Replace launcher with game exe in proton arguments\n util.replace_command('FF9_Launcher.exe', 'x64/FF9.exe')",
"def script(self):",
"def launch ():\n #core.addListenerByName(\"UpEvent\", _go_up)\n core.registerNew(MAC_Filter)",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():",
"def cli():"
]
| [
"0.6045751",
"0.54544467",
"0.5439029",
"0.534224",
"0.53305626",
"0.5258778",
"0.52516085",
"0.5213264",
"0.5197605",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684",
"0.51781684"
]
| 0.6334928 | 0 |
Creates a dictionary that maps domains to encoded ids. | def _get_domain_mappings(domain_to_intents: Dict) -> Dict:
domain2id = {}
domains = list(domain_to_intents)
for index, domain in enumerate(domains):
domain2id[domain] = index
return domain2id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_intent_mappings(domain_to_intents: Dict) -> Dict:\n domain_to_intent2id = {}\n for domain in domain_to_intents:\n intent_labels = {}\n for index, intent in enumerate(domain_to_intents[domain]):\n intent_labels[intent] = index\n domain_to_intent2id[domain] = intent_labels\n return domain_to_intent2id",
"def generate_ids():\n payloads = ['info', 'ad_tracking', 'airdrop', 'store', 'siri', 'desktop', 'desktop_services', 'dock', 'energy',\n 'filevault', 'finder', 'firewall', 'itunes', 'login', 'passcode', 'password', 'restrictions', 'safari',\n 'screensaver', 'setup', 'software', 'diagnostics', 'policy', 'policy_2', 'preferences',\n 'preferences_security', 'time_machine']\n ids = {}\n for i, payload in enumerate(payloads):\n identifier = str(uuid.uuid4()).upper()\n ids[payload] = identifier[9:]\n return ids",
"def getGeneCodesToIdDict(conn, tuple_of_gene_codes):\n gene_code_to_id_dict = conn.db_connection.convertGeneCodeToId(tuple_of_gene_codes)\n\n return gene_code_to_id_dict",
"def get_domains(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n domains = {}\n\n # add all domain triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.domain, None)):\n if subject in property_to_id and object in entity_type_to_id:\n domains[property_to_id[subject]] = entity_type_to_id[object]\n\n return domains",
"def mk_id_lookups(self):\n id_lookups = {}\n for ns in self.ddef.keys():\n id_lookups[ns] = self.mk_id_lookup(ns)\n return id_lookups",
"def to_id_dict(self):\n return self._id, dict(self.__data)",
"def get_identifier_map(self) -> None:\n id_mapping_dict = self._get_identifiers_from_kbs()\n id_mapping_dict = self._add_uniprot_identifiers(id_mapping_dict)\n id_mapping_dict = self._add_chebi_identifiers(id_mapping_dict)\n id_mapping_dict = self._add_bridge_db_identifiers(id_mapping_dict)\n\n print(\"merging similar...\")\n id_mapping_dict = pathway_utils.merge_similar(id_mapping_dict)\n\n print(\"generating local identifiers...\")\n self.forward_map, self.backward_map = self._generate_local_identifiers(id_mapping_dict)\n self.save_id_dict()",
"def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result",
"def load_ids(data):\n identifiers = {}\n base_id = settings.MDM_UUID\n for payload in data:\n identifiers[payload] = \"%s-%s\" % (base_id, data[payload])\n return identifiers",
"def dict_of_domains(fc):\r\n # need to find root database (GDB or SDE)\r\n db_root = os.path.dirname(fc)\r\n while db_root[-4:].lower() != '.gdb' and db_root[-4:].lower() != '.sde':\r\n old_db_root = db_root # protect against infinite loop\r\n db_root = os.path.dirname(db_root)\r\n if old_db_root == db_root: # protect against infinite loop\r\n break\r\n arcpy.AddMessage(\"Retrieving Domains from \" + str(db_root))\r\n return {domain.name: domain.codedValues for domain in arcpy.da.ListDomains(db_root)}",
"def hash_entries(entries):\n d = dict()\n for e in entries:\n uri = e[\"uri\"]\n domain = re.match(\"^/view\\d*/(.*)$\", uri).group(1)\n if domain:\n visitor_id = e[\"visitor_id\"]\n if d.has_key(domain):\n store_page_entries = d[domain]\n store_page_entries.append(visitor_id)\n else:\n d[domain] = [visitor_id]\n print \"Retrieved {0} unique domains.\".format(len(d))\n return d",
"def set_identifiers(self, data):\n identity = {}\n if self.unique_identifier:\n self.unique_identifiers.append(self.unique_identifier)\n\n # Remove duplicates\n self.unique_identifiers = list(dict.fromkeys(self.unique_identifiers))\n\n try:\n for unique_identifier in self.unique_identifiers:\n identity[unique_identifier] = data[unique_identifier]\n data.pop(unique_identifier, None)\n\n return identity\n except Exception as e:\n return identity",
"def idna_encode(self, domain):\n try:\n if isinstance(domain, str):\n domain = domain.decode('utf-8')\n return domain.encode('idna')\n except UnicodeError:\n return domain",
"def flowgram_id_to_seq_id_map(seqs):\r\n result = {}\r\n for id_, seq in seqs:\r\n fields = id_.split()\r\n seq_id = id_\r\n flowgram_id = fields[1]\r\n result[flowgram_id] = seq_id\r\n return result",
"def _create_id_map(self, word_list, max_list_length):\n\n ############ 1.5 TODO\n from collections import Counter\n \n # import pdb; pdb.set_trace()\n word_rank_list = Counter(word_list).most_common(max_list_length)\n \n id_map = {}\n for idx, (word,_) in enumerate(word_rank_list):\n id_map[word] = idx\n\n ############\n # raise NotImplementedError()\n return id_map",
"def map_conf_ids(years=default.arch_data_years, save=False):\n\tmapping = {}\n\tfor year in years:\n\t\tdir_ = os.path.join('data', 'archived_data', str(year))\n\t\tarch_confs = pd.read_csv(os.path.join(dir_, 'conference.csv'))\n\t\tconferences = load_json(os.path.join('data', str(year), 'Conferences.json'))\n\t\tfor cid, data in conferences.iteritems():\n\t\t\tif str(cid) not in mapping:\n\t\t\t\tmapping[str(cid)] = \"\"\n\t\t\t\tixName0 = str(data['Name']) == arch_confs['Name'].values\n\t\t\t\tixName1 = str(data['Name'])+\" Conference\" == arch_confs['Name'].values\n\t\t\t\tixName = np.logical_or(ixName0, ixName1)\n\t\t\t\tif any(ixName):\n\t\t\t\t\tmapping[str(cid)] = str(arch_confs['Conference Code'].values[ixName][0])\n\tmapping = dict([(old,new) if old != \"\" else (\"old\"+new,new) for new,old in mapping.iteritems()])\n\tif save:\n\t\tdump_json(mapping, 'conf_id_mapping.json', fdir=os.path.join('data', 'archived_data'))\n\treturn mapping",
"def get_feature_domain_dict(self):\n feature_domain_dict = {}\n for feature_index in range(len(self.train_examples[0])):\n domain = set([example[feature_index] for example in self.train_examples])\n feature_domain_dict[self.features[feature_index]] = domain\n\n return feature_domain_dict",
"def _add_uniprot_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding UniProt identifiers...\\n\")\n r_session = base_utils.requests_retry_session()\n all_uniprot = [k for k in map_dict if k.lower().startswith('uniprot')]\n\n for uniprot_id in tqdm.tqdm(all_uniprot, total=len(all_uniprot)):\n db, uid = uniprot_id.split(':')\n\n try:\n # query UniProt API\n r = r_session.get(\n 'http://www.uniprot.org/uniprot/' + uid + '.xml'\n )\n except Exception as x:\n print(\"%s: %s\" % (uniprot_id, x.__class__.__name__))\n continue\n\n if r.content:\n root = etree.fromstring(r.content)\n if root:\n for s in root[0]:\n if s.tag.endswith('accession'):\n new_id = '{}:{}'.format('UniProt', s.text.split(':')[-1])\n map_dict[uniprot_id].add(new_id)\n else:\n break\n\n return map_dict",
"def get_citations_ids_map(id_list):\n create_unverified_context()\n logging.debug('============== IN get_citations_ids_map: ================')\n logging.debug('============== ID LIST: ================')\n logging.debug(id_list)\n linked = {}\n for i in range(0, len(id_list)):\n handle = Entrez.elink(\n dbfrom=\"pubmed\", id=id_list[i], linkname=\"pubmed_pubmed_refs\")\n results = Entrez.read(handle)\n logging.debug('============== RESULTS: ================')\n logging.debug(results)\n handle.close()\n if len(results[0][\"LinkSetDb\"]) != 0:\n linked[id_list[i]] = [\n link[\"Id\"] for link in results[0][\"LinkSetDb\"][0][\"Link\"]\n ]\n logging.debug('============== LINKED ARTICLES: ================')\n logging.debug(linked)\n logging.debug('============== ARTICLE ID: ================')\n logging.debug(id_list[i])\n return linked",
"def format_domain(domain):\n domain.ns_converted = []\n for ns in domain.ns :\n if isinstance(ns, objects.DomainHostAttr) :\n ns_item = {\n 'hostname' : ns.hostname,\n 'ips' : []\n }\n\n for hostaddr in ns.hostAddr :\n ns_item['ips'].append(hostaddr.ip)\n else :\n ns_item = {\n 'hostname' : ns.name,\n 'ips' : [],\n 'hostobj' : 1\n }\n domain.ns_converted.append(ns_item)\n\n return domain",
"def create_species_encode():\n\tdata = pd.read_csv(\"../train.csv\")\n\tspecies = sorted(data.species.unique())\n\tspecies_dict = {species: index for index, species in enumerate(species)}\n\treturn species_dict",
"def by_domains(self):\n\t\t\n\t\t# TODO: use urllib instead\n\t\turl_format = r'^\\s*(?:(?P<protocol>\\w+)://)?(?P<domain>[\\w\\d\\-\\.]+)(?::(?P<port>\\d+))?/?(?P<everything_else>.*)$'\n\t\tsites = {}\n\t\tfor line in self.source.lines:\n\t\t\ttry:\n\t\t\t\tif self.filter(line):\n\t\t\t\t\tresult = re.match(url_format, line.content.url)\n\t\t\t\t\tif result.group('domain') not in sites.keys():\n\t\t\t\t\t\tsites[result.group('domain')] = 0\n\t\t\t\t\tsites[result.group('domain')] += int(line.content.size)\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\n\t\t# TODO: sort; convert to lists is even better\n\t\t\n\t\treturn sites",
"def group_by_domain(hash_entries):\n entries = (get_entry(h) for h in hash_entries)\n domains = {}\n for e in entries:\n domains[e['url_domain']] = domains.get(e['url_domain']) or []\n domains[e['url_domain']].append(e)\n return [{'domain': name, 'entries': ent} for name, ent in domains.items()]",
"def domainnames(l):\n mapping = {}\n # locate all the samba domains in the ldap\n r = l.search_s('dc=elex', ldap.SCOPE_SUBTREE, '(objectClass=sambaDomain)', ['sambaDomainName','sambaSID'])\n for dn, entry in r:\n mapping[dn] = (entry['sambaDomainName'][0], entry['sambaSID'][0])\n return mapping",
"def _label_to_id(self, sequence_labels, dict_map):\n label_id_list = []\n for label in sequence_labels:\n if label not in dict_map:\n self.logger.warn(\"Label not in label map: %s\" % label)\n else:\n label_id_list.append(self.label_map[label])\n assert label_id_list, \"Label is empty: %s\" % \" \".join(sequence_labels)\n\n return label_id_list",
"def get_label2id(labels_path: str) -> Dict[str, int]:\n with open(labels_path, 'r') as f:\n labels_str = f.read().split()\n labels_ids = list(range(1, len(labels_str)+1))\n return dict(zip(labels_str, labels_ids))",
"def parse_denoiser_mapping(denoiser_map):\r\n result = {}\r\n for line in denoiser_map:\r\n line = line.strip().split('\\t')\r\n denoised_id = line[0].rstrip(':')\r\n original_ids = [denoised_id] + line[1:]\r\n if denoised_id in result:\r\n # just a healthy dose of paranoia\r\n raise ValueError(\"Duplicated identifiers in denoiser mapping file: \"\r\n \"are you sure you merged the correct files?\")\r\n else:\r\n result[denoised_id] = original_ids\r\n return result",
"def remap_ids(self, id_map: Dict[int, int]) -> None:",
"def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict",
"def build_reverse_dictionary(word_to_id):\n reverse_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))\n return reverse_dictionary"
]
| [
"0.68493307",
"0.6190228",
"0.6138264",
"0.6116921",
"0.61143875",
"0.59695417",
"0.5882584",
"0.58567834",
"0.5835327",
"0.58119893",
"0.5768381",
"0.5751803",
"0.5672121",
"0.56632924",
"0.564285",
"0.56324285",
"0.5624405",
"0.56145364",
"0.5583475",
"0.557813",
"0.556828",
"0.55342835",
"0.5514927",
"0.54913896",
"0.54564",
"0.5454977",
"0.54438233",
"0.54232424",
"0.5408523",
"0.539445"
]
| 0.7636259 | 0 |
Creates a dictionary that maps intents to encoded ids. | def _get_intent_mappings(domain_to_intents: Dict) -> Dict:
domain_to_intent2id = {}
for domain in domain_to_intents:
intent_labels = {}
for index, intent in enumerate(domain_to_intents[domain]):
intent_labels[intent] = index
domain_to_intent2id[domain] = intent_labels
return domain_to_intent2id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_intent_token_dict(intents, intent_split_symbol):\r\n\r\n distinct_tokens = set([token\r\n for intent in intents\r\n for token in intent.split(\r\n intent_split_symbol)])\r\n return {token: idx\r\n for idx, token in enumerate(sorted(distinct_tokens))}",
"def _get_domain_mappings(domain_to_intents: Dict) -> Dict:\n domain2id = {}\n domains = list(domain_to_intents)\n for index, domain in enumerate(domains):\n domain2id[domain] = index\n return domain2id",
"def _create_encoded_intents(self, intent_dict):\r\n\r\n if self.intent_tokenization_flag:\r\n intent_token_dict = self._create_intent_token_dict(\r\n list(intent_dict.keys()), self.intent_split_symbol)\r\n\r\n encoded_all_intents = np.zeros((len(intent_dict),\r\n len(intent_token_dict)))\r\n for key, idx in intent_dict.items():\r\n for t in key.split(self.intent_split_symbol):\r\n encoded_all_intents[idx, intent_token_dict[t]] = 1\r\n\r\n return encoded_all_intents\r\n else:\r\n return np.eye(len(intent_dict))",
"def generate_ids():\n payloads = ['info', 'ad_tracking', 'airdrop', 'store', 'siri', 'desktop', 'desktop_services', 'dock', 'energy',\n 'filevault', 'finder', 'firewall', 'itunes', 'login', 'passcode', 'password', 'restrictions', 'safari',\n 'screensaver', 'setup', 'software', 'diagnostics', 'policy', 'policy_2', 'preferences',\n 'preferences_security', 'time_machine']\n ids = {}\n for i, payload in enumerate(payloads):\n identifier = str(uuid.uuid4()).upper()\n ids[payload] = identifier[9:]\n return ids",
"def getGeneCodesToIdDict(conn, tuple_of_gene_codes):\n gene_code_to_id_dict = conn.db_connection.convertGeneCodeToId(tuple_of_gene_codes)\n\n return gene_code_to_id_dict",
"def load_ids(data):\n identifiers = {}\n base_id = settings.MDM_UUID\n for payload in data:\n identifiers[payload] = \"%s-%s\" % (base_id, data[payload])\n return identifiers",
"def get_encoding_dict(self) -> Dict[str, int]:\n return {k.lower():v for v,k in enumerate(self.vocabulary_list)}",
"def get_identifier_map(self) -> None:\n id_mapping_dict = self._get_identifiers_from_kbs()\n id_mapping_dict = self._add_uniprot_identifiers(id_mapping_dict)\n id_mapping_dict = self._add_chebi_identifiers(id_mapping_dict)\n id_mapping_dict = self._add_bridge_db_identifiers(id_mapping_dict)\n\n print(\"merging similar...\")\n id_mapping_dict = pathway_utils.merge_similar(id_mapping_dict)\n\n print(\"generating local identifiers...\")\n self.forward_map, self.backward_map = self._generate_local_identifiers(id_mapping_dict)\n self.save_id_dict()",
"def flowgram_id_to_seq_id_map(seqs):\r\n result = {}\r\n for id_, seq in seqs:\r\n fields = id_.split()\r\n seq_id = id_\r\n flowgram_id = fields[1]\r\n result[flowgram_id] = seq_id\r\n return result",
"def to_id_dict(self):\n return self._id, dict(self.__data)",
"def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}",
"def json(self):\n intents = {intent_data.intent_name: intent_data.json\n for intent_data in self.intents}\n entities = {entity.name: entity.json for entity in self.entities}\n return dict(language=self.language, intents=intents, entities=entities)",
"def gene_ID_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene ID\"]\n resD[keyI] = valueI\n\n return resD",
"def remap_ids(self, id_map: Dict[int, int]) -> None:",
"def id_index_map(self):\n result = {}\n for index, component_data in iteritems(self):\n result[id(component_data)] = index\n return result",
"def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict",
"def toJSON(self):\n dictJson = {}\n dictJson[self.name] = self.dicToJSON(self.dicIntents)\n return dictJson",
"def encoding_ids(self):\n # type: () -> list[string_types]\n return self._encoding_ids",
"def get_imgid_dict(ann):\n return {item[1][\"file_name\"]: item[0] for item in ann.imgs.items()}",
"def intents_clustering(self):\n self.phrs2intents = {}\n number_of_other = 10000;\n for i in range(len(self.data)):\n for ut in self.data[i]['utterances']:\n if ut['speaker'] == 'USER':\n if 'segments' in ut.keys():\n for seg in ut['segments']:\n if 'annotations' in seg.keys():\n for anno in seg['annotations']:\n name = anno['name']\n if ut['text'] not in self.phrs2intents.keys():\n self.phrs2intents[ ut['text'] ] = [name]\n elif name not in self.phrs2intents[ ut['text'] ]:\n self.phrs2intents[ ut['text'] ].append(name)\n else:\n if number_of_other > 0:\n self.phrs2intents[ ut['text'] ] = ['other']\n number_of_other -= 1\n self.X = np.array(list(self.phrs2intents.keys()))",
"def real_sids_to_sids(self):\n if not self._real_sids_to_sids:\n self._real_sids_to_sids = {v: k for k, v in self.sids_to_real_sids.items()}\n return self._real_sids_to_sids",
"def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"product\": [\n self.from_entity(entity=\"product\", intent=[\"inform\"]),\n ],\n \"applicant_name\": [\n self.from_entity(entity=\"applicant_name\", intent=[\"inform\"]),\n ],\n \"applicant_dob\": [\n self.from_entity(entity=\"applicant_dob\", intent=[\"inform\"]),\n ],\n \"applicant_phoneno\": [\n self.from_entity(entity=\"applicant_phoneno\", intent=[\"inform\"]),\n ],\n \"applicant_address\": [\n self.from_entity(entity=\"applicant_address\", intent=[\"inform\"]),\n ]\n }",
"def reverse_code_map(self):\n\n return { c.value:(c.ikey if c.ikey else c.key) for c in self.codes}",
"def get_event_mapping():\r\n # Get all events:\r\n all_events = requests.get(base_url + 'check-ins/v2/events', headers=headers3).json()\r\n # Make Dict of event names and ids\r\n event_to_id = {event['attributes']['name']:event['id'] for event in all_events['data']} \r\n return event_to_id",
"def get_ids(self):\n all_networks = []\n network_dict = {}\n for network, status in self.networks.items():\n if status[\"onboarded\"]:\n all_networks.append(\"{}\".format(network))\n network_dict[status[\"name\"]] = network\n\n self.network_ids = all_networks\n return network_dict",
"def convert_tokens_to_ids(self, tokens):\n ids = []\n if isinstance(tokens, str):\n if tokens in self.special_tokens:\n return self.special_tokens[tokens]\n else:\n return self.encoder.get(tokens, self.unk_id)\n for token in tokens:\n if token in self.special_tokens:\n ids.append(self.special_tokens[token])\n else:\n ids.append(self.encoder.get(token, self.unk_id))\n return ids",
"def get_text_mining_mir_dictionary():\n if logger.getEffectiveLevel() == logging.DEBUG or not os.path.exists(OUT_MIR_ALIAS_FILE):\n __create_mir_alias_dictionary__()\n\n mir_alias_to_identifier = {}\n with gzip.open(OUT_MIR_ALIAS_FILE, 'rb') as mir_alias_file:\n for line in mir_alias_file:\n tax_id, mir_id, mir_alias = line.rstrip('\\r\\n').split('\\t')\n mir_alias_to_identifier[(tax_id, mir_alias)] = mir_id\n return mir_alias_to_identifier",
"def text_to_id(tweets_dict):\n text_to_id_dict = {}\n for key in tweets_dict:\n # we assume that there are no retweets as this has been preprocessed before\n text_to_id_dict[key] = tweets_dict[key][\"text\"]\n return text_to_id_dict",
"def get_family_id_to_index():\n \n family_ids = open(\n resource_filename('contextual_lenses.resources', 'pfam_family_ids.txt'),\n 'r').readlines()\n family_id_to_index = {}\n for i, family_id in enumerate(family_ids):\n family_id_to_index[family_id.replace('\\n', '')] = i\n\n return family_id_to_index",
"def to_global_ids(entry, id_map, global_names=None, local_names=None):\n global_replacements = []\n for r in entry['replacements']:\n id_ = r[2]\n if global_names is not None and local_names is not None:\n assert local_names[id_] == global_names[id_map[id_]], f\"{local_names[id_]} != {global_names[id_map[id_]]}\"\n\n # cast id into string to make format compatible with spacy's NER classifier\n global_replacements.append((r[0], r[1], str(id_map[id_])))\n\n entry['replacements'] = global_replacements\n return entry"
]
| [
"0.6695217",
"0.64867634",
"0.6394871",
"0.60963374",
"0.59980714",
"0.5829211",
"0.5791798",
"0.57791173",
"0.56940323",
"0.5672877",
"0.56094426",
"0.5583357",
"0.55793476",
"0.55350786",
"0.5458744",
"0.5416988",
"0.538743",
"0.5383223",
"0.5371814",
"0.53664494",
"0.5333257",
"0.53216773",
"0.53196317",
"0.5318917",
"0.5303527",
"0.5276376",
"0.52710634",
"0.52696437",
"0.5255452",
"0.5254229"
]
| 0.7101111 | 0 |
Creates a class label for a set of queries. These labels are used to split queries by type. Labels follow the format of "domain" or "domain|intent". For example, "date|get_date". | def get_class_labels(
tuning_level: list, query_list: ProcessedQueryList
) -> List[str]:
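# When tuning at the intent level, each query is labeled "<domain>.<intent>"; otherwise only the domain is used.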
if TuneLevel.INTENT.value in tuning_level:
return [
f"{d}.{i}" for d, i in zip(query_list.domains(), query_list.intents())
]
else:
return [f"{d}" for d in query_list.domains()] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_label(termtype, timeperiod):\n label = 'Graph these comma-separated noun phrases (yearly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'Yearly' \\\n else 'Graph these comma-separated noun phrases (monthly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'Monthly' \\\n else 'Graph these comma-separated entity mentions (yearly frequencies):' if termtype == 'Entity mentions' and timeperiod == 'Yearly' \\\n else 'Graph these comma-separated entity mentions (monthly frequencies):' if termtype == 'entity mentions' and timeperiod == 'Monthly' \\\n else 'Enter a phrase and get similar terms and the distribution of its \"cluster\"'\n return label",
"def labels(self, label_type = 'basic'):\n\t\tif label_type == None:\n\t\t\treturn {}\n\t\telif label_type == 'basic':\n\t\t\treturn self.dependency_labels()\n\t\telif label_type == 'SAMT':\n\t\t\treturn self.SAMT_labels()\n\t\telif label_type == 'all':\n\t\t\treturn self.label_all()\n\t\telse:\n\t\t\traise ValueError(\"%s is no valid labeltype\" %label_type)",
"def create_label(**kwargs):\n Label = Entity.Label\n kwargs[Label.project] = project\n kwargs[Label.seconds_to_label] = kwargs.get(Label.seconds_to_label.name,\n 0.0)\n data = {\n Label.attribute(attr) if isinstance(attr, str) else attr:\n value.uid if isinstance(value, DbObject) else value\n for attr, value in kwargs.items()\n }\n query_str, params = query.create(Label, data)\n query_str = query_str.replace(\n \"data: {\", \"data: {type: {connect: {name: \\\"Any\\\"}} \")\n res = project.client.execute(query_str, params)\n return Label(project.client, res[\"createLabel\"])",
"def create_labels(dataset, prog_type_dict, other_label, **kwargs):\n top_labels = choose_top_labels(dataset, prog_type_dict, **kwargs)\n\n label_to_idx = {x: top_labels.index(x) for x in top_labels}\n idx_to_label = {v: k for k, v in label_to_idx.items()}\n if other_label != \"\":\n label_to_idx[other_label] = len(top_labels) + 1\n idx_to_label[len(top_labels)] = other_label\n return label_to_idx, idx_to_label",
"def create_type_question_ft(self):\n for i in range(0, len(self.cleaned_questions.splitlines())):\n self.type_question_ft += '__label__'+self.types_q.splitlines()[i] + ' ' + self.cleaned_questions.splitlines()[i] + '\\n'",
"def label_type(self) -> str:\n return pulumi.get(self, \"label_type\")",
"def simplify_labels(path : str, query_label : str) -> (str, str):\n yogurt_map = {'alpro-blueberry-soyghurt',\n 'alpro-vanilla-soyghurt',\n 'arla-mild-vanilla-yoghurt',\n 'arla-natural-mild-low-fat-yoghurt',\n 'arla-natural-yoghurt',\n 'oatly-natural-oatghurt',\n 'valio-vanilla-yoghurt',\n 'yoggi-strawberry-yoghurt',\n 'yoggi-vanilla-yoghurt',\n 'soyghurt',\n 'yoghurt'}\n\n milk_map = {'alpro-fresh-soy-milk',\n 'alpro-shelf-soy-milk',\n 'arla-ecological-medium-fat-milk',\n 'arla-lactose-medium-fat-milk',\n 'arla-medium-fat-milk',\n 'arla-sour-milk',\n 'arla-standard-milk',\n 'garant-ecological-medium-fat-milk',\n 'garant-ecological-standard-milk',\n 'oat-milk',\n 'milk',\n 'oatly-oat-milk',\n 'sour-milk',\n 'soy-milk'}\n\n sour_cream_map = {'arla-ecological-sour-cream',\n 'arla-sour-cream',\n 'sour-cream'}\n\n fruit_juice_map = {'bravo-apple-juice',\n 'bravo-orange-juice',\n 'god-morgon-apple-juice',\n 'god-morgon-orange-juice',\n 'god-morgon-orange-red-grapefruit-juice',\n 'god-morgon-red-grapefruit-juice',\n 'tropicana-apple-juice',\n 'tropicana-juice-smooth',\n 'tropicana-golden-grapefruit',\n 'tropicana-mandarin-morning',\n 'juice'}\n\n apple_map = {'apple',\n 'golden-delicious',\n 'granny-smith',\n 'red-delicious',\n 'royal-gala',\n 'pink-lady'}\n\n melon_map = {'cantaloupe',\n 'galia-melon',\n 'honeydew-melon',\n 'melon',\n 'watermelon'}\n\n pear_map = {'anjou',\n 'kaiser',\n 'pear',\n 'conference'}\n\n pepper_map = {'green-bell-pepper',\n 'orange-bell-pepper',\n 'red-bell-pepper',\n 'yellow-bell-pepper',\n 'pepper'}\n\n tomato_map = {'regular-tomato',\n 'tomato',\n 'vine-tomato',\n 'beef-tomato'}\n\n orange_map = {'nectarine',\n 'orange',\n 'satsumas'}\n\n potato_map = {'floury-potato',\n 'potato',\n 'solid-potato',\n 'sweet-potato'}\n\n onion_map = {'onion',\n 'yellow-onion'}\n\n mushroom_map = {'brown-cap-mushroom',\n 'mushroom'}\n\n mappings = [(yogurt_map, 'yogurt'),\n (milk_map, 'milk'),\n (sour_cream_map, 'sour_cream'),\n (fruit_juice_map, 'fruit_juice'),\n (apple_map, 'apple'),\n (melon_map, 'melon'),\n (pear_map, 'pear'),\n (pepper_map, 'pepper'),\n (tomato_map, 'tomato'),\n (orange_map, 'orange'),\n (potato_map, 'potato'),\n (onion_map, 'onion'),\n (mushroom_map, 'mushroom')]\n\n for mapping in mappings:\n if query_label in mapping[0]:\n return path, mapping[1]\n else:\n continue\n return path, query_label",
"def makeLabel(self, cmd):\n if cmd.type in ['Function', 'Call']:\n return cmd.arg1\n\n if self.current_function_name is not None:\n prefix = self.current_function_name\n else:\n prefix = self.ns\n return prefix + '$' + cmd.arg1",
"def raw_label_key(self) -> str:\n\n # TODO(nikhilmehta): Change the task object to allow label_key to be a list.\n task_type = self._problem_statement.tasks[0].type\n if task_type.HasField('multi_class_classification'):\n return task_type.multi_class_classification.label\n if task_type.HasField('binary_classification'):\n return task_type.binary_classification.label\n if task_type.HasField('one_dimensional_regression'):\n return task_type.one_dimensional_regression.label\n raise ValueError('Invalid task type: {}'.format(task_type))",
"def __init__(__self__, *,\n label_name: str,\n label_type: str):\n pulumi.set(__self__, \"label_name\", label_name)\n pulumi.set(__self__, \"label_type\", label_type)",
"def get_labels(labels_name):\n labels = {\n \"labels_num\":['Blogs - Change', 'Customer Activity - Change', 'Days Since Last Login - Change', \n 'Happiness Index - Change', 'Happiness Index - Current Month', 'Happiness Index - Monthly', \n 'Logins - Change', 'Longevity - Modulo 12', 'Longevity - Modulo 18', 'Longevity - Modulo 24', \n 'Longevity - Months', 'Views - Change'],\n \"labels_cat\":['Longevity - Modulo 6', 'Support Cases - Change', 'Support Cases - Current Month', 'Support Priority - Change',\n 'Support Priority - Current Month'],\n \"target\":\"Churn\",\n \"labels_pca\":['Happiness Index - Monthly', 'Longevity - Modulo 12', 'Happiness Index - Change', \n 'Blogs - Change', 'Happiness Index - Current Month', 'Longevity - Modulo 24', \n 'Customer Activity - Change', 'Logins - Change', 'Longevity - Modulo 18', \n 'Days Since Last Login - Change']\n }\n return labels[labels_name]",
"def get_label(cls):\n return cls._type_name(cls.label)",
"def get_label(cls):\r\n return cls._type_name(cls.label)",
"def _create_label(self, label: str, ent_id: Union[str, None]) -> str:\n if isinstance(ent_id, str):\n label = \"{}{}{}\".format(label, self.ent_id_sep, ent_id)\n return label",
"def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment",
"def labels(self) -> list[\"Label\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"labels\", _args)\n _ctx = Label(_ctx)._select_multiple(\n _name=\"name\",\n _value=\"value\",\n )\n return _ctx.execute_sync(list[Label])",
"def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name",
"def create_dimension_labels(gll, parameters: list):\n dimstr = '[ ' + ' | '.join(parameters) + ' ]'\n gll['MODEL/data'].dims[0].label = 'element'\n gll['MODEL/data'].dims[1].label = dimstr\n gll['MODEL/data'].dims[2].label = 'point'",
"def display_label(f_class, catalog): \n # Transform the top n class indexes into class labels LIST.\n return catalog[str(f_class)]",
"def labelize(docs, label_type='doc', class_labels=[], offset=0):\n # import gensim\n assert TDoc.isListOfTokens(docs, n_test=10), \"Ill-formated input docs: %s\" % docs\n\n TaggedDocument = gensim.models.doc2vec.TaggedDocument\n labeledDocs = []\n\n # testing\n labelx = []\n if len(class_labels) > 0: \n assert len(docs) == len(class_labels)\n # docLabels = [] # test uniqueness only\n\n counter = {l: 0 for l in np.unique(class_labels)}\n for i, doc in enumerate(docs): \n dID = counter[class_labels[i]]\n dID = dID + offset\n label = '%s_%s' % (class_labels[i], dID); labelx.append(label)\n labeledDocs.append(TaggedDocument(doc, [label, ]))\n \n # update document ID of the same class label\n counter[class_labels[i]] += 1\n else: \n for i, doc in enumerate(docs):\n dID = i + offset\n label = '%s_%s' % (label_type, dID); labelx.append(label)\n labeledDocs.append(TaggedDocument(doc, [label, ]))\n\n nuniq, ntotal = len(np.unique(labelx)), len(labelx)\n # print('labelize> n_uniq: %d =?= n_total: %d' % (nuniq, ntotal))\n assert len(np.unique(labelx)) == len(labelx), \"labels are not unique %d vs %d\" % (nuniq, ntotal)\n return labeledDocs",
"def labels(self):\n\n param=self\n\n l=len(param)\n\n sweep_label=[]\n\n for index,name in enumerate(param.names):\n\n sweep_label.append((\\\n ''.join([c for c in name if c.isupper()]))\\\n .replace(\"IDT\",\"\")\\\n .replace(\"S\",\"\")\\\n .replace(\"M\",\"\"))\n\n stringout=[]\n\n unique={name:list(dict.fromkeys(values)) for name,values in zip(param.names,param.values)}\n\n for i in range(l):\n\n tmp_lab=''\n\n for lab,name in zip(sweep_label,self.names):\n\n tmp_lab=tmp_lab+lab+str(unique[name].index(param()[name][i]))\n\n stringout.append(tmp_lab)\n\n return stringout",
"def label_metric_query(session, metrics_subq, properties, label_lang):\n # i wish i could compute the alias joins inline in this function rather than upfront, but\n # I believe I need the column names before I can start joining.\n aliased_joins = generate_aliased_tables_for_labelling(properties)\n aliased_label_cols = []\n dimension_label_params = []\n for i, aj in enumerate(aliased_joins):\n if aj['label_table']:\n # the left key from the unlabelled metric\n metrics_subq_join_col = getattr(metrics_subq.c, f'agg_{i}')\n # define the right key\n label_join_table = aj['label_table']\n label_join_key = aj['join_key']\n label_subtable_cols = [getattr(label_join_table, label_join_key),\n getattr(label_join_table, 'lang'),\n getattr(label_join_table, 'label')]\n label_join_table_lang_filtered = session.query(*label_subtable_cols) \\\n .filter(label_join_table.lang == label_lang) \\\n .subquery(f'label_sub_{i}')\n\n # was\n # label_col = aj['label_table'].label.label(f'label_agg_{i}')\n label_col = label_join_table_lang_filtered.c.label.label(f'label_agg_{i}')\n\n label_join_column = getattr(label_join_table_lang_filtered.c, label_join_key)\n dimension_label_tuple = (label_join_table_lang_filtered, label_join_column, metrics_subq_join_col)\n dimension_label_params.append(dimension_label_tuple)\n\n else: # we probably aren't joining, like for labelling years\n label_col = getattr(metrics_subq.c, f'agg_{i}').label(f'label_agg_{i}')\n aliased_label_cols.append(label_col)\n\n # first there will always be the bias_value to label\n bias_sublabel_table = session.query(label_misc).filter(label_misc.lang == label_lang,\n label_misc.type == 'bias').subquery('label_sub')\n\n label_query_cols = [metrics_subq, bias_sublabel_table.c.label.label('bias_label'), *aliased_label_cols]\n labelled_q = session.query(*label_query_cols) \\\n .outerjoin(bias_sublabel_table,\n bias_sublabel_table.c.src == metrics_subq.c.bias_value)\n\n for (label_join_table_lang_filtered, label_join_column, metrics_subq_join_col) in dimension_label_params:\n labelled_q = labelled_q \\\n .outerjoin(label_join_table_lang_filtered, label_join_column == metrics_subq_join_col)\n\n return labelled_q",
"def get_label(domain, pathtype, method):\n verb = LABELS[method]\n if method == 'POST' or pathtype != 'resource':\n noun = capp.config['DOMAIN'][domain]['item_title']\n article = 'a'\n else:\n noun = domain\n article = 'all'\n return '{0} {1} {2}'.format(verb, article, noun)",
"def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n query_type = kwargs['query_type']\n \n self._results_ = None\n \n if cmp(FDH._QTYPE_BANNER_, query_type) == 0:\n self._query_name_ = 'report_banner_metrics'\n elif cmp(FDH._QTYPE_LP_, query_type) == 0:\n self._query_name_ = 'report_LP_metrics'\n elif cmp(FDH._QTYPE_BANNER_LP_, query_type) == 0:\n self._query_name_ = 'report_bannerLP_metrics'\n elif cmp(FDH._QTYPE_DONATIONS_, query_type) == 0:\n self._query_name_ = 'report_donation_metrics'\n elif cmp(FDH._QTYPE_TOTAL_, query_type) == 0:\n self._query_name_ = 'report_total_metrics'\n elif cmp(FDH._QTYPE_TOTAL_DONATIONS_, query_type) == 0:\n self._query_name_ = 'report_total_donations'",
"def fill_operationgroup_name(self, entities_in_group):\n new_name = \"of \" + entities_in_group + \" varying \"\n if self.range1 is not None:\n new_name += json.loads(self.range1)[0]\n if self.range2 is not None:\n new_name += \" x \" + json.loads(self.range2)[0]\n if self.range3 is not None:\n new_name += \" x \" + json.loads(self.range3)[0]\n\n new_name += \" - \" + date2string(datetime.datetime.now(), date_format=LESS_COMPLEX_TIME_FORMAT)\n self.name = new_name",
"def label(self):\n # type: () -> str\n labels = self.__class__.__labels__\n return force_str(labels.get(self.value, self.name))",
"def __str__(self) -> str:\n return f'label: {self.label}, freq: {self.freq}, code: {self.code}'",
"def _string_labels_of_sentence(self, sentence):\n label_strings = []\n ongoing_label = 'O'\n for token_index, token_attribute_list in enumerate(sentence):\n raw_label_string = token_attribute_list[self.task_label_index].strip('*')\n if '(' in raw_label_string:\n ongoing_label = raw_label_string.strip('(').strip(')')\n beginning = True\n #labels[token_index] = self.category_int_of_label_string(ongoing_label)\n if ongoing_label == 'O':\n label_strings.append(ongoing_label)\n else:\n label_strings.append('{}-{}'.format('B' if beginning else 'I', ongoing_label))\n beginning = False\n if ')' in raw_label_string:\n ongoing_label = 'O'\n #bioes_tags = bio2_to_bioes(label_strings)\n bioes_tags = label_strings\n return bioes_tags",
"def labelName(self):\n if self.isRequired:\n return '%s*' % self.name\n return self.name",
"def build_user_labels_request(self):\n request = {\n \"systemLabels\": {\n \"name\": \"appName\",\n \"list_name\": [ \"a\",\"b\",\"c\"],\n \"boolean_value\": False\n },\n \"userLabels\": {\n \"key1\": \"value1\",\n \"key2\": \"value2\"\n }\n }\n return request"
]
| [
"0.6051387",
"0.5911016",
"0.58429545",
"0.5480097",
"0.54685163",
"0.5401774",
"0.5383226",
"0.5366071",
"0.5278606",
"0.5277043",
"0.5194852",
"0.5194105",
"0.51914555",
"0.51535213",
"0.5120259",
"0.5098108",
"0.50675786",
"0.5051951",
"0.50454336",
"0.50446814",
"0.5035657",
"0.5034155",
"0.50227123",
"0.5022278",
"0.50009006",
"0.49977705",
"0.4995396",
"0.49943367",
"0.49778044",
"0.49609348"
]
| 0.62765896 | 0 |
Removes duplicates in the text queries. | def deduplicate_raw_text_queries(log_queries_iter) -> List[str]:
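# Casting to a set removes exact duplicates; note that the original query order is not preserved.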
return list(set(q for q in log_queries_iter)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)",
"def remove_duplicates(self, hits):\n\t\tseen = set()\n\t\tkeep = []\n\n\t\tfor i in range(len(hits)):\n\t\t\tif hits[i][\"Text\"] not in seen:\n\t\t\t\tseen.add(hits[i][\"Text\"])\n\t\t\t\tkeep.append(hits[i])\n\n\t\treturn keep",
"def clean_duplicate_documents(self):\n title_k = lambda x: x.title\n for k, g in groupby(sorted(self.annotation_documents, key=title_k), title_k):\n g = list(g)\n if len(g) > 1:\n # check first if one is in test set\n to_remove = [x for x in g if x not in self.test]\n if (\n len(to_remove) > 1\n ): # if test is not matched, make subselection based on annotation unit count\n select_k = lambda x: (\n len(x.events) + len(x.sentiment_expressions),\n x.annotator_id != \"gilles\",\n )\n to_remove.sort(key=select_k, reverse=True)\n to_remove = to_remove[1:]\n for docrm in to_remove:\n self.annotation_documents.remove(docrm)\n if docrm in self.dev:\n self.dev.remove(docrm)\n elif docrm in self.test:\n self.test.remove(docrm)\n print(f\"Duplicate doc removed: {docrm}\")",
"def find_duplicate_texts(self, name, text_key=None):\n if not text_key: text_key = self.text_key\n values = self._get_valuemap(name, text_key=text_key)\n dupes_check = []\n text_dupes = []\n for value in values:\n if value[1] in dupes_check:\n text_dupes.append(value[1])\n dupes_check.append(value[1])\n text_dupes = list(set(text_dupes))\n dupes = []\n for value in values:\n if value[1] in text_dupes:\n dupes.append(value)\n dupes = list(sorted(dupes, key=lambda x: x[1]))\n return dupes",
"def clean_duplicate(self):\r\n self.elements = list(set(self.elements))\r\n self.elements = [e for e in self.elements if e != '']",
"def __remove_duplicates(self, word_list: List[str]) -> List[str]:\n\n # here comes the extra complicated move to remove duplicate words from a query\n # this approach always keeps words which are at the beginning of a query and only removes duplicate words\n # that occur later in the query\n unique_word_list = []\n\n for word in word_list:\n if word not in unique_word_list:\n unique_word_list.append(word)\n\n return unique_word_list",
"def removeAllTitleQueries(self):\n self.queries[\"ti\"] = []",
"def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]",
"def remove_duplicated_words(tokens):\n\n return sorted(set(tokens))",
"def test_duplicate_word_removal(self):\n data = [{\"Header\": \"This is a Header, and this is a Header\", \"Paragraph\": \"This is a Paragraph, and this is a \"\n \"Paragraph\",\n \"Header_keywords\": [\"header\", \"header\"],\n \"Paragraph_keywords\": [\"paragraph\", \"paragraph\"], \"slide\": 10}]\n remove_duplicates = duplicate_word_removal(data)\n data[0][\"Header_keywords\"] = [\"header\"]\n data[0][\"Paragraph_keywords\"] = [\"paragraph\"]\n self.assertEqual(data, remove_duplicates)",
"def selectDistinctWords():\n with open('DistinctWordsNew.txt', 'w') as fOut:\n for line in open('SelectedQuestions.txt'):\n qJson = json.loads(line.strip())\n qtitle = qJson['qtitle']\n qbody = qJson['qbody']\n rawQuestion = qtitle + ' ' + qbody\n tokens = set(Utils.preprocessText(rawQuestion).split())\n for q in tokens:\n fOut.write('%s\\n' % q)",
"def _trim_duplicates(all_matches):\n trimmed_list = IndexedSet()\n for match in all_matches:\n if (\n match\n and match not in trimmed_list\n and match[::-1] not in trimmed_list\n ):\n trimmed_list.add(match)\n return trimmed_list",
"def post_process(keyphrases):\n processed_keyphrases = []\n\n # Remove duplicates from the single phrases which are occurring in multi-keyphrases\n multi_phrases = [phrases for phrases in keyphrases if len(phrases[0].split()) > 1]\n single_phrase = [phrases for phrases in keyphrases if len(phrases[0].split()) == 1]\n for tup in single_phrase:\n kw = tup[0]\n for tup_m in multi_phrases:\n kw_m = tup_m[0]\n r = kw_m.find(kw)\n if r > -1:\n try:\n single_phrase.remove(tup)\n except:\n continue\n\n # Remove same word occurrences in a multi-keyphrase\n for multi_key, multi_score in multi_phrases:\n kw_m = multi_key.split()\n unique_kp_list = list(dict.fromkeys(kw_m))\n multi_keyphrase = ' '.join(unique_kp_list)\n processed_keyphrases.append((multi_keyphrase, multi_score))\n\n processed_keyphrases.extend(single_phrase)\n\n return processed_keyphrases",
"def shared_words(text1, text2):\r\n\r\n list1 = tokenize(text1.strip(' '))\r\n list2 = tokenize(text2.strip(' '))\r\n\r\n list3 = set(list1) & set(list2)\r\n list3.remove(' ');\r\n\r\n return list3",
"def eliminate_common(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Remove the upper case words\n # Remove common words\n # Remove stopwords\n # TODO: maybe check just for nouns / verbs ???\n text = set(w for w in text if w == w.lower() and\n w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english'))\n\n return text",
"def remove_duplicates(list1):\n result = []\n \n for word in list1:\n if word not in result:\n result.append(word)\n return result",
"def rm_duplicates(self):\n # get uniq representation of existing detection documents\n existing = set(ed.uniq_data for ed in self.existing_detections)\n # remove duplicates\n for idx in xrange(len(self.new_detections)-1, -1, -1):\n nd = self.new_detections[idx]\n if nd.uniq_data in existing:\n self.new_detections.pop(idx)",
"def duplicates_filter(tweet_objects):\n\n cache = []\n filtered_list = []\n\n for tweet in tweet_objects:\n t_text = text_from_tweet(tweet)\n \n if t_text not in cache:\n filtered_list.append(tweet)\n cache.append(t_text)\n\n return filtered_list",
"def clean_query_list(queries: List[str]) -> List[str]:\n return [remove_leading_whitespace_and_empty_lines(query) for query in queries]",
"def _remove_duplicates(self):\n for key in self._role_strings_info:\n self._role_strings_info[key] = [dict(tupleized) for tupleized in set(tuple(item.items())\n for item in self._role_strings_info[key])]",
"def delete_search_words(self,\r\n index,\r\n entrytext):\r\n\r\n for a_temp in DELETECHARACTERS:\r\n entrytext = entrytext.replace(a_temp, BLANK)\r\n\r\n for w in set(entrytext.split()):\r\n w = w.strip()\r\n\r\n if (self.word_dict_contains(w)\r\n and w not in SMALLWORDS+[BLANK,EMPTYCHAR]):\r\n\r\n if str(index) in self.get_indexes_for_word(w):\r\n self.discard_index_from_word(w,index)\r\n if not self.get_indexes_for_word(w):\r\n self.delete_word(w)",
"def _remove_duplicates(seq):\n d = {}\n for item in seq:\n item = item.lower()\n if item not in d:\n d[item] = True\n yield item",
"def sanitize(text):\n \n # Convert text to lowercase\n text = text.lower()\n\n # Replace all whitespace with a single space\n text = re.sub(r'\\s+',' ',text)\n\n # Remove all links (e.g. [abc](xyz)def --> [abc]def)\n text = re.sub(r'(\\[.*\\])(\\(.*\\))', r'\\1', text)\n\n # Remove URLs\n text = re.sub(r'((http[s]?://)?www.\\S+)|(http[s]?://\\S+)', '', text) \n\n # Split text on single spaces\n words = text.split()\n \n # Separate external punctuation then remove non-ending and non-embedded punctuation\n tokens = []\n for word in words:\n \tseparate_tokens(word, tokens)\n \n parsed_text = \"\"\n unigrams = \"\"\n bigrams = \"\"\n trigrams = \"\"\n \n # Populate lists to return\n for index, token in enumerate(tokens):\n \tparsed_text += token + ' '\n \tif token not in common:\n \t\tunigrams += token + ' '\n \t\tif index + 1 <= len(tokens)-1 and tokens[index+1] not in common:\n \t\t\tbigram = token + '_' + tokens[index+1]\n \t\t\tbigrams += bigram + ' '\n \t\t\tif index + 2 <= len(tokens)-1 and tokens[index+2] not in common:\n \t\t\t\ttrigrams += bigram + '_' + tokens[index+2] + ' '\n \n return parsed_text.strip().split() + unigrams.strip().split() + bigrams.strip().split()+ trigrams.strip().split()",
"def removeDuplicates(seq):\n\n pass",
"def _clean(self, texts, no_punc=False):\n result = ''\n sw = self._sw_no_punc_dict if no_punc else self._sw_dict\n for t in texts:\n if t not in sw:\n result += t\n return result",
"def __delete_duplicates(self):\n log = logging.getLogger()\n log.debug(\"\\n---> Duplicate check <---\")\n\n chromosomes = list(set(self.chromosomes))\n diff = self.size - len(chromosomes)\n\n if diff > 0:\n log.debug(\"---> Duplicate(s) found! <---\")\n for i in range(diff):\n chromosomes.append(\n Chromosome(self.__generate_random_gene_sequence(),\n self.environment))\n else:\n log.debug(\"---> No duplicates found! <---\")\n\n self.chromosomes = chromosomes",
"def uniquewords(self):\n vas = set({})\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.add(s_i)\n l_i = list(vas)\n self.print(l_i)\n self.write(l_i)\n logging.debug(\"Starting with to\")\n return l_i",
"def _clear_results(self):\n self.sentence_data = []",
"def remove_duplicates(table=None):\n mysql_connection = get_db_connection()\n if table is not None:\n tablenames = [table]\n else:\n tablenames = mysql_connection.getTableNames()\n for tablename in tablenames:\n print(tablename)\n if DB_TABLE_STRUCTURE[tablename]['primary_key'] is None:\n # Rename table for taking only unique records\n column_names = ', '.join(mysql_connection.getColumnNames(tablename))\n commands = [f'RENAME TABLE {tablename} to {tablename}_tmp',\n f'CREATE TABLE {tablename} SELECT * FROM {tablename}_tmp GROUP BY {column_names}',\n f'ALTER TABLE {tablename} CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci',\n f'DROP TABLE {tablename}_tmp']\n if tablename != 'texts':\n commands.append(f'CREATE INDEX bid_id_index ON {tablename} (bid_id)')\n if tablename == 'docs' or tablename == 'texts':\n commands.append(f'CREATE INDEX doc_url_index ON {tablename} (doc_url)')\n mysql_connection.execute_commands(commands)",
"def add_search_words(self,\r\n index,\r\n entrytext):\r\n\r\n\r\n for a_temp in DELETECHARACTERS:\r\n entrytext = entrytext.replace(a_temp, BLANK)\r\n\r\n for w in set(entrytext.split()):\r\n\r\n w = w.strip()\r\n if self.word_dict_contains(w):\r\n self.add_word(w,index)\r\n\r\n else:\r\n if w not in SMALLWORDS+[BLANK,EMPTYCHAR]:\r\n\r\n self.initiate_new_word(w,index)"
]
| [
"0.7845041",
"0.6695697",
"0.6653341",
"0.65625846",
"0.639675",
"0.6349758",
"0.6328951",
"0.6103831",
"0.6063389",
"0.60107267",
"0.59329724",
"0.5881362",
"0.58800447",
"0.5849684",
"0.5797378",
"0.5773862",
"0.5744445",
"0.57247037",
"0.56726134",
"0.5653875",
"0.562954",
"0.5612429",
"0.56099546",
"0.5591724",
"0.55897534",
"0.55557036",
"0.5528111",
"0.5508973",
"0.55061466",
"0.5499457"
]
| 0.76712906 | 1 |
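A minimal standalone sketch of the de-duplication helper in the record above; the sample log queries are made up for illustration:

from typing import Iterable, List

def deduplicate_raw_text_queries(log_queries_iter: Iterable[str]) -> List[str]:
    # Collapse repeated raw text queries; ordering of the result is not guaranteed.
    return list(set(q for q in log_queries_iter))

# Illustrative log queries (invented for this sketch).
logs = ["book a table", "weather today", "book a table", "weather today"]
unique = deduplicate_raw_text_queries(logs)
assert sorted(unique) == ["book a table", "weather today"]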
Converts text queries to processed queries using an annotator. | def convert_text_queries_to_processed(
self, text_queries: List[str]
) -> List[ProcessedQuery]:
logger.info("Loading a Bootstrap Annotator to process log queries.")
annotator_params = DEFAULT_AUTO_ANNOTATOR_CONFIG
annotator_params["app_path"] = self.app_path
bootstrap_annotator = BootstrapAnnotator(**annotator_params)
return bootstrap_annotator.text_queries_to_processed_queries(
text_queries=text_queries
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_analysis(self, query, key=None):\n logger.info(\"Running analysis on query...\")\n core_annotation = Annotation(query, key)\n clf_pipeline = AnalysisPipeline()\n entity_pipeline = AnalysisPipeline()\n clf = self.clf_accessor.get_classification_pipeline('multiclass', 'intent_classifier')\n\n \"\"\" Create the IntentClassificationAnnotator using the pipeline 'clf' \"\"\"\n clf_annotator = IntentClassificationAnnotator('clf', clf)\n clf_pipeline.add_annotator(clf_annotator)\n \"\"\" Run clf_pipeline to obtain intent classification \"\"\"\n core_annotation = clf_pipeline.analyze(core_annotation)\n \"\"\" Ensure classification results exists, otherwise raise AnalyzerError \"\"\"\n if core_annotation.annotations['results']['classification'] is []:\n raise AnalyzerError(\"No intent classification results.\")\n \"\"\" Create annotators based on entity types of intent classification \"\"\"\n entities = core_annotation.annotations['entity_types']\n\n \"\"\" Obtain gazetteers associated with the given key \"\"\"\n gazetteers = self.gaz_accessor.get_gazeteers(key)\n\n logger.debug(\"Core annotation intents: {0}\".format(core_annotation.annotations['results']['classification']))\n logger.debug(\"Core annotation entities: {0}\".format(core_annotation.annotations['entity_types']))\n logger.debug(\"Core annotation stopwords: {0}\".format(core_annotation.annotations['stopwords']))\n\n \"\"\" Iterate over entities and create an the appropriate Annotator based on the entity_type \"\"\"\n for entity in entities:\n \"\"\" Access the binary classifier for the appropriate entity types and create BinaryClassifierAnnotator\"\"\"\n if entity['entity_type'] == 'binary_classifier':\n logger.debug(\"Creating BinaryClassificationAnnotator for: {0}\".format(entity['entity_name']))\n clf = self.clf_accessor.get_classification_pipeline('binary_classifier', entity['entity_name'])\n binary_clf_annotator = BinaryClassificationAnnotator(entity['entity_name'], clf)\n entity_pipeline.add_annotator(binary_clf_annotator)\n\n \"\"\" Create a RegexAnnotator for each regex entity type\"\"\"\n if entity['entity_type'] == 'regex':\n logger.debug(\"Creating RegexAnnotator for: {0}\".format(entity['entity_name']))\n regex_annotator = RegexAnnotator(entity['entity_name'], Regexer(entity['regular_expressions']))\n entity_pipeline.add_annotator(regex_annotator)\n\n \"\"\" Create a BinaryRegexAnnotator for each regex entity type\"\"\"\n if entity['entity_type'] == 'binary_regex':\n logger.debug(\"Creating BinaryRegexAnnotator for: {0}\".format(entity['entity_name']))\n regex_annotator = BinaryRegexAnnotator(entity['entity_name'], Regexer(entity['regular_expressions']))\n entity_pipeline.add_annotator(regex_annotator)\n\n \"\"\" Create a NaiveNumberAnnotator for each number entity type\"\"\"\n if entity['entity_type'] == 'number':\n logger.debug(\"Creating NaiveNumberAnnotator for: {0}\".format(entity['entity_name']))\n number_annotator = NaiveNumberAnnotator(entity['entity_name'], NumberExtractor())\n entity_pipeline.add_annotator(number_annotator)\n\n \"\"\" Create a FuzzyMatchAnnotator for each fuzzy_match entity type\"\"\"\n if entity['entity_type'] == 'fuzzy_match':\n logger.debug(\"Creating FuzzyFindAnnotator for: {0}\".format(entity['entity_name']))\n logger.debug(\"Entity Keywords: {}\".format(entity['keywords']))\n fuzzy_matcher_annotator = FuzzyMatcherAnnotator(entity['entity_name'], FuzzyMatcher(), entity['keywords'])\n entity_pipeline.add_annotator(fuzzy_matcher_annotator)\n\n \"\"\" Create a DatetimeAnnotator for each 
number entity type\"\"\"\n if entity['entity_type'] == 'datetime':\n logger.debug(\"Creating DatetimeAnnotator for: {0}\".format(entity['entity_name']))\n duckling_instance = self.duckling_factory.getDucklingInstance()\n parser = DucklingDatetimeParser(duckling_instance)\n datetime_annotator = DatetimeAnnotator(entity['entity_name'], parser)\n entity_pipeline.add_annotator(datetime_annotator)\n\n \"\"\" Access the gazetteer for the appropriate entity types and create an GazetteerAnnotator \"\"\"\n if entity['entity_type'] == 'gazetteer' or entity['entity_type'] == 'simple_gazetteer':\n if gazetteers is not None:\n logger.debug(\"Creating GazetteerAnnotator for: {0}\".format(entity['entity_name']))\n \"\"\" Check to make sure gazetteers contains the gazetteer type to avoid key error \"\"\"\n if entity['entity_name'] in gazetteers.keys():\n gaz_annotator = GazetteerAnnotator(entity['entity_name'], gazetteers[entity['entity_name']])\n entity_pipeline.add_annotator(gaz_annotator)\n\n core_annotation = entity_pipeline.analyze(core_annotation)\n return core_annotation.annotations['results']",
"def _get_annotations(self, text, language=''):\n body = {\n 'document': {\n 'type': 'PLAIN_TEXT',\n 'content': text,\n },\n 'features': {\n 'extract_syntax': True,\n },\n 'encodingType': 'UTF32',\n }\n if language:\n body['document']['language'] = language\n\n request = self.service.documents().annotateText(body=body)\n response = request.execute()\n tokens = response.get('tokens', [])\n language = response.get('language')\n\n return {'tokens': tokens, 'language': language}",
"def annotate(self, **annotations):\n return AnnotatedQuery(self, annotations)",
"def query(self,text_input,prefix='answer:',convert_to_string=True):\n predictions, raw_outputs = self.model.predict([text_input])\n raw_outputs = [np.max(softmax([s[1][0] for s in v.items()][0])) for v in raw_outputs[0]]\n preds = [[(i[0],i[1],raw_outputs[k]) for i in p.items()][0] for k,p in enumerate(predictions[0])]\n return self._post_process_output(preds,convert_to_string=convert_to_string)",
"def _get_raw_annotations_for_text(text, ontologies='MESH', semantic_types=None):\n\n if semantic_types is None:\n semantic_types = ()\n\n params = {}\n params['text'] = text\n params['ontologies'] = ontologies\n params['semantic_types'] = ','.join(semantic_types)\n response = _make_api_call('http://data.bioontology.org/annotator', params)\n raw_annotations = response.json()\n return raw_annotations",
"def get_emotion_analysis(text):\n return alchemy_language.emotion(text=text)",
"def annotate(api_key, text, ontologies=[], longest_only=False, expand_mappings=False, include=[]):\n annotations = []\n url = BIOPORTAL_API_BASE + '/annotator'\n\n headers = {\n 'content-type': \"application/json\",\n 'authorization': \"apikey token=\" + api_key\n }\n\n if len(text) > 0:\n payload = {'text': text,\n 'longest_only': longest_only,\n 'expand_mappings': expand_mappings}\n\n if len(ontologies) > 0:\n payload['ontologies'] = ','.join(ontologies)\n\n if len(include) > 0:\n payload['include'] = ','.join(include)\n\n response = requests.post(url, json=payload, headers=headers, verify=False)\n\n if response.status_code != 200:\n raise Exception('Problem when calling the Annotator: ' + response.text)\n\n\n\n # print(payload)\n # print(response.url)\n # print(response.status_code)\n # print(response.text)\n annotations = json.loads(response.text)\n\n return annotations",
"def process_sparql_query_text(query_text, loader, call_name, extraMetadata):\n # We get the endpoint name first, since some query metadata fields (eg enums) require it\n endpoint, _ = gquery.guess_endpoint_uri(query_text, loader)\n glogger.debug(\"Read query endpoint: {}\".format(endpoint))\n\n try:\n query_metadata = gquery.get_metadata(query_text, endpoint)\n except Exception as e:\n raise Exception('Could not parse query {}: {}'.format(call_name, str(e)))\n\n tags = query_metadata['tags'] if 'tags' in query_metadata else []\n\n summary = query_metadata['summary'] if 'summary' in query_metadata else \"\"\n\n description = query_metadata['description'] if 'description' in query_metadata else \"\"\n\n method = query_metadata['method'].lower() if 'method' in query_metadata else \"\"\n if method not in ['get', 'post', 'head', 'put', 'delete', 'options', 'connect']:\n method = \"\"\n\n pagination = query_metadata['pagination'] if 'pagination' in query_metadata else \"\"\n\n endpoint_in_url = query_metadata['endpoint_in_url'] if 'endpoint_in_url' in query_metadata else True\n\n # Processing of the parameters\n params = []\n\n # PV properties\n item_properties = {}\n\n # If this query allows pagination, add page number as parameter\n if pagination:\n params.append(pageUtils.getSwaggerPaginationDef(pagination))\n\n if query_metadata['type'] in ['SelectQuery', 'ConstructQuery', 'InsertData']:\n # TODO: do something intelligent with the parameters!\n # As per #3, prefetching IRIs via SPARQL and filling enum\n parameters = query_metadata['parameters']\n\n for _, p in list(parameters.items()):\n param = {}\n param['name'] = p['name']\n param['type'] = p['type']\n param['required'] = p['required']\n param['in'] = \"query\"\n param['description'] = \"A value of type {} that will substitute {} in the original query\".format(\n p['type'], p['original'])\n if 'lang' in p:\n param['description'] = \"A value of type {}@{} that will substitute {} in the original query\".format(\n p['type'], p['lang'], p['original'])\n if 'format' in p:\n param['format'] = p['format']\n param['description'] = \"A value of type {} ({}) that will substitute {} in the original query\".format(\n p['type'], p['format'], p['original'])\n if 'enum' in p:\n param['enum'] = p['enum']\n if 'default' in p:\n param['default'] = p['default']\n\n params.append(param)\n\n if endpoint_in_url:\n endpoint_param = {}\n endpoint_param['name'] = \"endpoint\"\n endpoint_param['type'] = \"string\"\n endpoint_param['in'] = \"query\"\n endpoint_param['description'] = \"Alternative endpoint for SPARQL query\"\n endpoint_param['default'] = endpoint\n params.append(endpoint_param)\n\n # If this is a URL generated spec we need to force API calls with the specUrl parameter set\n if type(loader) is URLLoader:\n specUrl_param = {}\n specUrl_param['name'] = \"specUrl\"\n specUrl_param['type'] = \"string\"\n specUrl_param['in'] = \"query\"\n specUrl_param['description'] = \"URL of the API specification\"\n specUrl_param['default'] = loader.getRawRepoUri()\n params.append(specUrl_param)\n\n if query_metadata['type'] == 'SelectQuery':\n # Fill in the spec for SELECT\n if not method:\n method = 'get'\n for pv in query_metadata['variables']:\n item_properties[pv] = {\n \"name\": pv,\n \"type\": \"object\",\n \"required\": [\"type\", \"value\"],\n \"properties\": {\n \"type\": {\n \"type\": \"string\"\n },\n \"value\": {\n \"type\": \"string\"\n },\n \"xml:lang\": {\n \"type\": \"string\"\n },\n \"datatype\": {\n \"type\": \"string\"\n }\n }\n }\n\n elif 
query_metadata['type'] == 'ConstructQuery':\n if not method:\n method = 'get'\n elif query_metadata['type'] == 'InsertData' or query_metadata['type'] == 'Modify': # UPDATE queries should map here\n if not method:\n method = 'post'\n elif query_metadata['type'] == 'UNKNOWN':\n glogger.warning(\"grlc could not parse this query; assuming a plain, non-parametric SELECT in the API spec\")\n if not method:\n method = 'get'\n else:\n # TODO: process all other kinds of queries\n glogger.debug('Could not parse query {}: Query of type {} is currently unsupported'.format(call_name, query_metadata['type']))\n raise Exception('Could not parse query {}: Query of type {} is currently unsupported'.format(call_name, query_metadata['type']))\n\n # Finally: main structure of the callname spec\n item = packItem('/' + call_name, method, tags, summary, description, params, query_metadata, extraMetadata)\n\n return item",
"def process(self, doc):\n # don't try to process null notes\n if not doc[1]:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n # odd notes may throw an error. Just continue rather than stopping the entire process\n try:\n sentences = self.sentence_tokenizer.segToSentenceSpans(doc[1])\n except KeyError:\n if self.verbose:\n print(\"Error segmenting doc\",doc[0])\n return []\n\n #context_doc = pyConTextGraph.ConTextDocument() # ConTextDoc not needed for simple usage\n\n doc_annots = list()\n\n for sentence in sentences:\n # run sentence tokenizer on input text, return the spans\n sentence_text = doc[1][sentence.begin:sentence.end]\n # process every sentence by adding markup\n markup = pyConTextGraph.ConTextMarkup()\n markup.setRawText(sentence_text)\n markup.cleanText()\n # apply targets and modifiers\n markup.markItems(self.targets, mode=\"target\")\n markup.markItems(self.modifiers, mode=\"modifier\")\n # address scope of modifiers to targets, remove inactive modifiers and self-modifying relationships\n markup.pruneMarks()\n markup.applyModifiers()\n markup.pruneSelfModifyingRelationships()\n markup.dropInactiveModifiers()\n\n marked_targets = markup.getMarkedTargets()\n for marked_target in marked_targets:\n modifiers = markup.getModifiers(marked_target)\n if not modifiers:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0]+'_unspecified', marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], marked_target.getPhrase(), span[0], span[1], marked_target.getCategory()[0], 'unspecified', marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n else:\n for modifier in modifiers:\n if marked_target.getSpan()[0] < modifier.getSpan()[0]:\n span = (sentence.begin+marked_target.getSpan()[0],sentence.begin+modifier.getSpan()[1])\n else:\n span = (sentence.begin+modifier.getSpan()[0],sentence.begin+marked_target.getSpan()[1])\n if self.mode == 'combined':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0]+'_'+modifier.getCategory()[0], marked_target.getCode())\n elif self.mode == 'separate':\n annot = (doc[0], doc[1][span[0]:span[1]], span[0], span[1], marked_target.getCategory()[0], modifier.getCategory()[0], marked_target.getCode())\n if annot not in doc_annots:\n doc_annots.append(annot)\n\n #context_doc.addMarkup(markup)\n\n return doc_annots",
"def analyze(self, texts, analyses, external_entity_data=None,\n single_document=False):\n url = urljoin(self.base_url, '/analyze/')\n data = {'texts': texts, 'analyses': analyses}\n if external_entity_data is not None:\n data['absa'] = {'external_entity_data': external_entity_data}\n if single_document:\n data['single_document'] = True\n res = _analysis(url, self.auth, **data)\n return res.json()",
"def query(self, query: str, func: str):\n start_time = time.time()\n tokens = query.split()\n tokens = [self.stemmer.stem(token.translate(self.translator).lower()) for token in tokens]\n posting_lists = [self.posting[i] if i in self.posting else [] for i in tokens]\n\n function_name = 'self.' + func\n results = eval(function_name)(posting_lists)\n results = [Novel(doc_id + '.txt', self.metadata[doc_id]) if self.name == 'Novels'\n else Email(doc_id + '.txt', self.metadata[doc_id]) for doc_id in results]\n time_used = time.time() - start_time\n return results, time_used",
"def __call__(self, query, texts, multilabel=True, workers=0):\n\n scores = []\n for q in [query] if isinstance(query, str) else query:\n # Pass (query, text) pairs to model\n result = self.pipeline([{\"text\": q, \"text_pair\": t} for t in texts], top_k=None, function_to_apply=\"none\", num_workers=workers)\n\n # Apply score transform function\n scores.append(self.function([r[0][\"score\"] for r in result], multilabel))\n\n # Build list of (id, score) per query sorted by highest score\n scores = [sorted(enumerate(row), key=lambda x: x[1], reverse=True) for row in scores]\n\n return scores[0] if isinstance(query, str) else scores",
"def preprocess_query(self, input_ids, prefix):\n\n input_strings = self.generator_tokenizer.batch_decode(input_ids, skip_special_tokens=False)\n\n # handle prefix for T5\n if isinstance(self.generator_tokenizer, T5Tokenizer):\n for i, s in enumerate(input_strings):\n if not s.startswith(prefix):\n logger.warning(\"T5 prefix mismatch in {}\".format(s))\n if len(input_strings[i]) <= len(prefix):\n input_strings[i] = \"\"\n else:\n input_strings[i] = input_strings[i][len(prefix) :]\n\n retriever_inputs = self.question_encoder_tokenizer.batch_encode_plus(\n input_strings,\n return_tensors=\"pt\",\n padding=True,\n truncation=True,\n )\n\n return retriever_inputs[\"input_ids\"].to(input_ids.device), input_strings",
"def annotate(self, text, lang = None):\n return self._er.jsonRequestAnalytics(\"/api/v1/annotate\", { \"lang\": lang, \"text\": text })",
"def annotate(self,corpus):\n\n\t\tassert corpus.parsed == True, \"Corpus must already be parsed before entity recognition\"\n\n\t\tfor doc in corpus.documents:\n\t\t\tentityCount = len(doc.entities)\n\t\t\tfor sentence in doc.sentences:\n\t\t\t\twords = [ t.word for t in sentence.tokens ]\n\t\t\t\t\n\t\t\t\tfor i,t in enumerate(sentence.tokens):\n\t\t\t\t\tif not isNumber(t.word):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\tsourceEntityID = \"T%d\" % (entityCount+1)\n\t\t\t\t\ttext = doc.text[t.startPos:t.endPos]\n\t\t\t\t\tloc = [i]\n\n\t\t\t\t\te = kindred.Entity('quantity',text,[(t.startPos,t.endPos)],sourceEntityID=sourceEntityID)\n\t\t\t\t\tdoc.addEntity(e)\n\t\t\t\t\tsentence.addEntityAnnotation(e,loc)\n\t\t\t\t\tentityCount += 1",
"def parse(self, text):\n assert isinstance(text, str)\n if text.strip() == '':\n return [], []\n\n output = self._annotate(text, properties={\n \"annotators\": \"tokenize,ssplit,pos\",\n \"coref.md.type\": \"dep\",\n \"coref.mode\": \"statistical\"\n })\n\n words = []\n postags = []\n\n for sentence in output['sentences']:\n for token in sentence['tokens']:\n word = token['word']\n pos = token['pos']\n word = re.sub(r'\\s', '', word)\n words.append(word)\n postags.append(pos)\n return words, postags",
"def tokenize_query(self, query_text):\n\t\ttokenizer = RegexpTokenizer(r'\\w+')\n\t\tstop_words = set(stopwords.words('english')) \n\t\tabstract = tokenizer.tokenize(query_text.upper()) \n\t\tfinal_sentence = [w for w in abstract if not w.lower() in stop_words]\n\t\treturn \" \".join(final_sentence).upper()",
"def queries_to_dict(queries: List) -> List:\n return [\n {\n \"unannotated_text\": query.query.text,\n \"annotated_text\": dump_query(query),\n \"domain\": query.domain,\n \"intent\": query.intent,\n }\n for query in queries\n ]",
"def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._remove_punct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keep_only_alpha_num and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopwords or tok2 in self._stopwords:\n continue\n\n if self._lower_case:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)",
"def embed_query(self, text: str) -> List[float]:\n embedding = self._embedding_func(text, engine=self.query_model_name)\n return embedding",
"def translate_batch(\n self,\n queries: Optional[List[str]] = None,\n documents: Optional[Union[List[Document], List[Answer], List[List[Document]], List[List[Answer]]]] = None,\n batch_size: Optional[int] = None,\n ) -> List[Union[str, List[Document], List[Answer], List[str], List[Dict[str, Any]]]]:\n # TODO: This method currently just calls the translate method multiple times, so there is room for improvement.\n\n if queries and documents:\n raise AttributeError(\"Translator needs either query or documents but not both.\")\n\n if not queries and not documents:\n raise AttributeError(\"Translator needs a query or documents to perform translation.\")\n\n translated = []\n # Translate queries\n if queries:\n for query in tqdm(queries, disable=not self.progress_bar, desc=\"Translating\"):\n cur_translation = self.translate(query=query)\n translated.append(cur_translation)\n\n # Translate docs / answers\n elif documents:\n # Single list of documents / answers\n if not isinstance(documents[0], list):\n translated.append(self.translate(documents=documents)) # type: ignore\n # Multiple lists of document / answer lists\n else:\n for cur_list in tqdm(documents, disable=not self.progress_bar, desc=\"Translating\"):\n if not isinstance(cur_list, list):\n raise HaystackError(\n f\"cur_list was of type {type(cur_list)}, but expected a list of Documents / Answers.\"\n )\n cur_translation = self.translate(documents=cur_list)\n translated.append(cur_translation)\n\n return translated",
"def annotate(self, op_list=None):\n if bool(self.optical_system):\n if op_list is None:\n op_list = self._operations\n for op in op_list:\n op.annotate(self)\n else:\n print(\"No optical system found, so annotating nothing.\")",
"def analyze(self, annotation):\n for annotator in self.sequence:\n annotation = annotator.validate_and_annotate(annotation)\n return annotation",
"def annotate(self, *args, **kwargs):\n self._not_support_combined_queries(\"annotate\")\n return self._annotate(args, kwargs, select=True)",
"def run_tapas(data, queries):\n tokenizer, model = load_model_and_tokenizer()\n table, inputs = prepare_inputs(data, queries, tokenizer)\n predicted_table_cell_coords, predicted_aggregation_operators = generate_predictions(inputs, model, tokenizer)\n aggregation_predictions_string, answers = postprocess_predictions(predicted_aggregation_operators,\n predicted_table_cell_coords, table)\n ans_list = show_answers(queries, answers, aggregation_predictions_string)\n\n print(ans_list)",
"def parser(string, queryset):\n QueryObjects.D = {}\n QueryObjects.B = []\n QueryObjects.IND = 0\n QueryObjects.TEMP_FIELD = None\n\n algebra = boolean.BooleanAlgebra()\n query_list = lexer(string)\n query_string = ' '.join(query_list)\n qs = algebra.parse(query_string)\n\n if QueryObjects.TEMP_FIELD:\n queryset = queryset.annotate(**QueryObjects.TEMP_FIELD)\n QueryObjects.TEMP_FIELD = None\n\n locals().update(QueryObjects.D.items())\n query = str(qs)\n query = eval(query)\n queryset = queryset.filter(query)\n return queryset",
"def get_annotations_for_text(text, ontologies='MESH', semantic_types=(), debug=False):\n results = {'status': 'ERROR', 'data': []}\n\n if debug:\n print \"bioportal.get_annotations_for_text\"\n\n annotations = _get_raw_annotations_for_text(\n text,\n ontologies=ontologies,\n semantic_types=semantic_types\n )\n\n if not isinstance(annotations, list):\n results['message'] = 'BioPortal get annotations: Invalid format annotations'\n return results\n\n for annotation in annotations:\n ontology_data = re.findall(\n r'.*/([A-Z0-9]+)/([A-Z0-9]+)$', annotation['annotatedClass']['@id']\n ) or []\n\n info = {\n 'id': annotation['annotatedClass']['@id'],\n 'class': annotation['annotatedClass']['links']['self'],\n 'frequency': len(annotation['annotations']),\n 'matched_terms': list(\n set([an.get('text').lower() for an in annotation.get('annotations')])\n )\n }\n\n if len(ontology_data) == 1:\n info['ontology_type'] = ontology_data[0][0]\n info['ontology_quote_id'] = ontology_data[0][1]\n\n results['data'].append(info)\n\n results['status'] = 'OK'\n return results",
"def process_query(self, query_str):\n # make sure everything is lower case\n query = query_str.lower()\n # split on whitespace\n query = query.split()\n # remove non alphanumeric characters\n query = [self.alphanum.sub('', xx) for xx in query]\n # stem words\n query = [self.p.stem(xx) for xx in query]\n return query",
"def analyse_query(query, classifier, Resource, threshold, language='en'):\n return [(bytes(line, 'utf-8'), _minimal_analysis(bytes(line, 'utf-8'), classifier, Resource, threshold, language))\n for line in search_sample(query)]",
"def show_answers(queries, answers, aggregation_predictions_string):\n\n ans_list = []\n for query, answer, predicted_agg in zip(queries, answers, aggregation_predictions_string):\n print(query)\n print(answer,type(answer))\n print(predicted_agg)\n answer = [i.strip() for i in answer.split(',')]\n print(answer)\n if (len(answer) == 1):\n if (predicted_agg == 'COUNT'):\n answer = len([i for i in answer])\n\n if (len(answer) > 1):\n if (predicted_agg == 'SUM'):\n try:\n answer = sum([float(i) for i in answer])\n except ValueError:\n answer = predicted_agg\n elif (predicted_agg == 'COUNT'):\n answer = len([i for i in answer])\n elif (predicted_agg == 'AVERAGE'):\n answer = sum([float(i) for i in answer]) / len([i for i in answer])\n elif (predicted_agg == 'NONE'):\n answer = answer\n else:\n answer = 'None'\n # if predicted_agg == \"NONE\":\n # print(\"Predicted answer: \" + answer)\n # else:\n # print(\"Predicted answer: \" + predicted_agg + \" > \" + answer)\n\n ans_list.append(answer)\n\n return ans_list"
]
| [
"0.58529824",
"0.5585822",
"0.5577068",
"0.55282134",
"0.5404479",
"0.53600365",
"0.53220135",
"0.52643645",
"0.52558595",
"0.5248043",
"0.52073747",
"0.5193515",
"0.51917845",
"0.5176019",
"0.5160177",
"0.50877666",
"0.5071344",
"0.50710976",
"0.50638235",
"0.5001331",
"0.49923724",
"0.4948548",
"0.49118975",
"0.49114582",
"0.48940125",
"0.48861033",
"0.48601067",
"0.48581663",
"0.48521733",
"0.4850093"
]
| 0.70313007 | 0 |
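A rough sketch of the conversion pattern in the record above, using a stand-in annotator rather than the real BootstrapAnnotator API; the class names and fields below are assumptions for illustration only:

from dataclasses import dataclass
from typing import List

@dataclass
class ProcessedQuery:
    # Stand-in for the framework's processed-query object; fields are illustrative.
    text: str
    domain: str
    intent: str

class FakeBootstrapAnnotator:
    # Stand-in annotator: the real one resolves domain/intent/entities with trained models.
    def text_queries_to_processed_queries(self, text_queries: List[str]) -> List[ProcessedQuery]:
        return [ProcessedQuery(text=t, domain="unknown", intent="unknown") for t in text_queries]

annotator = FakeBootstrapAnnotator()
processed = annotator.text_queries_to_processed_queries(["turn on the kitchen lights"])
assert processed[0].text == "turn on the kitchen lights"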
Method to get multiple queries from the QueryCache given a list of query ids. | def get_queries(self, query_ids):
return [
self.resource_loader.query_cache.get(query_id) for query_id in query_ids
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_many(self, queries):\n assert isinstance(queries, list)\n cursor = self._cursor()\n results = []\n for query in queries:\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n except Exception as e:\n print(e)\n result = []\n results.append(result)\n return results",
"def get_all(self, *ids):",
"def get_results(self, ids):\n self.join()\n return [self.results[id] for id in ids]",
"def query(self, queries):\n final_result = []\n results = self.__return_query('query', queries)\n if results == None:\n return None\n else:\n if len(results) > 1:\n for result in results:\n final_result.append(result['data'])\n else:\n final_result = results\n return final_result",
"def batch_get_query_execution(QueryExecutionIds=None):\n pass",
"def chunk_queries(queries: List) -> List[List]:\n chunks: List[List] = []\n # Octopart can only handle 20 queries per request, so split into chunks.\n for chunk in chunked(queries):\n chunks.extend(split_chunk(chunk))\n return chunks",
"def get_all(self, datastore, *ids):\n return datastore.query(self.__model__).filter(self.__model__.id.in_(ids)).all()",
"def multiquery(self, query_chunks, **kwargs):\n return query.multiquery(self._host, self._session, query_chunks, **kwargs)",
"def multi_query(db, queries):\n return pd.concat((query_to_df(db, q) for q in queries), ignore_index=True)",
"def make_queries(\n db_query: DbQuery, ids: Optional[List[str]] = None, date_range: Optional[str] = None\n) -> List[Run]:\n if ids is not None and ids:\n parsed_ids = parse_ids(ids)\n queries = [i.get_query(db_query) for i in parsed_ids]\n if len(queries) > 1:\n query_union = queries[0].union(*queries[1:])\n else:\n query_union = queries[0]\n else:\n query_union = db_query.get_all_runs()\n\n if date_range is not None:\n start, end = parse_date_range(date_range)\n filtered = DbQuery.filter_results_by_date_range(query_union, start, end)\n return filtered.all()\n else:\n return query_union.all()",
"def get_all(self, *ids):\n return self.__model__.query.filter(self.__model__.id.in_(ids)).all()",
"def get_all(self, queries):\n return [self._unpack(x.ids) for x in self.server.select2([\n ttypes.SelectQuery(\n [ttypes.SelectOperation(\n operation_type=\\\n ttypes.SelectOperationType.SimpleQuery,\n term=self._query_term(*x)\n )\n ],\n ttypes.Page(self.page_length, -1)\n )\n for x in queries])]",
"def multiple_queries(self, queries, index_name_key = \"indexName\", strategy = \"none\"):\n requests = []\n for query in queries:\n index_name = query[index_name_key]\n del query[index_name_key]\n for key in query.keys():\n if isinstance(query[key], (list, dict, tuple, bool)):\n query[key] = json.dumps(query[key], cls = JSONEncoderWithDatetimeAndDefaultToString)\n requests.append({\"indexName\": index_name, \"params\": urlencode(query)})\n body = {\"requests\": requests}\n return AlgoliaUtils_request(self.headers, self.read_hosts, \"POST\", \"/1/indexes/*/queries?strategy=\" + strategy, self.search_timeout, body)",
"async def _multi_get(self, keys, encoding=\"utf-8\"):\n return [SimpleMemoryBackend._cache.get(key) for key in keys]",
"async def fetch_all(self, query: str, args: Optional[Iterable]=None) -> List:\n stats.inc('queries', 'SQL')\n async with self.pool.acquire() as conn:\n async with conn.cursor() as cur:\n await cur.execute(query, args)\n ret = await cur.fetchall()\n return ret",
"def multiQuery(self, query, limit):\n try:\n results = self.sp.search(query, limit)\n resultLists = results['tracks']['items']\n return resultLists\n except spotipy.SpotifyException as se:\n self.authenticate()\n return self.multiQuery(query, limit)",
"def batch_get_named_query(NamedQueryIds=None):\n pass",
"async def queries(self, *queries: ViewQuery) -> List[dict]:\n\n queries = [dict(q._asdict()) for q in queries]\n\n for q in queries:\n q.stale = StaleOptions.format(q.stale)\n\n data = dict(queries=queries)\n path = self._get_path() + ['queries']\n\n return await self.__connection.query('POST', path, data=data)",
"def queries(self):\n request = Request(method=\"get\", endpoint=\"/query/current\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryListError(resp, request)\n return self._format_queries(resp.body)\n\n return self._execute(request, response_handler)",
"async def _multi_get(self, keys):\n with await self._connect() as redis:\n return await redis.mget(*keys)",
"def getQueries(self):\r\n\t\tp=[]\r\n\t\tfor pa in self.session.query(Query):\r\n\t\t\tp.append(pa)\r\n\t\treturn p",
"def mget(cls, pks):\n if not pks:\n return []\n return DBSession().query(cls) \\\n .filter(cls.id.in_(pks)) \\\n .all()",
"def query_many(self, *q, read_as=None):\n raise NotImplementedError",
"async def get_many(self, **query):\n\n return [await self._expand(obj) for obj in await self.db.get_many(**query)]",
"def batch_execute(self, sql_list):\n with self.connection.cursor() as dbc:\n responses = []\n for sql in sql_list:\n dbc.execute(sql)\n responses.append(dbc.fetchall())\n return responses",
"def _query_measurables(self, ids):\n if len(ids) > 500:\n # If we want to implement this later,\n # we can properly use the pageInfo in the request\n raise NotImplementedError(\n \"We haven't implemented support for more than 500 ids per request\"\n )\n response = self._post(\n {\n \"variables\": {\"measurableIds\": ids},\n \"query\": \"\"\"query ($measurableIds: [String!]) {\n measurables(measurableIds: $measurableIds, first: 500) {\n total\n pageInfo {\n hasPreviousPage\n hasNextPage\n startCursor\n endCursor\n __typename\n }\n edges {\n node {\n id\n channelId\n previousAggregate {\n value {\n floatCdf {\n xs\n ys\n }\n }\n }\n }\n }\n }\n }\"\"\",\n }\n )\n if \"errors\" in response:\n raise ValueError(\n \"Error retrieving foretold measurables. You may not have authorization \"\n \"to load one or more measurables, or one of the measureable ids may be incorrect\"\n )\n if response[\"data\"][\"measurables\"][\"pageInfo\"][\"hasNextPage\"]:\n raise NotImplementedError(\n \"We haven't implemented support for more than 500 ids per request\"\n )\n measurables_dict = {}\n for edge in response[\"data\"][\"measurables\"][\"edges\"]:\n measureable = edge[\"node\"]\n measurables_dict[measureable[\"id\"]] = measureable\n\n return [measurables_dict.get(id, None) for id in ids]",
"def query_ids_maps(self, observation_ids, *, missions=__ALL_STRING, row_limit=DEFAULT_ROW_LIMIT,\n get_query_payload=False, cache=True, verbose=False):\n sanitized_observation_ids = self._sanitize_input_ids(observation_ids)\n sanitized_missions = self._sanitize_input_mission(missions)\n sanitized_row_limit = self._sanitize_input_row_limit(row_limit)\n\n query_result = {}\n self._store_query_result(query_result=query_result, names=sanitized_missions, json=self._get_observation_json(),\n row_limit=sanitized_row_limit, get_query_payload=get_query_payload, cache=cache,\n ids=sanitized_observation_ids, verbose=verbose)\n\n if get_query_payload:\n return query_result\n\n return commons.TableList(query_result)",
"def queries(self, queries):\n\n self._queries = queries",
"def get_objects(self, ids, **args):\n args[\"ids\"] = \",\".join(ids)\n return self.request(\"\", args)",
"def get_objects(self,ids):\n return model.objects.filter(pk__in=ids).order_by(search_field)"
]
| [
"0.722569",
"0.65023124",
"0.64487207",
"0.64159083",
"0.6410242",
"0.635374",
"0.62091815",
"0.6204514",
"0.6176422",
"0.6157128",
"0.61314714",
"0.606496",
"0.59988225",
"0.5957463",
"0.5924632",
"0.5920572",
"0.5903233",
"0.58550835",
"0.5850858",
"0.58000696",
"0.57887274",
"0.572051",
"0.5720497",
"0.56930196",
"0.5621852",
"0.56105125",
"0.55885625",
"0.5583642",
"0.55656075",
"0.5559656"
]
| 0.8206793 | 0 |
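A short sketch of the id-to-query lookup shown in the get_queries record above; the in-memory cache class is a made-up stand-in for the real query cache:

from typing import Dict, List

class InMemoryQueryCache:
    # Minimal dict-backed stand-in for the resource loader's query cache.
    def __init__(self) -> None:
        self._store: Dict[int, str] = {}

    def put(self, query_id: int, query: str) -> None:
        self._store[query_id] = query

    def get(self, query_id: int) -> str:
        return self._store[query_id]

cache = InMemoryQueryCache()
for i, text in enumerate(["check my balance", "transfer money"]):
    cache.put(i, text)

# Same access pattern as get_queries(): resolve each id back into its cached query.
query_ids: List[int] = [1, 0]
queries = [cache.get(query_id) for query_id in query_ids]
assert queries == ["transfer money", "check my balance"]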
Update the current set of sampled queries by adding the set of newly sampled queries. A new ProcessedQueryList object is created with the updated set of query ids. | def update_sampled_queries(self, newly_sampled_queries_ids):
sampled_queries_ids = self.sampled_queries.elements + newly_sampled_queries_ids
self.sampled_queries = ProcessedQueryList(
cache=self.resource_loader.query_cache, elements=sampled_queries_ids
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_unsampled_queries(self, remaining_indices):\n remaining_queries_ids = [\n self.unsampled_queries.elements[i] for i in remaining_indices\n ]\n self.unsampled_queries = ProcessedQueryList(\n cache=self.resource_loader.query_cache, elements=remaining_queries_ids\n )",
"def queries(self, queries):\n\n self._queries = queries",
"def update(self, config_samples, costs):\n num_config_vars, nqueries = config_samples.shape\n assert costs.shape[0] == nqueries\n assert costs.ndim == 1\n for ii in range(nqueries):\n key = tuple([int(ll) for ll in config_samples[:, ii]])\n if key in self.costs:\n self.costs[key].append(costs[ii])\n else:\n self.costs[key] = [costs[ii]]",
"def transform_queries_qrels_new_ids(self):\n self.dict_mapper_old_to_new_qid = {}\n self.count_subtasks = {}\n self.dict_missingqueries = {}\n self.dict_assessed_queries = self.get_queries_assessed()\n self.map_old_qid_to_new()\n self.map_qrels_to_newqids()\n exit()\n self.map_old_qid_to_new_withsubtasks()\n self.generate_qrels_subtasks()",
"def update(self):\r\n self.data = [self.make_item_tuple(i) for i in self.query]\r\n self._fetched = True\r\n query_cache.set(self.iden, self.data)",
"def didUpdateQueries(self):\n if hasattr(self, 'preQueryUpdateHook'):\n self.preQueryUpdateHook()\n\n for query in self.option_queries.values():\n query.updateValue()\n\n if hasattr(self, 'postQueryUpdateHook'):\n self.postQueryUpdateHook()",
"def update_vips(cls, new_data_list):\n # create dictionary where key is id\n data_by_ids = {item['id']: item for item in new_data_list}\n\n # get db instances\n query = cls.filter_by_list(None, 'id', list(data_by_ids))\n\n cls.lock_for_update(query).all()\n for existing_instance in query:\n cls.single.update(\n existing_instance,\n data_by_ids[existing_instance.id]\n )\n return query",
"def update(cls, *lst, **dct):\n cls.runtime.set_set(lst, dct)\n return UpdateQuery(cls.runtime)",
"def _get_new_data(self):\n self.log.info(\"Get new query from db \")\n surveys = self.db.execute_pandas_query(self._get_query('surveys_query'))\n\n final_query = ''\n for index_s, survey_id in surveys.iterrows():\n questions = self.db.execute_pandas_query(self._get_query('questions_query').replace('@currentSurveyId', str(survey_id['SurveyId'])))\n query_in_progress = ''\n for index_q, question_id in questions.iterrows():\n if question_id['InSurvey'] == 0:\n query_in_progress = query_in_progress + self._get_query('query_template_for_null_column').replace('<QUESTION_ID>', str(question_id['QuestionId']))\n else:\n query_in_progress = query_in_progress + self._get_query('query_template_for_answer_column').replace('<QUESTION_ID>', str(question_id['QuestionId']))\n\n if index_q != questions.index[-1]:\n query_in_progress = query_in_progress + ' , '\n\n union_query_block = self._get_query('query_template_outer_union_query').replace('<DYNAMIC_QUESTION_ANSWERS>', query_in_progress)\n union_query_block = union_query_block.replace('<SURVEY_ID>', str(survey_id['SurveyId']))\n final_query = final_query + union_query_block\n if index_s != surveys.index[-1]:\n final_query = final_query + ' UNION '\n return final_query",
"def update(self, data: Union[QueryWithResponse, List[QueryWithResponse]], initial_point: Dict = None):\n if isinstance(data, list):\n self.dataset.extend(data)\n else:\n self.dataset.append(data)\n if initial_point is None:\n initial_point = self.mean\n \n self.create_samples(initial_point)",
"def _fill_query_slots(self, queries: List[Query]) -> None:\n while queries and self.query_slots > 0:\n logger.debug(\n f\"{self.query_slots} available query slots, creating query task\"\n )\n query = queries.pop(0)\n query_task_id = self.client.create_query_task(query.query_id)\n self.query_slots -= 1\n query.query_task_id = query_task_id\n self._query_by_task_id[query_task_id] = query\n self._running_queries.append(query)",
"def query_append(self,*q):\n query = self.parameters['q'].union(q)\n params = join_params(self.parameters,\n {\"q\": query, \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)",
"def addDPQueriesToModel(self, model, two_d_vars, obj_fxn, parent_mask, q_set_list=None, **kwargs):\n import gurobipy as gb\n ASSERT_TYPE(model, gb.Model)\n ASSERT_TYPE(two_d_vars, gb.MVar)\n lb = 0 if self.nnls else -gb.GRB.INFINITY\n if q_set_list is None:\n q_set_list = self.DPqueries\n for ihist, dp_queries in enumerate(q_set_list):\n if dp_queries is None:\n continue\n for st_dpq in dp_queries:\n query: MultiHistQuery = st_dpq.query\n name = st_dpq.name\n matrix_rep = query.matrixRep()\n n_ans = query.numAnswers()\n # weight = 1. / st_dpq.Var\n # Add empty columns for preceding and succeeding histograms to the matrix\n # ihist is the number of histogram, needed to pad the query matrix with zeros appropriately\n matrix_rep = self.padQueryMatrix(ihist, matrix_rep, n_ans)\n matrix_rep = matrix_rep[:, parent_mask]\n weight = ss.diags(np.hstack([np.repeat(1. / v, n_ans) for v in st_dpq.VarList]))\n dp_answer_all_children = np.hstack(st_dpq.DPanswerList).ravel() # Stack dp_answers for children into 1 vector\n\n # Stack optimization vars for children into 1 vector\n\n try:\n # Note: we replaced ._vararr() with .tolist(); need to validate this\n x = gb.MVar(np.array(two_d_vars[:, st_dpq.indices].tolist()).ravel(\"F\"))\n # TODO: @zhura301 to validate; the assert does not work:\n # assert x==gb.MVar(two_d_vars[:, st_dpq.indices]._vararr.ravel(\"F\"))\n except AttributeError as e:\n # Legacy method before .tolist() was created in gurobi 9.1\n x = gb.MVar(two_d_vars[:, st_dpq.indices].vararr.ravel(\"F\")) # Stack optimization vars for children into 1 vector\n\n # Using \"F\" for Fortran style ravel indexing so that variables are in rows and answers in cols (or vice versa)\n # Make a matrix that will act on stacked variables from all children. Its dimensions are dimensions of matrix_rep,\n # multiplied by number of children on each axis, since it acts on stacked child histograms and returns stacked\n # child answers.\n # It is a block diagonal matrix with blocks being matrix_rep, or matrix_rep stacked along the\n # diagonal num_chld times.\n # We'll make it as COO matrix, which takes in 3 arrays: rows and columns of non-zero\n # elements, and their values. We perform stacking by repeating these arrays num_child times, and\n # shifting indices appropriately.\n num_chld = len(st_dpq.indices) # How many children have this query\n Acoo = matrix_rep.tocoo()\n col = np.tile(Acoo.col, num_chld) + np.repeat(np.arange(num_chld), len(Acoo.col)) * Acoo.shape[1]\n row = np.tile(Acoo.row, num_chld) + np.repeat(np.arange(num_chld), len(Acoo.row)) * Acoo.shape[0]\n data = np.tile(Acoo.data, num_chld)\n A = ss.coo_matrix((data, (row, col)), shape=(matrix_rep.shape[0] * num_chld, matrix_rep.shape[1] * num_chld)).tocsr()\n obj_fxn = self.childDPQueryTerm(model, obj_fxn, A, x, dp_answer_all_children, weight, n_ans * num_chld, name, lb, **kwargs)\n return obj_fxn",
"def update(self):\n self.entries = Entry.objects.filter(contest=self.contest)\n self.lineups = [e.lineup for e in self.entries]\n self.lineup_players = lineup.models.Player.objects.filter(lineup__in=self.lineups)\n\n # add players to the data with an initial count of 1.\n # increment a players count if they already exist\n self.player_counter = Counter([p.player.srid for p in self.lineup_players]).items()",
"def sample_and_update(\n self,\n sampling_size: int,\n confidences_2d: List[List[float]],\n confidences_3d: List[List[List[float]]],\n heuristic: Heuristic,\n confidence_segments: Dict = None,\n tuning_type: TuningType = TuningType.CLASSIFIER,\n ):\n\n if tuning_type == TuningType.CLASSIFIER:\n params_rank_3d = {\"confidences_3d\": confidences_3d}\n if confidence_segments:\n params_rank_3d[\"confidence_segments\"] = confidence_segments\n\n ranked_indices_2d = (\n heuristic.rank_3d(**params_rank_3d)\n if confidences_3d\n else heuristic.rank_2d(confidences_2d)\n )\n\n newly_sampled_indices = ranked_indices_2d[:sampling_size]\n remaining_indices = ranked_indices_2d[sampling_size:]\n\n else:\n try:\n ranked_entity_indices = heuristic.rank_entities(confidences_2d)\n except (TypeError, ValueError):\n # if heuristic does not have entity AL support default to entropy\n heuristic = EntropySampling\n ranked_entity_indices = heuristic.rank_entities(confidences_2d)\n\n newly_sampled_indices = ranked_entity_indices[:sampling_size]\n remaining_indices = ranked_entity_indices[sampling_size:]\n\n newly_sampled_queries_ids = [\n self.unsampled_queries.elements[i] for i in newly_sampled_indices\n ]\n self.update_sampled_queries(newly_sampled_queries_ids)\n self.update_unsampled_queries(remaining_indices)\n return newly_sampled_queries_ids",
"def add_data(self, new_backend_result, rerun_fit=True):\n\n if new_backend_result is None:\n return\n\n if not isinstance(new_backend_result, list):\n new_backend_result = [new_backend_result]\n\n for result in new_backend_result:\n self._result_list.append(result)\n\n # update the number of trials *if* new ones\n # added.\n for qvcirc in result.results:\n ntrials_circ = int(qvcirc.header.name.split('_')[-1])\n if (ntrials_circ+1) > self._ntrials:\n self._ntrials = ntrials_circ+1\n\n if qvcirc.header.name not in self._heavy_output_prob_ideal:\n raise QiskitError('Ideal distribution '\n 'must be loaded first')\n\n if rerun_fit:\n self.calc_data()\n self.calc_statistics()",
"def set_queries(self, **kwargs):\n for k, v in kwargs.items():\n self._query_dict[k] = v",
"async def queries(self, *queries: ViewQuery) -> List[dict]:\n\n queries = [dict(q._asdict()) for q in queries]\n\n for q in queries:\n q.stale = StaleOptions.format(q.stale)\n\n data = dict(queries=queries)\n path = self._get_path() + ['queries']\n\n return await self.__connection.query('POST', path, data=data)",
"def update_query(self, **updates):\r\n self._url_updates.update(updates)",
"def sql_query(self, new_query):\n self._project.sql_query = new_query",
"def get_sampled_ids(self):\n seed = 123\n #initiate two lists, to save randomly picked positive and negative cases respectively\n positiveIds = []\n negativeIds = []\n i = 0\n print \"==> resampling ... \",\n while len(positiveIds)+len(negativeIds)<self.ntotal:\n # start a loop from 0 to total size of the new sampe\n # if it catches a number divisable by the sought ratio, update the list of positive cases ids\n # otherwise keep update the list of negative cases ids\n try:\n if i%int(100 / self.posRate) == 0: \n positiveIds.append(self.posId.next())\n else:\n negativeIds.append(self.negId.next())\n except:\n print \"Enter posRate higher than the initial rate\"\n break\n i+=1\n print \"Done sampling\"\n print \"positive:\", len(positiveIds)\n print \"negative:\", len(negativeIds)\n print \"final size:\", len(positiveIds)+len(negativeIds)\n #return sorted list of the two list of ids combined\n return sorted(positiveIds+negativeIds)",
"def update(self, list_of_sets):\n for s in list_of_sets:\n self.add(s)",
"def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)",
"def extend(self, results):\r\n \r\n docs = self.docs\r\n self.scored_list.extend(docnum for docnum in results.scored_list\r\n if docnum not in docs)\r\n self.docs = docs | results.docs\r\n \r\n # TODO: merge the query terms?\r",
"def _reset_query(self):\n self.query = pysnow.QueryBuilder()\n self.desired_response_fields = list()",
"def query(self,query_samples):\n\n\t\tself.sampled_topics = np.zeros((self.samples,self.N), dtype = np.int)\n\n\t\tfor s in xrange(self.samples):\n\n\t\t\tself.sampled_topics[s,:] = samplers_lda.sampler_query(self.docid, self.tokens, self.topic_seed,\n\t\t\t\t\t\t\t\t\t\t\tnp.ascontiguousarray(self.tt[:,:,s], dtype=np.float),\n\t\t\t\t\t\t\t\t\t\t\tself.N, self.K, self.D, self.alpha, query_samples)\n\n\t\t\tprint(\"Sample %d queried\" % s)\n\n\t\tself.dt = np.zeros((self.D,self.K,self.samples))\n\n\t\tfor s in xrange(self.samples):\n\t\t\tself.dt[:,:,s] = samplers_lda.dt_comp(self.docid,self.sampled_topics[s,:], self.N, self.K, self.D, self.alpha)",
"def __on_query_edited(self):\n self.__refresh_search_results()",
"def _update_all(self, criteria: Q, *args, **kwargs):\n raise NotImplementedError",
"def populate_list(self):\n self._list.value = self.__get_queryset()\n # force the list to be updated\n self._list.mark_to_update_client()",
"def update_calls(self, calls):\n for call in calls:\n call.site = self\n self.call_for_sample = {call.sample: call for call in calls}"
]
| [
"0.68086916",
"0.5676029",
"0.5447187",
"0.5287999",
"0.5281685",
"0.5269392",
"0.5163817",
"0.50805366",
"0.5080143",
"0.50727624",
"0.50701463",
"0.50502145",
"0.50495815",
"0.5037603",
"0.5023451",
"0.4994067",
"0.49848914",
"0.49457482",
"0.49016884",
"0.48877212",
"0.48846725",
"0.48601288",
"0.48473334",
"0.48457655",
"0.48131934",
"0.47969586",
"0.47944984",
"0.47711387",
"0.47700343",
"0.4743306"
]
| 0.83345515 | 0 |
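A toy sketch of the sampled/unsampled bookkeeping described in the record above (the complementary unsampled-pool update appears in the next record); the pool class and the ids are illustrative assumptions:

from typing import List

class SimpleQueryPool:
    # Toy model of the bookkeeping: ids move from the unsampled pool into
    # the sampled pool after every sampling round.
    def __init__(self, all_ids: List[int]) -> None:
        self.sampled_ids: List[int] = []
        self.unsampled_ids: List[int] = list(all_ids)

    def update_sampled(self, newly_sampled_ids: List[int]) -> None:
        self.sampled_ids = self.sampled_ids + newly_sampled_ids

    def update_unsampled(self, remaining_indices: List[int]) -> None:
        self.unsampled_ids = [self.unsampled_ids[i] for i in remaining_indices]

pool = SimpleQueryPool(all_ids=[10, 11, 12, 13])
newly_sampled = [pool.unsampled_ids[i] for i in [2, 0]]   # heuristic picked indices 2 and 0
pool.update_sampled(newly_sampled)
pool.update_unsampled([1, 3])                             # indices 1 and 3 remain unsampled
assert pool.sampled_ids == [12, 10]
assert pool.unsampled_ids == [11, 13]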
Update the current set of unsampled queries by removing the set of newly sampled queries. A new ProcessedQueryList object is created with the updated set of query ids. | def update_unsampled_queries(self, remaining_indices):
remaining_queries_ids = [
self.unsampled_queries.elements[i] for i in remaining_indices
]
self.unsampled_queries = ProcessedQueryList(
cache=self.resource_loader.query_cache, elements=remaining_queries_ids
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_sampled_queries(self, newly_sampled_queries_ids):\n sampled_queries_ids = self.sampled_queries.elements + newly_sampled_queries_ids\n self.sampled_queries = ProcessedQueryList(\n cache=self.resource_loader.query_cache, elements=sampled_queries_ids\n )",
"def clearpredicates(self):\n self._preds = []",
"def query_remove(self,*q):\n query = self.parameters['q'].difference(q)\n params = join_params(self.parameters,\n {\"q\": query, \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)",
"def removeAllAbstractQueries(self):\n self.queries[\"abs\"] = []",
"def unset_queries(self, *args):\n for k in args:\n self._query_dict.pop(k, None)",
"def _reset_query(self):\n self.query = pysnow.QueryBuilder()\n self.desired_response_fields = list()",
"def removeAllAuthorQueries(self):\n self.queries[\"au\"] = []",
"def removeAllTitleQueries(self):\n self.queries[\"ti\"] = []",
"def unset(cls, query, unset_query):\n cls.collection.update(query, {\"$unset\": unset_query}, multi=True)",
"def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)",
"def prune(self):\n target_user_ids = self.get_queryset().values_list('id', flat=True)\n exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),\n drip=self.drip_model,\n user__id__in=target_user_ids)\\\n .values_list('user_id', flat=True)\n self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)",
"def transform_queries_qrels_new_ids(self):\n self.dict_mapper_old_to_new_qid = {}\n self.count_subtasks = {}\n self.dict_missingqueries = {}\n self.dict_assessed_queries = self.get_queries_assessed()\n self.map_old_qid_to_new()\n self.map_qrels_to_newqids()\n exit()\n self.map_old_qid_to_new_withsubtasks()\n self.generate_qrels_subtasks()",
"def queries(self, queries):\n\n self._queries = queries",
"def _flush(self):\n old_pumps = self._pumps\n new_pumps = self._pumps = set()\n for p in old_pumps:\n p.flush()\n if p.clientIO.disconnected and p.serverIO.disconnected:\n continue\n new_pumps.add(p)",
"def _replica_results_dedup(queries):\n deduplicated_queries = []\n for query in queries:\n new_query = query.copy()\n\n if \"results\" in query:\n objects_seen = {}\n dedup_results = []\n results = query[\"results\"]\n\n for result in results:\n if result[\"type\"] == \"dataobject\":\n full_name = result[\"full_name\"]\n if full_name not in objects_seen:\n objects_seen[full_name] = 1\n dedup_results.append(result)\n else:\n dedup_results.append(result)\n\n new_query[\"results\"] = dedup_results\n\n deduplicated_queries.append(new_query)\n\n return deduplicated_queries",
"def reset_for_new_run(\n self,\n state: State\n ):\n\n super().reset_for_new_run(state)\n\n if self.Q is None:\n self.Q = {\n a: IncrementalSampleAverager(\n initial_value=self.initial_q_value,\n alpha=self.alpha\n )\n for a in self.most_recent_state.AA\n }\n else:\n for averager in self.Q.values():\n averager.reset()",
"def deduplicate_raw_text_queries(log_queries_iter) -> List[str]:\n return list(set(q for q in log_queries_iter))",
"def pull(self, query, reload=True):\n qs = self._get_queryset()\n qs.update_one({'$pull': {self.__field_name__: query}})\n\n self.__log__.append(SetPull(query=query))\n\n if reload:\n self.reload()",
"def resampleParticles(self, gameState):\n self.particles = []\n for i in range(self.numParticles):\n self.particles.append(tuple(util.sample(self.uniformPrior) for _ in\n self.ghostIndices))",
"def _clear_previous_samples(self):\n del self._grid\n del self._samples\n\n # --------------------------------\n # Grid Parameters\n # --------------------------------\n self._cell_length = self._radius / np.sqrt(self._dim)\n self._grid_shape = np.array([int(np.ceil(\n self._extent[i] / self._cell_length))\n for i in range(self._dim)], dtype=int)\n\n # Define Grid\n self._grid = np.empty(shape=self._grid_shape, dtype=int)\n self._grid.fill(-1)\n\n # --------------------------------\n # Sample List\n # --------------------------------\n self._samples = []",
"def __delitem__(self, query_filter):\n subquery_count = len(self.__bound_queries)\n keyerror_count = 0\n saved_items = []\n for index, query in enumerate(self.__bound_queries):\n try:\n saved_items.append(query.get(query_filter, None))\n del query[query_filter]\n except KeyError:\n keyerror_count += 1\n except:\n for q, old_value in itertools.izip(self.__bound_queries[:index],\n saved_items):\n if old_value is not None:\n q[query_filter] = old_value\n raise\n\n if keyerror_count == subquery_count:\n raise KeyError(query_filter)",
"def get_sampled_ids(self):\n seed = 123\n #initiate two lists, to save randomly picked positive and negative cases respectively\n positiveIds = []\n negativeIds = []\n i = 0\n print \"==> resampling ... \",\n while len(positiveIds)+len(negativeIds)<self.ntotal:\n # start a loop from 0 to total size of the new sampe\n # if it catches a number divisable by the sought ratio, update the list of positive cases ids\n # otherwise keep update the list of negative cases ids\n try:\n if i%int(100 / self.posRate) == 0: \n positiveIds.append(self.posId.next())\n else:\n negativeIds.append(self.negId.next())\n except:\n print \"Enter posRate higher than the initial rate\"\n break\n i+=1\n print \"Done sampling\"\n print \"positive:\", len(positiveIds)\n print \"negative:\", len(negativeIds)\n print \"final size:\", len(positiveIds)+len(negativeIds)\n #return sorted list of the two list of ids combined\n return sorted(positiveIds+negativeIds)",
"def clearList(self):\r\n self.skills.clear()\r\n del self.orderedSkills[:]",
"def add_unindexed(self, gsList, dsfilter=None):\n from ..construction import circuitconstruction as _gstrc # maybe move used routines to a circuittools.py?\n\n missing_list = []\n for opstr in gsList:\n if opstr not in self.allstrs_set:\n if dsfilter:\n trans_opstr = _gstrc.translate_circuit(opstr, self.aliases)\n if trans_opstr not in dsfilter:\n missing_list.append(opstr)\n continue\n self.allstrs_set.add(opstr)\n self.allstrs.append(opstr)\n self.unindexed.append(opstr)\n return missing_list",
"def test_queryset_flush_list(self):\r\n q = Addon.objects.all()\r\n objects = list(q) # Evaluate the queryset so it gets cached.\r\n base.invalidator.add_to_flush_list({q.flush_key(): ['remove-me']})\r\n cache.set('remove-me', 15)\r\n\r\n Addon.objects.invalidate(objects[0])\r\n assert cache.get(q.flush_key()) is None\r\n assert cache.get('remove-me') is None",
"def remove_from_cache(self, query):\n return",
"def _clean_query(self, query):\n for object_query in query:\n filters = object_query.get(\"filters\", {}).get(\"expression\")\n self._clean_filters(filters)\n self._macro_expand_object_query(object_query)\n return query",
"def _fill_query_slots(self, queries: List[Query]) -> None:\n while queries and self.query_slots > 0:\n logger.debug(\n f\"{self.query_slots} available query slots, creating query task\"\n )\n query = queries.pop(0)\n query_task_id = self.client.create_query_task(query.query_id)\n self.query_slots -= 1\n query.query_task_id = query_task_id\n self._query_by_task_id[query_task_id] = query\n self._running_queries.append(query)",
"def clear_slow_queries(self):\n request = Request(method=\"delete\", endpoint=\"/query/slow\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryClearError(resp, request)\n return True\n\n return self._execute(request, response_handler)",
"def add_unindexed(self, gsList, dsfilter=None):\n from ..construction import circuitconstruction as _gstrc # maybe move used routines to a circuittools.py?\n #if dsfilter and len(dsfilter) > 8000: dsfilter = None # TEST DEBUG - remove dsfilter check\n\n missing_list = []\n for opstr in gsList:\n if opstr not in self.allstrs_set:\n if dsfilter:\n trans_opstr = _gstrc.translate_circuit(opstr, self.aliases)\n if trans_opstr not in dsfilter:\n missing_list.append(opstr)\n continue\n self.allstrs_set.add(opstr)\n self.allstrs.append(opstr)\n self.unindexed.append(opstr)\n return missing_list"
]
| [
"0.75596094",
"0.5790968",
"0.5650789",
"0.55124557",
"0.5401627",
"0.53630894",
"0.5264245",
"0.5239242",
"0.5194279",
"0.5116806",
"0.50995237",
"0.50639457",
"0.5062219",
"0.49259344",
"0.4909404",
"0.4904073",
"0.48995757",
"0.48682117",
"0.48384356",
"0.48304084",
"0.48232195",
"0.4818517",
"0.47977436",
"0.47457686",
"0.47404698",
"0.47337887",
"0.4732858",
"0.4704221",
"0.47041163",
"0.4700921"
]
| 0.7108111 | 1 |
Establish http routes for the given list of routes containing tuples of the form (route, handler object) | def make_routes(routelist):
return webapp2.WSGIApplication(routelist, debug=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def routes():\n import urllib.request, urllib.parse, urllib.error\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n for line in sorted(output):\n print (line)",
"async def create_routes(self, routes: Sequence[str]):\n exist_routes = await self.get_routes()\n updates = set(routes) - set(exist_routes)\n storage: BaseStorage = await self._context.inject(BaseStorage)\n for route in updates:\n await storage.add_record(\n StorageRecord(\n self.RECORD_TYPE, route, {\"to\": self._sender_verkey}, route\n )\n )",
"def get_handlers():\n handlers = list()\n\n #login\n handlers.append((r'/login', Login))\n handlers.append((r'/logout', Logout))\n\n # main\n handlers.append((r'/', Index))\n\n\n #user\n handlers.extend(get_routes(UserController))\n\n #role\n handlers.extend(get_routes(RoleController))\n\n\n handlers.extend(get_routes(ApiServiceController))\n\n handlers.extend(get_routes(InventarioController))\n\n return handlers",
"def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES",
"def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])",
"def routes(self, *routes):\n self.package.add_routes(*routes)\n for route_group in self.package.routes:\n self.application.make(\"router\").add(\n Route.group(load(route_group, \"ROUTES\", []), middleware=[\"web\"])\n )\n return self",
"def create_routes(self):\r\n self._app.route('/api/autoconf',\r\n methods=['GET'],\r\n endpoint='api_autoconf')(self.entrypoint)\r\n self._app.route('/api/autoconf/<string:session_id>',\r\n methods=['GET', 'POST', 'DELETE'],\r\n endpoint='api_autoconf_status')(self.entrypoint)\r\n self._app.route('/api/autoconf/rgc',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_rgc')(self.entrypoint)\r\n self._app.route('/api/autoconf/pd',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_pd')(self.entrypoint)",
"def create_routes():\n app_dir = os.path.dirname(os.path.abspath(__file__))\n controller_dir = os.path.join(app_dir, \"controllers\")\n routes = Mapper(directory=controller_dir)\n routes.connect(\"/\", controller=\"root\", action=\"index\")\n routes.connect(\"/body\", controller=\"root\", action=\"body\")\n routes.connect(\"/raise_exception\", controller=\"root\", action=\"raise_exception\")\n routes.connect(\"/raise_wrong_code\", controller=\"root\", action=\"raise_wrong_code\")\n routes.connect(\"/raise_custom_code\", controller=\"root\", action=\"raise_custom_code\")\n routes.connect(\"/raise_code_method\", controller=\"root\", action=\"raise_code_method\")\n routes.connect(\"/render\", controller=\"root\", action=\"render\")\n routes.connect(\"/path-params/{year:\\d+}/{month}/\", controller=\"root\", action=\"path_params\") # noqa: W605\n routes.connect(\"/render_exception\", controller=\"root\", action=\"render_exception\")\n routes.connect(\"/response_headers\", controller=\"root\", action=\"response_headers\")\n routes.connect(\"/identify\", controller=\"root\", action=\"identify\")\n return routes",
"def get_routes(self):\n return [\n (route, handler.handler_class, handler.init_kwargs)\n for route, handler in self._routes.iteritems()\n ]",
"def list_routes():\n import urllib\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\n \"{:35s} {:35s} {}\".format(\n rule.endpoint,\n methods,\n url\n )\n )\n output.append(line)\n\n for line in sorted(output):\n print(line)",
"def route(self, routing_url: str, methods: typing.Iterable[str] = (\"GET\",)):\n\n def _inner(func: callable):\n route = self.wrap_route(func)\n self.add_route(route, routing_url, methods)\n return route\n\n return _inner",
"def routes(self, body):\n pass",
"def add_routes(app: web.Application):\n ActionsView.register_view(app)\n PingView.register_view(app)\n CoreShutdownView.register_view(app)\n CoreRestartView.register_view(app)\n ReloadConfigView.register_view(app)\n ListItemsView.register_view(app)\n GetItemView.register_view(app)\n ItemStatesView.register_view(app)\n ItemStateView.register_view(app)\n ActionsView.register_view(app)\n ExecuteActionView.register_view(app)\n ListModulesView.register_view(app)",
"def connect(controller, path_prefix, routes):\n # register the routes with the mapper, while keeping track of which\n # methods are defined for each URL\n urls = {}\n for r in routes:\n url = path_prefix + r['url']\n methods = r['method']\n if isinstance(methods, six.string_types):\n methods = [methods]\n methods_str = ','.join(methods)\n mapper.connect(r['name'], url, controller=controller,\n action=r['action'],\n conditions={'method': methods_str})\n if url not in urls:\n urls[url] = methods\n else:\n urls[url] += methods\n #print (urls)\n\n # now register the missing methods to return 405s, and register\n # a handler for OPTIONS that returns the list of allowed methods\n for url, methods in urls.items():\n all_methods = ['HEAD', 'GET', 'POST', 'PUT', 'PATCH', 'DELETE']\n missing_methods = [m for m in all_methods if m not in methods]\n allowed_methods_str = ','.join(methods)\n mapper.connect(url,\n controller=default_resource,\n action='reject',\n allowed_methods=allowed_methods_str,\n conditions={'method': missing_methods})\n #print('reject %(url)s , %(missing)s' % {'url':url, 'missing':missing_methods})\n if 'OPTIONS' not in methods:\n mapper.connect(url,\n controller=default_resource,\n action='options',\n allowed_methods=allowed_methods_str,\n conditions={'method': 'OPTIONS'})",
"def add_route(app, *args):\n for route in args:\n app.router.add_route(route[0], route[1], route[2])",
"def create_routes_from_things(things):\n for thing in things.values():\n create_routes_from_thing(thing)",
"def add_routes(self):\n pass",
"def register_foaas_routes(foaas_app):\n for route_path, route_text in fix_routes(foaas_routes):\n register_route(foaas_app, route_path, route_text)",
"def add_routes(self):\n\n # create a routegroup\n routegroup = MewloRouteGroup('testsite_routegroup')\n # overide the parent import-pack-directory for the urls in this group? if we don't it will use the controller root set in SITE config\n # routegroup.set_controllerroot(pkgdirimp_controllers)\n\n routegroup.append(\n MewloRoute(\n id = 'home',\n path = \"/\",\n controller = MewloController(function='requests.request_home')\n ))\n\n\n routegroup.append(\n MewloRoute(\n id = 'hello',\n path = '/test/hello',\n args = [\n MewloRouteArgString(\n id = 'name',\n required = True,\n help = \"name of person to say hello to\",\n ),\n MewloRouteArgInteger(\n id = 'age',\n required = False,\n help = \"age of person (optional)\",\n defaultval = 44,\n )\n ],\n controller = MewloController(function=\"requests.request_sayhello\"),\n # we can pass in any extra data which will just be part of the route that can be examined post-matching\n extras = { 'stuff': \"whatever we want\" },\n # we can force the route to simulate as if certain url call args were assigned (this works whether there are RouteArgs for these or not; no type checking is performed on them)\n # this could be useful in two scenarios: first, if we initially wrote code to handle an arg and then changed our mind and want to not let user set that arg; second, if we reuse a controller function in different places and simulate dif arg values for each\n forcedargs = { 'sign': u\"aries\" },\n ))\n\n\n\n from controllers import requests\n routegroup.append(\n MewloRoute(\n id = 'article',\n path = '/article',\n args = [\n MewloRouteArgString(\n id = 'title',\n required = False,\n positional = True,\n help = \"title of article to display\",\n )\n ],\n # another way to specify the controller is to pass in the actual function reference (rather than as a string)\n controller = MewloController(function=requests.request_article),\n ))\n\n routegroup.append(\n MewloRoute(\n id = 'help',\n path = '/user/help',\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_help'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'contact',\n path = '/help/contact',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_contact'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'about',\n path = '/help/about',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_about'),\n ))\n\n\n #static file server\n if (False):\n routegroup.append(\n MewloRoute_StaticFiles(\n id = 'static_files',\n path = '/static',\n controller = MewloController_StaticFiles(\n sourcepath = '${sitefilepath}/staticfilesource'\n ),\n ))\n\n\n # add routegroup we just created to the site\n self.comp('routemanager').append(routegroup)",
"def get_routes():\n output = [f'{\"S. No.\":6}\\t{\"Endpoint\":50}\\t{\"Method\":8}\\n']\n\n for index, rule in enumerate(app.url_map.iter_rules()):\n for i, method in enumerate(rule.methods):\n output.append(f'{index + 1 if i == 0 else \"\":<6}\\t{rule.rule:50}\\t{method:10}')\n\n try:\n output.append(f'\\n{eval(rule.endpoint).__doc__}\\n')\n except NameError:\n output.append('\\n')\n\n return Response('\\n'.join(output), 200, mimetype='text/plain')",
"def create_routes(api: Api):\n api.add_resource(SignUpApi, '/user/signup/')\n api.add_resource(LoginApi, '/user/login/')\n\n api.add_resource(UsersApi, '/users/')\n\n api.add_resource(CafeteriasCreationAPI, '/createcafeteria/')\n api.add_resource(CreateItemsAPI, '/createcafeteriaitems/')",
"def init(loop):\n tasks = JobsHandler()\n config = ConfigHandler()\n task = TaskHandler()\n\n\n\n app = web.Application(loop = loop)\n app.router.add_route('*', '/tasks/{do_something}', tasks.handle)\n app.router.add_route('*', '/config/{do_something}', config.handle)\n app.router.add_route('*', '/task/{id}/{do_something}', task.handle)\n\n handler = app.make_handler()\n srv = yield from loop.create_server(handler, '0.0.0.0', 8080)\n print(\"Server started at http://0.0.0.0:8080\")\n return srv, handler",
"def getRoutes(request):\n routes = {\n 'Item list': '/api/v1/items/',\n 'Item details': '/api/v1/item/<int:pk>/',\n\n 'JWT': '/api/v1/users/login/',\n }\n\n return Response(routes)",
"def initialize_routes(app):\n # Authentification \n app.add_resource(auth.LoginApi, '/auth/login')\n app.add_resource(auth.SignupApi, '/auth/SignupApi')\n # Intialisation et activation d'un parking\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/add')\n app.add_resource(parkingInit.ActivateParking, '/administrate/activate')\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/getall', endpoint='getall')\n # Gestion de Clients\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/get')\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/getById/<int:idUser>', endpoint='get_by_id')\n # statistiques financéres\n app.add_resource(stats.Money, '/administrate/finance/monthly', endpoint='monthly')\n app.add_resource(stats.Money, '/administrate/finance/yearly', endpoint='yearly')",
"def yieldroutes(func):\r\n import inspect # Expensive module. Only import if necessary.\r\n path = '/' + func.__name__.replace('__','/').lstrip('/')\r\n spec = inspect.getargspec(func)\r\n argc = len(spec[0]) - len(spec[3] or [])\r\n path += ('/:%s' * argc) % tuple(spec[0][:argc])\r\n yield path\r\n for arg in spec[0][argc:]:\r\n path += '/:%s' % arg\r\n yield path",
"def list_routes(app):\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n\n methods = ','.join(rule.methods)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, rule))\n output.append(line)\n\n return sorted(output)",
"def setup_routes():\n root = CherryTarball()\n d = cherrypy.dispatch.RoutesDispatcher()\n d.connect('main', '/', controller=root)\n # This enumerates the tarball and connects each file within to a URL in the dispatcher\n tar = tarfile.open(tarball_path)\n for tarobj in tar.getmembers():\n if tarobj.isdir():\n pass # Skip directories\n else:\n d.connect(tarobj.name, tarobj.name, controller=root, action='static', filepath=tarobj.name)\n dispatcher = d\n return dispatcher",
"def route(app, requires_login):\n routes = {\n '/kontoplan/<accounting>': kontoplan,\n '/huvudbok/<accounting>': huvudbok,\n '/balansrakning/<accounting>': balance_report,\n '/resultatrakning/<accounting>': income_statement_report,\n '/verifikationslista/<accounting>': verifications,\n '/arsrapport/<accounting>': year_report,\n '/verifikat/<objectid:verification>': print_verification,\n '/vatreport/<objectid:accounting>': vat_report,\n '/periodrapport/<accounting>': period_report,\n '/salesreport/<objectid:toid>': sales_report,\n '/verifikationslista_andrade/<accounting>': verifications_modified,\n '/accountspayable_report/<accounting>': accountspayable_report,\n '/accountspayable_paymentjournal/<accounting>': accountspayable_paymentjournal\n }\n for route, func in routes.items():\n name = func.__name__\n func = requires_login()(func)\n app.add_url_rule(route, name, func, methods=['GET', 'POST'])",
"def routes(methods_filter, route_filter):\n from utils import list_routes\n\n app_routes = list_routes(app, methods_filter, route_filter)\n if app_routes:\n for line in sorted(app_routes):\n print(\"{:8s} {:{width}s} {}\".format(line['method'], line['route'], line['endpoint'],\n width=70 + line['route_expanded_length']))\n else:\n print(\"No route !\")",
"def add_routes(self, mapper):\n pass"
]
| [
"0.65212214",
"0.64429027",
"0.62730944",
"0.61776197",
"0.61614925",
"0.60371983",
"0.60076576",
"0.5969171",
"0.5950606",
"0.5946168",
"0.59317756",
"0.5844661",
"0.58265465",
"0.5787878",
"0.57873255",
"0.57807803",
"0.57449365",
"0.5722084",
"0.5693369",
"0.5688424",
"0.5666415",
"0.56578577",
"0.56158227",
"0.5573555",
"0.55540997",
"0.5508375",
"0.54951024",
"0.5486557",
"0.5485667",
"0.5484554"
]
| 0.6722308 | 0 |
Replaces all of the ultisnips variables with the corresponding vscode variables | def _replace_variables(self, string):
conversions = {"VISUAL": "TM_SELECTED_TEXT"}
for old, new in conversions.items():
string = string.replace(old, new)
return string | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replace_variables(self, text, context):\n text = text.replace('__VENV_DIR__', context.env_dir)\n text = text.replace('__VENV_NAME__', context.env_name)\n text = text.replace('__VENV_PROMPT__', context.prompt)\n text = text.replace('__VENV_BIN_NAME__', context.bin_name)\n text = text.replace('__VENV_PYTHON__', context.env_exe)\n return text",
"def update_variables(old_contents):\n new_contents = []\n\n for line in old_contents:\n words = line.split()\n\n for word in words:\n # Using the whitespace split above, the keys in the yaml file will\n # have a : at the end, so we need to strip that off before\n # replacing\n if word.endswith(':'):\n word = word[:-1]\n\n if word in VAR_MAPPINGS.keys():\n line = line.replace(word, VAR_MAPPINGS[word])\n\n new_contents.append(line)\n\n return new_contents",
"def updateVariables(self) -> None:\n ...",
"def replace_variables(data, repo_home, config_file=None):\n default_path = os.path.join(os.path.dirname(__file__), 'configs')\n variables = {\n 'DEFAULT_CONFIGS': default_path,\n 'REPO_HOME': repo_home,\n 'REPO_HOME_FALLBACK_DEFAULT_CONFIGS': repo_home if (\n config_file and os.path.exists(os.path.join(repo_home, config_file))) else default_path\n }\n formatter = string.Formatter()\n return [formatter.vformat(item, [], variables) for item in data]",
"def fix_variables(m, variables):\r\n\r\n for var_name, values in variables.items():\r\n for var_index, var_value in values.items():\r\n m.__getattribute__(var_name)[var_index].fix(var_value)\r\n\r\n return m",
"def replace_variables(text, vars=zen_settings['variables']):\n\treturn re.sub(r'\\$\\{([\\w\\-]+)\\}', lambda m: m.group(1) in vars and vars[m.group(1)] or m.group(0), text)",
"async def setup_env(self, *args, **kwargs):\n os.environ[\"SLUGIFY_USES_TEXT_UNIDECODE\"] = \"yes\"",
"def _unify_variables(self, variables):\n variables = [self._lookup(i) if isinstance(i, str) else i\n for i in variables]\n return variables",
"def _unpack_variables(variables: dict) -> str:\n scss_string = \"\"\n for var, value in variables.items():\n scss_string += f\"${var}: {value};\\n\"\n return scss_string",
"def unfix_variables(m, variables):\r\n\r\n for var_name, values in variables.items():\r\n for var_index, var_value in values.items():\r\n m.__getattribute__(var_name)[var_index].unfix(var_value)\r\n\r\n return m",
"def _interactively_fix_missing_variables(project, result):\n if project.problems:\n return False\n\n if not console_utils.stdin_is_interactive():\n return False\n\n # We don't ask the user to manually enter CONDA_PREFIX\n # (CondaEnvRequirement) because it's a bizarre/confusing\n # thing to ask.\n can_ask_about = [\n status for status in result.statuses if (not status and isinstance(status.requirement, EnvVarRequirement)\n and not isinstance(status.requirement, CondaEnvRequirement))\n ]\n\n if can_ask_about:\n print(\"(Use Ctrl+C to quit.)\")\n\n start_over = False\n values = dict()\n for status in can_ask_about:\n reply = console_utils.console_input(\"Value for \" + status.requirement.env_var + \": \",\n encrypted=status.requirement.encrypted)\n if reply is None:\n return False # EOF\n reply = reply.strip()\n if reply == '':\n start_over = True\n break\n values[status.requirement.env_var] = reply\n\n if len(values) > 0:\n status = project_ops.set_variables(project, result.env_spec_name, values.items(), result)\n if status:\n return True\n else:\n console_utils.print_status_errors(status)\n return False\n else:\n return start_over",
"def VarNameReplace(old, new, *vars):\n\t#syntax = [ \"rename variables\" ]\n\tsyntax = []\n\tif not vars or \"*\" in vars:\n\t\tvars = None\n\tvd = spssaux.VariableDict(vars)\n\tfor v in vd:\n\t\toldname = v.VariableName\n\t\tnewname = oldname.replace(old,new).strip()\n\t\tif newname.lower() != oldname.lower():\n\t\t\tsyntax += [ \"(%s=%s)\" % (oldname, newname) ]\n\tif syntax:\n\t\tsyntax.insert(0, \"rename variables\")\n\t\tsyntax += [ spssterm ]\n\t\tif __debug__:\n\t\t\tprint \" \".join(syntax)\n\t\tspss.Submit(syntax)",
"def _replace_variables(self, variables: Dict[str, str] = None):\r\n\r\n for lexeme in self._lexemes:\r\n if lexeme.is_variable and variables is not None:\r\n if lexeme.variable_name in variables.keys():\r\n self.replace_variable(lexeme, variables[lexeme.variable_name])\r\n else:\r\n msg = f'Error: Variable \"{lexeme.variable_name}\" is not defined!'\r\n self.log(msg, logging.ERROR)\r\n raise MissingVariable(msg)\r\n elif lexeme.is_variable:\r\n msg = f'Error: Variable \"{lexeme.variable_name}\" is not defined!'\r\n self.log(msg, logging.ERROR)\r\n raise MissingVariable(msg)",
"def lang_postprocessing(variables):\n return variables",
"def update_collation_env_variables(state, collation):\n state.block_coinbase = collation.header.coinbase",
"def _updated_vars_to_python(loop, context, indent):\n import statements\n \n indent_str = \" \" * indent\n lhs_visitor = lhs_var_visitor()\n loop.accept(lhs_visitor)\n lhs_var_names = lhs_visitor.variables\n # Handle With variables if needed.\n if (context.with_prefix_raw is not None):\n lhs_var_names.add(safe_str_convert(context.with_prefix_raw))\n # Handle For loop index variables if needed.\n if (isinstance(loop, statements.For_Statement)):\n lhs_var_names.add(safe_str_convert(loop.name))\n var_dict_str = \"{\"\n first = True\n for var in lhs_var_names:\n py_var = utils.fix_python_overlap(var)\n if (not first):\n var_dict_str += \", \"\n first = False\n var = var.replace(\".\", \"\")\n var_dict_str += '\"' + var + '\" : ' + py_var\n var_dict_str += \"}\"\n save_vals = indent_str + \"try:\\n\"\n save_vals += indent_str + \" \" * 4 + \"var_updates\\n\"\n save_vals += indent_str + \" \" * 4 + \"var_updates.update(\" + var_dict_str + \")\\n\"\n save_vals += indent_str + \"except (NameError, UnboundLocalError):\\n\"\n save_vals += indent_str + \" \" * 4 + \"var_updates = \" + var_dict_str + \"\\n\"\n save_vals += indent_str + 'var_updates[\"__shell_code__\"] = core.vba_library.get_raw_shellcode_data()\\n'\n save_vals = indent_str + \"# Save the updated variables for reading into ViperMonkey.\\n\" + save_vals\n if (log.getEffectiveLevel() == logging.DEBUG):\n save_vals += indent_str + \"print \\\"UPDATED VALS!!\\\"\\n\"\n save_vals += indent_str + \"print var_updates\\n\"\n return save_vals",
"def upgrade_settings(self, keys):\n upgradable_keys = {\n \"project_dir\": \"%root_dir%\",\n \"source_folder\": \"%source_folder%\",\n \"packages_path\": \"%packages_path%\",\n \"sep\": \"%sep%\",\n \"$\": \"$\"\n }\n for key in keys:\n value, from_global = self.get(key, as_tuple=True)\n value = value.replace(\"%\", \"%%%\")\n for k in upgradable_keys:\n value = value.replace(\"$\" + k, upgradable_keys[k])\n self.set(key, value, to_global=from_global)",
"def __revert_terminal_variables(root):\n var_to_terminal = lambda var_name: var_name[len(TERMINAL_VAR_PREFIX):].lower()\n for node in root.preorder():\n if node.key.startswith(TERMINAL_VAR_PREFIX):\n node.key = var_to_terminal(node.key)\n node.children = []",
"def _clean_non_alphanumeric_chars(self):\n\n for i,variable in enumerate(self.model_description.modelVariables):\n clean_name = re.sub(r'[^a-zA-Z0-9_]', '', variable.name)\n if clean_name != variable.name:\n log = \"Sim variable '{}' has been renamed to '{}' \".format(variable.name, clean_name)\n log += \"to comply with Bonsai naming requirements.\"\n print(log)\n self.model_description.modelVariables[i].name = clean_name\n\n return",
"def vefi_postprocessing(variables):\n return variables",
"def substitude_symbols(self, f):\n pass",
"def replace(self, text):\n for key, val in self.env.items():\n text = text.replace(\"$%s\" % key, val)\n return text",
"def change_environment_variables():\n values = load('environment.yaml')\n\n for key in values.keys():\n os.environ[key] = values[key]\n\n info(f'Changed environment variables to {values}')",
"def setupVariables(self, file, variables, wordsize):\n file.write(self.getStringForVariables(variables, wordsize) + '\\n')\n return",
"def to_pep8_variable(string):\n return string.replace(' ', '_').replace('-', '_').lower()",
"def resolve_modifiers(roll):\n roll.replace(ADVANTAGE_CODE,\"\")\n roll.replace(DISADVANTAGE_CODE,\"\")\n roll.replace(LUCKY_CODE,\"\")\n roll.replace(GREAT_WEAPON_CODE,\"\")\n roll.replace(BRUTAL_CODE,\"\")\n roll.replace(MINIMUM_10_CODE,\"\")\n return roll",
"def replace_vars(cls, itter_list, pattern, replace):\n\n for index, line in enumerate(itter_list):\n itter_list[index] = line.replace(pattern, replace)\n\n return itter_list",
"def _replace_scopes(value):\n\n value = value.replace('linux-*', 'linux')\n value = value.replace('macx', 'macos')\n value = value.replace('win32', 'win')\n\n return value",
"def main():\n var_name = prompt_for_var_name()\n var_words = normalize_var_name(var_name)\n for case in CASES:\n out_var_name = render_case(var_words, case)\n print(out_var_name)",
"def _formatSolutionExportVariableNames(self, acceptable):\n return acceptable"
]
| [
"0.6394522",
"0.58065987",
"0.5606063",
"0.5557312",
"0.54911727",
"0.5431594",
"0.5424897",
"0.5397946",
"0.5366627",
"0.5284732",
"0.52125996",
"0.51902866",
"0.5151045",
"0.51302266",
"0.5101047",
"0.5086249",
"0.5032191",
"0.50283474",
"0.5023271",
"0.5014109",
"0.5012332",
"0.5004279",
"0.49590483",
"0.4948649",
"0.49425057",
"0.49322006",
"0.49252114",
"0.4923549",
"0.49183854",
"0.491766"
]
| 0.6078713 | 1 |
Parses out the snippets into JSON form with the following schema { | def parse_snippet(self, ultisnip_file: Path) -> dict:
snippets_dictionary = {}
with open(ultisnip_file, "r") as f:
for line in f:
if line.startswith("snippet"):
snippet = {}
prefix = line.split()[1].strip()
snippet["prefix"] = prefix
if '"' in line:
snippet_name = line.split('"')[1].strip()
snippet["description"] = snippet_name
body = []
line = next(f)
while not line.startswith("endsnippet"):
body.append(self._replace_variables(line.strip("\n")))
line = next(f)
snippet["body"] = body
snippets_dictionary[prefix] = snippet
return snippets_dictionary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def snippet_list(request):\n if request.method == 'GET':\n quickstart = Quickstart.objects.all()\n serializer = QuickstartSerializer(snippets, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = QuickstartSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)",
"def snippetList(requeset, format = None):",
"def __init__(self):\n if not os.path.isfile(self.DBFILE):\n self.snippets = {}\n return\n\n with open(self.DBFILE) as fobj:\n content = fobj.read()\n if not content.strip():\n content = \"{}\"\n\n self.snippets = {\n name: Snippet(name, data)\n for name, data in json.loads(content).items()\n }",
"def snippet_list(request):\n if request.method == 'GET':\n snippets = Snippet.objects.all()\n serializer = SnippetSerializer(snippets, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = SnippetSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def snippet_list(request):\n if request.method == 'GET':\n snippets = Snippet.objects.all()\n serializer = SnippetSerializer(snippets, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = SnippetSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def snippet_list(request, format=None): # format=None: 데이터 형태에 대한 포맷정보가 붙는다.\r\n if request.method == 'GET':\r\n snippets = Snippet.objects.all()\r\n serializer = SnippetSerializer(snippets, many=True)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'POST':\r\n serializer = SnippetSerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def snippets(self):\n if not hasattr(self, 'solr_response'):\n return {}\n if hasattr(self.solr_response, 'highlighting'):\n joined_snippets = {}\n for uid, snippets in self.solr_response.highlighting.items():\n joined_snippets[uid] = ' '.join([' '.join(snippet) for snippet\n in snippets.values()]).strip()\n return joined_snippets\n return {}",
"def load_from_json(user_id, json):\n year = json[\"year\"]\n week = json[\"week\"]\n text = json.get(\"text\", \"\")\n tags = json.get(\"tags\", [])\n return Snippet.update(user_id, year, week, text, tags)",
"def snippet_list_csrf(request, format=None):\n if request.method == 'GET':\n snippets = Snippet.objects.all()\n serializer = SnippetSerializer(snippets, many=True, context={'request': request})\n return JsonResponse(serializer.data, safe=False)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = SnippetSerializer(data=data, context={'request': request})\n if serializer.is_valid():\n serializer.save(owner=request.user)\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors, status=400)",
"def formatSchema(self):\n schema = json.loads(self.schemaSource)\n stack = [schema]\n # Strip out all the docs\n while len(stack) > 0:\n elm = stack.pop()\n if \"doc\" in elm:\n elm[\"doc\"] = \"\"\n for value in elm.values():\n if isinstance(value, dict):\n stack.append(value)\n elif isinstance(value, list):\n for dic in value:\n if isinstance(dic, dict):\n stack.append(dic)\n jsonData = json.dumps(schema)\n output = \"\\n\".join(textwrap.wrap(jsonData)) + \"\\n\"\n return output",
"def gen_review_data(fp: str) -> None:\n with open(fp, encoding='utf-8') as f:\n for line in f:\n data = json.loads(line)\n utils.preprocess_raw_json(data)\n doc = {\n \"_index\": \"review\",\n \"_source\": data\n }\n yield doc",
"def snippets(self):\n\n queries = []\n\n from tagged_document import TagQuery\n\n # start with a version of ourself that has no expanded snippets\n source_lines = self.cleaned_contents.split(\"\\n\")\n\n # the list of lines we're working with\n output_lines = []\n\n # default to working with files at the current state on disk; this\n # can change to specific refs when a // tag: instruction is\n # encountered in the document\n current_ref = WORKSPACE_REF\n\n tag_regex = re.compile(r\"$\\/\\/\\s*tag:?\\s*(.*)^\", flags=re.IGNORECASE)\n snip_regex = re.compile(r\"$\\/\\/\\s*tag:?\\s*(.*)^\", flags=re.IGNORECASE)\n\n for line in source_lines:\n output_lines.append(line)\n\n # change which tag we're looking at if we hit an instruction to\n # do so\n tag = tag_regex.match(line)\n\n if tag:\n current_ref = tag.groups(1).strip()\n\n # is this a snippet?\n snippet = snip_regex.match(line)\n if snippet:\n\n # figure out what tags we're supposed to be using here\n query_text = snippet.groups(1)\n\n # build the tag query from this\n query = TagQuery(query_text, ref=current_ref)\n\n queries.append(query)\n\n return queries",
"def get_snippet_info_list(self):\n snippets = []\n for snippet in self.snippets:\n snippets.append([snippet[\"title\"], snippet[\"description\"]])\n return snippets",
"def snippet_list_apiview(request, format=None):\n if request.method == 'GET':\n snippets = Snippet.objects.all()\n serializer = SnippetSerializer(snippets, many=True, context={'request': request})\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = SnippetSerializer(data=request.data, context={'request': request})\n if serializer.is_valid():\n serializer.save(owner=request.user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def parse_page(html):\n\n soup = BeautifulSoup(html, \"html.parser\")\n review_soups = soup.find_all(\"script\", type=\"application/ld+json\")\n\n description_list = []\n for soup in review_soups:\n text = soup.string\n # decode the json into python dict\n js_dict = json.loads(text)\n\n if \"review\" in js_dict:\n review_list = js_dict[\"review\"]\n\n for i in range(len(review_list)):\n review_dict = review_list[i]\n description_list.append(review_dict[\"description\"])\n\n return description_list",
"def _sloppy_parse_inline_data (self, scripts):\n inline_data = {};\n for script in scripts:\n contents = str(script.contents[0])\n important_data = ['authURL', 'API_BASE_URL', 'API_ROOT', 'BUILD_IDENTIFIER', 'ICHNAEA_ROOT', 'gpsModel', 'guid', 'esn']\n res = {}\n for key in important_data:\n _res = self._sloppy_parse_user_and_api_data(key, contents)\n if _res != None:\n res.update({key: _res})\n if res != {}:\n inline_data.update(res)\n\n # parse profiles\n profiles = self._sloppy_parse_profiles(contents)\n avatars = self._sloppy_parse_avatars(contents)\n if profiles != None:\n inline_data.update({'profiles': profiles})\n if avatars != None:\n inline_data.update(avatars)\n return inline_data",
"def schema_handler(self, schema):\n dict_for_render = schema.get('properties', {}).items()\n if schema.get('$ref', None):\n def_name = schema.get('$ref').split('/')[-1]\n dict_for_render = self.definitions[def_name].get('properties', {}).items()\n elif schema.get('properties', None) is None:\n return ''\n\n answer_dict = {}\n json_dict = {}\n for opt_name, opt_value in dict_for_render:\n var_type = opt_value.get('format', None) or opt_value.get('type', None) or 'object'\n json_name = self.indent + f':jsonparameter {var_type} {opt_name}:'\n json_dict[json_name] = self.get_json_props_for_response(var_type, opt_value)\n\n answer_dict[opt_name] = self.get_response_example(opt_name, var_type, opt_value)\n if var_type == 'string':\n answer_dict[opt_name] = answer_dict[opt_name].format(opt_name)\n\n self.write('')\n for line in json.dumps(answer_dict, indent=4).split('\\n'):\n self.write(line, self.indent_depth)\n\n self.write('')\n for json_param_name, json_param_value in json_dict.items():\n desc = f'{json_param_value[\"title\"]}{json_param_value[\"props_str\"]}' or 'None'\n self.write(json_param_name + ' ' + desc)",
"def variant_sample_list_2_3():\n return {\n \"schema_version\": \"2\",\n \"status\": \"current\",\n \"project\": \"12a92962-8265-4fc0-b2f8-cf14f05db58b\",\n \"institution\": \"828cd4fe-ebb0-4b36-a94a-d2e3a36cc989\",\n \"variant_samples\": [\n {\n \"selected_by\": \"834559db-a3f6-462c-81a4-f5d7e5e65707\",\n \"date_selected\": \"2021-07-09T16:42:23.694711+00:00\",\n \"variant_sample_item\": \"013bcc47-3885-4682-99c2-800b95765524\",\n \"filter_blocks_used\": {\n \"filter_blocks\": [\n {\n \"name\": \"Breast Cancer\",\n \"query\": \"associated_genotype_labels.proband_genotype_label=Heterozygous&associated_genelists=Breast+Cancer+%2828%29&variant.genes.genes_most_severe_consequence.impact=MODERATE&variant.genes.genes_most_severe_consequence.impact=HIGH\"\n }\n ],\n \"intersect_selected_blocks\": False\n }\n },\n {\n \"selected_by\": \"834559db-a3f6-462c-81a4-f5d7e5e65707\",\n \"date_selected\": \"2021-07-09T16:42:23.696554+00:00\",\n \"variant_sample_item\": \"ac62850f-6f77-4d3b-9644-41699238d0e2\",\n \"filter_blocks_request_at_time_of_selection\": \"some-gibberish\"\n }\n ],\n \"created_for_case\": \"GAPCAJQ1L99X\",\n \"uuid\": \"292250e7-5cb7-4543-85b2-80cd318287b2\"\n }",
"def extract_from_json_ld(self, data, url):\n\n scripts = data.xpath(\"//script[@type='application/ld+json']\")\n records = [ ]\n\n for scr in scripts:\n\n try:\n data = json.loads(scr.text)\n except:\n continue\n\n if not isinstance(data, dict):\n continue\n\n record = dict([ (k, v) for k, v in data.items() if k in self.store_fields ])\n if \"recipeIngredient\" not in record and \"ingredients\" in data:\n record[\"recipeIngredient\"] = data[\"ingredients\"]\n\n record[\"url\"] = url\n record[\"collect_time\"] = datetime.utcnow()\n\n if self.validate(record):\n records.append(record)\n\n return records",
"def on_snippets_loaded(self, snippets):\n self.snippets = snippets",
"def example_data():\n return [\n {'id': 'text', 'title': {'en': 'Text'}, 'type': 'resourcetypes'},\n {'id': 'data', 'title': {'en': 'Data'}, 'type': 'resourcetypes',\n 'tags': ['recommended']},\n ]",
"def build_schema(self):\n field_defs = []\n fields = []\n point_rows = []\n line_rows = []\n polygon_rows = []\n for i in self.json_in['features']: # first iterate through it all and get all the fields\n props = i.get('properties')\n\n for k, v in props.items():\n if k not in fields:\n fields.append(k)\n\n for i in self.json_in['features']: # now fill in any props that any features are missing, and sort them all\n geom = i['geometry']\n props = i['properties']\n for f in fields:\n if f not in props.keys():\n props[f] = ''\n props = OrderedDict(sorted(props.items()))\n\n for k, v in props.items():\n schema_row = [k, \"TEXT\", k.replace('_', ' '), 256]\n if schema_row not in field_defs:\n field_defs.append(schema_row)\n row = [str(v) for k, v in sorted(props.items())] # coerce everything to str cause this stuff is a mess\n parsed_geom = GeoJSONUtils.parse_geometry(geom)\n geotype = parsed_geom['type']\n egeom = parsed_geom['esri_geom']\n\n if geotype == \"POINT\":\n row.insert(0, egeom)\n print(row)\n point_rows.append(row)\n elif geotype == \"POLYLINE\":\n row.insert(0, egeom)\n print(row)\n line_rows.append(row)\n else:\n row.insert(0, egeom)\n print(row)\n polygon_rows.append(row)\n\n return {\n \"fields\": fields,\n \"field_defs\": field_defs,\n \"rows\": [point_rows, line_rows, polygon_rows]\n }",
"def _parse_sources(self):\n return [{\n 'url': (\n 'https://docs.google.com/spreadsheets/d/'\n '1uzgWLWl19OUK6RhkAuqy6O6p4coTOqA22_nmKfzbakE'\n ),\n 'note': 'Google Sheet that Darryl filled out manually'\n }]",
"def get_paste_list_json():\n with contextlib.closing(urllib2.urlopen(PASTE_SERVICE)) as text:\n return json.loads(text.read(), object_hook=Struct)",
"def review_list_handler():\n return jsonify(languages=supported_languages)",
"def json_string(self):\n def visit_children(node):\n \"\"\" Recursively visit the fragments_tree \"\"\"\n output_fragments = []\n for child in node.children_not_empty:\n fragment = child.value\n text = fragment.text_fragment\n output_fragments.append({\n \"id\": text.identifier,\n \"language\": text.language,\n \"lines\": text.lines,\n \"begin\": gf.time_to_ssmmm(fragment.begin),\n \"end\": gf.time_to_ssmmm(fragment.end),\n \"children\": visit_children(child)\n })\n return output_fragments\n output_fragments = visit_children(self.fragments_tree)\n return gf.safe_unicode(\n json.dumps({\"fragments\": output_fragments}, indent=1, sort_keys=True)\n )",
"def serialize(self):\n\n compacted_json = jsonld.compact({\n \"http://schema.org/first_name\": self.first_name,\n \"http://schema.org/last_name\": self.last_name,\n \"http://schema.org/id\": self.id,\n \"http://schema.org/email\": self.email,\n \"http://schema.org/birthDate\": self.birthday.isoformat() if self.birthday else \"\",\n \"http://schema.org/telephone\": self.phone,\n \"http://schema.org/languages\": self.languages,\n \"http://schema.org/number_of_reviews\": len(self.reviews),\n \"http://schema.org/number_of_interviews\": len(self.interviews)\n }, self.get_context())\n\n return compacted_json",
"def create(self, validated_data):\r\n return Snippet.objects.create(**validated_data)",
"def snippet(self) -> global___Snippet:",
"def json_friendly(self):"
]
| [
"0.6311338",
"0.6069845",
"0.59527874",
"0.5810302",
"0.5810302",
"0.5762867",
"0.57340497",
"0.55275506",
"0.54961646",
"0.5488228",
"0.5488193",
"0.54133624",
"0.5376377",
"0.533812",
"0.5299759",
"0.5267433",
"0.52651376",
"0.52602667",
"0.5223165",
"0.51634073",
"0.51520747",
"0.5140619",
"0.5095647",
"0.5076763",
"0.50755733",
"0.506663",
"0.50609416",
"0.5045866",
"0.50334257",
"0.5029934"
]
| 0.63587433 | 0 |
This function gets the trial sets for each leaf node in this graph. | def get_trial_sets(graph, leaves, diff = 2):
trialsets = {}
for leaf in leaves:
parents = get_parent_path(graph, leaf)
psizes = [len(graph.node[p]['leaves']) for p in parents]
root = parents[-1]
l1id = 1
while l1id < len(parents) -1 and psizes[l1id] < 5:
l1id += 1
l2id = min(l1id + 1, len(parents) - 1)
while l2id < len(parents)-1 and \
(psizes[l2id] == psizes[l1id] or psizes[l2id] < 50):
l2id += 1
l3id = min(l2id + 1, len(parents) - 1)
while l3id < len(parents)-1 and \
(psizes[l3id] == psizes[l2id] or psizes[l3id] < 200):
l3id += 1
trialset = [[leaf],
list(graph.node[parents[l1id]]['leaves']),
list(graph.node[parents[l2id]]['leaves']),
list(graph.node[parents[l3id]]['leaves']),
list(graph.node[root]['leaves'])]
"""
p10id = min(int(len(parents) / 10. + 0.5), len(parents)-1)
p10 = parents[p10id]
while (p10 != root and len(graph.node[p10]['leaves']) < diff + 1):
p10id += 1
p10 = parents[p10id]
p25id = min(max(p10id+1, int(len(parents) / 4. + 0.5)), len(parents)-1)
p25 = parents[p25id]
while (p25 != root and len(graph.node[p25]['leaves']) < len(graph.node[p10]['leaves']) + diff):
p25id += 1
p25 = parents[p25id]
p50id = min(max(p25id+1, int(len(parents) / 2. + 0.5)), len(parents)-1)
p50 = parents[p50id]
while (p50 != root and len(graph.node[p50]['leaves']) < len(graph.node[p25]['leaves']) + diff):
p50id += 1
p50 = parents[p50id]
trialset = [[leaf],
graph.node[p10]['leaves'],
graph.node[p25]['leaves'],
graph.node[p50]['leaves'],
graph.node[root]['leaves']]
"""
#for i in range(4,0,-1):
# trialset[i] = list(set(trialset[i]).difference(trialset[i-1]))
trialsets[leaf] = trialset
return trialsets | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_all_roots(trial):\r\n root_nodes = trial.node_map[0].children.copy()\r\n shuffle(root_nodes)\r\n states = []\r\n for node in root_nodes:\r\n trial_copy = copy.deepcopy(trial)\r\n states.append(trial_copy)\r\n node.observe()\r\n trial_copy = copy.deepcopy(trial)\r\n states.append(trial_copy)\r\n return zip(states, [node.label for node in trial.observed_nodes] + [0])",
"def check_all_leaves(trial):\r\n leaf_nodes = trial.get_leaf_nodes()\r\n shuffle(leaf_nodes)\r\n states = []\r\n max_value = trial.get_max_dist_value()\r\n for node in leaf_nodes:\r\n trial_copy = copy.deepcopy(trial)\r\n states.append(trial_copy)\r\n node.observe()\r\n #if node.value >= max_value:\r\n # trial_copy = copy.deepcopy(trial)\r\n # states.append(trial_copy)\r\n # return zip(states, [node.label for node in trial.observed_nodes] + [0])\r\n trial_copy = copy.deepcopy(trial)\r\n states.append(trial_copy)\r\n return zip(states, [node.label for node in trial.observed_nodes] + [0])",
"def getSets():",
"def get_leafs(self) -> list:\n return self._leafs[:]",
"def getTestSets():\n return list(_testsetdict.keys())",
"def lego_sets():\n # you must replace this line and return your own list\n return lego_sets_list",
"def _all_subnodes(self):\n return self.__dict__.values()",
"def lego_sets():\n # you must replace this line and return your own list\n return []",
"def get_leaf_nodes(self):\n pass",
"def get_leaf_set(subtree):\n return set([l.taxon.label for l in subtree.leaf_nodes()])",
"def leaf_nodes(self):\n return self.nodes.filter(lft=models.F('rght') - 1)",
"def test_leafs():\n leafs = ['s6675', 's136315', 's10765', 's106594', 's131']\n for leaf in leafs:\n synset = germanet_data.get_synset_by_id(leaf)\n np.testing.assert_equal(synset.is_root(), False)\n np.testing.assert_equal(synset.is_leaf(), True)",
"def _get_leaf_node_paths(t\n ):\n return {\n k: _get_leaf_node_path(k, t)\n for k, v in t.get_descendants().items()\n if isinstance(v.node, prensor.LeafNodeTensor)\n }",
"def get_trials(self):\n return self._trials",
"def getSets(unique_name=None):",
"def roots(self):\n yielded = {}\n for dataset in self._parents:\n if len(dataset._parents) == 0:\n if dataset not in yielded:\n yield dataset\n yielded[dataset] = True\n else:\n for gp in dataset.roots():\n if gp not in yielded:\n yield gp\n yielded[gp] = True",
"def getHierarchies():",
"def getHierarchies():",
"def samples(self):\n samples = set()\n for trio in self.trios:\n if trio.child is None or trio.mother is None or trio.father is None:\n continue\n samples.add(trio.father)\n samples.add(trio.mother)\n samples.add(trio.child)\n return list(samples)",
"def get_all_setups_nodes():\n ta_roots = get_all_setups_roots()\n ta_nodes = [TechAnim_Setup(x) for x in ta_roots]\n return ta_nodes",
"def _cluster(self):\n self._not_included = self.data\n self.leaves = []\n flag = int(rand() * len(self.data))\n flag = self._generate(flag)\n while len(self._not_included) > 0:\n flag = self._generate(flag)\n if flag == -1:\n break\n pass\n self._remember.append({\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n print(len(self._remember), {\n 'threshold': self._base_threshold,\n 'result': len(self.leaves)\n })\n return",
"def level_sets(self):\n in_degrees = self.in_degree(labels=True)\n level = [x for x in in_degrees if in_degrees[x]==0]\n Levels = []\n while len(level) != 0:\n Levels.append(level)\n new_level = []\n for x in level:\n for y in self.neighbors_out(x):\n in_degrees[y] -= 1\n if in_degrees[y] == 0:\n new_level.append(y)\n level = new_level\n return Levels",
"def get_nodes(self):\n return_set = set()\n for key in self._main_dictionary:\n return_set.add(key)\n return return_set",
"def subsets(self):\n return set(self.subset_map.values())",
"def finalSubsets(self):\n subs = self.allSubsets()\n for s in self.graph.observed:\n subs = subs[subs[:,s] == 1,] # remove subsets where values in s are not True\n return subs",
"def leaf_nodes(self):\n deps = set([\n item for sublist in self.edges.values() for item in sublist\n ]) # Now contains all nodes that contain dependencies.\n return (x for x in self.nodes if x not in deps) # Generator that\n # contains all nodes *without* any dependencies (leaf nodes)",
"def get_leaves(self):\n return list(set(list(self._traverse_tree())))",
"def leaf_children(self) -> list[set[\"HierarchicalCategory\"]]:\n ret = []\n for children in self.children:\n n = []\n for child in children:\n if child.is_leaf:\n n.append([{child}])\n else:\n n.append(child.leaf_children)\n ret += [set(itertools.chain(*x)) for x in itertools.product(*n)]\n return ret",
"def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt",
"def get_leafs(self):\n # Query leafs from the fabric\n class_query = ClassQuery('fabricNode')\n class_query.propFilter = 'eq(fabricNode.role, \"leaf\")'\n leafs = self.moDir.query(class_query)\n # creates two lists that will include the distinguished names and the relative names\n result = []\n dns = []\n rns = []\n for leaf in leafs:\n dns.append(str(leaf.dn))\n rns.append(str(leaf.rn))\n # The following lines human sort the lists (e.g. 1,2,3,11 and not 1,11,2,3)\n dns.sort(key=natural_keys)\n rns.sort(key=natural_keys)\n result.append(dns)\n result.append(rns)\n # The result is a list with two lists inside. One list has distinguished names and the other the relative names\n return result"
]
| [
"0.6844502",
"0.6840591",
"0.6083935",
"0.5979716",
"0.59425783",
"0.5910932",
"0.5865636",
"0.5863153",
"0.5837333",
"0.5722749",
"0.57013834",
"0.56999",
"0.5659051",
"0.5631628",
"0.5579923",
"0.5571343",
"0.55593115",
"0.55593115",
"0.55299985",
"0.54905343",
"0.5464976",
"0.5454832",
"0.5450149",
"0.54475236",
"0.54250365",
"0.54245454",
"0.5402697",
"0.5388891",
"0.5374911",
"0.5366421"
]
| 0.7629318 | 0 |
generate a trial from the given trialset and image maps | def generate_trial(trialset, synset2img, trialtype, num_imgs):
# randomly shuffle the sets.
for s in trialset:
random.shuffle(s)
source = trialset[trialtype]
# sample images
# make sure we have the most specific guy
src_imgs = [random.choice(synset2img[trialset[0][0]])]
for i in range(num_imgs - 1):
synset = random.choice(source)
src_imgs.append(random.choice(synset2img[synset]))
target_imgs = []
# target imgs are sampled in a structured way
# 12 images in domain
for i in range(4):
for j in range(3):
synset = random.choice(trialset[i])
target_imgs.append(random.choice(synset2img[synset]))
# 12 images outside the domain
for i in range(12):
synset = random.choice(trialset[-1])
target_imgs.append(random.choice(synset2img[synset]))
# shuffling the images to minimize the ordering effect
random.shuffle(src_imgs)
random.shuffle(target_imgs)
return src_imgs, target_imgs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stim_generate(params,stim_list,train):\n if train:\n stim = list(stim_list.keys())\n shuffled_stim = shuffled_images = []\n #for each run get equal amounts of stim and shuffle\n #can only work if 'trials_per_run' is divisible by number of stims\n for run in range(params['runs']):\n temp_list = stim * int(params['trials_per_run']/len(stim))\n random.shuffle(temp_list)\n shuffled_stim = shuffled_stim + temp_list\n for stim in shuffled_stim:\n shuffled_images.append(stim_list[stim])\n else:\n #open the test stim images\n images = []\n for each in os.listdir(stim_list):\n if '.DS_Store' not in each:\n img = os.path.join(stim_list,each)\n images.append([img,img])\n\n #randomize images\n random.shuffle(images)\n\n #split images into matches and not matches\n matches,not_matches = images[:len(images)//2],images[len(images)//2:]\n\n #shift the image list so that the pictures dont match\n not_matches = shift_list(not_matches)\n\n #combine matches and not matches and randomize order\n shuffled_images = matches + not_matches\n random.shuffle(shuffled_images)\n\n #get category labels\n shuffled_stim = []\n for img in shuffled_images:\n if img[0] == img[1]:\n shuffled_stim.append(\"match\")\n else:\n shuffled_stim.append(\"not match\")\n\n return(shuffled_stim,shuffled_images)",
"def create_sets(\n path: tuple,\n maps_ath: str,\n gt_maps_path: str,\n ds_index: int = 0,\n skip_black: bool = True,\n skip_water: bool = True,\n skip_no_class: bool = True,\n):\n maps = get_maps(maps_ath, MAPS_EXT)\n gt_maps = get_maps(gt_maps_path, GT_MAPS_EXT)\n logger.info(\n \"Found %i aerial maps and %i ground truth maps.\", len(\n maps), len(gt_maps)\n )\n with tqdm(total=len(maps), desc=\"Maps\") as pbar:\n for m in maps:\n try:\n ortho_map = Image.open(m)\n gt_map = Image.open(get_gt_map(m, gt_maps))\n\n if ortho_map.size == gt_map.size:\n ortho_map_cv2 = pil_to_opencv(ortho_map)\n gt_map_cv2 = pil_to_opencv(gt_map)\n boxes = gen_crop_area(\n SET_RESOLUTION[0], SET_RESOLUTION[1], ortho_map.size\n )\n center_points = gen_center_points(\n SET_RESOLUTION[0], SET_RESOLUTION[1], ortho_map.size\n )\n with tqdm(\n total=len(boxes),\n leave=False,\n desc=\"Sets for {}\".format(os.path.basename(m)),\n ) as pbar2:\n for b in boxes:\n map_crop = ortho_map.crop(b)\n gt_map_crop = gt_map.crop(b)\n\n if add_to_set(\n map_crop,\n gt_map_crop,\n skip_black=skip_black,\n skip_water=skip_water,\n skip_no_class=skip_no_class,\n ):\n map_fn = os.path.join(\n path[1], \"{}_x.png\".format(ds_index)\n )\n gt_map_fn = os.path.join(\n path[2], \"{}_y.png\".format(ds_index)\n )\n map_crop.save(map_fn)\n gray_gt_map_crop = reduce_and_grayscale(\n gt_map_crop)\n gray_gt_map_crop.save(gt_map_fn)\n ds_index += 1\n\n pbar2.set_description(\n \"Sets for {}(index: {})\".format(\n os.path.basename(m), ds_index\n )\n )\n pbar2.update()\n else:\n continue\n except Exception as e:\n logger.error(\"Error occurred while creating set: %s\", e)\n logger.error(\"Skipping %s\", m)\n pbar.update()",
"def create_trials(self):\n exp_s = self.settings['experiment']\n\n # get stimuli\n with h5py.File(exp_s['stim_file'], 'r') as f:\n self.stim_rgb_arr = np.array(f['patterns'])\n self.stimuli = [GratingStim(win=self.session.win, tex=s.T)\n for s in self.stim_rgb_arr]\n self.fixation_dot = Circle(self.win, radius=0.1, edges=100, color='r')\n\n # timings that define how many trials etc.\n mean_trial_duration = exp_s['isi_min'] + exp_s['isi_mean']\n self.n_trials = round(\n (exp_s['total_run_duration'] - (exp_s['start_end_period'] * 2)) / mean_trial_duration)\n tolerance_range = [exp_s['total_run_duration'] - exp_s['temporal_tolerance'],\n exp_s['total_run_duration'] + exp_s['temporal_tolerance']]\n\n # and then search for the right isis to fill up the experiment exactly.\n isis = np.random.exponential(\n exp_s['isi_mean'], n_trials) + exp_s['isi_min']\n exp_duration = isis + (exp_s['start_end_period'] * 2)\n while exp_duration < tolerance_range[0] or exp_duration > tolerance_range[1]:\n isis = np.random.exponential(\n exp_s['isi_mean'], n_trials) + exp_s['isi_min']\n exp_duration = isis + (exp_s['start_end_period'] * 2)\n pretimes = np.zeros(self.n_trials)\n pretimes[[0, -1]] = exp_s['start_end_period']\n\n # deciding which stimuli to show when\n self.how_many_images_per_trial = exp_s['stim_flicker_freq'] * \\\n exp_s['stim_duration']\n self.duration_per_image = 1.0/exp_s['stim_flicker_freq']\n\n trial_images = np.random.randint(len(self.stimuli), size=(\n self.n_trials, self.how_many_images_per_trial))\n\n for i in range(self.n_trials):\n self.trials.append(HRFMapperTrial(\n session=self,\n trial_nr=i,\n phase_durations=[\n pretimes[i], exp_s['stim_duration'], isis[i]-exp_s['stim_duration']],\n phase_names=['slack', 'stim', 'isi'],\n parameters={'isi': isis[i],\n 'stim_list': trial_images[i]},\n timing='seconds',\n load_next_during_phase=None,\n verbose=True,\n condition='hrf'\n ))",
"def loadTrial(trialPath):\r\n\r\n trialDict = ft.loadFile(trialPath)\r\n\r\n trial = RetinotopicMappingTrial(mouseID=trialDict['mouseID'], # str, mouseID\r\n dateRecorded=trialDict['dateRecorded'], # int, date recorded, yearmonthday\r\n comments=trialDict['comments'], # str, number of the trail on that day\r\n altPosMap=trialDict['altPosMap'], # altitude position map\r\n aziPosMap=trialDict['aziPosMap'], # azimuth position map\r\n altPowerMap=trialDict['altPowerMap'], # altitude power map\r\n aziPowerMap=trialDict['aziPowerMap'], # azimuth power map\r\n vasculatureMap=trialDict['vasculatureMap'], # vasculature map\r\n params=trialDict['params'])\r\n\r\n try:\r\n trial.altPosMapf = trialDict['altPosMapf']\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n trial.aziPosMapf = trialDict['aziPosMapf']\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n trial.altPowerMapf = trialDict['altPowerMapf']\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n trial.aziPowerMapf = trialDict['aziPowerMapf']\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n if isinstance(trialDict['finalPatches'].values()[0], dict):\r\n trial.finalPatches = {}\r\n for area, patchDict in trialDict['finalPatches'].items():\r\n try:\r\n trial.finalPatches.update({area: Patch(patchDict['array'], patchDict['sign'])})\r\n except KeyError:\r\n trial.finalPatches.update({area: Patch(patchDict['sparseArray'], patchDict['sign'])})\r\n else:\r\n pass\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n if isinstance(trialDict['finalPatchesMarked'].values()[0], dict):\r\n trial.finalPatchesMarked = {}\r\n for area, patchDict in trialDict['finalPatchesMarked'].items():\r\n try:\r\n trial.finalPatchesMarked.update({area: Patch(patchDict['array'], patchDict['sign'])})\r\n except KeyError:\r\n trial.finalPatchesMarked.update({area: Patch(patchDict['sparseArray'], patchDict['sign'])})\r\n else:\r\n pass\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n trial.signMap = trialDict['signMap']\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n trial.signMapf = trialDict['signMapf']\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n trial.rawPatchMap = trialDict['rawPatchMap']\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n trial.rawPatches = trialDict['rawPatches']\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n trial.eccentricityMapf = trialDict['eccentricityMapf']\r\n except KeyError:\r\n pass\r\n\r\n return trial",
"def generate_test_images():\n results = {}\n for antialias, aa_descriptor in antialias_options:\n for canvas, canvas_descriptor in canvas_options:\n for func in (generate_test_001,\n generate_test_002,\n generate_test_003,\n generate_test_004,\n generate_test_005,\n generate_test_007,\n ):\n points, name = func()\n aggregators = draw_lines(canvas, points, antialias)\n img = shade(aggregators, cmap=cmap01)\n description = \"{}_{}_{}\".format(\n name, aa_descriptor, canvas_descriptor)\n results[description] = img\n\n for func in (generate_test_006, ):\n points, name = func()\n aggregator = draw_multi_segment_line(canvas, points, antialias)\n img = shade(aggregator, cmap=cmap01)\n description = \"{}_{}_{}\".format(\n name, aa_descriptor, canvas_descriptor)\n results[description] = img\n return results",
"def recordTrial(stim_trial_indices, instr_key, condition_binary, trial_target_times, possible_letters, wheel_matrix_info, ec, trial_path, record_pupil = True):\n\n\t# DRAW SELECTIONS ON SCREEN\n\twheel_loc = CircularLayout(3, radius = .6, angles = [- phi, (phi / 2), 0]) # must be three wheels\n\tletter_loc = [];\n\tfor i in len(wheel_matrix_info):\n\t\ttemp = CircularLayout(wheel_matrix_info[i], radius = .15, relative_center = wheel_loc.positions[i])\n\t\tletter_loc.append(temp.positions)\n\n\t# letterloc1 = CircularLayout(wheel_matrix_info[0], relative_center = wheel_loc.positions[0])\n\t# letterloc1 = CircularLayout(wheel_matrix_info[1], relative_center = wheel_loc.positions[1])\n\t# letterloc1 = CircularLayout(wheel_matrix_info[2], relative_center = wheel_loc.positions[2])\n\n\t# DASCogLoad drawing for reference\n\t# cur_cols = np.where(attn[block][tnum], 'Lime', 'LightGray').tolist()\n\t# txt_obj = []\n\t# for n, cat in enumerate(cur_cats):\n\t# \tcat = '<center>' + cat + '</center>' # hack (pyglet bug)\n\t# \ttxt_obj.append(ec.screen_text(cat, pos=[xpos[n], ypos[n]],\n\t# \t\t\t\t\t\t\t\t color=cur_cols[n], font_size=36))\n\t# end_wait = ec.current_time + pretrial_wait\n\t# ec.flip()\n\n\t# Refer to voc_meg.py and pupillometry.py and PylinkAPI.pdf for referenc on recording pupillometry\n\n\t# load WAVs for this block\n\tec.screen_text('loading...')\n\tstims = []\n\tstims.append(read_wav(path)[0]) # ignore fs\n\tec.flip()\n\n\n\t# get ready\n\tstim_dur = stim.shape[-1] / ec.stim_fs\n\tec.load_buffer(stim)\n\tec.wait_until(end_wait)\n\n\t# play stim\n\tec.start_stimulus(flip=False)\n\tec.stop()\n\n\t# clear screen\n\tec.flip()\n\n\t# write out data\n\tec.write_data_line('target_time', target_time)\n\tec.trial_ok()\n\n\treturn stim_trial_indices",
"def create_mapping(project, img):\n with BMI(_username, _password, project) as bmi:\n ret = bmi.mount_image(img)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo('Success')\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def trial_setup(params):\n runs = []\n trials = []\n for run in range(params['runs']):\n runs = runs + [run]*params['trials_per_run']\n for trial in range(params['trials_per_run']):\n trials.append(trial)\n return(runs,trials)",
"def createTrialDict(trial, block):\n trial_type = ''\n\n if trial.visual_file:\n visual_file = trial.visual_file.url\n vftype = trial.visual_file.filetype\n if 'video' in vftype.lower():\n trial_type = 'video'\n else:\n trial_type = 'image'\n else:\n visual_file = ''\n\n if trial.audio_file:\n audio_file = trial.audio_file.url\n else:\n audio_file = ''\n\n if not trial.response_keys:\n trial.response_keys = ''\n\n trial_dict = {\n 'trial_id': trial.id,\n 'background_colour': block.background_colour,\n 'label': trial.label,\n 'visual_onset': trial.visual_onset,\n 'audio_onset': trial.audio_onset,\n 'audio_file': audio_file,\n 'visual_file': visual_file,\n 'max_duration': trial.max_duration,\n 'response_keys': trial.response_keys.lower().replace(' ', '').split(','),\n 'require_user_input': trial.user_input, #'NO', 'YES'\n 'trial_type': trial_type,\n 'record_media': trial.record_media,\n }\n return trial_dict",
"def test_imageset_annotationset_download(\n client, two_challenge_sets, phase, kind\n):\n\n imageset = two_challenge_sets.challenge_set_1.challenge.imageset_set.get(\n phase=phase\n )\n image_file = ImageFileFactory()\n imageset.images.add(image_file.image)\n\n annotationset = AnnotationSetFactory(base=imageset, kind=kind)\n annotation_file = ImageFileFactory()\n annotationset.images.add(annotation_file.image)\n\n tests = [\n # (\n # image response + annotation response not test ground truth,\n # annotation response - testing gt,\n # user\n # )\n (403, 403, None),\n (403, 403, UserFactory()),\n (403, 403, UserFactory(is_staff=True)),\n (403, 403, two_challenge_sets.challenge_set_1.non_participant),\n (302, 403, two_challenge_sets.challenge_set_1.participant),\n (302, 403, two_challenge_sets.challenge_set_1.participant1),\n (302, 302, two_challenge_sets.challenge_set_1.creator),\n (302, 302, two_challenge_sets.challenge_set_1.admin),\n (403, 403, two_challenge_sets.challenge_set_2.non_participant),\n (403, 403, two_challenge_sets.challenge_set_2.participant),\n (403, 403, two_challenge_sets.challenge_set_2.participant1),\n (403, 403, two_challenge_sets.challenge_set_2.creator),\n (403, 403, two_challenge_sets.challenge_set_2.admin),\n (302, 302, two_challenge_sets.admin12),\n (302, 403, two_challenge_sets.participant12),\n (302, 302, two_challenge_sets.admin1participant2),\n ]\n\n for test in tests:\n\n response = get_view_for_user(\n url=image_file.file.url, client=client, user=test[2]\n )\n assert response.status_code == test[0]\n\n response = get_view_for_user(\n url=annotation_file.file.url, client=client, user=test[2]\n )\n if phase == ImageSet.TESTING and kind == AnnotationSet.GROUNDTRUTH:\n # testing ground truth\n assert response.status_code == test[1]\n else:\n # training ground truth, training predictions and\n # ground truth predictions\n assert response.status_code == test[0]",
"def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")",
"def generate_scanset_metadata( image_set_dictionary, html_base_path, session_id ):\r\n\tcur_subj_info = {}\r\n\r\n\t\"\"\"need to think through the data structure a bit more.... but can always adjust later \"\"\"\r\n\tcur_subj_info['session_id'] = session_id\r\n\t#cur_subj_info['img_id'] = counter\r\n\tcur_subj_info['subject_id'] = session_id.split('/')[0]\r\n\tglobal counter\r\n\t\r\n\tnii_image_dict = image_set_dictionary['nii_images']\r\n\tpng_image_dict = image_set_dictionary['png_image_set']\r\n\r\n\tscan_metadata = {}\r\n\tfor scan in nii_image_dict:\r\n\t\tprint \"propcessing \", scan\r\n\t\tnii_img = nii_image_dict[scan]['base_image'][0]\r\n\t\tprint nii_img\r\n#\t\tif 'mask' not in scan:\r\n\r\n#\t\tif 'mask' not in scan:\r\n#\t\t\tnii_img = nii_image_dict[scan]['base_image'][0]\r\n#\t\telse:\r\n#\t\t\tcontinue\r\n#\t\tprint \"HI DAVE!\"\r\n\r\n\r\n\t\tif not nii_img:\r\n\t\t\tprint \"did not find base image for\",nii_image_dict\r\n\t\t\tcontinue\r\n\t\tpng_img = html_path_root+ png_image_dict[scan]\r\n\t\tprint nii_img,\"is being passed\"\r\n\r\n\t\t(dim_x, dim_y, dim_z, vox_size_x, vox_size_y, vox_size_z, image_orientation )= igcf.get_nii_image_info(nii_img)\r\n\t\timage_info = Image.open(png_img)\r\n\t\twidth, height = image_info.size\r\n\t\t#print width,height,dim_x,dim_y,dim_z,vox_size_x,vox_size_y,vox_size_z\t\r\n\t\tscan_info = {}\r\n\t\tscan_info['slice_width'] = dim_x\r\n\t\tscan_info['slice_height'] = dim_y\r\n\t\tscan_info['num_slices'] = dim_z\r\n\t\tscan_info['main_image_width'] = width\r\n\t\tscan_info['main_image_height'] = height\r\n\t\tscan_info['nii_image'] = nii_img\r\n\t\tscan_info['base_url'] = png_img.replace(html_path_root,'')\r\n\t\tscan_metadata[scan] = scan_info\r\n\r\n\t\t### There can be one or MORE masks for a given base image... so I will return a list of\r\n\t\t#dictionaries..\r\n\t\tmask_list = nii_image_dict[scan]['masks']\r\n\t\tmask_id = 0\r\n\r\n\t\tmask_info_list = []\r\n\t\tfor mask in mask_list:\r\n\t\t\tcur_mask_info = {}\r\n\t\t\t### I'll call the mask by it's basename\r\n\t\t\tprint mask,\"was passed...\"\r\n\t\t\tmask_base = os.path.basename(mask)\r\n\t\t\tnii_img = nii_image_dict[scan]['masks'][mask_id]\r\n\t\t\tprint nii_image_dict,'mask_id is',mask_id\r\n\t\t\tprint \"nii maeg found should be\",nii_img\r\n\t\t\tif not nii_img:\r\n\t\t\t\tprint \"did not find a valid mask image for \",nii_image_dict\r\n\t\t\t\tcontinue\r\n\r\n\r\n\t\t\tcur_mask_info['name'] = mask_base\r\n\t\t\tcur_mask_info['id'] = mask_id\r\n\t\t\tcur_mask_info['nii_file'] = nii_img\r\n\t\t\t## NEED TO ADD IN THE MASK_URL\r\n#\t\t\tcur_mask_info['mask_url'] = \t\t\t\r\n\t\t\tprint png_image_dict\r\n\t\t\tpng_img = html_path_root+ png_image_dict[scan]\r\n\t\t\tprint nii_img,\"is being passed\"\r\n\t\t\tcur_mask_info['mask_url'] = png_img.replace(html_path_root,'')\r\n\r\n\t\t\tmask_info_list.append( cur_mask_info )\r\n\t\r\n\t\t\r\n\t\t\tmask_id +=1\r\n#\t\tprint cur_mask_info\r\n\t\tcur_subj_info['masks'] = mask_info_list\r\n\t\tscan_metadata[scan]['masks'] = [ mask_info_list]\r\n#\t\tprint mask_info_list\r\n\r\n\tcur_subj_info['image_data'] = scan_metadata\r\n\tcounter += 1\t\r\n\treturn { 'session_name': session_id , 'session_metadata': cur_subj_info }",
"def gen_trial_inst_phase(self):\n pm_action_flags = np.arange(2,2+self.nmaps)\n return pm_action_flags,pm_action_flags",
"def main(unused_argv):\n with tf.io.gfile.GFile(_INTPUT_TRIPLET_LIST_FILEPATH.value, 'r') as fid:\n triplets_list = np.loadtxt(fid, dtype=str)\n\n triplet_dicts = []\n for triplet in triplets_list:\n triplet_dict = {\n image_key: os.path.join(_INPUT_DIR.value, triplet, image_basename)\n for image_key, image_basename in _INTERPOLATOR_IMAGES_MAP.items()\n }\n triplet_dicts.append(triplet_dict)\n p = beam.Pipeline('DirectRunner')\n (p | 'ReadInputTripletDicts' >> beam.Create(triplet_dicts) # pylint: disable=expression-not-assigned\n | 'GenerateSingleExample' >> beam.ParDo(\n util.ExampleGenerator(_INTERPOLATOR_IMAGES_MAP))\n | 'WriteToTFRecord' >> beam.io.tfrecordio.WriteToTFRecord(\n file_path_prefix=_OUTPUT_TFRECORD_FILEPATH.value,\n num_shards=_NUM_SHARDS.value,\n coder=beam.coders.BytesCoder()))\n result = p.run()\n result.wait_until_finish()\n\n logging.info('Succeeded in creating the output TFRecord file: \\'%s@%s\\'.',\n _OUTPUT_TFRECORD_FILEPATH.value, str(_NUM_SHARDS.value))",
"def trial_logicals(trials, meta):\n uobjs = list(set(meta['obj']))\n nobjs = len(uobjs)\n uimgs = list(meta['id'])\n nimgs = len(uimgs)\n use_trial_samples = 'choice' in list(trials.dtype.names)\n summary = [{'sam':[],'dist':[],'choice':[],'imgind':[]} for i in range(nobjs)]\n for i,OI in enumerate(uobjs):\n summary[i]['sam'] = (trials['sample_obj'] == OI)\n summary[i]['dist'] = (trials['dist_obj'] == OI)\n summary[i]['imgind'] = np.array([uimgs.index(m['id']) for m in meta if (m['obj'] == OI) ])\n if use_trial_samples:\n summary[i]['choice'] = np.array(trials['choice'] == OI)\n\n image_summary =[{'id':[]} for i in range(nimgs)]\n for i,OI in enumerate(uimgs):\n image_summary[i]['id'] = (trials['id'] == OI)\n return use_trial_samples, summary, image_summary",
"def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)",
"def generate_tpu(self, prompts: List[str]):\n from flax.training.common_utils import shard # pylint:disable=g-import-not-at-top,g-importing-member\n import jax # pylint:disable=g-import-not-at-top\n import time # pylint:disable=g-import-not-at-top\n import numpy as np # pylint:disable=g-import-not-at-top\n\n rng = jax.random.PRNGKey(0)\n rng = jax.random.split(rng, jax.device_count())\n\n assert prompts, \"prompt parameter cannot be empty\"\n print(\"Prompts: \", prompts)\n prompt_ids = self._pipeline.prepare_inputs(prompts)\n prompt_ids = shard(prompt_ids)\n print(\"Sharded prompt ids has shape:\", prompt_ids.shape)\n if self._run_with_profiler:\n jax.profiler.start_trace(self._profiler_dir)\n\n time_start = time.time()\n images = self._p_generate(prompt_ids, self._p_params, rng)\n images = images.block_until_ready()\n elapsed = time.time() - time_start\n if self._run_with_profiler:\n jax.profiler.stop_trace()\n\n print(\"Inference time (in seconds): \", elapsed)\n print(\"Shape of the predictions: \", images.shape)\n images = images.reshape(\n (images.shape[0] * images.shape[1],) + images.shape[-3:])\n print(\"Shape of images afterwards: \", images.shape)\n return self._pipeline.numpy_to_pil(np.array(images))",
"def sampling():\n # make directory for street images\n streetImageOutputFolder = CONFIG[\"sampling\"][\"streetImageOutputFolder\"]\n makeDirectory(streetImageOutputFolder)\n\n # Get preprocessed point data\n intersectionPointFile = CONFIG[\"shapefile\"][\"intersectoinPointFile\"]\n pointInfoFile = CONFIG[\"shapefile\"][\"pointInfoFilename\"]\n\n pointInfo = readPointFile(pointInfoFile)\n intersectionPointInfo = readIntersectionPointInfo(intersectionPointFile)\n\n # Filter point data that has street images taken within the specified period.\n maxYear = CONFIG[\"gmap\"][\"streetImageMaxYear\"]\n minYear = CONFIG[\"gmap\"][\"streetImageMinYear\"]\n filteredPoints = filterPointByYear(pointInfo, maxYear, minYear)\n\n IMG_NAME_COL_NUM = 5\n LAT_LNG_COL_NUM = 2\n\n # Sample street images, the return is list of sample info\n sampleNum = CONFIG[\"sampling\"][\"sampleNum\"]\n initImageNumber = CONFIG[\"sampling\"][\"initImageNumber\"]\n sampleData = sampleAndDownloadStreetImage(filteredPoints, sampleNum, initImageNumber, initImageNumber, streetImageOutputFolder, intersectionPointInfo)\n imageNames = [streetImageOutputFolder + \"/\" + data[IMG_NAME_COL_NUM] for data in sampleData]\n links = GDriveUpload(imageNames, \"Sampled_Image\")\n\n for i in xrange(len(sampleData)):\n imageName = streetImageOutputFolder + \"/\" + sampleData[i][IMG_NAME_COL_NUM]\n sampleData[i].append(links[imageName])\n\n columnTitle = [\"Sample Number\", \"Sampled Point Number\", \"Latitude + Longitude\", \"Heading\", \"Date\", \"Image Name\", \"Road Types\", \"Web Link\"]\n sampleData.insert(0, columnTitle)\n\n # output to csv file\n outputCSV(sampleData, CONFIG[\"sampling\"][\"csvFilename\"])\n\n # plot images map\n sampledPoints = set([divideGPS(d[LAT_LNG_COL_NUM]) for d in sampleData[1:]])\n plotSampledPointMap(list(sampledPoints), CONFIG[\"sampling\"][\"sampledPointsMapFilename\"])",
"def img_map(ts):\n image_map = \"\"\n texdata = bpy.data.textures[ts.texture]\n if ts.mapping == \"FLAT\":\n image_map = \"map_type 0 \"\n elif ts.mapping == \"SPHERE\":\n image_map = \"map_type 1 \"\n elif ts.mapping == \"TUBE\":\n image_map = \"map_type 2 \"\n\n # map_type 3 and 4 in development (?) (ENV in pov 3.8)\n # for POV-Ray, currently they just seem to default back to Flat (type 0)\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 3 \"\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 4 \"\n if ts.use_interpolation: # Available if image sampling class reactivated?\n image_map += \" interpolate 2 \"\n if texdata.extension == \"CLIP\":\n image_map += \" once \"\n # image_map += \"}\"\n # if ts.mapping=='CUBE':\n # image_map+= \"warp { cubic } rotate <-90,0,180>\"\n # no direct cube type mapping. Though this should work in POV 3.7\n # it doesn't give that good results(best suited to environment maps?)\n # if image_map == \"\":\n # print(\" No texture image found \")\n return image_map",
"def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq",
"def makeDataset(numberOfTrials, data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\n\tutils.create_directory(dataset_params.data_path)\n\tutils.create_directory(os.path.join(dataset_params.data_path, data_folder))\n\n\tallowedRadius = utils.defineShapePerimeter()\n\tcolorsRGB = utils.defineColorValues()\n\tshapeDict = utils.defineShapeSides()\n\tpadding = dataset_params.padding\n\n\tnum = 0\n\toutput_images = [[\"figNum\", \"shape\", \"color\", \"size\", \"background\", \"quadrant\", \"radius\"]]\n\tfor c in dataset_params.colors: # for all 7 foreground colors \n\t\tfor q in dataset_params.quadrants: # for all 4 quadratns \n\t\t\tfor s in dataset_params.shapes: # for all 5 shapes\n\t\t\t\tfor k in dataset_params.sizes: # for all 3 sizes\n\t\t\t\t\tfor b in dataset_params.backgrounds: # for all 3 background colors\n\t\t\t\t\t\tfor i in range(numberOfTrials):\n\t\t\t\t\t\t\tfileName = os.path.join(dataset_params.data_path, data_folder, str(num) + \".png\")\n\t\t\t\t\t\t\tpresentQuadrant = dataset_params.quadrants[q]\n\t\t\t\t\t\t\tradius = random.randint(allowedRadius[s][k][0],allowedRadius[s][k][1])\n\n\t\t\t\t\t\t\tif(presentQuadrant == 3):\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 2):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 1):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\txCenter = random.randint(xMin, xMax)\n\t\t\t\t\t\t\tyCenter = random.randint(yMin, yMax)\n\t\t\t\t\t\t\tcenter = [xCenter, yCenter]\n\n\t\t\t\t\t\t\tif(s == \"circle\"):\n\t\t\t\t\t\t\t\toutput_images.append([num, \"circle\", c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\t\timg = makeCircle(c, radius, center, b, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tn = shapeDict[s]\n\t\t\t\t\t\t\t\timg = makePolygon(center, n, radius, b, c, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\t\toutput_images.append([num, s, c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\tnum += 1\n\t\n\tprint(\"Number of image generated\", num)\n\n\tprint(\"Saving \" + data_type + \" data meta information to CSV ......\")\n\tdf = pd.DataFrame(output_images[1:], columns=output_images[0])\n\tdf.to_csv(label_file, index=False)\n\tprint(\"Saved \" + data_type + \" data meta information: \" + data_folder)\n\t\n\n\tprint(\"Saving \" + data_type + \" images data to npz(numpy) compressed file ......\")\n\tmake_npz_file(data_type)\n\tprint(\"Saved \" + data_type + \" images data to npz(numpy) compressed file!\")\n\t\n\treturn None",
"def kitti_training_set(DATASET_PATH, scenes=['city', 'residential', 'road'], is_rgb = False):\n KITTI_scenes = scenes\n\n if is_rgb == True:\n data_path = 'image_02/data'\n else:\n data_path = 'image_00/data'\n\n clips = []\n for scene in KITTI_scenes:\n scene_path = join(DATASET_PATH, scene)\n for s in sorted(listdir(scene_path)):\n if isdir(join(scene_path, s)):\n scene_date_path = join(scene_path, s)\n for d in sorted(listdir(scene_date_path)):\n if isdir(join(scene_date_path, d)):\n img_folder = join(join(scene_date_path, d), data_path)\n all_frames = []\n # loop over all the images in the folder (0.png,1.png,..,199.png)\n for i in sorted(listdir(img_folder)):\n if str(join(img_folder, i))[-3:] == \"png\":\n img_path = join(img_folder, i)\n all_frames.append(img_path)\n # get the 10-frames sequences from the list of images after applying data augmentation\n for stride in range(1, 2):\n clips.extend(get_clips_by_stride(stride=stride, frames_list=all_frames, sequence_size=11))\n return clips",
"def extract_data(filename, images_dir, output_dir, trials_idx, block_nums, goal_dict):\n num_images = len(trials_idx) * len(block_nums)\n f = h5py.File(os.path.join(output_dir, filename), 'w')\n X = f.create_dataset('X', (num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS*NUM_FRAMES), dtype=TYPE)\n Y = f.create_dataset('Y', (num_images, 2), dtype=TYPE)\n\n image_count = 0\n for trial_num in trials_idx:\n for block_num in block_nums:\n print('Blocks ' + str(block_num) + ' Trial ' + str(trial_num))\n for frame_num in xrange(0, NUM_FRAMES):\n temp = imread(images_dir+'RTr_Bl'+str(block_num)+'_'+str(trial_num)+'_'+str(frame_num)+IMAGE_FORMAT)\n temp = imresize(temp, [temp.shape[0]//DOWN_SAMPLE, temp.shape[1]//DOWN_SAMPLE, temp.shape[2]])\n X[image_count, 0:temp.shape[0], 0:temp.shape[1], frame_num*NUM_CHANNELS:(frame_num+1)*NUM_CHANNELS] = temp\n label = goal_dict['RTr_Bl'+str(block_num)+'_'+str(trial_num)]\n Y[image_count, :] = [label, 1-label]\n image_count += 1\n\n f.close()\n\n # TODO Use pixel depth normalization???\n #data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH",
"def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)",
"def __make_tms(self, abspath_dir_img):\n zoom_list = [ZOOM_MIN, ZOOM_MAX]\n tif = Utils.get_file(abspath_dir_img, is_tif=True)\n GenerateTMS.main(\n tif, BAND_R, BAND_G, BAND_B, abspath_dir_img, zoom_list,\n URL_TMS, DIR_TMS\n )",
"def runtrials_withmap(name, configs, test, N=20, gui=False, rosbag=True):\n print \"*** %s ***\" % name\n scriptdir = \"%s/%s\" % (os.getcwd(), name)\n if os.path.exists(scriptdir):\n print \"WARNING: directory %s already exists, skipping script\" % scriptdir\n return False\n os.mkdir(scriptdir)\n evman = init(scriptdir)\n evman.initBaseSystem(gui)\n evman.initRviz(gui)\n evman.initMapping(scriptdir)\n # --- INITIAL MAP ---\n mapdir = \"%s/map\" % os.getcwd()\n if os.path.exists(mapdir):\n print \"Found map directory, skipping map creation\"\n else:\n os.mkdir(mapdir)\n evman.initSearchMan(dl=0, log=scriptdir)\n evman.runTest()\n evman.stopSearchMan()\n evman.call(\"rosservice call /next_best_view_node/write_map %s/\" % mapdir)\n for n in range(N):\n psd = \"%s/psd\" % scriptdir\n os.mkdir(psd)\n xypose = None\n for cname, search_kwargs in configs:\n print \"# Evaluating n=%i %s\" % (n, cname)\n logdir = \"%s/%s_%i\" % (scriptdir, cname, n)\n os.mkdir(logdir)\n evman.resetGzworld()\n xypose = evman.setCamera(xypose)\n evman.call(\"rosservice call /next_best_view_node/load_map %s/\" % mapdir)\n evman.initSearchMan(psd=psd, log=logdir, **search_kwargs)\n if rosbag:\n evman.recordRosbag(logdir)\n evman.runTest(**test)\n evman.stopRosbag()\n evman.stopSearchMan()\n # --- CLEANUP ---\n try:\n shutil.rmtree(psd)\n except OSError:\n print \"WARNING: Expected to see persistent_samples, but there were none!\"\n evman.shutdown()\n return True",
"def generate_stimuli(num_trials=10, stim_dur=0.08, fs=24414., rms=0.01,\n ramp_noise=0.03, ramp_tone=0.06,\n output_dir=None, save_as='mat', rand_seed=0):\n if rand_seed is None:\n rng = np.random.RandomState()\n else:\n rng = np.random.RandomState(rand_seed)\n\n # check input arguments\n if save_as not in ['dict', 'wav', 'mat']:\n raise ValueError('\"save_as\" must be \"dict\", \"wav\", or \"mat\"')\n\n if fs is None:\n fs = get_tdt_rates()['25k']\n\n # General params:\n n = int(stim_dur * fs) # total number of samples\n t = np.linspace(0, stim_dur, n, endpoint=False) # time index for ploting\n\n#### make tone complex#########################################################\n\n tonecomp = np.zeros(24414. * stim_dur, float)\n fund = 250.0 # fundamental frequency\n for x in xrange(1, 5):\n freq = fund*x\n tonecomp = tonecomp + np.sin(freq * 2 * np.pi * np.arange\n (int(fs * stim_dur)) / float(fs))\n # windowing and onset/offset\n finalstim_tc = window_edges(tonecomp, fs, ramp_tone, -1, 'hamming')\n\n return finalstim_tc\n\n##### make noise burst#########################################################\n\n # add 50 points extra\n nb = np.random.normal(0, 1.0, int(fs * stim_dur) + 50)\n\n ### highpass cut-off freq of 1500Hz using 100th order Hamming ###\n b = sig.firwin(101, 1500. / (fs / 2), pass_zero=False) # False - highpass\n # nyq_rate = fs / 2\n # have to add '1' order\n filtered_stim = sig.lfilter(b, 1.0, nb)\n\n ### cut off extra 50 points from noiseburst ###\n filtered_stim = filtered_stim[50:]\n # windowing and onset/offset\n nb_ramped = window_edges(nb[50:], fs, ramp_noise, -1, 'hamming')\n finalstim_nb = np.multiply(nb_ramped, filtered_stim)\n\n return finalstim_nb",
"def train(scope=''):\n # with tf.Graph().as_default(), tf.device('/cpu:0'):\n # Create a variable to count the number of train() calls. This equals the\n # number of batches processed * FLAGS.num_gpus.\n\n train_dirs = FLAGS.datasets.split(':')\n\n\n _images, _shapes, _reference_shape, pca_model, shape_space, _inits= \\\n data_provider.load_images(train_dirs)\n idx_list = range(len(_images))\n\n def get_random_sample(num, rotation_stddev=10):\n # idx = np.random.randint(low=0, high=len(_images))\n images = []\n shapes = []\n inits =[]\n shape_3D = []\n inits_3D = []\n for i in range(FLAGS.batch_size):\n rand_num = np.random.randint(0, num)\n pixel_list = []\n shape_list = []\n # print('get_random_set:', idx)\n im = menpo.image.Image(_images[rand_num].transpose(2, 0, 1), copy=False)\n lms = _shapes[rand_num]\n\n init = _inits[rand_num]\n\n im.landmarks['PTS'] = lms\n\n im.landmarks['inital'] = init\n\n\n\n if np.random.rand() < .5:\n im = utils.mirror_image(im)\n\n if np.random.rand() < .5:\n theta = np.random.normal(scale=rotation_stddev)\n rot = menpo.transform.rotate_ccw_about_centre(lms, theta)\n im = im.warp_to_shape(im.shape, rot)\n\n pixels = im.pixels.transpose(1, 2, 0).astype('float32')\n\n shape = im.landmarks['PTS'].lms.points.astype('float32')\n\n init_2 = im.landmarks['inital'].lms.points.astype('float32')\n\n pixel_list.append(pixels)\n\n pixel_list = np.array(pixel_list)\n\n inits.append(init_2)\n images.append(pixel_list)\n shapes.append(shape)\n\n\n\n return images, shapes, inits\n\n\n\n\n print('Defining model...')\n\n # all placeholder for tf\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n\n with tf.device('/gpu:0'):\n\n actor = DDPG.Actor(sess, shape_space, k_nearest, LR_A, REPLACEMENT)\n\n critic = DDPG.Critic(sess, LR_C, GAMMA, REPLACEMENT)\n\n\n for var in tf.global_variables():\n print(var.op.name, var)\n print('------')\n for var in tf.trainable_variables():\n print(var.op.name, var)\n\n print('------')\n for var in tf.moving_average_variables():\n print(var.op.name, var)\n print('------')\n\n # Build an initialization operation to run below.\n init = tf.initialize_all_variables()\n print('Initializing variables...')\n sess.run(init)\n print('Initialized variables.')\n\n\n if FLAGS.pretrained_checkpoint_path:\n variables_to_restore = tf.get_collection(\n slim.variables.VARIABLES_TO_RESTORE)\n ckpt = tf.train.get_checkpoint_state(FLAGS.pretrained_checkpoint_path)\n var_name_list = ['convnet/conv_1/weights', 'convnet/conv_1/biases'\n , 'convnet/conv_2/weights', 'convnet/conv_2/biases'\n\n , 'rnn/FC/weights', 'rnn/FC/biases'\n , 'rnn/pred/weights', 'rnn/pred/biases'\n]\n for var in variables_to_restore:\n if '/'.join(var.op.name.split('/')[2:]) in var_name_list:\n restorer = tf.train.Saver({'/'.join(var.op.name.split('/')[2:]): var})\n restorer.restore(sess, os.path.join(FLAGS.pretrained_checkpoint_path,\n ckpt.model_checkpoint_path))\n print(var.op.name)\n print('%s: Pre-trained knn_2D model restored from %s' %\n (datetime.now(), FLAGS.pretrained_checkpoint_path))\n\n\n if OUTPUT_GRAPH:\n tf.summary.FileWriter(\"logs/graph\", sess.graph)\n summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, '')\n summary_writer = tf.summary.FileWriter(\"logs/\")\n saver = tf.train.Saver()\n\n num = shape_space.shape[0]\n loss_list = []\n loss_a_list = []\n idx_save = 0\n for step in range(Max_step):\n loss = 0\n random.shuffle(idx_list)\n image_train, shape_gt, inits = get_random_sample(len(idx_list))\n\n\n for i in 
range(MAX_iteration):\n b_a_hat = []\n\n b_a_ = []\n\n q = []\n e = []\n\n states_2 = []\n\n for bs in range(FLAGS.batch_size):\n\n image = image_train[bs]\n image = np.reshape(image,(1, 386 ,458,3)) # 383, 453 386,458\n\n '\"knn_2D initial\"'\n\n\n state_2D = np.reshape(actor.choose_action_hat(inits[bs].reshape(1, PATCHES_2D, 2), image),(1, 136))\n\n\n b_hat_k_nn = np.squeeze(\n actor.choose_action_hat(inits[bs].reshape(1, PATCHES_2D, 2), image))\n b_a_hat.append(b_hat_k_nn)\n\n k_nn_b_a_ = (\n actor.choose_action(inits[bs].reshape(1, PATCHES_2D, 2), b_hat_k_nn,\n image))\n\n if random.random() < 0.5:\n b_a_.append(np.squeeze(\n critic.choose_max(inits[bs].reshape(1, PATCHES_2D, 2), k_nn_b_a_,\n image)))\n else:\n b_a_.append(b_hat_k_nn)\n q_value = critic.q_value(inits[bs].reshape(1, PATCHES_2D, 2), b_hat_k_nn.reshape(1, PATCHES_2D, 2),\n image)\n error = rdn_model.normalized_rmse(\n inits[bs].reshape(1, PATCHES_2D, 2) + b_hat_k_nn.reshape(1, PATCHES_2D, 2),\n shape_gt[bs].reshape(1, PATCHES_2D, 2))\n states_2.append(state_2D)\n\n q.append(q_value)\n e.append(error)\n\n\n b_a_ = np.array(b_a_)\n\n b_a_hat = np.array(b_a_hat)\n\n images = np.array(np.squeeze(image_train))\n gts = np.array(np.squeeze(shape_gt))\n inits = np.array(np.squeeze(inits))\n\n e = np.vstack(e).ravel().mean()\n q = np.vstack(q).ravel().mean()\n\n\n loss = critic.learn_supervised(inits, b_a_, images, gts)\n\n grad_supervised, loss_a = actor.learn_supervise(inits, np.reshape(gts,\n [-1, PATCHES_2D, 2]).astype(\n 'float32'), images)\n\n s_, r = rdn_model.step(sess, b_a_, inits, gts)\n inits = s_\n r = r.mean()\n\n print('Train_2D_Step:', step, 'Iterate_Steps:', i, 'R:', r, '2D_q_value:', q, '2D_error:', e, 'critic loss knn_2D:', loss, 'actor loss knn_2D :', loss_a)\n if (idx_save % 100 == 0):\n checkpoint_path = os.path.join(FLAGS.train_dir +'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=idx_save)\n idx_save += 1",
"def main(unused_argv):\n # Collect the list of folder paths containing the input and golden frames.\n triplets_list = tf.io.gfile.listdir(_INPUT_DIR.value)\n\n triplet_dicts = []\n for triplet in triplets_list:\n triplet_dicts.append({\n image_key: os.path.join(_INPUT_DIR.value, triplet, image_basename)\n for image_key, image_basename in _INTERPOLATOR_IMAGES_MAP.items()\n })\n\n p = beam.Pipeline('DirectRunner')\n (p | 'ReadInputTripletDicts' >> beam.Create(triplet_dicts) # pylint: disable=expression-not-assigned\n | 'GenerateSingleExample' >> beam.ParDo(\n util.ExampleGenerator(_INTERPOLATOR_IMAGES_MAP))\n | 'WriteToTFRecord' >> beam.io.tfrecordio.WriteToTFRecord(\n file_path_prefix=_OUTPUT_TFRECORD_FILEPATH.value,\n num_shards=_NUM_SHARDS.value,\n coder=beam.coders.BytesCoder()))\n result = p.run()\n result.wait_until_finish()\n\n logging.info('Succeeded in creating the output TFRecord file: \\'%s@%s\\'.',\n _OUTPUT_TFRECORD_FILEPATH.value, str(_NUM_SHARDS.value))",
"def main(num_trials, num_actions):\n\tfor i in xrange(int(num_trials)):\n\t\ttrial(i+1, int(num_actions))"
]
| [
"0.6116985",
"0.6031568",
"0.5918441",
"0.5664884",
"0.5622315",
"0.5615459",
"0.5599443",
"0.5529613",
"0.55038154",
"0.54847896",
"0.5482034",
"0.5451923",
"0.5441349",
"0.5423713",
"0.5408815",
"0.5383447",
"0.5371646",
"0.5351493",
"0.53448683",
"0.5335508",
"0.52934724",
"0.5286916",
"0.52813494",
"0.5272756",
"0.5268043",
"0.52259815",
"0.51973027",
"0.5188907",
"0.51681256",
"0.51679724"
]
| 0.7455843 | 0 |
Calculates the fuzzy match of needle in haystack, using a modified version of the Levenshtein distance algorithm. The function is modified from the levenshtein function in the bktree module by Adam Hupp | def __fuzzy_substring(needle, haystack):
m, n = len(needle), len(haystack)
# base cases
    if m == 1:
        # single-character needle: cost 0 where it occurs, len(haystack) elsewhere
        row = [len(haystack)] * len(haystack)
        if needle in haystack:
            row[haystack.find(needle)] = 0
        return row
if not n:
return m
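    # zero-cost first row lets the needle start matching at any position in haystack (the "substring" modification)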
row1 = [0] * (n + 1)
for i in range(0, m):
row2 = [i + 1]
for j in range(0, n):
cost = (needle[i] != haystack[j])
row2.append(min(row1[j + 1] + 1, # deletion
row2[j] + 1, # insertion
row1[j] + cost) # substitution
)
row1 = row2
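    # row1[j] holds the lowest edit cost of aligning needle with a suffix of haystack[:j]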
return row1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def levenshtein_normalised(str1, str2):\n\treturn levenshtein(str1, str2, normalise=True)",
"def levenshtein(str1, str2, normalise=False):\n\ttmp = Levenshtein.distance(str1, str2)\n\tif(normalise) and (len(str1) + len(str2)): tmp /= max(len(str1), len(str2))\n\treturn tmp",
"def get_closest_levenshtein(word, possible_words, threshold):\n result = None\n min_distance = 10\n for possible_word in possible_words:\n word_distance = distance(word, possible_word)\n if word_distance < min_distance:\n result = possible_word\n min_distance = word_distance\n result = result if min_distance < threshold else None\n return result, min_distance",
"def fuzzy_score_string(first_string, second_string):\n score = 0\n\n if len(first_string) < len(second_string):\n shorter, longer = (first_string, second_string)\n window_length = len(shorter)\n\n num_iterations = len(longer) - len(shorter) + 1\n\n for position in range(0, num_iterations):\n window = longer[position:position + window_length]\n l_ratio = Levenshtein.ratio(window, shorter) * 100\n\n if l_ratio > 60:\n result = statistics.mean(\n [100 - Levenshtein.distance(window, shorter) * 15, l_ratio, l_ratio])\n\n else:\n result = l_ratio\n\n if result > score:\n score = result\n\n else:\n l_ratio = Levenshtein.ratio(first_string, second_string) * 100\n score = statistics.mean(\n [100 - Levenshtein.distance(first_string, second_string) * 15, l_ratio, l_ratio])\n\n simple = fuzz.ratio(first_string, second_string)\n partial = fuzz.partial_ratio(first_string, second_string)\n sort = fuzz.token_sort_ratio(first_string, second_string)\n set_ratio = fuzz.token_set_ratio(first_string, second_string)\n\n score = max([score, simple, partial, sort, set_ratio])\n\n if score < 75:\n score = 0\n\n return score * 0.85",
"def levenshtein_distance(str1, str2):\n m = len(str1)\n n = len(str2)\n lensum = float(m + n)\n d = [] \n for i in range(m+1):\n d.append([i]) \n del d[0][0] \n for j in range(n+1):\n d[0].append(j) \n for j in range(1,n+1):\n for i in range(1,m+1):\n if str1[i-1] == str2[j-1]:\n d[i].insert(j,d[i-1][j-1]) \n else:\n minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2) \n d[i].insert(j, minimum)\n ldist = d[-1][-1]\n ratio = (lensum - ldist)/lensum\n return {'distance':ldist, 'ratio':ratio}",
"def fuzzy_match_strings(ref, val):\n if not ref or not val:\n return 0\n ref_q = to_q(ref)\n val_q = to_q(val)\n if ref_q or val_q:\n return 100 if ref_q == val_q else 0\n simplified_val = unidecode(val).lower()\n simplified_ref = unidecode(ref).lower()\n\n # Return symmetric score\n r1 = fuzz.token_sort_ratio(simplified_val, simplified_ref)\n r2 = fuzz.token_sort_ratio(simplified_ref, simplified_val)\n r2 = r1\n return int(0.5*(r1+r2))",
"def levenshtein(s1, s2):\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n \n return previous_row[-1]",
"def levenshtein_distance(s1,s2):\n\n\t\tif len(s1) < len(s2):\n\t\t\treturn Searcher.levenshtein_distance(s2, s1)\n\n\t\t# len(s1) >= len(s2)\n\t\tif len(s2) == 0:\n\t\t\treturn len(s1)\n\n\t\tprevious_row = range(len(s2) + 1)\n\t\tfor i, c1 in enumerate(s1):\n\t\t\tcurrent_row = [i + 1]\n\t\t\tfor j, c2 in enumerate(s2):\n\t\t\t\tinsertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n\t\t\t\tdeletions = current_row[j] + 1 # than s2\n\t\t\t\tsubstitutions = previous_row[j] + (c1 != c2)\n\t\t\t\tcurrent_row.append(min(insertions, deletions, substitutions))\n\t\t\tprevious_row = current_row\n\t\t\n\t\treturn previous_row[-1]",
"def mismatch_search(haystack, needle, mismatches=1):\n haystack = haystack.encode('utf-8');\n needle = needle.encode('utf-8');\n if not using_tre:\n raise RBSError(\"tre isn't loaded.\")\n fz = tre.Fuzzyness(maxerr=mismatches, maxsub=mismatches,\n maxdel=0, maxins=0)\n needle = \".*(%s).*\" % needle\n pt = tre.compile(needle, tre.EXTENDED)\n incidence = 0\n while True:\n m = pt.search(haystack, fz)\n if m:\n index = m.groups()[1][1]-1\n incidence += 1\n haystack = haystack[:index]\n else:\n break\n return incidence",
"def levenshtein(w1, w2):\n\n if len(w1) < len(w2):\n # check if length of word1 is smaller than word2.\n # if so, call function and switch parameters\n return levenshtein(w2, w1)\n elif len(w1) == 0:\n # if the length of word1 equals 0, that means that\n # the Lev' distance is the length of word2\n return len(w2)\n elif len(w2) == 0:\n # if the length of word2 equals 0, that means that\n # the Lev' distance is the length of word1\n return len(w1)\n elif w1 == w2:\n # check if words are simply the same\n return 0\n\n # thanks to the check above, we can assume that w2 is the longest word\n # we use this information to determine the range of 'previous'\n previous = range(len(w2) + 1)\n\n # iterate over the characters of the first word\n for a, char1 in enumerate(w1):\n current = [a + 1]\n # iterate over the characters of the second word\n for b, char2 in enumerate(w2):\n inserts = previous[b + 1] + 1\n deletions = current[b] + 1\n subs = previous[b] + (char1 != char2)\n current.append(min(inserts, deletions, subs))\n previous = current\n return previous[-1]",
"def distance(str1, str2):\n return levenshtein.normalized_distance(str1, str2)",
"def levenshteinDistance(s1, s2):\n singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',\n LEFT:'4', RIGHT:'6',\n UPLEFT:'7', UP:'8', UPRIGHT:'9'}\n\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])\n else:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]",
"def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost",
"def kmp_strstr(haystack, needle):\r\n def computeLPSArray(word):\r\n # The explaination of this function is in _LPSArray()\r\n lps = [0] * len(word)\r\n wordIdx = 0\r\n delta = 1\r\n while delta < len(word):\r\n while delta < len(word) and word[delta] == word[wordIdx]:\r\n lps[delta] = wordIdx + 1\r\n wordIdx += 1\r\n delta += 1\r\n if wordIdx != 0:\r\n wordIdx = lps[wordIdx-1]\r\n else:\r\n delta += 1\r\n return lps\r\n\r\n # handle special cases\r\n if haystack == needle:\r\n return [0]\r\n elif not needle:\r\n return [0]\r\n elif not haystack:\r\n return []\r\n # init lps table\r\n lps = computeLPSArray(needle)\r\n ans = []\r\n # main loop\r\n i_haystack = 0\r\n i_needle = 0\r\n while i_haystack < len(haystack):\r\n if haystack[i_haystack] == needle[i_needle]:\r\n i_haystack += 1\r\n i_needle += 1\r\n # print i_needle, i_haystack\r\n if i_needle == len(needle):\r\n ans.append(i_haystack - i_needle)\r\n i_needle = lps[i_needle-1]\r\n elif i_needle != 0:\r\n i_needle = lps[i_needle-1]\r\n else:\r\n i_haystack += 1\r\n\r\n return ans",
"def levenshtein(proposed, gold, normalize=False):\n lev_densities = []\n for x, y in zip(proposed, gold):\n score = editdistance.eval(x, y)\n if normalize:\n score /= len(y)\n lev_densities.append(score)\n return sum(lev_densities) / len(lev_densities)",
"def get_best_match(query, corpus, step=4, flex=3, case_sensitive=False, verbose=False):\n\n def _match(a, b):\n \"\"\"Compact alias for SequenceMatcher.\"\"\"\n return SequenceMatcher(None, a, b).ratio()\n\n def scan_corpus(step):\n \"\"\"Return list of match values from corpus-wide scan.\"\"\"\n match_values = []\n\n m = 0\n while m + qlen - step <= len(corpus):\n match_values.append(_match(query, corpus[m : m-1+qlen]))\n if verbose:\n print(query, \"-\", corpus[m: m + qlen], _match(query, corpus[m: m + qlen]))\n m += step\n\n return match_values\n\n def index_max(v):\n \"\"\"Return index of max value.\"\"\"\n return max(range(len(v)), key=v.__getitem__)\n\n def adjust_left_right_positions():\n \"\"\"Return left/right positions for best string match.\"\"\"\n # bp_* is synonym for 'Best Position Left/Right' and are adjusted \n # to optimize bmv_*\n p_l, bp_l = [pos] * 2\n p_r, bp_r = [pos + qlen] * 2\n\n # bmv_* are declared here in case they are untouched in optimization\n bmv_l = match_values[p_l / step]\n bmv_r = match_values[p_l / step]\n\n for f in range(flex):\n ll = _match(query, corpus[p_l - f: p_r])\n if ll > bmv_l:\n bmv_l = ll\n bp_l = p_l - f\n\n lr = _match(query, corpus[p_l + f: p_r])\n if lr > bmv_l:\n bmv_l = lr\n bp_l = p_l + f\n\n rl = _match(query, corpus[p_l: p_r - f])\n if rl > bmv_r:\n bmv_r = rl\n bp_r = p_r - f\n\n rr = _match(query, corpus[p_l: p_r + f])\n if rr > bmv_r:\n bmv_r = rr\n bp_r = p_r + f\n\n if verbose:\n print(\"\\n\" + str(f))\n print(\"ll: -- value: %f -- snippet: %s\" % (ll, corpus[p_l - f: p_r]))\n print(\"lr: -- value: %f -- snippet: %s\" % (lr, corpus[p_l + f: p_r]))\n print(\"rl: -- value: %f -- snippet: %s\" % (rl, corpus[p_l: p_r - f]))\n print(\"rr: -- value: %f -- snippet: %s\" % (rl, corpus[p_l: p_r + f]))\n\n return bp_l, bp_r, _match(query, corpus[bp_l : bp_r])\n\n if not case_sensitive:\n query = query.lower()\n corpus = corpus.lower()\n\n qlen = len(query)\n\n if flex >= qlen/2:\n print(\"Warning: flex exceeds length of query / 2. Setting to default.\")\n flex = 3\n\n match_values = scan_corpus(step)\n pos = index_max(match_values) * step\n\n pos_left, pos_right, match_value = adjust_left_right_positions()\n\n return corpus[pos_left: pos_right].strip(), match_value",
"def lev_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Levenshtein()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the distance measure.\n return measure.get_raw_score(s1, s2)",
"def iterative_levenshtein(s, t):\n rows = len(s)+1\n cols = len(t)+1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings \n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n \n for col in range(1, cols):\n for row in range(1, rows):\n if s[row-1] == t[col-1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row-1][col] + 1, # deletion\n dist[row][col-1] + 1, # insertion\n dist[row-1][col-1] + cost) # substitution\n #for r in range(rows):\n #print(dist[r])\n \n \n return dist[row][col]",
"def edit_distance(str1, str2, reconstruct_answer=False, method=alignments.Levinshtein(),\n swap_case_on_mismatch=True):\n method = alignments.Levinshtein() if method is None else method\n return align(str1, str2, reconstruct_answer, method, swap_case_on_mismatch)",
"def levenshtein_distance(first, second):\n if len(first) > len(second):\n first, second = second, first\n if len(second) == 0:\n return len(first)\n first_length = len(first) + 1\n second_length = len(second) + 1\n distance_matrix = [range(second_length) for x in range(first_length)]\n for i in range(1, first_length):\n for j in range(1, second_length):\n deletion = distance_matrix[i-1][j] + 1\n insertion = distance_matrix[i][j-1] + 1\n substitution = distance_matrix[i-1][j-1]\n if first[i-1] != second[j-1]:\n substitution += 1\n distance_matrix[i][j] = min(insertion, deletion, substitution)\n\n return distance_matrix[first_length-1][second_length-1]",
"def levenshtein(seq1: str, seq2: str) -> int:\n if seq1 == \"\":\n return len(seq2)\n if seq2 == \"\":\n return len(seq1)\n if seq1[-1] == seq2[-1]:\n cost = 0\n else:\n cost = 1\n \n result = min([levenshtein(seq1[:-1], seq2) + 1,\n levenshtein(seq1, seq2[:-1]) + 1,\n levenshtein(seq1[:-1], seq2[:-1]) + cost ])\n return result",
"def _levenshtein_distance(t1: Trace, t2: Trace):\n if t1.length > t2.length:\n t1, t2 = t2, t1\n\n distances = range(t1.length + 1)\n for i2, c2 in enumerate(t2.event_list):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(t1.event_list):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]",
"def Levenshtein(a, b):\n v0 = list(range(len(b)+1))\n v1 = list(range(len(b)+1)) # Or whatever.\n\n for i in range(len(a)):\n v1[0] = i + 1\n\n for j in range(len(b)):\n deletionCost = v0[j + 1] + 1\n insertionCost = v1[j] + 1\n substitutionCost = v0[j] if a[i] == b[j] else v0[j]+1\n v1[j + 1] = min(deletionCost, insertionCost, substitutionCost)\n\n v1, v0 = v0, v1\n return v0[len(b)]",
"def damerau_levenshtein_distance(s1, s2):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n infinite = n1 + n2\n\n char_arr = defaultdict(int)\n dp = [[0] * (n2 + 2) for _ in range(n1 + 2)]\n\n dp[0][0] = infinite\n for i in range(0, n1 + 1):\n dp[i + 1][0] = infinite\n dp[i + 1][1] = i\n for i in range(0, n2 + 1):\n dp[0][i + 1] = infinite\n dp[1][i + 1] = i\n\n for i in range(1, n1 + 1):\n db = 0\n for j in range(1, n2 + 1):\n i1 = char_arr[s2[j - 1]]\n j1 = db\n cost = 1\n if s1[i - 1] == s2[j - 1]:\n cost = 0\n db = j\n\n dp[i + 1][j + 1] = min(dp[i][j] + cost,\n dp[i + 1][j] + 1,\n dp[i][j + 1] + 1,\n dp[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1))\n char_arr[s1[i - 1]] = i\n\n return dp[n1 + 1][n2 + 1]",
"def get_fuzzy_match(object, answer, threshold=80):\n answer_phrase = generate_ngrams(answer)\n if answer_phrase:\n best_match = [fuzz.ratio(object, phr) for phr in answer_phrase]\n if np.max(best_match)>threshold:\n return np.max(best_match), answer_phrase[np.argmax(best_match)]\n else:\n return 0,''\n else:\n return 0, ''",
"def test_string_similarity_constraint():\n f = SimilarityConstraint(func=LevenshteinDistance(), pred=GreaterThan(0.5))\n assert f('BROOKLYN', 'BROKLYN')\n assert not f('BROOKLYN', 'QUEENS')",
"def fuzzy_match(replist, wordset):\n matches = []\n for rep in replist:\n for word in wordset:\n matches.append((Levenshtein.distance(str(rep), word), word, rep))\n\n matches.sort(key=lambda x: (x[0], x[2].weight))\n try:\n return str(matches[0][2]), matches[0][1]\n except IndexError:\n return (\"\", \"\")",
"def iterative_levenshtein(self, w1, d1, w2, d2):\n rows = len(w1) + 1\n cols = len(w2) + 1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n\n for col in range(1, cols):\n for row in range(1, rows):\n if w1[row - 1] == w2[col - 1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row - 1][col] + 1, # deletion\n dist[row][col - 1] + 1, # insertion\n dist[row - 1][col - 1] + cost) # substitution\n return dist[row][col] < 5",
"def similarity(a, b):\n distance = Levenshtein.distance(a, b)\n return 1 - (distance / max((len(a), len(b))))",
"def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]"
]
| [
"0.65816826",
"0.64453566",
"0.6444688",
"0.64391017",
"0.63947976",
"0.63769424",
"0.63476413",
"0.6301144",
"0.6251837",
"0.61857057",
"0.6146241",
"0.6121829",
"0.6104246",
"0.6104119",
"0.607645",
"0.60713935",
"0.6024334",
"0.60240644",
"0.60073864",
"0.5990242",
"0.5945959",
"0.5916602",
"0.59081924",
"0.58841723",
"0.58659184",
"0.5864661",
"0.58642524",
"0.5852183",
"0.58376956",
"0.58364713"
]
| 0.66017675 | 0 |
Sets the check_revocation of this TypesConsoleCertificateSettings. | def check_revocation(self, check_revocation):
self._check_revocation = check_revocation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_compatibility_check(check_status):\r\n if Config.loaded:\r\n raise Exception(\"compatibility_check must be set before before \" \\\r\n \"using any other functionalities in libclang.\")\r\n\r\n Config.compatibility_check = check_status",
"def svn_client_revprop_set(char_propname, svn_string_t_propval, char_URL, svn_opt_revision_t_revision, svn_revnum_t_set_rev, svn_boolean_t_force, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def svn_client_revprop_set2(char_propname, svn_string_t_propval, svn_string_t_original_propval, char_URL, svn_opt_revision_t_revision, svn_revnum_t_set_rev, svn_boolean_t_force, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def with_manual_check_always(self):\n self.__manual_check = constants.ALWAYS\n return self",
"def _set_enable_peer_as_check(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"enable-peer-as-check\", rest_name=\"enable-peer-as-check\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Disable routes advertise between peers in same AS', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"enable_peer_as_check must be of a type compatible with empty\"\"\",\n 'defined-type': \"empty\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"enable-peer-as-check\", rest_name=\"enable-peer-as-check\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Disable routes advertise between peers in same AS', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)\"\"\",\n })\n\n self.__enable_peer_as_check = t\n if hasattr(self, '_set'):\n self._set()",
"def setOverrideCertificateErrors(self, override: bool) -> Awaitable[Dict]:\n return self.client.send(\n \"Security.setOverrideCertificateErrors\", {\"override\": override}\n )",
"def verify_server_certificate(self, verify_server_certificate):\n\n self._verify_server_certificate = verify_server_certificate",
"def on_resume_toggled(self, _):\n if not self.resume.get_active():\n self._previous_force_recheck = self.force_recheck.get_active()\n self.force_recheck.set_sensitive(False)\n self.force_recheck.set_active(True)\n else:\n self.force_recheck.set_active(self._previous_force_recheck)\n self.force_recheck.set_sensitive(True)",
"def use_skip_ssl_verify(self, val=True, force=False):\n if val:\n self.ssl_verify = False\n else:\n self.ssl_verify = True\n\n if force:\n self.force_skip_ssl_verify = True\n else:\n self.force_skip_ssl_verify = False\n\n return val",
"def with_manual_check_fallback(self):\n self.__manual_check = constants.FALLBACK\n return self",
"def set_chksum(self, doc, chksum):\n doc.ext_document_references[-1].check_sum = checksum_from_sha1(\n chksum)",
"def rule_pay_roll_run_approval(self, rule_pay_roll_run_approval):\n allowed_values = [\"Automatic\", \"Manual\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and rule_pay_roll_run_approval not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `rule_pay_roll_run_approval` ({0}), must be one of {1}\" # noqa: E501\n .format(rule_pay_roll_run_approval, allowed_values)\n )\n\n self._rule_pay_roll_run_approval = rule_pay_roll_run_approval",
"def set_cropping(self, crop=True):\n self._crop = crop\n self._final = None # Force rebuild",
"def revoke_from_menu(self):\n\n csha1_vhlist = self._get_installed_locations()\n certs = self._populate_saved_certs(csha1_vhlist)\n\n while True:\n if certs:\n code, selection = revocation.display_certs(certs)\n\n if code == display_util.OK:\n revoked_certs = self._safe_revoke([certs[selection]])\n # Since we are currently only revoking one cert at a time...\n if revoked_certs:\n del certs[selection]\n elif code == display_util.HELP:\n revocation.more_info_cert(certs[selection])\n else:\n return\n else:\n logger.info(\n \"There are not any trusted Let's Encrypt \"\n \"certificates for this server.\")\n return",
"def auto_renew(self, auto_renew):\n\n self._auto_renew = auto_renew",
"def _set_lsp_config_type_bypass(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-type-bypass\", rest_name=\"lsp-config-type-bypass\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_type_bypass must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-type-bypass\", rest_name=\"lsp-config-type-bypass\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_type_bypass = t\n if hasattr(self, '_set'):\n self._set()",
"def set_check_paths(self):\n\n if self.path_listbox.size() > 0:\n self.root.docs_paths = [self.path_listbox.get(idx) for idx in range(self.path_listbox.size())]\n else:\n self.root.docs_paths = []",
"def __init__(__self__, *,\n autodefined_reverse_flag: pulumi.Input['ResolverConfigAutodefinedReverseFlag'],\n resource_id: pulumi.Input[str]):\n pulumi.set(__self__, \"autodefined_reverse_flag\", autodefined_reverse_flag)\n pulumi.set(__self__, \"resource_id\", resource_id)",
"def checklists(self, checklists):\n\n self._checklists = checklists",
"def rotor_setting(self, rotor_setting):\n rotor_setting = self.valid_ring_character(rotor_setting)\n self._rot_offset = self._charset.index(rotor_setting)",
"def _set_lsp_config_number_of_retries_configured(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-number-of-retries-configured\", rest_name=\"lsp-config-number-of-retries-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_number_of_retries_configured must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-number-of-retries-configured\", rest_name=\"lsp-config-number-of-retries-configured\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_number_of_retries_configured = t\n if hasattr(self, '_set'):\n self._set()",
"def set_only_check_type(self, only_check_type=0):\r\n return self._arm.set_only_check_type(only_check_type)",
"def with_manual_check_never(self):\n self.__manual_check = constants.NEVER\n return self",
"def set_checklists_status(auth, args):\n global checklists_on\n\n if auth['checklists'] == \"true\":\n checklists_on = True\n else:\n checklists_on = False\n\n # reverse the config setting if specified by the CLI option\n# if args['--checklists']:\n# checklists_on = not checklists_on\n\n return",
"def SetCanVeto(self, can_veto):\r\n\r\n self.canveto_flag = can_veto",
"def published_committee_reference_parity_check(self, published_committee_reference_parity_check):\n\n self._published_committee_reference_parity_check = published_committee_reference_parity_check",
"def __setAutoSpellChecking(self):\n enabled = self.autoSpellCheckAct.isChecked()\n Preferences.setEditor(\"AutoSpellCheckingEnabled\", enabled)\n for editor in self.editors:\n editor.setAutoSpellChecking()",
"def setConsistencyChecks(self, *args):\n return _libsbml.SBMLDocument_setConsistencyChecks(self, *args)",
"def load_trust_anchors(self):\n\n utils.write_to_stdout(f\"{datetime.now()}: Loading RPKI Validator\\n\",\n logging.root.level)\n time.sleep(60)\n while self._get_validation_status() is False:\n time.sleep(10)\n utils.write_to_stdout(\".\", logging.root.level)\n utils.write_to_stdout(\"\\n\", logging.root.level)\n self._wait(30, \"Waiting for upload to bgp preview\")",
"def edit_config_verify(self,\n raw_response: Any,\n *args,\n **kwargs) -> bool:\n pass"
]
| [
"0.50971395",
"0.45247346",
"0.4501634",
"0.44256946",
"0.43760172",
"0.43499175",
"0.43033206",
"0.42995754",
"0.42879218",
"0.42419428",
"0.42351454",
"0.42196298",
"0.41943878",
"0.41834015",
"0.4180629",
"0.4163931",
"0.4148873",
"0.41426238",
"0.41319498",
"0.41168395",
"0.41144472",
"0.4064841",
"0.40640843",
"0.40596324",
"0.40430415",
"0.4030526",
"0.40212867",
"0.4011426",
"0.40088424",
"0.4003236"
]
| 0.7481139 | 0 |
Sets the console_ca_cert of this TypesConsoleCertificateSettings. | def console_ca_cert(self, console_ca_cert):
self._console_ca_cert = console_ca_cert | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ca_cert(self, ca_cert):\n\n self._ca_cert = ca_cert",
"def console_custom_cert(self, console_custom_cert):\n\n self._console_custom_cert = console_custom_cert",
"def ca_cert_path(self, ca_cert_path: str):\n\n self._ca_cert_path = ca_cert_path",
"def save_ca():\n cert_file = os.environ.get('HOME') + '/.cat_installer/ca.pem'\n debug(\"saving cert\")\n with open(cert_file, 'w') as cert:\n cert.write(Config.CA + \"\\n\")",
"def cert(self, value):\n self._cert = value",
"def ca_certificate(self) -> str:\n return pulumi.get(self, \"ca_certificate\")",
"def ca_certificate(self) -> str:\n return pulumi.get(self, \"ca_certificate\")",
"def ca_certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ca_certificate\")",
"def test_set_one_ca_list(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.set_client_ca_list([cadesc])\n return [cadesc]\n\n self._check_client_ca_list(single_ca)",
"def server_auth_ca_ids(self, server_auth_ca_ids):\n\n self._server_auth_ca_ids = server_auth_ca_ids",
"def certificate(self, certificate):\n\n self._certificate = certificate",
"def ca_cert_path(self) -> str:\n return self._ca_cert_path",
"def org_apache_felix_https_clientcertificate(self, org_apache_felix_https_clientcertificate: ConfigNodePropertyDropDown):\n\n self._org_apache_felix_https_clientcertificate = org_apache_felix_https_clientcertificate",
"def test_set_after_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n\n def set_replaces_add_ca(ctx):\n ctx.add_client_ca(clcert)\n ctx.set_client_ca_list([cadesc])\n ctx.add_client_ca(secert)\n return [cadesc, sedesc]\n\n self._check_client_ca_list(set_replaces_add_ca)",
"def install_ca():\n require_root()\n\n config.proxy.install_ca_cert()\n log.info('OK')",
"def client_cert(self, client_cert):\n\n self._client_cert = client_cert",
"def AddCaCertificateFlag(parser, required=False):\n help_text = \"\"\"\\\n x509 PEM-encoded certificate of the CA that signed the database\n server's certificate. The replica will use this certificate to verify\n it's connecting to the correct host. Database Migration Service encrypts the\n value when storing it.\n \"\"\"\n parser.add_argument('--ca-certificate', help=help_text, required=required)",
"def test_set_and_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n cldesc = clcert.get_subject()\n\n def mixed_set_add_ca(ctx):\n ctx.set_client_ca_list([cadesc, sedesc])\n ctx.add_client_ca(clcert)\n return [cadesc, sedesc, cldesc]\n\n self._check_client_ca_list(mixed_set_add_ca)",
"def _check_ca_certificate(self):\n if not os.path.exists(self._ca_certificate_path):\n with open(self._ca_certificate_path, \"w\") as f:\n f.write(ssl.get_server_certificate((\"127.0.0.1\", self._app_port), ssl_version=ssl.PROTOCOL_TLSv1_2))",
"def initca(ca_dir):\n click.echo('Initiliasing new CA in %s' % ca_dir)\n sca = SimpleCA(ca_dir)\n try:\n sca.init_ca()\n except FileExistsError as err:\n click.echo('The CA directory (%s) exists, not doing anything' %\n err.filename)\n exit(1)",
"def server_root_ca_certificate(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_root_ca_certificate\")",
"def test_reset_ca_list(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n cldesc = clcert.get_subject()\n\n def changed_ca(ctx):\n ctx.set_client_ca_list([sedesc, cldesc])\n ctx.set_client_ca_list([cadesc])\n return [cadesc]\n\n self._check_client_ca_list(changed_ca)",
"def gen_ca():\n require_root()\n\n config.proxy.gen_ca_certs()\n log.info('OK')",
"def ca(self):\n\n return self._basic_constraints['ca'].native",
"def insert_ca_certs_into_systemwide_ca_store(self, ca_certs):\n\n raise NotImplementedError()",
"def get_ssl_ca_settings():\n ca_data = {}\n https_service_endpoints = config('https-service-endpoints')\n if (https_service_endpoints and\n bool_from_string(https_service_endpoints)):\n # Pass CA cert as client will need it to\n # verify https connections\n ca = get_ca(user=SSH_USER)\n ca_bundle = ca.get_ca_bundle()\n ca_data['https_keystone'] = 'True'\n ca_data['ca_cert'] = b64encode(ca_bundle)\n return ca_data",
"def ssl(self, cainfo=None, verify=True, cert=None, key=None):\n if cainfo:\n self.curl.setopt(pycurl.CAINFO, cainfo)\n\n if verify == False:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)\n else:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 1)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)\n if cert:\n #self.curl.setopt(pycurl.SSLCERTTYPE, \"DER\")\n self.curl.setopt(pycurl.SSLCERT, cert)\n if key:\n self.curl.setopt(pycurl.SSLKEY, key)",
"def test_load_client_ca(self, context, ca_file):\n context.load_client_ca(ca_file)",
"def tobacco(self, tobacco):\n\n self.logger.debug(\"In 'tobacco' setter.\")\n\n self._tobacco = tobacco",
"def test_one_add_client_ca(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n cadesc = cacert.get_subject()\n\n def single_ca(ctx):\n ctx.add_client_ca(cacert)\n return [cadesc]\n\n self._check_client_ca_list(single_ca)"
]
| [
"0.68265325",
"0.65130776",
"0.6440239",
"0.58595866",
"0.5228995",
"0.5037131",
"0.5037131",
"0.5013002",
"0.490777",
"0.48871157",
"0.48593655",
"0.48507708",
"0.48467252",
"0.4803529",
"0.48032707",
"0.47784925",
"0.4713782",
"0.46828216",
"0.46399626",
"0.4594442",
"0.456615",
"0.455402",
"0.45468968",
"0.453434",
"0.45045602",
"0.45037642",
"0.4468689",
"0.44489378",
"0.4433251",
"0.44297794"
]
| 0.85301137 | 0 |
Sets the console_custom_cert of this TypesConsoleCertificateSettings. | def console_custom_cert(self, console_custom_cert):
self._console_custom_cert = console_custom_cert | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def console_ca_cert(self, console_ca_cert):\n\n self._console_ca_cert = console_ca_cert",
"def set_custom_property(self, sNewVmCustomProperty):\n\t\tcall_sdk_function('PrlVmCfg_SetCustomProperty', self.handle, sNewVmCustomProperty)",
"def custom_compliance_domain(self, custom_compliance_domain):\n\n self._custom_compliance_domain = custom_compliance_domain",
"def custom_domain(self, custom_domain):\n self._custom_domain = custom_domain",
"def custom_compliance_domain_id(self, custom_compliance_domain_id):\n\n self._custom_compliance_domain_id = custom_compliance_domain_id",
"def custom_compliance_standard(self, custom_compliance_standard):\n\n self._custom_compliance_standard = custom_compliance_standard",
"def custom_string(self, custom_string):\n\n self._custom_string = custom_string",
"def custom(self, custom):\n self._context[\"custom\"] = custom",
"def custom_attributes(self, custom_attributes):\n\n self._custom_attributes = custom_attributes",
"def token_cert(self, token_cert):\n\n self._token_cert = token_cert",
"def custom_data(self, custom_data):\n\n self._custom_data = custom_data",
"def set_custom_variable(self, key, value):\n self.logger.info(\"Set custom variable : %s:%s\" % (key, value))\n\n try:\n if 'custom_variables' not in self._answer_payload:\n self._answer_payload['custom_variables'] = {}\n self._answer_payload['custom_variables'][key] = value\n except Exception as e:\n self.logger.error(\"Error on set custom variables : %s\" % e)",
"def custom_compliance_standard_id(self, custom_compliance_standard_id):\n\n self._custom_compliance_standard_id = custom_compliance_standard_id",
"def set_custom_value(self, value):\n self.logger.info(\"Set custom value : %s\" % value)\n\n try:\n self._answer_payload['custom_value'] = value\n except Exception as e:\n self.logger.error(\"Error on set custom variables : %s\" % e)",
"def custom_tags(self, custom_tags):\n\n self._custom_tags = custom_tags",
"def run_custom_config_command(self, context, custom_command):\n with LoggingSessionContext(context) as logger, LogCommand(\n logger, \"run custom config command\"\n ):\n api = CloudShellSessionContext(context).get_api()\n\n resource_config = FirewallResourceConfig.from_context(\n self.SHELL_NAME, context, api, self.SUPPORTED_OS\n )\n\n cli_configurator = CheckpointCliConfigurator(\n self._cli, resource_config, logger\n )\n\n run_command_flow = RunCommandFlow(logger, cli_configurator)\n return run_command_flow.run_custom_config_command(custom_command)",
"def cert(self, value):\n self._cert = value",
"def setCustomVar(self, index, name, value, opt_scope=None):\n self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)",
"def _use_custom_config(self, standard_conf_path):\n conf_filename = os.path.basename(standard_conf_path)\n custom_conf_expected_path = CUSTOM_CONFIG_DIR + '/' + self._get_tempdir() + '/' + conf_filename\n shutil.copy(custom_conf_expected_path,\n self._get_tempdir() + '/' + standard_conf_path)",
"def enable_custom_ca_trust(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_custom_ca_trust\")",
"def set_custom(self, custom):\n custom = clamp(custom, 1, 12)\n self._state.mode = custom\n self.send_command(Command.SET_CUSTOM, [int(custom)])",
"def custom_signatures(self, custom_signatures):\n\n self._custom_signatures = custom_signatures",
"def setCustomData( self, key, value ):\n self._customData[str(key)] = value",
"def client_cert(self, client_cert):\n\n self._client_cert = client_cert",
"def addCustomDocumentProperty(self,name,value):\n self.PDFreactorConfiguration.in1[\"customDocumentProperties\"].append([name, value])",
"def ca_cert(self, ca_cert):\n\n self._ca_cert = ca_cert",
"def is_custom(self, is_custom):\n\n self._is_custom = is_custom",
"def is_custom(self, is_custom):\n\n self._is_custom = is_custom",
"def custom_user_id(self, custom_user_id):\n # type: (string_types) -> None\n\n if custom_user_id is not None:\n if not isinstance(custom_user_id, string_types):\n raise TypeError(\"Invalid type for `custom_user_id`, type has to be `string_types`\")\n\n self._custom_user_id = custom_user_id",
"def limit(self, custom_limit):\n # NOTE(gibi): this operation needs escalated privileges (e.g. admin)\n # as the owner of the app cannot set its own app's limits. But\n # authorization is out of scope.\n self._limit = custom_limit"
]
| [
"0.6610284",
"0.54055816",
"0.53626394",
"0.52227587",
"0.51395243",
"0.5006331",
"0.49909642",
"0.4988925",
"0.49661958",
"0.49389002",
"0.49314305",
"0.49158552",
"0.489311",
"0.4870659",
"0.48248613",
"0.4814451",
"0.47850242",
"0.4669491",
"0.460155",
"0.4534467",
"0.45080948",
"0.4457423",
"0.44370884",
"0.4422073",
"0.44185385",
"0.4389394",
"0.43788794",
"0.43788794",
"0.4367516",
"0.4337832"
]
| 0.8813465 | 0 |
Sets the hpkp of this TypesConsoleCertificateSettings. | def hpkp(self, hpkp):
self._hpkp = hpkp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pssh(self, pssh):\n self._pssh = pssh\n return self",
"def hdp_version(self, hdp_version):\n\n self._hdp_version = hdp_version",
"def set_kp():\n kp = request.params.get(\"kp\", 0, type=float)\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetKp(pid, ctypes.c_float(kp))\n if retval != 0:\n LOG.error(\"Failed to set PID Kp. Error code: %s\", ERROR_CODES[retval])\n LOG.info(\"Kp: %f\", kp)\n LOG.info(\"PID: %d\", pid)",
"def pthid(self, pthid):\n\n self._pthid = pthid",
"def setKp(self, proportional_gain):\n\t\tself.Kp = proportional_gain",
"def setKp(self, proportional_gain):\r\n self.Kp = proportional_gain",
"def setKp(self, proportional_gain):\n self.Kp = proportional_gain",
"def setKp(self, proportional_gain):\n self.__Kp = proportional_gain",
"def set_pubkey(self, pkey):\n assert m2.x509_type_check(self.x509), \"'x509' type error\"\n return m2.x509_set_pubkey(self.x509, pkey.pkey)",
"def hklout(self, hklout):\n self._hklout = hklout",
"def set_wpa(self, pardus_profile):\n\n self.key_mgmt = \"wpa-psk\"\n self.psk = str(pardus_profile.get_auth_password())",
"def setKp(self, proportional_gain):\r\n\t\tself.Kp = proportional_gain\r\n\t\tself.label = \"standard_PID_Controller/Kp=%f, Ti=%f, Td=%f\" % (self.Kp, self.Ti, self.Td)",
"def _set_set_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"set_dscp must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__set_dscp = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_set_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"set_dscp must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__set_dscp = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_set_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"set_dscp must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=True)\"\"\",\n })\n\n self.__set_dscp = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_set_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"set_dscp must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__set_dscp = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_set_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"set_dscp must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__set_dscp = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_set_dscp(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"set_dscp must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"set-dscp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__set_dscp = t\n if hasattr(self, '_set'):\n self._set()",
"def __init__(self):\n super(sppasSymbolSettings, self).__init__()\n\n self.__dict__ = dict(\n unk=\"<UNK>\",\n phone=sppasSymbolSettings.__phone_symbols(),\n ortho=sppasSymbolSettings.__ortho_symbols(),\n all=sppasSymbolSettings.__all_symbols()\n )",
"def setHpr(self, hpr):\n self.cameraNode.setHpr(hpr)",
"def dhcp(self, dhcp):\n\n self._dhcp = dhcp",
"def set_wep(self, pardus_profile):\n\n self.auth_alg = \"open\" #TODO: or 'shared' ??\n self.key_mgmt = \"None\" # Which stands for WEP based key management\n self.wep_key0 = str(pardus_profile.get_auth_password()) # Default index\n self.wep_key_type = \"1\" # Interpret WEP keys as hex or ascii keys",
"def setHBin(self, hbin):\n with self.lock:\n self.hbin = hbin",
"def __init__(self, kippt):\n self.kippt = kippt",
"def h(self, h):\n\n self._h = h",
"def hklin(self, hklin):\n self._hklin = hklin",
"def set_primary_object_hash(self, hsh):\n self.hash = hsh",
"def _serverTLS13Handshake(self, settings, clientHello, cipherSuite,\n privateKey, serverCertChain, version, scheme,\n srv_alpns, reqCert):\n prf_name, prf_size = self._getPRFParams(cipherSuite)\n\n secret = bytearray(prf_size)\n\n share = clientHello.getExtension(ExtensionType.key_share)\n if share:\n share_ids = [i.group for i in share.client_shares]\n for group_name in chain(settings.keyShares, settings.eccCurves,\n settings.dhGroups):\n selected_group = getattr(GroupName, group_name)\n if selected_group in share_ids:\n cl_key_share = next(i for i in share.client_shares\n if i.group == selected_group)\n break\n else:\n for result in self._sendError(AlertDescription.internal_error,\n \"HRR did not work?!\"):\n yield result\n\n psk = None\n selected_psk = None\n resumed_client_cert_chain = None\n psks = clientHello.getExtension(ExtensionType.pre_shared_key)\n psk_types = clientHello.getExtension(\n ExtensionType.psk_key_exchange_modes)\n if psks and (PskKeyExchangeMode.psk_dhe_ke in psk_types.modes or\n PskKeyExchangeMode.psk_ke in psk_types.modes) and \\\n (settings.pskConfigs or settings.ticketKeys):\n for i, ident in enumerate(psks.identities):\n ticket = None\n external = True\n match = [j for j in settings.pskConfigs\n if j[0] == ident.identity]\n if not match:\n (match, ticket) = self._tryDecrypt(settings, ident)\n external = False\n if not match:\n continue\n match = [match]\n\n # check if PSK can be used with selected cipher suite\n psk_hash = match[0][2] if len(match[0]) > 2 else 'sha256'\n if psk_hash != prf_name:\n continue\n\n psk = match[0][1]\n selected_psk = i\n if ticket:\n resumed_client_cert_chain = ticket.client_cert_chain\n try:\n HandshakeHelpers.verify_binder(\n clientHello,\n self._pre_client_hello_handshake_hash,\n selected_psk,\n psk,\n psk_hash,\n external)\n except TLSIllegalParameterException as e:\n for result in self._sendError(\n AlertDescription.illegal_parameter,\n str(e)):\n yield result\n break\n\n sh_extensions = []\n\n # we need to gen key share either when we selected psk_dhe_ke or\n # regular certificate authenticated key exchange (the default)\n if (psk and\n PskKeyExchangeMode.psk_dhe_ke in psk_types.modes and\n \"psk_dhe_ke\" in settings.psk_modes) or\\\n (psk is None and privateKey):\n self.ecdhCurve = selected_group\n kex = self._getKEX(selected_group, version)\n key_share = self._genKeyShareEntry(selected_group, version)\n\n try:\n shared_sec = kex.calc_shared_key(key_share.private,\n cl_key_share.key_exchange)\n except TLSIllegalParameterException as alert:\n for result in self._sendError(\n AlertDescription.illegal_parameter,\n str(alert)):\n yield result\n\n sh_extensions.append(ServerKeyShareExtension().create(key_share))\n elif (psk is not None and\n PskKeyExchangeMode.psk_ke in psk_types.modes and\n \"psk_ke\" in settings.psk_modes):\n shared_sec = bytearray(prf_size)\n else:\n for result in self._sendError(\n AlertDescription.handshake_failure,\n \"Could not find acceptable PSK identity nor certificate\"):\n yield result\n\n if psk is None:\n psk = bytearray(prf_size)\n\n sh_extensions.append(SrvSupportedVersionsExtension().create(version))\n if selected_psk is not None:\n sh_extensions.append(SrvPreSharedKeyExtension()\n .create(selected_psk))\n\n serverHello = ServerHello()\n # in TLS1.3 the version selected is sent in extension, (3, 3) is\n # just dummy value to workaround broken middleboxes\n serverHello.create((3, 3), getRandomBytes(32),\n clientHello.session_id,\n cipherSuite, extensions=sh_extensions)\n\n msgs = []\n 
msgs.append(serverHello)\n if not self._ccs_sent and clientHello.session_id:\n ccs = ChangeCipherSpec().create()\n msgs.append(ccs)\n for result in self._sendMsgs(msgs):\n yield result\n\n # Early secret\n secret = secureHMAC(secret, psk, prf_name)\n\n # Handshake Secret\n secret = derive_secret(secret, bytearray(b'derived'), None, prf_name)\n secret = secureHMAC(secret, shared_sec, prf_name)\n\n sr_handshake_traffic_secret = derive_secret(secret,\n bytearray(b's hs traffic'),\n self._handshake_hash,\n prf_name)\n cl_handshake_traffic_secret = derive_secret(secret,\n bytearray(b'c hs traffic'),\n self._handshake_hash,\n prf_name)\n self.version = version\n self._recordLayer.calcTLS1_3PendingState(\n cipherSuite,\n cl_handshake_traffic_secret,\n sr_handshake_traffic_secret,\n settings.cipherImplementations)\n\n self._changeWriteState()\n\n ee_extensions = []\n\n if clientHello.getExtension(ExtensionType.record_size_limit) and \\\n settings.record_size_limit:\n ee_extensions.append(RecordSizeLimitExtension().create(\n min(2**14+1, settings.record_size_limit)))\n\n # a bit of a hack to detect if the HRR was sent\n # as that means that original key share didn't match what we wanted\n # send the client updated list of shares we support,\n # preferred ones first\n if clientHello.getExtension(ExtensionType.cookie):\n ext = SupportedGroupsExtension()\n groups = [getattr(GroupName, i) for i in settings.keyShares]\n groups += [getattr(GroupName, i) for i in settings.eccCurves\n if getattr(GroupName, i) not in groups]\n groups += [getattr(GroupName, i) for i in settings.dhGroups\n if getattr(GroupName, i) not in groups]\n if groups:\n ext.create(groups)\n ee_extensions.append(ext)\n\n alpn_ext = clientHello.getExtension(ExtensionType.alpn)\n if alpn_ext:\n # error handling was done when receiving ClientHello\n matched = [i for i in alpn_ext.protocol_names if i in srv_alpns]\n if matched:\n ext = ALPNExtension().create([matched[0]])\n ee_extensions.append(ext)\n\n if clientHello.getExtension(ExtensionType.heartbeat):\n if settings.use_heartbeat_extension:\n ee_extensions.append(HeartbeatExtension().create(\n HeartbeatMode.PEER_ALLOWED_TO_SEND))\n\n encryptedExtensions = EncryptedExtensions().create(ee_extensions)\n self._queue_message(encryptedExtensions)\n\n if selected_psk is None:\n\n # optionally send the client a certificate request\n if reqCert:\n\n # the context SHALL be zero length except in post-handshake\n ctx = b''\n\n # Get list of valid Signing Algorithms\n # we don't support DSA for client certificates yet\n cr_settings = settings.validate()\n cr_settings.dsaSigHashes = []\n valid_sig_algs = self._sigHashesToList(cr_settings)\n assert valid_sig_algs\n\n certificate_request = CertificateRequest(self.version)\n certificate_request.create(context=ctx, sig_algs=valid_sig_algs)\n self._queue_message(certificate_request)\n\n certificate = Certificate(CertificateType.x509, self.version)\n certificate.create(serverCertChain, bytearray())\n self._queue_message(certificate)\n\n certificate_verify = CertificateVerify(self.version)\n\n signature_scheme = getattr(SignatureScheme, scheme)\n\n signature_context = \\\n KeyExchange.calcVerifyBytes((3, 4), self._handshake_hash,\n signature_scheme, None, None, None,\n prf_name, b'server')\n\n if signature_scheme in (SignatureScheme.ed25519,\n SignatureScheme.ed448):\n hashName = \"intrinsic\"\n padType = None\n saltLen = None\n sig_func = privateKey.hashAndSign\n ver_func = privateKey.hashAndVerify\n elif signature_scheme[1] == SignatureAlgorithm.ecdsa:\n 
hashName = HashAlgorithm.toRepr(signature_scheme[0])\n padType = None\n saltLen = None\n sig_func = privateKey.sign\n ver_func = privateKey.verify\n else:\n padType = SignatureScheme.getPadding(scheme)\n hashName = SignatureScheme.getHash(scheme)\n saltLen = getattr(hashlib, hashName)().digest_size\n sig_func = privateKey.sign\n ver_func = privateKey.verify\n\n signature = sig_func(signature_context,\n padType,\n hashName,\n saltLen)\n if not ver_func(signature, signature_context,\n padType,\n hashName,\n saltLen):\n for result in self._sendError(\n AlertDescription.internal_error,\n \"Certificate Verify signature failed\"):\n yield result\n certificate_verify.create(signature, signature_scheme)\n\n self._queue_message(certificate_verify)\n\n finished_key = HKDF_expand_label(sr_handshake_traffic_secret,\n b\"finished\", b'', prf_size, prf_name)\n verify_data = secureHMAC(finished_key,\n self._handshake_hash.digest(prf_name),\n prf_name)\n\n finished = Finished(self.version, prf_size).create(verify_data)\n\n self._queue_message(finished)\n for result in self._queue_flush():\n yield result\n\n self._changeReadState()\n\n # Master secret\n secret = derive_secret(secret, bytearray(b'derived'), None, prf_name)\n secret = secureHMAC(secret, bytearray(prf_size), prf_name)\n\n cl_app_traffic = derive_secret(secret, bytearray(b'c ap traffic'),\n self._handshake_hash, prf_name)\n sr_app_traffic = derive_secret(secret, bytearray(b's ap traffic'),\n self._handshake_hash, prf_name)\n self._recordLayer.calcTLS1_3PendingState(serverHello.cipher_suite,\n cl_app_traffic,\n sr_app_traffic,\n settings\n .cipherImplementations)\n\n # all the messages sent by the server after the Finished message\n # MUST be encrypted with ap traffic secret, even if they regard\n # problems in processing client Certificate, CertificateVerify or\n # Finished messages\n self._changeWriteState()\n\n client_cert_chain = None\n #Get [Certificate,] (if was requested)\n if reqCert and selected_psk is None:\n for result in self._getMsg(ContentType.handshake,\n HandshakeType.certificate,\n CertificateType.x509):\n if result in (0, 1):\n yield result\n else:\n break\n client_certificate = result\n assert isinstance(client_certificate, Certificate)\n client_cert_chain = client_certificate.cert_chain\n\n #Get and check CertificateVerify, if relevant\n cli_cert_verify_hh = self._handshake_hash.copy()\n if client_cert_chain and client_cert_chain.getNumCerts():\n for result in self._getMsg(ContentType.handshake,\n HandshakeType.certificate_verify):\n if result in (0, 1):\n yield result\n else: break\n certificate_verify = result\n assert isinstance(certificate_verify, CertificateVerify)\n\n signature_scheme = certificate_verify.signatureAlgorithm\n\n valid_sig_algs = self._sigHashesToList(settings,\n certList=client_cert_chain,\n version=(3, 4))\n if signature_scheme not in valid_sig_algs:\n for result in self._sendError(\n AlertDescription.illegal_parameter,\n \"Invalid signature on Certificate Verify\"):\n yield result\n\n signature_context = \\\n KeyExchange.calcVerifyBytes((3, 4), cli_cert_verify_hh,\n signature_scheme, None, None, None,\n prf_name, b'client')\n\n public_key = client_cert_chain.getEndEntityPublicKey()\n\n if signature_scheme in (SignatureScheme.ed25519,\n SignatureScheme.ed448):\n hash_name = \"intrinsic\"\n pad_type = None\n salt_len = None\n ver_func = public_key.hashAndVerify\n elif signature_scheme[1] == SignatureAlgorithm.ecdsa:\n hash_name = HashAlgorithm.toRepr(signature_scheme[0])\n pad_type = None\n salt_len = 
None\n ver_func = public_key.verify\n else:\n scheme = SignatureScheme.toRepr(signature_scheme)\n pad_type = SignatureScheme.getPadding(scheme)\n hash_name = SignatureScheme.getHash(scheme)\n salt_len = getattr(hashlib, hash_name)().digest_size\n ver_func = public_key.verify\n\n if not ver_func(certificate_verify.signature,\n signature_context,\n pad_type,\n hash_name,\n salt_len):\n for result in self._sendError(\n AlertDescription.decrypt_error,\n \"signature verification failed\"):\n yield result\n\n # as both exporter and resumption master secrets include handshake\n # transcript, we need to derive them early\n exporter_master_secret = derive_secret(secret,\n bytearray(b'exp master'),\n self._handshake_hash,\n prf_name)\n\n # verify Finished of client\n cl_finished_key = HKDF_expand_label(cl_handshake_traffic_secret,\n b\"finished\", b'',\n prf_size, prf_name)\n cl_verify_data = secureHMAC(cl_finished_key,\n self._handshake_hash.digest(prf_name),\n prf_name)\n for result in self._getMsg(ContentType.handshake,\n HandshakeType.finished,\n prf_size):\n if result in (0, 1):\n yield result\n else:\n break\n cl_finished = result\n assert isinstance(cl_finished, Finished)\n if cl_finished.verify_data != cl_verify_data:\n for result in self._sendError(\n AlertDescription.decrypt_error,\n \"Finished value is not valid\"):\n yield result\n\n # disallow CCS messages after handshake\n self._middlebox_compat_mode = False\n\n resumption_master_secret = derive_secret(secret,\n bytearray(b'res master'),\n self._handshake_hash,\n prf_name)\n\n self._first_handshake_hashes = self._handshake_hash.copy()\n\n self.session = Session()\n self.extendedMasterSecret = True\n server_name = None\n if clientHello.server_name:\n server_name = clientHello.server_name.decode('utf-8')\n\n app_proto = None\n alpnExt = encryptedExtensions.getExtension(ExtensionType.alpn)\n if alpnExt:\n app_proto = alpnExt.protocol_names[0]\n\n if not client_cert_chain and resumed_client_cert_chain:\n client_cert_chain = resumed_client_cert_chain\n\n self.session.create(secret,\n bytearray(b''), # no session_id\n serverHello.cipher_suite,\n bytearray(b''), # no SRP\n client_cert_chain,\n serverCertChain,\n None,\n False,\n server_name,\n encryptThenMAC=False,\n extendedMasterSecret=True,\n appProto=app_proto,\n cl_app_secret=cl_app_traffic,\n sr_app_secret=sr_app_traffic,\n exporterMasterSecret=exporter_master_secret,\n resumptionMasterSecret=resumption_master_secret,\n # NOTE it must be a reference, not a copy\n tickets=self.tickets)\n\n # switch to application_traffic_secret for client packets\n self._changeReadState()\n\n for result in self._serverSendTickets(settings):\n yield result\n\n yield \"finished\"",
"def vscp_pythia_configure(self):\n vscp_pythia_configure = self._lib.vscp_pythia_configure\n vscp_pythia_configure.argtypes = []\n vscp_pythia_configure.restype = c_int\n return vscp_pythia_configure()",
"def set_pubkey(self, pkey):\n return m2.x509_req_set_pubkey(self.req, pkey.pkey)"
]
| [
"0.56403214",
"0.5224331",
"0.5040199",
"0.50149983",
"0.48443633",
"0.48201424",
"0.48000458",
"0.4754778",
"0.47419602",
"0.4728604",
"0.46491504",
"0.45619443",
"0.45580828",
"0.45580828",
"0.45580828",
"0.4547807",
"0.4547807",
"0.4547807",
"0.4473194",
"0.4460671",
"0.44483814",
"0.44179884",
"0.44020486",
"0.4400223",
"0.4393183",
"0.4372079",
"0.43650445",
"0.43517464",
"0.43492627",
"0.43449652"
]
| 0.7709247 | 0 |
Generates a unique id which will be used by paynow to refer to the payment initiated | def generate_transaction_id():
return str(int(time.time() * 1000)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_id(self, context):\n tmp = datetime.datetime.now()\n tmp = tmp.strftime('%Y%m%d%H%M%S%f')\n tmp += context.peer()\n m = hashlib.md5()\n m.update(tmp.encode('utf-8'))\n return str(m.hexdigest())",
"def _generate_order_id():\n current_milli_time = str(int(round(time.time())))\n rand_str = random_string_generator()\n\n return '%s%s' % (rand_str, current_milli_time)",
"def generateID(self):\n\n return str(uuid.uuid1())",
"def generate_wallet_id(cls) -> str:\n return str(uuid.uuid4())",
"def _get_unique_id(self):\n now = datetime.now()\n\n u_id = now.second + 60*(now.minute + 60*(now.hour + 24*(now.day + 31*(now.month + 366*(now.year)))))\n return \"instance\" + str(u_id)",
"def generate_id():\n return uuid4().get_hex()",
"def generateRegistrationId():\n regId = KeyHelper.getRandomSequence()\n return regId",
"def unique_id() -> str:",
"def generate_id():\n\treturn \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())",
"def unique_id(self) -> str:\n return f\"{self.wallet_id}{self.WALLET_KEY_POSTFIX}\"",
"def gen_id(self) -> str:\n self._id += 1\n return str(self._id)",
"def makeid(cls):\n return str(uuid.uuid4().hex)",
"def get_unique_id():\n global unique_id_increment\n if unique_id_increment is None:\n unique_id_increment = 0\n unique_id_increment += 1\n return '%d%d' % (int(time.time()), unique_id_increment)",
"def get_unique_id():\n global unique_id_increment\n if unique_id_increment is None:\n unique_id_increment = 0\n unique_id_increment += 1\n return \"%d%d\" % (int(time.time()), unique_id_increment)",
"def generate_request_id():\n return 'req-%s' % uuid.uuid4()",
"def generate_request_id():\n return 'req-%s' % uuid.uuid4()",
"def generate_id():\n return \"%s-%s\" % (str(uuid.uuid4())[:4],random.choice(funnames).lower())",
"def build_id():\n return \"test123\"",
"def __create_periodic_id() -> str:\n now = datetime.now()\n return now.strftime(\"%m%d%Y%H%M%S%f\")",
"def _generate_tracking_number(self):\n return uuid.uuid4().hex.upper()",
"def _CreateRecordId(self):\n self._record_count += 1\n return '%s_%s' % (self._unique_id, self._record_count)",
"def get_ticket_id():\n return str(time.time()) + str(uuid.uuid4())",
"def unique_id(self) -> str:\n return self.get_unique_id(wallet=self.wallet_id, nonce=self.nonce)",
"def generateUID(self):\n global previous_id\n \n id = previous_id\n previous_id += 1\n \n return id",
"def generate_message_control_id():\n d = datetime.datetime.utcnow()\n # Strip off the decade, ID only has to be unique for 3 years.\n # So now we have a 16 char timestamp.\n timestamp = d.strftime(\"%y%j%H%M%S%f\")[1:]\n # Add 4 chars of uniqueness\n unique = \"\".join(random.sample(alphanumerics, 4))\n return timestamp + unique",
"def generate_timer_id():\n\treturn base64.b64encode(os.urandom(30), b\"Qx\").decode(\"ascii\")",
"def generate_id(cls):\n cls._index += 1\n return 'fp_%s' % cls._index",
"def req_id_generator() -> str:\n # 8 chars long should be long enough, add the 'Generated' prefix to know not to search for this id in the elb logs\n return f'Generated-{str(uuid.uuid4())[:8]}'",
"def make_id():\n global _simple_id\n\n import uuid\n from ..settings import settings\n\n if settings.simple_ids(False):\n _simple_id += 1\n new_id = _simple_id\n else:\n new_id = uuid.uuid4()\n return str(new_id)",
"def generate_product_number():\n return str(uuid.uuid4())"
]
| [
"0.75733525",
"0.7475758",
"0.74029607",
"0.7379154",
"0.7355747",
"0.7351507",
"0.7347127",
"0.73232245",
"0.7290395",
"0.72684765",
"0.7227526",
"0.7220504",
"0.7218613",
"0.7189539",
"0.71791047",
"0.71791047",
"0.717642",
"0.71746486",
"0.71354276",
"0.71192765",
"0.7117874",
"0.71105874",
"0.71091384",
"0.70918924",
"0.707515",
"0.7074721",
"0.70592993",
"0.7044613",
"0.6986325",
"0.69681424"
]
| 0.79250395 | 0 |
Reflect the elements of a numpy array along a specified axis about the first element. | def reflect(arr,axis=0,sign=1):
refl_idx = axis * [slice(None)] + [slice(None,0,-1), Ellipsis]
return np.concatenate((arr[tuple(refl_idx)],arr), axis=axis) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reflect_array(x, axis=1, kind='even'):\n if axis == 0:\n x_sym = np.flipud(x)\n elif axis == 1:\n x_sym = np.fliplr(x)\n else:\n raise NotImplementedError\n\n if kind == 'even':\n fact = 1.0\n elif kind == 'odd':\n fact = -1.0\n else:\n raise NotImplementedError\n\n return np.concatenate((fact*x_sym, x), axis=axis)",
"def _np_apply_along_axis(func1d, axis: int, arr: np.ndarray) -> np.ndarray:\n\n assert arr.ndim == 2\n assert axis in [0, 1]\n\n if axis == 0:\n result = np.empty(arr.shape[1])\n for i in range(len(result)):\n result[i] = func1d(arr[:, i])\n return result\n\n result = np.empty(arr.shape[0])\n for i in range(len(result)):\n result[i] = func1d(arr[i, :])\n\n return result",
"def __call__(self, array, axis=None):\n raise NotImplementedError()",
"def steppify(arr, axis='x'):\n\t\n\tif axis == 'x':\n\t\tnewarr = np.r_[arr[0], np.dstack((arr[1:], arr[1:])).flatten()]\n\t\n\telif axis == 'y':\n\t\tnewarr = np.r_[np.dstack((arr[:-1], arr[:-1])).flatten(), arr[-1]]\n\t\n\telse:\n\t\tprint('your axes in steppify are improperly identified')\n\n\treturn newarr",
"def shuffle_array_along(X, axis=0, inline=True):\n if not inline:\n X = X.copy()\n np.apply_along_axis(np.random.shuffle, axis, X)\n if not inline:\n return X",
"def one_dim(a: cython.double[:]):\n a[0] *= 2\n return a[0], a.ndim",
"def nonzero_first(arr, *, axis):\n def nonzero_first_1d(arr):\n try:\n return np.nonzero(arr)[0][0]\n except IndexError:\n return -1\n return np.apply_along_axis(nonzero_first_1d, axis, arr)",
"def forward_one(self, x):\n return np.squeeze(self.forward(x[np.newaxis,:]), axis=0)",
"def _asvector(self, arr):\n result = self._moveaxis(arr, [-2, -1], [0, 1])\n return self.domain.element(result)",
"def real(x):\n return x[..., 0]",
"def _chk_asarray(a, axis):\r\n if axis is None:\r\n a = ravel(a)\r\n outaxis = 0\r\n else:\r\n a = asarray(a)\r\n outaxis = axis\r\n return a, outaxis",
"def trans(array,dim):\n return array[filter(lambda x: x != dim,range(len(array)) ) ]",
"def _swap_axis(input_tensor, dim_index, last_index, name=None):\n return array_ops.transpose(\n input_tensor,\n array_ops.concat([\n math_ops.range(dim_index), [last_index],\n math_ops.range(dim_index + 1, last_index), [dim_index]\n ], 0),\n name=name)",
"def reflect(self):\n self.vertices[-1, :] = self.reflected",
"def apply_array(self, array: np.ndarray) -> np.ndarray:\n functions = (\n f for f, b in zip((np.transpose, np.fliplr, np.flipud), astuple(self)) if b\n )\n return reduce(lambda f, g: lambda x: g(f(x)), functions, lambda x: x)(array)",
"def test_broadcast(self):\n a = np.ones((3, 4, 1))\n ai = np.ones((1, 2, 5), dtype=np.intp)\n actual = take_along_axis(a, ai, axis=1)\n assert_equal(actual.shape, (3, 2, 5))",
"def indep_roll(arr, shifts, axis=1):\n arr = np.swapaxes(arr, axis, -1)\n all_idcs = np.ogrid[[slice(0, n) for n in arr.shape]]\n\n # Convert to a positive shift\n shifts[shifts < 0] += arr.shape[-1]\n all_idcs[-1] = all_idcs[-1] - shifts[:, np.newaxis]\n\n result = arr[tuple(all_idcs)]\n arr = np.swapaxes(result, -1, axis)\n return arr",
"def corresponding_ravel(X,axis=0):\n\n t1=np.c_[tuple([di.ravel() for di in X])]\n\n if axis==1:\n t1=t1.T\n\n return t1",
"def xray(im, axis=0): # pragma: no cover\n im = np.array(~im, dtype=int)\n if axis == 1:\n im = np.transpose(im, axes=[1, 0, 2])\n if axis == 2:\n im = np.transpose(im, axes=[2, 1, 0])\n im = np.sum(im, axis=0, dtype=np.int64)\n return im",
"def _ureduce(a, func, **kwargs):\n a = np.asanyarray(a)\n axis = kwargs.get('axis', None)\n if axis is not None:\n keepdim = list(a.shape)\n nd = a.ndim\n axis = _nx.normalize_axis_tuple(axis, nd)\n\n for ax in axis:\n keepdim[ax] = 1\n\n if len(axis) == 1:\n kwargs['axis'] = axis[0]\n else:\n keep = set(range(nd)) - set(axis)\n nkeep = len(keep)\n # swap axis that should not be reduced to front\n for i, s in enumerate(sorted(keep)):\n a = a.swapaxes(i, s)\n # merge reduced axis\n a = a.reshape(a.shape[:nkeep] + (-1,))\n kwargs['axis'] = -1\n keepdim = tuple(keepdim)\n else:\n keepdim = (1,) * a.ndim\n\n r = func(a, **kwargs)\n return r, keepdim",
"def two_dim(a: cython.double[:,:]):\n a[0,0] *= 3\n return a[0,0], a[0,1], a.ndim",
"def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])",
"def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])",
"def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])",
"def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])",
"def flip(a, axis):\n a_ndim = a.ndim\n if a_ndim < 1:\n raise core.core._AxisError('Input must be >= 1-d')\n\n axis = int(axis)\n if not -a_ndim <= axis < a_ndim:\n raise core.core._AxisError(\n 'axis must be >= %d and < %d' % (-a_ndim, a_ndim))\n\n return _flip(a, axis)",
"def reflect(self, axis):\n if axis == \"x\":\n self.y = - self.y\n elif axis == \"y\":\n self.x = - self.x\n else:\n print(\"The argument axis only accepts values 'x' and 'y'!\")",
"def squeeze(a, axis=None):\n if axis is None:\n axis = tuple(i for i, n in enumerate(a._shape) if n == 1)\n elif isinstance(axis, int):\n axis = axis,\n\n new_shape = []\n new_strides = []\n j = 0\n for i, n in enumerate(a._shape):\n if j < len(axis) and i == axis[j]:\n if n != 1:\n raise RuntimeError('Cannot squeeze dimension of size > 1')\n j += 1\n else:\n new_shape.append(n)\n new_strides.append(a._strides[i])\n\n v = a.view()\n v._shape = tuple(new_shape)\n v._strides = tuple(new_strides)\n v._c_contiguous = -1\n v._f_contiguous = -1\n return v",
"def _moveaxis(self, arr, source, dest):\n try:\n source = list(source)\n except TypeError:\n source = [source]\n try:\n dest = list(dest)\n except TypeError:\n dest = [dest]\n\n source = [a + arr.ndim if a < 0 else a for a in source]\n dest = [a + arr.ndim if a < 0 else a for a in dest]\n\n order = [n for n in range(arr.ndim) if n not in source]\n\n for dest, src in sorted(zip(dest, source)):\n order.insert(dest, src)\n\n return arr.transpose(order)",
"def transform(array):\n assert array.shape == (10, 2)\n new = Array(columns=\"abcd\")\n for x, y in array:\n new.append([x, y, x + y, x * y])\n return new"
]
| [
"0.67617947",
"0.6650867",
"0.61850905",
"0.6083956",
"0.5720959",
"0.57025504",
"0.5559801",
"0.5524613",
"0.55142355",
"0.54787475",
"0.5478125",
"0.5472565",
"0.54558724",
"0.5451142",
"0.53963137",
"0.53875583",
"0.5342568",
"0.5339039",
"0.5313937",
"0.53026325",
"0.53007036",
"0.5296241",
"0.5296241",
"0.5296241",
"0.5296241",
"0.529301",
"0.5286551",
"0.5275018",
"0.52291626",
"0.5220617"
]
| 0.69821197 | 0 |
Returns a list of flows with randomly selected sources and destinations that will saturate the network (i.e. a flow will be admitted provided that it would not cause the utilization of any link in the network to exceed 1. Flows are equally split across the K shortest paths connecting the source node to the destination node. | def compute_path_hopping_flow_allocations(target_graph, K=3):
flow_allocation_seed_number = 0xCAFE_BABE
np.random.seed(flow_allocation_seed_number)
# id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)
link_utilization = {(u, v): 0.0 for u, v in target_graph.edges}
node_capacity = {u: 0.0 for u in target_graph.nodes}
flows = []
while True:
source_node, destination_node = flow_selection_fn(target_graph.nodes, 2, replace=False)
print(source_node, destination_node)
shortest_paths = sorted(nx.all_simple_paths(target_graph, source_node, destination_node,
cutoff=3),
key=lambda p: len(p))
k_shortest_paths = list(itertools.islice(shortest_paths, K))
# flow_tx_rate = np.random.uniform() * 10
flow_tx_rate = 1.0
# if node_capacity[source_node] + flow_tx_rate > LINK_CAPACITY:
# break
node_capacity[source_node] += flow_tx_rate
capacity_was_exceeded = False
for path in [nx.utils.pairwise(p_i) for p_i in k_shortest_paths]:
for u, v in [sorted(h_i) for h_i in path]:
flow_rate_per_subpath = flow_tx_rate / K
if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:
capacity_was_exceeded = True
break
link_utilization[u, v] += flow_rate_per_subpath
if capacity_was_exceeded:
break
if capacity_was_exceeded:
break
the_flow = Flow( source_node = source_node
, destination_node = destination_node
, flow_tx_rate = flow_tx_rate
, paths = k_shortest_paths
, splitting_ratio = [1.0/K]*K
)
flows.append(the_flow)
return flows, link_utilization | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_equal_flow_allocations(target_graph, K=3):\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n flow_allocation_seed_number = 0xDEAD_BEEF\n np.random.seed(flow_allocation_seed_number)\n flows = []\n for node in target_graph.nodes:\n possible_destination_nodes = set(target_graph.nodes) - set([node])\n [destination_node] = np.random.choice(list(possible_destination_nodes), 1, replace=False)\n # shortest_paths = all_shortest_paths(target_graph, node, destination_node.item())\n shortest_paths = sorted(nx.all_simple_paths(target_graph, node, destination_node.item(),\n cutoff=3),\n key=lambda p: len(p))\n k_shortest_paths = list(itertools.islice(shortest_paths, K))\n the_flow = Flow( source_node = node\n , destination_node = destination_node.item()\n , flow_tx_rate = 10.0\n , paths = k_shortest_paths\n , splitting_ratio = [1/K]*K\n )\n flows.append(the_flow)\n \n return flow_allocation_seed_number, flows",
"def bfsSample(G, source=None, k = 50):\n\twarn(\"networkit.sampling.bfsSample is deprecated, will be removed in future updates.\")\n\tif not source:\n\t\tsource = GraphTools.randomNode(G)\n\tn = G.numberOfNodes()\n\tvisited = [False]*n\n\tQ = [source]\n\tclosest = set([source])\n\tglobal found\n\tfound = 0\n\twhile len(Q) > 0 and found < k:\n\t\tu = Q.pop(0)\n\t\tdef enqueue(u,v,weight, eid):\n\t\t\tglobal found\n\t\t\tif not visited[v] and found < k:\n\t\t\t\tfound += 1\n\t\t\t\tvisited[v] = True\n\t\t\t\tQ.append(v)\n\t\t\t\tclosest.add(v)\n\t\tG.forEdgesOf(u, enqueue)\n\tprint(\"found {0} nodes\".format(len(closest)))\n\tG1 = GraphTools.subgraphFromNodes(G, closest)\n\treturn G1",
"def compute_greedy_flow_allocations( target_graph\n , flow_selection_fn\n , seed_number=DEFAULT_SEED_NUMBER):\n\n flow_allocation_seed_number = seed_number\n np.random.seed(flow_allocation_seed_number)\n\n link_utilization = {tuple(sorted(link_tuple)): 0.0 for link_tuple in target_graph.edges}\n flows = []\n\n while True:\n capacity_was_exceeded = False\n\n source_node, destination_node = flow_selection_fn(target_graph.nodes)\n flow_tx_rate = np.random.uniform(FLOW_TX_RATE_LOWER_BOUND, FLOW_TX_RATE_UPPER_BOUND)\n\n connecting_paths = list(node_disjoint_paths(target_graph, source_node, destination_node))\n disjoint_path_count = len(connecting_paths)\n flow_rate_per_subpath = flow_tx_rate / disjoint_path_count\n for path in [nx.utils.pairwise(p_i) for p_i in connecting_paths]:\n for u, v in [tuple(sorted(t_i)) for t_i in path]:\n if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:\n capacity_was_exceeded = True\n break\n link_utilization[u, v] += flow_rate_per_subpath\n if capacity_was_exceeded:\n break\n if capacity_was_exceeded:\n break\n\n the_flow = Flow( source_node = source_node\n , destination_node = destination_node\n , flow_tx_rate = flow_tx_rate\n , paths = connecting_paths\n , splitting_ratio = [1.0/disjoint_path_count]*disjoint_path_count\n )\n flows.append(the_flow)\n return flows, link_utilization",
"def compute_unequal_flow_allocations(target_graph, K=3):\n\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n flow_allocation_seed_number = 0xDEAD_BEEF\n np.random.seed(flow_allocation_seed_number)\n flows = []\n link_utilization = {}\n for node in target_graph.nodes:\n possible_destination_nodes = set(target_graph.nodes) - {node}\n destination_node = np.random.choice(list(possible_destination_nodes), 1, \n replace=False).item()\n shortest_path = nx.shortest_path(target_graph, node, destination_node)\n the_flow = Flow( source_node = node\n , destination_node = destination_node\n , flow_tx_rate = 10.0\n , paths = [shortest_path]\n , splitting_ratio = [1.0]\n )\n flows.append(the_flow)\n\n return flow_allocation_seed_number, flows",
"def __generate_all_shortest_paths(self,cutoff = 10):\n if cutoff < 1:\n cutoff = 10\n self.__logger.info(\"cutoff value must be a positive integer. Set back to default value: 10\")\n\n all_pair_shortest_paths = nx.all_pairs_shortest_path(self.G, cutoff=cutoff)\n for item in all_pair_shortest_paths:\n from_node = item[0]\n paths = item[1]\n for destination,path in paths.items():\n yield (len(path),path)",
"def get_paths_for_flow(F, s, f):\n links = [((u, v), split_ratio) \n for (flow_id, u, v), split_ratio in F.items() \n if flow_id == f and u == s and split_ratio > 0.001]\n return links",
"def get_paths_for_flow(F, s, f):\n links = [((u, v), split_ratio) \n for (flow_id, u, v), split_ratio in F.items() \n if flow_id == f and u == s and split_ratio > 0.001]\n return links",
"def generate_graph(size, number_of_clusters, minimal_size):\n base_list = list(range(size))\n result_list = []\n random.shuffle(base_list)\n for i in range(number_of_clusters - 1):\n size = random.randint(minimal_size, len(base_list) - (number_of_clusters - i - 1) * minimal_size)\n cluster = []\n for n in range(size):\n actual = random.choice(base_list)\n base_list.remove(actual)\n cluster.append(actual)\n result_list.append(strongly_connect(cluster))\n result_list.append(strongly_connect(base_list))\n\n while len(result_list) < 5:\n result_list.append([])\n\n print(sorted([len(i) for i in result_list], reverse=True)[:5])\n\n return weak_connect_graph(result_list)",
"def get_pathologic_covering_routes(n_pl, n_target, attacker_strategy, target_values):\n # computes the coefficient used by the greedy oracle to choose routes\n targets_coeff = np.transpose(np.multiply(attacker_strategy, target_values))\n\n # randomly selects the player for which the non optimal choice will be made\n wrong_pl = randint(1, n_pl)\n\n # generate the non optimal route randomly\n n_covered_targets = randint(n_pl,n_target-1)\n non_opt_action = np.zeros(n_target)\n for i in range(0, n_covered_targets):\n random_covered_target = randint(0, n_target-1)\n non_opt_action[random_covered_target] = 1\n\n # compute the value of the non optimal route\n non_opt_val = get_value_single_route(non_opt_action, targets_coeff)\n\n # generate routes that have, as a single, values smaller than the best greedy route but taken togher perform\n # at least as well. [[0,1,...],[...],...] a[r][t]=1 iff t is covered by r.\n # The returned list should have n_pl - 1 routes\n opt_routes = get_opt_routes(n_pl, non_opt_action)\n\n I={}\n for pl in range(1, n_pl+1):\n\n n_r = randint(0, MAX_ROUTES)\n temp = lil_matrix((n_r+1, n_target), dtype='int8')\n\n if pl == wrong_pl:\n # put the non opt route in the bucket\n for t in non_opt_action.nonzero():\n temp[0,t] = 1\n else:\n for t in opt_routes.pop().nonzero():\n temp[0,t] = 1\n\n # generate other random routes with single value less than the non_opt_value\n for r in range(1, n_r):\n new_route = get_r_limited_val(non_opt_val, targets_coeff)\n\n for t in new_route.nonzero():\n temp[r,t] = 1\n\n I[pl] = temp.tocsr()\n\n return I",
"def multipleTravellingSalesmen(self, iterations=None):\n if not iterations:\n iterations = self.mTSPIterations\n\n # Randomly select points drawn from unvisited and distribute equally among the ships\n paths = self.partition(self.unvisited.keys(), len(self.ships))\n [path.append(ship) for path, ship in zip(paths, self.ships)]\n\n # Run single travelling salesman on each of the ships\n paths = [self.singleTravellingSalesman(path, iterations=0) for path in paths]\n\n\n # IF TWO OR MORE SHIPS!!!\n # Point exchange between ships and rerun tsp on each ship\n if len(self.ships) > 1:\n for _ in range(iterations):\n twopathindices = random.sample(xrange(len(paths)), 2)\n newtwopaths = self.swapTwoPointsBetweenPaths([paths[index] for index in twopathindices])\n newtwopaths = [self.singleTravellingSalesman(path, iterations=max(len(path), 100)) for path in\n newtwopaths]\n if self.totalPathDistance(newtwopaths) < self.totalPathDistance(\n [paths[index] for index in twopathindices]):\n for i, index in enumerate(twopathindices):\n paths[index] = newtwopaths[i]\n\n return paths",
"def set_random_session(self, G, degree_s):\n sorted_nodes = nx.topological_sort(G)\n num_nodes = G.number_of_nodes()\n\n # create sources and destinations of each of the sections\n # name the nodes to be the last 4 numbers\n srcs = [num_nodes, num_nodes + 1]\n dsts = [num_nodes + 2, num_nodes + 3]\n\n end_idx = int(0.3 * len(sorted_nodes))\n end_idx = max(end_idx, 2)\n for i in range(2):\n s = srcs[i]\n t = dsts[i]\n reachables = []\n iter_num = 0\n\n while len(reachables) == 0:\n iter_num += 1\n if iter_num > 100:\n end_idx = end_idx * 2\n\n # pick an entry point from the first 30%\n entry_point = random.choice(sorted_nodes[:end_idx])\n # print \"Source \", i\n # print \"candidates: \", sorted_nodes[:end_idx]\n # print \"entry point: \", entry_point\n # print \"all nodes: \", G.nodes()\n\n # pick a random point from the reachables\n reachables = nx.shortest_path(G, entry_point)\n del reachables[entry_point]\n #print \"reachables: \", reachables\n reachables = reachables.keys()\n\n exit_point = random.choice(reachables)\n #print \"exit_point: \", exit_point\n\n if degree_s[i]:\n G.add_edge(s, entry_point, weight=degree_s[i])\n G.add_edge(exit_point, t, weight=degree_s[i])\n else:\n # figure out the out_degree of entry point\n out_degree = np.sum(G[u][v]['weight'] for u,v in G.out_edges(entry_point))\n G.add_edge(s, entry_point, weight=out_degree)\n\n # figure out the int_degree of exit point\n in_degree = np.sum(G[u][v]['weight'] for u,v in G.in_edges(exit_point))\n G.add_edge(exit_point, t, weight=in_degree)\n\n edges = G.edges()\n for u, v in edges:\n par_num = int(G[u][v]['weight'])\n for i in range(par_num):\n self.add_edge(u, v)\n\n # set indices etc\n self.set_sources(srcs)\n self.set_destinations(dsts)\n self.set_indices()\n #print \"number of nodes: \" + str(self.number_of_nodes())\n #print \"number of edges: \" + str(self.number_of_edges())",
"def get_k_shortest_paths(env: RailEnv,\n source_position: Tuple[int, int],\n source_direction: int,\n target_position=Tuple[int, int],\n k: int = 1, debug=False) -> List[Tuple[Waypoint]]:\n\n # P: set of shortest paths from s to t\n # P =empty,\n shortest_paths: List[Tuple[Waypoint]] = []\n\n # countu: number of shortest paths found to node u\n # countu = 0, for all u in V\n count = {(r, c, d): 0 for r in range(env.height) for c in range(env.width) for d in range(4)}\n\n # B is a heap data structure containing paths\n # N.B. use OrderedSet to make result deterministic!\n heap: OrderedSet[Tuple[Waypoint]] = OrderedSet()\n\n # insert path Ps = {s} into B with cost 0\n heap.add((Waypoint(source_position, source_direction),))\n\n # while B is not empty and countt < K:\n while len(heap) > 0 and len(shortest_paths) < k:\n if debug:\n print(\"iteration heap={}, shortest_paths={}\".format(heap, shortest_paths))\n # – let Pu be the shortest cost path in B with cost C\n cost = np.inf\n pu = None\n for path in heap:\n if len(path) < cost:\n pu = path\n cost = len(path)\n u: Waypoint = pu[-1]\n if debug:\n print(\" looking at pu={}\".format(pu))\n\n # – B = B − {Pu }\n heap.remove(pu)\n # – countu = countu + 1\n\n urcd = (*u.position, u.direction)\n count[urcd] += 1\n\n # – if u = t then P = P U {Pu}\n if u.position == target_position:\n if debug:\n print(\" found of length {} {}\".format(len(pu), pu))\n shortest_paths.append(pu)\n\n # – if countu ≤ K then\n # CAVEAT: do not allow for loopy paths\n elif count[urcd] <= k:\n possible_transitions = env.rail.get_transitions(*urcd)\n if debug:\n print(\" looking at neighbors of u={}, transitions are {}\".format(u, possible_transitions))\n # for each vertex v adjacent to u:\n for new_direction in range(4):\n if debug:\n print(\" looking at new_direction={}\".format(new_direction))\n if possible_transitions[new_direction]:\n new_position = get_new_position(u.position, new_direction)\n if debug:\n print(\" looking at neighbor v={}\".format((*new_position, new_direction)))\n\n v = Waypoint(position=new_position, direction=new_direction)\n # CAVEAT: do not allow for loopy paths\n if v in pu:\n continue\n\n # – let Pv be a new path with cost C + w(u, v) formed by concatenating edge (u, v) to path Pu\n pv = pu + (v,)\n # – insert Pv into B\n heap.add(pv)\n\n # return P\n return shortest_paths",
"def stochastic_event_set(\n sources, source_site_filter=filters.source_site_noop_filter):\n for source, s_sites in source_site_filter(sources):\n try:\n for rupture in source.iter_ruptures():\n for i in range(rupture.sample_number_of_occurrences()):\n yield rupture\n except Exception as err:\n etype, err, tb = sys.exc_info()\n msg = 'An error occurred with source id=%s. Error: %s'\n msg %= (source.source_id, str(err))\n raise_(etype, msg, tb)",
"def ford_fulkerson_algorithm(graph: np.ndarray, source: int, sink: int) -> np.ndarray:\r\n\r\n residual_graph = copy.deepcopy(graph)\r\n row = len(residual_graph)\r\n parent = [-1] * row\r\n max_flow = 0\r\n\r\n if source == sink or sink < 0 or source < 0 or source >= row or sink >= row:\r\n raise WrongInputException('Wrong input source/sink vertice(s)')\r\n\r\n while bfs(residual_graph, row, source, sink, parent):\r\n\r\n path_flow = float(\"Inf\")\r\n s = sink\r\n while s != source:\r\n path_flow = min(path_flow, residual_graph[parent[s]][s])\r\n s = parent[s]\r\n\r\n max_flow += path_flow\r\n\r\n v = sink\r\n while v != source:\r\n u = parent[v]\r\n residual_graph[u][v] -= path_flow\r\n residual_graph[v][u] += path_flow\r\n v = parent[v]\r\n print(\"Max flow: %d\" % max_flow)\r\n\r\n return residual_graph",
"def run_random_annealing(tsp_file, T, scheme, N_sim, max_chain_length=100000, c=.95):\n # create empty lists for to be stored values\n best_routes, costs, cost_lists = [], [], []\n\n adjacency_matrix = make_matrix(tsp_file)\n\n for _ in range(N_sim):\n # generate random initial route\n x = list(range(len(adjacency_matrix)))\n init_route = random.sample(x,len(x))\n \n # find best route with SA algorithm\n best_route, cost_list = tsp_annealing_random(T, scheme, init_route, adjacency_matrix, max_chain_length, c)\n\n # append all values from simulation to lists\n costs.append(calculate_cost(best_route,adjacency_matrix)[1])\n best_routes.append(best_route)\n cost_lists.append(cost_list)\n\n return best_routes, costs, cost_lists",
"def k_shortest_paths(G, source, target, k=1, weight='weight'):\n\tif source == target:\n\t\treturn ([0], [[source]]) \n\t \n\tlength, path = nx.single_source_dijkstra(G, source, target, weight=weight)\n\tif target not in length:\n\t\traise nx.NetworkXNoPath(\"node %s not reachable from %s\" % (source, target))\n\t\t\n\tlengths = [length[target]]\n\tpaths = [path[target]]\n\tc = count()\t\t\n\tB = []\t\t\t\t\t\t\n\tG_original = G.copy()\t\n\t\n\tfor i in range(1, k):\n\t\tfor j in range(len(paths[-1]) - 1):\t\t\t\n\t\t\tspur_node = paths[-1][j]\n\t\t\troot_path = paths[-1][:j + 1]\n\t\t\t\n\t\t\tedges_removed = []\n\t\t\tfor c_path in paths:\n\t\t\t\tif len(c_path) > j and root_path == c_path[:j + 1]:\n\t\t\t\t\tu = c_path[j]\n\t\t\t\t\tv = c_path[j + 1]\n\t\t\t\t\tif G.has_edge(u, v):\n\t\t\t\t\t\tedge_attr = G.edge[u][v]\n\t\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\n\t\t\tfor n in range(len(root_path) - 1):\n\t\t\t\tnode = root_path[n]\n\t\t\t\t# out-edges\n\t\t\t\tfor u, v, edge_attr in G.copy().edges_iter(node, data=True):\n\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\t\n\t\t\t\tif G.is_directed():\n\t\t\t\t\t# in-edges\n\t\t\t\t\tfor u, v, edge_attr in G.in_edges_iter(node, data=True):\n\t\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\n\t\t\tspur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, target, weight=weight)\t\t\t\n\t\t\tif target in spur_path and spur_path[target]:\n\t\t\t\ttotal_path = root_path[:-1] + spur_path[target]\n\t\t\t\ttotal_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]\t\t\t\t\n\t\t\t\theappush(B, (total_path_length, next(c), total_path))\n\t\t\t\t\n\t\t\tfor e in edges_removed:\n\t\t\t\tu, v, edge_attr = e\n\t\t\t\tG.add_edge(u, v, edge_attr)\n\t\t\t\t\t \n\t\tif B:\n\t\t\t(l, _, p) = heappop(B)\n\t\t\tlengths.append(l)\n\t\t\tpaths.append(p)\n\t\telse:\n\t\t\tbreak\n\t\n\treturn (lengths, paths)",
"def k_shortest_paths(G, source, target, k=1, weight='weight'):\n\tif source == target:\n\t\treturn ([0], [[source]]) \n\t \n\tlength, path = nx.single_source_dijkstra(G, source, target, weight=weight)\n\tif target not in length:\n\t\traise nx.NetworkXNoPath(\"node %s not reachable from %s\" % (source, target))\n\t\t\n\tlengths = [length[target]]\n\tpaths = [path[target]]\n\tc = count()\t\t\n\tB = []\t\t\t\t\t\t\n\tG_original = G.copy()\t\n\t\n\tfor i in range(1, k):\n\t\tfor j in range(len(paths[-1]) - 1):\t\t\t\n\t\t\tspur_node = paths[-1][j]\n\t\t\troot_path = paths[-1][:j + 1]\n\t\t\t\n\t\t\tedges_removed = []\n\t\t\tfor c_path in paths:\n\t\t\t\tif len(c_path) > j and root_path == c_path[:j + 1]:\n\t\t\t\t\tu = c_path[j]\n\t\t\t\t\tv = c_path[j + 1]\n\t\t\t\t\tif G.has_edge(u, v):\n\t\t\t\t\t\tedge_attr = G.edge[u][v]\n\t\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\n\t\t\tfor n in range(len(root_path) - 1):\n\t\t\t\tnode = root_path[n]\n\t\t\t\t# out-edges\n\t\t\t\tfor u, v, edge_attr in G.copy().edges_iter(node, data=True):\n\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\t\n\t\t\t\tif G.is_directed():\n\t\t\t\t\t# in-edges\n\t\t\t\t\tfor u, v, edge_attr in G.in_edges_iter(node, data=True):\n\t\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\n\t\t\tspur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, target, weight=weight)\t\t\t\n\t\t\tif target in spur_path and spur_path[target]:\n\t\t\t\ttotal_path = root_path[:-1] + spur_path[target]\n\t\t\t\ttotal_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]\t\t\t\t\n\t\t\t\theappush(B, (total_path_length, next(c), total_path))\n\t\t\t\t\n\t\t\tfor e in edges_removed:\n\t\t\t\tu, v, edge_attr = e\n\t\t\t\tG.add_edge(u, v, edge_attr)\n\t\t\t\t\t \n\t\tif B:\n\t\t\t(l, _, p) = heappop(B)\t\t\n\t\t\tlengths.append(l)\n\t\t\tpaths.append(p)\n\t\telse:\n\t\t\tbreak\n\t\n\treturn (lengths, paths)",
"def _select_destination(self):\n # Ideally this should do something clever based on the start location\n # ie known trips. But for now, it will pick randomly!\n station_dict = self.network.station_dict\n\n stations = list(station_dict.keys())\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"801\")]\n #stations = [x for x in stations if isinstance(x, int) or x.startswith(\"80139\")]\n weights = [station_dict[x].in_popularity for x in stations]\n\n # pick using the given weight distributions\n self.dest = random.choices(stations, weights=weights)[0]\n\n return",
"def possible(self):\n return [tuple(path) for path in nx.all_shortest_paths(self._gpm.Graph, source=self.source, target=self.target)]",
"def test_sources():\n g = Graph(from_list=[\n (1, 3, 1),\n (2, 4, 1),\n (2, 5, 1),\n (3, 5, 1),\n (4, 6, 1),\n (5, 6, 1),\n ])\n g.add_node(7)\n s = g.sources(5)\n e = {1, 2, 3}\n assert s == e\n\n s2 = g.sources(1)\n e2 = set()\n assert s2 == e2, s2\n\n s3 = g.sources(6)\n e3 = {1, 2, 3, 4, 5}\n assert s3 == e3\n\n s4 = g.sources(7)\n e4 = set()\n assert s4 == e4",
"def test_heuristic_first_steps(self):\n graph = {n: set(self.deterministic_graph[n]) - set([n])\n for n in self.deterministic_graph}\n deg_heuristic = MinDegreeHeuristic(graph)\n elim_node = deg_heuristic.best_node(graph)\n print(\"Graph {}:\".format(graph))\n steps = []\n\n while elim_node is not None:\n print(\"Removing {}:\".format(elim_node))\n steps.append(elim_node)\n nbrs = graph[elim_node]\n\n for u, v in itertools.permutations(nbrs, 2):\n if v not in graph[u]:\n graph[u].add(v)\n\n for u in graph:\n if elim_node in graph[u]:\n graph[u].remove(elim_node)\n\n del graph[elim_node]\n print(\"Graph {}:\".format(graph))\n elim_node = deg_heuristic.best_node(graph)\n\n # check only the first 5 elements for equality\n assert_equals(steps[:5], [0, 1, 2, 3, 4])",
"def get_routing_solution(self):\n G = self.base_digraph\n s1 = self.sources[0]\n s2 = self.sources[1]\n t1 = self.destinations[0]\n t2 = self.destinations[1]\n\n try:\n m = Model('routing')\n m.setParam('OutputFlag', False)\n\n # variables,\n # We have one variable per edge per session\n # e is the dict of dict for the variables\n e = {}\n r = {}\n for i in [1,2]:\n e[i] = {}\n r[i] = m.addVar()\n for u,v in G.edges():\n e[i][u,v] = m.addVar(lb=0)\n\n m.update()\n\n obj = quicksum(r.values())\n m.setObjective(obj, GRB.MAXIMIZE)\n\n # constraints\n # 1. conservations of flow at all intermediate nodes\n # 2. capacity constraints for each edge\n\n for u,v in G.edges():\n m.addConstr(e[1][u,v] + e[2][u,v] <= G[u][v]['capacity'])\n\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s2)) == r[2])\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s2)) == 0)\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s1)) == 0)\n m.addConstr(quicksum(e[1][u,v] for u,v in G.in_edges(t1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.in_edges(t2)) == r[2])\n\n for n in G.nodes():\n if n not in [s1, s2, t1, t2]:\n for i in [1, 2]:\n inflow = quicksum(e[i][u,v] for u,v in G.in_edges(n))\n outflow = quicksum(e[i][u,v] for u,v in G.out_edges(n))\n m.addConstr(inflow == outflow)\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n for u, v in G.edges():\n G[u][v]['Routing'] = {}\n G[u][v]['Routing'][1] = e[1][u,v].x\n G[u][v]['Routing'][2] = e[2][u,v].x\n return (m.objVal, r[1].x, r[2].x)\n else:\n # something went wrong...err...\n print \"Something was wrong, no optimal solution obtained\"\n return None, None, None\n\n except GurobiError:\n Print ('Error Report from Gurobi')",
"def sampled_clique(clusters,strategy):\n G = nx.Graph()\n sample = []\n #Sample 'size' nodes from a single cluster\n if strategy == \"rand\":\n size = len(clusters)\n while len(sample) < size:\n cluster = random.choice(clusters)\n if len(cluster) >= size:\n sample = random.sample(cluster,size)\n #Sample 1 choice from each cluster\n elif strategy == \"optim\":\n for _,cluster in clusters.items():\n if len(cluster) > 0:\n sample.append(random.choice(cluster))\n for n1 in sample:\n for n2 in sample:\n if n1 != n2:\n G.add_edge(n1,n2)\n return G",
"def random_partition_graph(groups, p_in, p_out, seed=None):\r\n\r\n if p_in > 1 or p_in < 0:\r\n raise errorhandler.ErrorHandler(\"p_in must be in [0,1]\")\r\n\r\n if p_out > 1 or p_out < 0:\r\n raise errorhandler.ErrorHandler(\"p_out must be in [0,1]\")\r\n\r\n size = sum(groups)\r\n g = graph.Graph(size, is_partition=True)\r\n\r\n next_group = {}\r\n start = 0\r\n group_index = 0\r\n for n in groups: # connect nodes inside a group\r\n edges = ((u + start, v + start) for u, v in fast_random_graph(n, p_in).edges)\r\n g.add_edges(edges)\r\n g.partition.append(set(range(start, start+n)))\r\n next_group.update(dict.fromkeys(range(start, start + n), start + n))\r\n group_index += 1\r\n start += n\r\n\r\n # connect nodes between groups\r\n if p_out == 0:\r\n return g\r\n if p_out == 1:\r\n for n in next_group:\r\n targets = range(next_group[n], len(g))\r\n g.add_edges(zip([n] * len(targets), targets))\r\n return g\r\n\r\n # using method similar to fast_random_graph\r\n lp = math.log(1.0 - p_out)\r\n n = len(g)\r\n\r\n for u in range(n - 1):\r\n v = next_group[u]\r\n while v < n:\r\n lr = math.log(1.0 - random.random())\r\n v += int(lr / lp)\r\n if v < n:\r\n g.add_edge(u, v)\r\n v += 1\r\n\r\n return g",
"def k_shortest_paths(\n self,\n G,\n source,\n target,\n k=1,\n weight='weight',\n ):\n\n if source == target:\n return ([0], [[source]])\n\n (length, path) = nx.single_source_dijkstra(G, source, target,\n weight=weight)\n if target not in length:\n raise nx.NetworkXNoPath('node %s not reachable from %s' % (source,\n target))\n\n lengths = [length[target]]\n paths = [path[target]]\n c = count()\n B = []\n\n # Is deep copy really required?\n # Fails due to embedded Ctype objects which can not be pickled\n # # G_original = G.copy()\n # Swapping with shallow copy...will it work?\n\n G_original = G\n if nx.is_directed(G_original):\n G = nx.DiGraph(G_original)\n else:\n G = nx.Graph(G_original)\n\n ######################################\n #TODO: wrap this up somehow\n print ''\n print term.move_up + term.move_up\n ######################################\n print 'getting K:{} paths...'.format(k),\n for i in range(1, k):\n with term.location():\n print i\n for j in range(len(paths[-1]) - 1):\n spur_node = paths[-1][j]\n root_path = (paths[-1])[:j + 1]\n\n edges_removed = []\n for c_path in paths:\n if len(c_path) > j and root_path == c_path[:j + 1]:\n u = c_path[j]\n v = c_path[j + 1]\n if G.has_edge(u, v):\n edge_attr = G.edge[u][v]\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n for n in range(len(root_path) - 1):\n node = root_path[n]\n\n # out-edges\n\n for (u, v, edge_attr) in G.edges_iter(node, data=True):\n\n # print 'lala1: {} -> {}'.format(u,v)\n\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n if G.is_directed():\n\n # in-edges\n\n for (u, v, edge_attr) in G.in_edges_iter(node,\n data=True):\n\n # print 'lala2: {} -> {}'.format(u,v)\n\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n (spur_path_length, spur_path) = nx.single_source_dijkstra(G,\n spur_node, target, weight=weight)\n if target in spur_path and spur_path[target]:\n total_path = root_path[:-1] + spur_path[target]\n total_path_length = self.get_path_length(G_original,\n root_path, weight) + spur_path_length[target]\n heappush(B, (total_path_length, next(c), total_path))\n\n for e in edges_removed:\n (u, v, edge_attr) = e\n G.add_edge(u, v, edge_attr)\n\n if B:\n (l, _, p) = heappop(B)\n lengths.append(l)\n paths.append(p)\n else:\n break\n\n return (lengths, paths)",
"def search_shortest_paths(\n self,\n src_nodes: list[TN],\n dst_node: TN,\n operation_src: str,\n operation_dest: str,\n domain: str,\n limit_dest_schemes: list[str],\n *,\n session: \"Session\",\n ) -> dict[TN, list[dict[str, Any]]]:\n\n for rse in itertools.chain(src_nodes, [dst_node], self._multihop_nodes):\n rse.ensure_loaded(load_attributes=True, load_info=True, session=session)\n self.ensure_edges_loaded(session=session)\n\n if self._multihop_nodes:\n # Filter out island source RSEs\n nodes_to_find = {node for node in src_nodes if node.out_edges}\n else:\n nodes_to_find = set(src_nodes)\n\n class _NodeStateProvider:\n _hop_penalty = self._hop_penalty\n\n def __init__(self, node: TN):\n self.enabled: bool = True\n self.cost: _Number = 0\n if node != dst_node:\n try:\n self.cost = int(node.attributes.get('hop_penalty', self._hop_penalty))\n except ValueError:\n self.cost = self._hop_penalty\n\n scheme_missmatch_found = {}\n\n class _EdgeStateProvider:\n def __init__(self, edge: TE):\n self.edge = edge\n self.chosen_scheme = {}\n\n @property\n def cost(self) -> _Number:\n return self.edge.cost\n\n @property\n def enabled(self) -> bool:\n try:\n matching_scheme = rsemgr.find_matching_scheme(\n rse_settings_src=self.edge.src_node.info,\n rse_settings_dest=self.edge.dst_node.info,\n operation_src=operation_src,\n operation_dest=operation_dest,\n domain=domain,\n scheme=limit_dest_schemes if self.edge.dst_node == dst_node and limit_dest_schemes else None,\n )\n self.chosen_scheme = {\n 'source_scheme': matching_scheme[1],\n 'dest_scheme': matching_scheme[0],\n 'source_scheme_priority': matching_scheme[3],\n 'dest_scheme_priority': matching_scheme[2],\n }\n return True\n except RSEProtocolNotSupported:\n scheme_missmatch_found[self.edge.src_node] = True\n return False\n\n paths = {dst_node: []}\n for node, distance, _, edge_to_next_hop, edge_state in self.dijkstra_spf(dst_node=dst_node,\n nodes_to_find=nodes_to_find,\n node_state_provider=_NodeStateProvider,\n edge_state_provider=_EdgeStateProvider):\n nh_node = edge_to_next_hop.dst_node\n edge_state = cast(_EdgeStateProvider, edge_state)\n hop = {\n 'source_rse': node,\n 'dest_rse': nh_node,\n 'hop_distance': edge_state.cost,\n 'cumulated_distance': distance,\n **edge_state.chosen_scheme,\n }\n paths[node] = [hop] + paths[nh_node]\n\n nodes_to_find.discard(node)\n if not nodes_to_find:\n # We found the shortest paths to all desired nodes\n break\n\n result = {}\n for node in src_nodes:\n path = paths.get(node)\n if path is not None:\n result[node] = path\n elif scheme_missmatch_found.get(node):\n result[node] = []\n return result",
"def _dfs(\n self, kg: KG, entity: Vertex, is_reverse: bool = False\n ) -> List[Walk]:\n self.sampler.visited = set()\n walks: List[Walk] = []\n assert self.max_walks is not None\n\n rng = np.random.RandomState(self.random_state)\n\n while len(walks) < self.max_walks:\n sub_walk: Walk = (entity,)\n d = 1\n while d // 2 < self.max_depth:\n pred_obj = self.sampler.sample_hop(\n kg, sub_walk, d // 2 == self.max_depth - 1, is_reverse\n )\n if pred_obj is None:\n break\n\n if is_reverse:\n if (\n pred_obj[0] in self.communities\n and rng.random() < self.hop_prob\n ):\n community_nodes = self.labels_per_community[\n self.communities[pred_obj[0]]\n ]\n sub_walk = (\n pred_obj[1],\n rng.choice(community_nodes),\n ) + sub_walk\n else:\n sub_walk = (pred_obj[1], pred_obj[0]) + sub_walk\n else:\n if (\n pred_obj[1] in self.communities\n and rng.random() < self.hop_prob\n ):\n community_nodes = self.labels_per_community[\n self.communities[pred_obj[1]]\n ]\n sub_walk += (\n pred_obj[0],\n rng.choice(community_nodes),\n )\n else:\n sub_walk += (pred_obj[0], pred_obj[1])\n d = len(sub_walk) - 1\n walks.append(sub_walk)\n return list(walks)",
"def test_heuristic_first_steps(self):\n graph = {n: set(self.deterministic_graph[n]) - set([n])\n for n in self.deterministic_graph}\n print(\"Graph {}:\".format(graph))\n elim_node = min_fill_in_heuristic(graph)\n steps = []\n\n while elim_node is not None:\n print(\"Removing {}:\".format(elim_node))\n steps.append(elim_node)\n nbrs = graph[elim_node]\n\n for u, v in itertools.permutations(nbrs, 2):\n if v not in graph[u]:\n graph[u].add(v)\n\n for u in graph:\n if elim_node in graph[u]:\n graph[u].remove(elim_node)\n\n del graph[elim_node]\n print(\"Graph {}:\".format(graph))\n elim_node = min_fill_in_heuristic(graph)\n\n # check only the first 2 elements for equality\n assert_equals(steps[:2], [6, 5])",
"def get_all_pairs(G):\n # list all (start,dest) pairs between which the route must be computed\n pairs_list = [(start, dest) for dest in G.nodes for start in G.nodes]\n\n # shuffle all elements in-place\n random.shuffle(pairs_list)\n\n # generate a set from the list\n pairs_set = set(pairs_list)\n\n return pairs_list, pairs_set",
"def multipleTravellingSalesmen2(self, iterations=None):\n if not iterations:\n iterations = self.mTSPIterations\n\n paths, self.totaldistanceMTSP = self.greedyMultiplePaths(self.ships)\n\n # Run single travelling salesman on each of the ships\n paths = [self.singleTravellingSalesman2(path, iterations=5000) for path in paths]\n\n # IF TWO OR MORE SHIPS!!!\n # Point exchange between ships and rerun tsp on each ship\n if len(self.ships) > 1:\n for _ in range(iterations):\n twopathindices = random.sample(xrange(len(paths)), 2)\n if min([len(path) for path in paths]) > 1:\n newtwopaths = self.swapTwoPointsBetweenPaths([paths[index] for index in twopathindices])\n newtwopaths = [self.singleTravellingSalesman2(path, iterations=100) for path in newtwopaths]\n if self.totalPathDistance(newtwopaths) < self.totalPathDistance([paths[index] for index in twopathindices]):\n for i, index in enumerate(twopathindices):\n paths[index] = newtwopaths[i]\n\n return [path[::-1] for path in paths]\n\n # Might be a simple tsp whereby the new points search for the twp closest points and\n # inserts between them on the path"
]
| [
"0.6073624",
"0.5772932",
"0.56998974",
"0.5527996",
"0.54848486",
"0.5443951",
"0.5443951",
"0.53789777",
"0.53648436",
"0.53508836",
"0.5280728",
"0.5269755",
"0.5099018",
"0.50866324",
"0.50755084",
"0.50711346",
"0.50711346",
"0.50291127",
"0.5007661",
"0.4996117",
"0.49696457",
"0.49682721",
"0.49646184",
"0.49446902",
"0.49350876",
"0.4915417",
"0.4906965",
"0.4900397",
"0.4894073",
"0.4883474"
]
| 0.62126094 | 0 |
Returns a list of flows with randomly selected sources and destinations that will saturate the network (i.e. a flow will be admitted provided that it will not cause the utilization of any link in the network to exceed 1). Flows are split across the K least utilized paths connecting the source node to the destination node (i.e. this is a greedy algorithm). | def compute_greedy_flow_allocations( target_graph
, flow_selection_fn
, seed_number=DEFAULT_SEED_NUMBER):
flow_allocation_seed_number = seed_number
np.random.seed(flow_allocation_seed_number)
link_utilization = {tuple(sorted(link_tuple)): 0.0 for link_tuple in target_graph.edges}
flows = []
while True:
capacity_was_exceeded = False
source_node, destination_node = flow_selection_fn(target_graph.nodes)
flow_tx_rate = np.random.uniform(FLOW_TX_RATE_LOWER_BOUND, FLOW_TX_RATE_UPPER_BOUND)
connecting_paths = list(node_disjoint_paths(target_graph, source_node, destination_node))
disjoint_path_count = len(connecting_paths)
flow_rate_per_subpath = flow_tx_rate / disjoint_path_count
for path in [nx.utils.pairwise(p_i) for p_i in connecting_paths]:
for u, v in [tuple(sorted(t_i)) for t_i in path]:
if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:
capacity_was_exceeded = True
break
link_utilization[u, v] += flow_rate_per_subpath
if capacity_was_exceeded:
break
if capacity_was_exceeded:
break
the_flow = Flow( source_node = source_node
, destination_node = destination_node
, flow_tx_rate = flow_tx_rate
, paths = connecting_paths
, splitting_ratio = [1.0/disjoint_path_count]*disjoint_path_count
)
flows.append(the_flow)
return flows, link_utilization | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_path_hopping_flow_allocations(target_graph, K=3):\n flow_allocation_seed_number = 0xCAFE_BABE\n np.random.seed(flow_allocation_seed_number)\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n link_utilization = {(u, v): 0.0 for u, v in target_graph.edges}\n node_capacity = {u: 0.0 for u in target_graph.nodes}\n flows = []\n while True:\n source_node, destination_node = flow_selection_fn(target_graph.nodes, 2, replace=False)\n print(source_node, destination_node)\n\n shortest_paths = sorted(nx.all_simple_paths(target_graph, source_node, destination_node,\n cutoff=3),\n key=lambda p: len(p))\n k_shortest_paths = list(itertools.islice(shortest_paths, K))\n\n # flow_tx_rate = np.random.uniform() * 10\n flow_tx_rate = 1.0\n # if node_capacity[source_node] + flow_tx_rate > LINK_CAPACITY:\n # break\n node_capacity[source_node] += flow_tx_rate\n capacity_was_exceeded = False \n for path in [nx.utils.pairwise(p_i) for p_i in k_shortest_paths]:\n for u, v in [sorted(h_i) for h_i in path]:\n flow_rate_per_subpath = flow_tx_rate / K\n if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:\n capacity_was_exceeded = True\n break\n link_utilization[u, v] += flow_rate_per_subpath\n if capacity_was_exceeded:\n break\n\n if capacity_was_exceeded:\n break\n\n the_flow = Flow( source_node = source_node\n , destination_node = destination_node\n , flow_tx_rate = flow_tx_rate\n , paths = k_shortest_paths\n , splitting_ratio = [1.0/K]*K\n )\n flows.append(the_flow)\n return flows, link_utilization",
"def compute_equal_flow_allocations(target_graph, K=3):\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n flow_allocation_seed_number = 0xDEAD_BEEF\n np.random.seed(flow_allocation_seed_number)\n flows = []\n for node in target_graph.nodes:\n possible_destination_nodes = set(target_graph.nodes) - set([node])\n [destination_node] = np.random.choice(list(possible_destination_nodes), 1, replace=False)\n # shortest_paths = all_shortest_paths(target_graph, node, destination_node.item())\n shortest_paths = sorted(nx.all_simple_paths(target_graph, node, destination_node.item(),\n cutoff=3),\n key=lambda p: len(p))\n k_shortest_paths = list(itertools.islice(shortest_paths, K))\n the_flow = Flow( source_node = node\n , destination_node = destination_node.item()\n , flow_tx_rate = 10.0\n , paths = k_shortest_paths\n , splitting_ratio = [1/K]*K\n )\n flows.append(the_flow)\n \n return flow_allocation_seed_number, flows",
"def bfsSample(G, source=None, k = 50):\n\twarn(\"networkit.sampling.bfsSample is deprecated, will be removed in future updates.\")\n\tif not source:\n\t\tsource = GraphTools.randomNode(G)\n\tn = G.numberOfNodes()\n\tvisited = [False]*n\n\tQ = [source]\n\tclosest = set([source])\n\tglobal found\n\tfound = 0\n\twhile len(Q) > 0 and found < k:\n\t\tu = Q.pop(0)\n\t\tdef enqueue(u,v,weight, eid):\n\t\t\tglobal found\n\t\t\tif not visited[v] and found < k:\n\t\t\t\tfound += 1\n\t\t\t\tvisited[v] = True\n\t\t\t\tQ.append(v)\n\t\t\t\tclosest.add(v)\n\t\tG.forEdgesOf(u, enqueue)\n\tprint(\"found {0} nodes\".format(len(closest)))\n\tG1 = GraphTools.subgraphFromNodes(G, closest)\n\treturn G1",
"def compute_unequal_flow_allocations(target_graph, K=3):\n\n # id_to_dpid = topo_mapper.get_and_validate_onos_topo_x(target_graph)\n flow_allocation_seed_number = 0xDEAD_BEEF\n np.random.seed(flow_allocation_seed_number)\n flows = []\n link_utilization = {}\n for node in target_graph.nodes:\n possible_destination_nodes = set(target_graph.nodes) - {node}\n destination_node = np.random.choice(list(possible_destination_nodes), 1, \n replace=False).item()\n shortest_path = nx.shortest_path(target_graph, node, destination_node)\n the_flow = Flow( source_node = node\n , destination_node = destination_node\n , flow_tx_rate = 10.0\n , paths = [shortest_path]\n , splitting_ratio = [1.0]\n )\n flows.append(the_flow)\n\n return flow_allocation_seed_number, flows",
"def generate_graph(size, number_of_clusters, minimal_size):\n base_list = list(range(size))\n result_list = []\n random.shuffle(base_list)\n for i in range(number_of_clusters - 1):\n size = random.randint(minimal_size, len(base_list) - (number_of_clusters - i - 1) * minimal_size)\n cluster = []\n for n in range(size):\n actual = random.choice(base_list)\n base_list.remove(actual)\n cluster.append(actual)\n result_list.append(strongly_connect(cluster))\n result_list.append(strongly_connect(base_list))\n\n while len(result_list) < 5:\n result_list.append([])\n\n print(sorted([len(i) for i in result_list], reverse=True)[:5])\n\n return weak_connect_graph(result_list)",
"def get_pathologic_covering_routes(n_pl, n_target, attacker_strategy, target_values):\n # computes the coefficient used by the greedy oracle to choose routes\n targets_coeff = np.transpose(np.multiply(attacker_strategy, target_values))\n\n # randomly selects the player for which the non optimal choice will be made\n wrong_pl = randint(1, n_pl)\n\n # generate the non optimal route randomly\n n_covered_targets = randint(n_pl,n_target-1)\n non_opt_action = np.zeros(n_target)\n for i in range(0, n_covered_targets):\n random_covered_target = randint(0, n_target-1)\n non_opt_action[random_covered_target] = 1\n\n # compute the value of the non optimal route\n non_opt_val = get_value_single_route(non_opt_action, targets_coeff)\n\n # generate routes that have, as a single, values smaller than the best greedy route but taken togher perform\n # at least as well. [[0,1,...],[...],...] a[r][t]=1 iff t is covered by r.\n # The returned list should have n_pl - 1 routes\n opt_routes = get_opt_routes(n_pl, non_opt_action)\n\n I={}\n for pl in range(1, n_pl+1):\n\n n_r = randint(0, MAX_ROUTES)\n temp = lil_matrix((n_r+1, n_target), dtype='int8')\n\n if pl == wrong_pl:\n # put the non opt route in the bucket\n for t in non_opt_action.nonzero():\n temp[0,t] = 1\n else:\n for t in opt_routes.pop().nonzero():\n temp[0,t] = 1\n\n # generate other random routes with single value less than the non_opt_value\n for r in range(1, n_r):\n new_route = get_r_limited_val(non_opt_val, targets_coeff)\n\n for t in new_route.nonzero():\n temp[r,t] = 1\n\n I[pl] = temp.tocsr()\n\n return I",
"def multipleTravellingSalesmen(self, iterations=None):\n if not iterations:\n iterations = self.mTSPIterations\n\n # Randomly select points drawn from unvisited and distribute equally among the ships\n paths = self.partition(self.unvisited.keys(), len(self.ships))\n [path.append(ship) for path, ship in zip(paths, self.ships)]\n\n # Run single travelling salesman on each of the ships\n paths = [self.singleTravellingSalesman(path, iterations=0) for path in paths]\n\n\n # IF TWO OR MORE SHIPS!!!\n # Point exchange between ships and rerun tsp on each ship\n if len(self.ships) > 1:\n for _ in range(iterations):\n twopathindices = random.sample(xrange(len(paths)), 2)\n newtwopaths = self.swapTwoPointsBetweenPaths([paths[index] for index in twopathindices])\n newtwopaths = [self.singleTravellingSalesman(path, iterations=max(len(path), 100)) for path in\n newtwopaths]\n if self.totalPathDistance(newtwopaths) < self.totalPathDistance(\n [paths[index] for index in twopathindices]):\n for i, index in enumerate(twopathindices):\n paths[index] = newtwopaths[i]\n\n return paths",
"def get_paths_for_flow(F, s, f):\n links = [((u, v), split_ratio) \n for (flow_id, u, v), split_ratio in F.items() \n if flow_id == f and u == s and split_ratio > 0.001]\n return links",
"def get_paths_for_flow(F, s, f):\n links = [((u, v), split_ratio) \n for (flow_id, u, v), split_ratio in F.items() \n if flow_id == f and u == s and split_ratio > 0.001]\n return links",
"def set_random_session(self, G, degree_s):\n sorted_nodes = nx.topological_sort(G)\n num_nodes = G.number_of_nodes()\n\n # create sources and destinations of each of the sections\n # name the nodes to be the last 4 numbers\n srcs = [num_nodes, num_nodes + 1]\n dsts = [num_nodes + 2, num_nodes + 3]\n\n end_idx = int(0.3 * len(sorted_nodes))\n end_idx = max(end_idx, 2)\n for i in range(2):\n s = srcs[i]\n t = dsts[i]\n reachables = []\n iter_num = 0\n\n while len(reachables) == 0:\n iter_num += 1\n if iter_num > 100:\n end_idx = end_idx * 2\n\n # pick an entry point from the first 30%\n entry_point = random.choice(sorted_nodes[:end_idx])\n # print \"Source \", i\n # print \"candidates: \", sorted_nodes[:end_idx]\n # print \"entry point: \", entry_point\n # print \"all nodes: \", G.nodes()\n\n # pick a random point from the reachables\n reachables = nx.shortest_path(G, entry_point)\n del reachables[entry_point]\n #print \"reachables: \", reachables\n reachables = reachables.keys()\n\n exit_point = random.choice(reachables)\n #print \"exit_point: \", exit_point\n\n if degree_s[i]:\n G.add_edge(s, entry_point, weight=degree_s[i])\n G.add_edge(exit_point, t, weight=degree_s[i])\n else:\n # figure out the out_degree of entry point\n out_degree = np.sum(G[u][v]['weight'] for u,v in G.out_edges(entry_point))\n G.add_edge(s, entry_point, weight=out_degree)\n\n # figure out the int_degree of exit point\n in_degree = np.sum(G[u][v]['weight'] for u,v in G.in_edges(exit_point))\n G.add_edge(exit_point, t, weight=in_degree)\n\n edges = G.edges()\n for u, v in edges:\n par_num = int(G[u][v]['weight'])\n for i in range(par_num):\n self.add_edge(u, v)\n\n # set indices etc\n self.set_sources(srcs)\n self.set_destinations(dsts)\n self.set_indices()\n #print \"number of nodes: \" + str(self.number_of_nodes())\n #print \"number of edges: \" + str(self.number_of_edges())",
"def __generate_all_shortest_paths(self,cutoff = 10):\n if cutoff < 1:\n cutoff = 10\n self.__logger.info(\"cutoff value must be a positive integer. Set back to default value: 10\")\n\n all_pair_shortest_paths = nx.all_pairs_shortest_path(self.G, cutoff=cutoff)\n for item in all_pair_shortest_paths:\n from_node = item[0]\n paths = item[1]\n for destination,path in paths.items():\n yield (len(path),path)",
"def ford_fulkerson_algorithm(graph: np.ndarray, source: int, sink: int) -> np.ndarray:\r\n\r\n residual_graph = copy.deepcopy(graph)\r\n row = len(residual_graph)\r\n parent = [-1] * row\r\n max_flow = 0\r\n\r\n if source == sink or sink < 0 or source < 0 or source >= row or sink >= row:\r\n raise WrongInputException('Wrong input source/sink vertice(s)')\r\n\r\n while bfs(residual_graph, row, source, sink, parent):\r\n\r\n path_flow = float(\"Inf\")\r\n s = sink\r\n while s != source:\r\n path_flow = min(path_flow, residual_graph[parent[s]][s])\r\n s = parent[s]\r\n\r\n max_flow += path_flow\r\n\r\n v = sink\r\n while v != source:\r\n u = parent[v]\r\n residual_graph[u][v] -= path_flow\r\n residual_graph[v][u] += path_flow\r\n v = parent[v]\r\n print(\"Max flow: %d\" % max_flow)\r\n\r\n return residual_graph",
"def calc_per_flow_link_utilisation(self, flow: Tuple[int, int],\n demand: float,\n routing: np.ndarray) -> np.ndarray:\n edge_mapping = {edge: i for i, edge in\n enumerate(sorted(self.graph.edges))}\n\n link_utilisation = np.zeros(self.num_edges)\n node_flow = np.zeros(self.num_nodes) # the flow stored at a node\n node_flow[flow[0]] = demand\n\n to_explore = [flow[0]]\n while to_explore:\n current_node = to_explore.pop(0)\n current_flow = node_flow[current_node]\n\n # this is the flow destination node so we absorb all flow\n if current_node == flow[1]:\n node_flow[current_node] = 0.0\n continue\n\n # push the flow at this node over all edges\n for edge in self.graph.out_edges(current_node):\n edge_index = edge_mapping[edge]\n ratio = routing[edge_index]\n flow_to_send = ratio * current_flow\n # only send flow if greater than epsilon (so no 'infinite' loops)\n if flow_to_send > 1.e-8:\n node_flow[edge[1]] += ratio * current_flow\n # all important step, update our output\n link_utilisation[edge_index] += ratio * current_flow\n # have updated the dst so add it to the list of things to do\n to_explore.append(edge[1])\n # we've moved all the flow from this node now, so reset back to zero\n node_flow[current_node] = 0.0\n\n return link_utilisation",
"def run_random_annealing(tsp_file, T, scheme, N_sim, max_chain_length=100000, c=.95):\n # create empty lists for to be stored values\n best_routes, costs, cost_lists = [], [], []\n\n adjacency_matrix = make_matrix(tsp_file)\n\n for _ in range(N_sim):\n # generate random initial route\n x = list(range(len(adjacency_matrix)))\n init_route = random.sample(x,len(x))\n \n # find best route with SA algorithm\n best_route, cost_list = tsp_annealing_random(T, scheme, init_route, adjacency_matrix, max_chain_length, c)\n\n # append all values from simulation to lists\n costs.append(calculate_cost(best_route,adjacency_matrix)[1])\n best_routes.append(best_route)\n cost_lists.append(cost_list)\n\n return best_routes, costs, cost_lists",
"def stochastic_event_set(\n sources, source_site_filter=filters.source_site_noop_filter):\n for source, s_sites in source_site_filter(sources):\n try:\n for rupture in source.iter_ruptures():\n for i in range(rupture.sample_number_of_occurrences()):\n yield rupture\n except Exception as err:\n etype, err, tb = sys.exc_info()\n msg = 'An error occurred with source id=%s. Error: %s'\n msg %= (source.source_id, str(err))\n raise_(etype, msg, tb)",
"def sampled_clique(clusters,strategy):\n G = nx.Graph()\n sample = []\n #Sample 'size' nodes from a single cluster\n if strategy == \"rand\":\n size = len(clusters)\n while len(sample) < size:\n cluster = random.choice(clusters)\n if len(cluster) >= size:\n sample = random.sample(cluster,size)\n #Sample 1 choice from each cluster\n elif strategy == \"optim\":\n for _,cluster in clusters.items():\n if len(cluster) > 0:\n sample.append(random.choice(cluster))\n for n1 in sample:\n for n2 in sample:\n if n1 != n2:\n G.add_edge(n1,n2)\n return G",
"def get_k_shortest_paths(env: RailEnv,\n source_position: Tuple[int, int],\n source_direction: int,\n target_position=Tuple[int, int],\n k: int = 1, debug=False) -> List[Tuple[Waypoint]]:\n\n # P: set of shortest paths from s to t\n # P =empty,\n shortest_paths: List[Tuple[Waypoint]] = []\n\n # countu: number of shortest paths found to node u\n # countu = 0, for all u in V\n count = {(r, c, d): 0 for r in range(env.height) for c in range(env.width) for d in range(4)}\n\n # B is a heap data structure containing paths\n # N.B. use OrderedSet to make result deterministic!\n heap: OrderedSet[Tuple[Waypoint]] = OrderedSet()\n\n # insert path Ps = {s} into B with cost 0\n heap.add((Waypoint(source_position, source_direction),))\n\n # while B is not empty and countt < K:\n while len(heap) > 0 and len(shortest_paths) < k:\n if debug:\n print(\"iteration heap={}, shortest_paths={}\".format(heap, shortest_paths))\n # – let Pu be the shortest cost path in B with cost C\n cost = np.inf\n pu = None\n for path in heap:\n if len(path) < cost:\n pu = path\n cost = len(path)\n u: Waypoint = pu[-1]\n if debug:\n print(\" looking at pu={}\".format(pu))\n\n # – B = B − {Pu }\n heap.remove(pu)\n # – countu = countu + 1\n\n urcd = (*u.position, u.direction)\n count[urcd] += 1\n\n # – if u = t then P = P U {Pu}\n if u.position == target_position:\n if debug:\n print(\" found of length {} {}\".format(len(pu), pu))\n shortest_paths.append(pu)\n\n # – if countu ≤ K then\n # CAVEAT: do not allow for loopy paths\n elif count[urcd] <= k:\n possible_transitions = env.rail.get_transitions(*urcd)\n if debug:\n print(\" looking at neighbors of u={}, transitions are {}\".format(u, possible_transitions))\n # for each vertex v adjacent to u:\n for new_direction in range(4):\n if debug:\n print(\" looking at new_direction={}\".format(new_direction))\n if possible_transitions[new_direction]:\n new_position = get_new_position(u.position, new_direction)\n if debug:\n print(\" looking at neighbor v={}\".format((*new_position, new_direction)))\n\n v = Waypoint(position=new_position, direction=new_direction)\n # CAVEAT: do not allow for loopy paths\n if v in pu:\n continue\n\n # – let Pv be a new path with cost C + w(u, v) formed by concatenating edge (u, v) to path Pu\n pv = pu + (v,)\n # – insert Pv into B\n heap.add(pv)\n\n # return P\n return shortest_paths",
"def test_heuristic_first_steps(self):\n graph = {n: set(self.deterministic_graph[n]) - set([n])\n for n in self.deterministic_graph}\n deg_heuristic = MinDegreeHeuristic(graph)\n elim_node = deg_heuristic.best_node(graph)\n print(\"Graph {}:\".format(graph))\n steps = []\n\n while elim_node is not None:\n print(\"Removing {}:\".format(elim_node))\n steps.append(elim_node)\n nbrs = graph[elim_node]\n\n for u, v in itertools.permutations(nbrs, 2):\n if v not in graph[u]:\n graph[u].add(v)\n\n for u in graph:\n if elim_node in graph[u]:\n graph[u].remove(elim_node)\n\n del graph[elim_node]\n print(\"Graph {}:\".format(graph))\n elim_node = deg_heuristic.best_node(graph)\n\n # check only the first 5 elements for equality\n assert_equals(steps[:5], [0, 1, 2, 3, 4])",
"def _compute_flows(paths):\n assert isinstance(paths, list)\n\n # allocate memory\n flows = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 2],\n dtype=np.float32)\n\n # load all images\n pbar = ProgressBar(max_value=len(paths))\n for i in range(len(paths)-1):\n im1 = skimage.io.imread(paths[i])\n im2 = skimage.io.imread(paths[i+1])\n\n im1 = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)\n im2 = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)\n im1 = skimage.transform.resize(\n im1, [FLAGS.target_height, FLAGS.target_width], preserve_range=True,\n mode='constant', anti_aliasing=True)\n im2 = skimage.transform.resize(\n im2, [FLAGS.target_height, FLAGS.target_width], preserve_range=True,\n mode='constant', anti_aliasing=True)\n flow = cv2.calcOpticalFlowFarneback(\n im1, im2, flow=None, pyr_scale=0.5, levels=3, winsize=15,\n iterations=3, poly_n=5, poly_sigma=1.2, flags=0)\n\n # store images\n flows[i] = flow\n pbar.update(i)\n\n # Replicate the flow for last frame\n flows[-1] = flow\n return flows",
"def random_partition_graph(groups, p_in, p_out, seed=None):\r\n\r\n if p_in > 1 or p_in < 0:\r\n raise errorhandler.ErrorHandler(\"p_in must be in [0,1]\")\r\n\r\n if p_out > 1 or p_out < 0:\r\n raise errorhandler.ErrorHandler(\"p_out must be in [0,1]\")\r\n\r\n size = sum(groups)\r\n g = graph.Graph(size, is_partition=True)\r\n\r\n next_group = {}\r\n start = 0\r\n group_index = 0\r\n for n in groups: # connect nodes inside a group\r\n edges = ((u + start, v + start) for u, v in fast_random_graph(n, p_in).edges)\r\n g.add_edges(edges)\r\n g.partition.append(set(range(start, start+n)))\r\n next_group.update(dict.fromkeys(range(start, start + n), start + n))\r\n group_index += 1\r\n start += n\r\n\r\n # connect nodes between groups\r\n if p_out == 0:\r\n return g\r\n if p_out == 1:\r\n for n in next_group:\r\n targets = range(next_group[n], len(g))\r\n g.add_edges(zip([n] * len(targets), targets))\r\n return g\r\n\r\n # using method similar to fast_random_graph\r\n lp = math.log(1.0 - p_out)\r\n n = len(g)\r\n\r\n for u in range(n - 1):\r\n v = next_group[u]\r\n while v < n:\r\n lr = math.log(1.0 - random.random())\r\n v += int(lr / lp)\r\n if v < n:\r\n g.add_edge(u, v)\r\n v += 1\r\n\r\n return g",
"def get_routing_solution(self):\n G = self.base_digraph\n s1 = self.sources[0]\n s2 = self.sources[1]\n t1 = self.destinations[0]\n t2 = self.destinations[1]\n\n try:\n m = Model('routing')\n m.setParam('OutputFlag', False)\n\n # variables,\n # We have one variable per edge per session\n # e is the dict of dict for the variables\n e = {}\n r = {}\n for i in [1,2]:\n e[i] = {}\n r[i] = m.addVar()\n for u,v in G.edges():\n e[i][u,v] = m.addVar(lb=0)\n\n m.update()\n\n obj = quicksum(r.values())\n m.setObjective(obj, GRB.MAXIMIZE)\n\n # constraints\n # 1. conservations of flow at all intermediate nodes\n # 2. capacity constraints for each edge\n\n for u,v in G.edges():\n m.addConstr(e[1][u,v] + e[2][u,v] <= G[u][v]['capacity'])\n\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s2)) == r[2])\n m.addConstr(quicksum(e[1][u,v] for u,v in G.out_edges(s2)) == 0)\n m.addConstr(quicksum(e[2][u,v] for u,v in G.out_edges(s1)) == 0)\n m.addConstr(quicksum(e[1][u,v] for u,v in G.in_edges(t1)) == r[1])\n m.addConstr(quicksum(e[2][u,v] for u,v in G.in_edges(t2)) == r[2])\n\n for n in G.nodes():\n if n not in [s1, s2, t1, t2]:\n for i in [1, 2]:\n inflow = quicksum(e[i][u,v] for u,v in G.in_edges(n))\n outflow = quicksum(e[i][u,v] for u,v in G.out_edges(n))\n m.addConstr(inflow == outflow)\n\n m.optimize()\n\n if m.status == GRB.status.OPTIMAL:\n for u, v in G.edges():\n G[u][v]['Routing'] = {}\n G[u][v]['Routing'][1] = e[1][u,v].x\n G[u][v]['Routing'][2] = e[2][u,v].x\n return (m.objVal, r[1].x, r[2].x)\n else:\n # something went wrong...err...\n print \"Something was wrong, no optimal solution obtained\"\n return None, None, None\n\n except GurobiError:\n Print ('Error Report from Gurobi')",
"def shuffle(self) -> NoReturn:\n for edge_type in self.edge_types:\n for edge_class in range(self.edge_types[edge_type]):\n self.train_edges[edge_type][edge_class] = np.random.permutation(\n self.train_edges[edge_type][edge_class])\n\n self.freebatch_edge_types = {edge_type: list(range(edge_class))\n for edge_type, edge_class in self.edge_types.items()}\n self.batch_num = {edge_type: [0] * edge_class for edge_type, edge_class in\n self.edge_types.items()}\n self.took_all_edges = {edge_type: False for edge_type in self.edge_types}\n self.iter = 0",
"def get_random_depth_sample(n=8, depths=list(range(2,26,2)), num_samples=100):\n\n def get_states(start):\n frontier = [start]\n frontier_set = {start}\n explored = set()\n\n states = [False for _ in range(len(depths))]\n while not all(states):\n node = frontier.pop(0)\n frontier_set.remove(node)\n explored.add(node)\n\n children = node.get_children()\n\n # It's necessary to shuffle children to get a truly random sample; otherwise, the first child (always\n # produced from the parent by the same action) produced at a certain depth will always be selected,\n # and children produced by other actions will never be selected\n shuffle(children)\n\n for child in children:\n if child not in frontier_set and child not in explored:\n frontier_set.add(child)\n frontier.append(child)\n child.path_cost = node.path_cost+1\n index = depths.index(child.path_cost) if child.path_cost in depths else None\n if index is not None and not states[index]:\n states[index] = {'start': start.sequence, 'end': child.sequence}\n\n return states\n\n depth_sample = [[] for depth in range(len(depths))]\n\n for _ in range(num_samples):\n start = list(range(1,n+2))\n shuffle(start)\n start = PuzzleState(start, path_cost=0)\n\n states = get_states(start)\n print('\\rSet ' + str(_+1) + ' of ' + str(num_samples) + ' complete', end='', flush=True)\n list(map(list.append, depth_sample, states))\n\n return depth_sample",
"def tsp_annealing_random(T, scheme, route, adjacency_matrix, max_chain_length,c):\n best = route.copy()\n chains = 0\n cost_list = []\n T_0 = T\n T_list = []\n \n while T > 0:\n # Sample city from route\n temp = route.copy()\n index1, index2 = np.random.randint(1,len(best)-1,size=2)\n\n sd, cost0 = calculate_cost(route,adjacency_matrix)\n cost_list.append(cost0)\n\n temp[index1:index2] = temp[index2-1:index1-1:-1]\n _, cost1 = calculate_cost(temp,adjacency_matrix)\n\n chains += 1\n T_list.append(T)\n\n # Adjust temperature\n if scheme == \"exp\":\n T = T*c\n if scheme == \"log\":\n alpha = 50\n T = T_0/(1+alpha*np.log(1+chains))\n if scheme == \"std\":\n delta = .1\n T = T / (1 + ((np.log(1+delta)* T) / (3 * sd)))\n if scheme == \"quad\":\n alpha = 1\n T = T_0/(1+alpha*chains**2)\n\n # Metropolis step \n if cost0 > cost1:\n route = temp.copy()\n else:\n U = rs.uniform()\n if U < np.exp((cost0-cost1)/T):\n route = temp.copy()\n\n best = route.copy()\n if chains > max_chain_length:\n return best, cost_list\n return best, cost_list",
"def _dfs(\n self, kg: KG, entity: Vertex, is_reverse: bool = False\n ) -> List[Walk]:\n self.sampler.visited = set()\n walks: List[Walk] = []\n assert self.max_walks is not None\n\n rng = np.random.RandomState(self.random_state)\n\n while len(walks) < self.max_walks:\n sub_walk: Walk = (entity,)\n d = 1\n while d // 2 < self.max_depth:\n pred_obj = self.sampler.sample_hop(\n kg, sub_walk, d // 2 == self.max_depth - 1, is_reverse\n )\n if pred_obj is None:\n break\n\n if is_reverse:\n if (\n pred_obj[0] in self.communities\n and rng.random() < self.hop_prob\n ):\n community_nodes = self.labels_per_community[\n self.communities[pred_obj[0]]\n ]\n sub_walk = (\n pred_obj[1],\n rng.choice(community_nodes),\n ) + sub_walk\n else:\n sub_walk = (pred_obj[1], pred_obj[0]) + sub_walk\n else:\n if (\n pred_obj[1] in self.communities\n and rng.random() < self.hop_prob\n ):\n community_nodes = self.labels_per_community[\n self.communities[pred_obj[1]]\n ]\n sub_walk += (\n pred_obj[0],\n rng.choice(community_nodes),\n )\n else:\n sub_walk += (pred_obj[0], pred_obj[1])\n d = len(sub_walk) - 1\n walks.append(sub_walk)\n return list(walks)",
"def update_flow(self):\n N = len(self.vertices)\n _vertices = self.vertices+['_source', '_sink']\n s, t = _vertices.index('_source'), _vertices.index('_sink')\n cost, capacity = dok_matrix((N+2, N+2)), dok_matrix((N+2, N+2))\n\n cost[:N, :N] = self.cost\n capacity[:N, :N] = self.upper_bound-self.lower_bound\n # _source to main vertices\n l_in = self.lower_bound.toarray().sum(axis=0)\n us, = l_in.nonzero()\n for u in us:\n capacity[s, u] = l_in[u]\n # main vertices to _sink\n l_out = self.lower_bound.toarray().sum(axis=1)\n us, = l_out.nonzero()\n for u in us:\n capacity[u, t] = l_out[u]\n # sink to source\n infinite_flow = self.upper_bound.toarray().sum()\n capacity[_vertices.index('sink'), _vertices.index('source')] = infinite_flow\n\n # get a feasible flow on original graph by finding the max flow on\n # auxiliary graph\n aux_fg = FlowGraph(_vertices, cost, capacity, True)\n aux_fg.FordFulkerson()\n assert aux_fg.residual[s].toarray().sum()==0, 'feasible flow within bounds not found'\n\n self.residual = aux_fg.residual[:N, :N]\n s, t = self.vertices.index('source'), self.vertices.index('sink')\n self.residual[s, t] = 0\n self.residual[t, s] = 0\n\n self.FordFulkerson()",
"def test_heuristic_first_steps(self):\n graph = {n: set(self.deterministic_graph[n]) - set([n])\n for n in self.deterministic_graph}\n print(\"Graph {}:\".format(graph))\n elim_node = min_fill_in_heuristic(graph)\n steps = []\n\n while elim_node is not None:\n print(\"Removing {}:\".format(elim_node))\n steps.append(elim_node)\n nbrs = graph[elim_node]\n\n for u, v in itertools.permutations(nbrs, 2):\n if v not in graph[u]:\n graph[u].add(v)\n\n for u in graph:\n if elim_node in graph[u]:\n graph[u].remove(elim_node)\n\n del graph[elim_node]\n print(\"Graph {}:\".format(graph))\n elim_node = min_fill_in_heuristic(graph)\n\n # check only the first 2 elements for equality\n assert_equals(steps[:2], [6, 5])",
"def randnet(G):\n\t\n\t#initializationo of the lists\n\telst = G.get_edgelist()\n\tclst = G.es['color'][:]\n\tcdic = {}\n\tfor i,c in enumerate(clst):\n\t\tcdic[c]=i\n\tlst1 = [[]]*len(key)\n\tfor i,e in enumerate(elst):\n\t\tlst1[cdic[clst[i]]].append([e[0],e[1]])\n\t\t\n\t#randomization procedure\n\tfor i in range(random.randrange(100,200)):\n\t\tcid = cdic[random.choice(clst)] #random choice weighted by the frequency of each color\n\t\te1id = random.randrange(0,len(lst1[cid])) #random choice of edge with that color\n\t\te2id = random.randrange(0,len(lst1[cid]))\n\t\textop = random.randrange(0,2)#random choice of top/bottom\n\t\tttemp = lst1[cid][e1id][extop]\n\t\tlst1[cid][e1id][extop] = lst1[cid][e2id][extop]\n\t\tlst1[cid][e2id][extop] = ttemp\n\t\n\tcoutlst = []\n\teoutlst\t= []\n\tfor i in range(len(lst1)):\n\t\tc = clst[i]\n\t\tfor e in lst1[i]:\n\t\t\tcoutlst.append(c)\n\t\t\teoutlst.append((e[0],e[1]))\n\n\treturn [eoutlst,coutlst]",
"def get_weight_from_minflow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"start\"],\n self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"destin\"],\n self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.source(),\n x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.sink(),\n x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n v,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n v,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n 
start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n x,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n x,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i], capacities[i], unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n print(\"Has become ({}, {}) with sup {}\".format(start,\n destin,\n sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in \\\n self.arc_info[arc].keys()):\n print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n new_flow = old_flow + sup_flow\n self.arc_info[arc][\"weight\"] = int(new_flow)\n print(\"Edge ({},{}) weight is changed from {} to {}\".format(\n start,\n destin,\n old_flow,\n new_flow))\n else:\n print('There was an issue with the min cost flow input.')\n #self.check_conservation_of_flow() # check that solution is valid",
"def get_flows(self, num_flows_per_entry):\n flows = []\n for tenant in self._tenants:\n for contract in tenant.get_children(only_class=Contract):\n providing_epgs = contract.get_all_providing_epgs()\n consuming_epgs = contract.get_all_consuming_epgs()\n for providing_epg in providing_epgs:\n vlan_ifs = providing_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n providing_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n providing_phys_if = phys_ifs[0].name\n for consuming_epg in consuming_epgs:\n vlan_ifs = consuming_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n consuming_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n consuming_phys_if = phys_ifs[0].name\n if providing_vlan == consuming_vlan and providing_phys_if == consuming_phys_if:\n # Skip this case since traffic would be switched outside fabric\n continue\n for filter_entry in contract.get_all_filter_entries():\n for i in range(0, num_flows_per_entry):\n flow = Flow()\n flow.ethertype = filter_entry.etherT\n if flow.ethertype == 'arp':\n flow.arp_opcode = filter_entry.arpOpc\n flow.populate_random_ip_addresses()\n elif flow.ethertype == 'ip':\n flow.populate_random_ip_addresses()\n flow.proto = filter_entry.prot\n if flow.proto == '6' or flow.proto == '17':\n dFromPort = int(filter_entry.dFromPort)\n dToPort = int(filter_entry.dToPort)\n sFromPort = int(filter_entry.sFromPort)\n sToPort = int(filter_entry.sToPort)\n if dFromPort == 0:\n dFromPort = 1\n dToPort += 1\n if sFromPort == 0:\n sFromPort = 1\n sToPort += 1\n if dToPort > 65534:\n dToPort = 65534\n if sToPort > 65534:\n sToPort = 65534\n flow.dport = str(random_number(dFromPort,\n dToPort))\n flow.sport = str(random_number(sFromPort,\n sToPort))\n if flow.proto == '6':\n flow.tcp_rules = filter_entry.tcpRules\n flow.svlan = providing_vlan\n flow.dvlan = consuming_vlan\n flow.src_intf = providing_phys_if\n flow.dst_intf = consuming_phys_if\n\n # Is the flow expected to succeed ?\n flow.expected_action = 'drop'\n providing_bd = providing_epg.get_bd()\n consuming_bd = consuming_epg.get_bd()\n if providing_bd and consuming_bd:\n if providing_bd == consuming_bd:\n if providing_bd.get_context() == consuming_bd.get_context():\n flow.expected_action = 'permit'\n flow.populate_random_mac_addresses()\n flows.append(flow)\n return flows"
]
| [
"0.6332378",
"0.60935616",
"0.5811239",
"0.560641",
"0.5514502",
"0.5492238",
"0.5380612",
"0.5344111",
"0.5344111",
"0.53399456",
"0.5291331",
"0.5245833",
"0.52410334",
"0.5200011",
"0.51825786",
"0.51523155",
"0.51386243",
"0.51315624",
"0.5101362",
"0.5068613",
"0.5018153",
"0.50158495",
"0.501156",
"0.50016",
"0.49956098",
"0.49911538",
"0.49850804",
"0.4983879",
"0.49826854",
"0.49777502"
]
| 0.622782 | 1 |
RETURNS A set of outgoing links and corresponding splitting ratios for flow f at node s | def get_paths_for_flow(F, s, f):
links = [((u, v), split_ratio)
for (flow_id, u, v), split_ratio in F.items()
if flow_id == f and u == s and split_ratio > 0.001]
return links | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def traverse_graph(F, f, s, t, u, sr):\n def get_paths_for_flow(F, s, f):\n \"\"\"\n RETURNS\n A set of outgoing links and corresponding splitting ratios for flow f\n at node s\n \"\"\"\n links = [((u, v), split_ratio) \n for (flow_id, u, v), split_ratio in F.items() \n if flow_id == f and u == s and split_ratio > 0.001]\n return links\n \n if u == t:\n return [([t], sr)]\n\n outgoing_links = get_paths_for_flow(F, u, f)\n paths_to_t = []\n\n for ((current_node, next_hop), split_ratio) in outgoing_links:\n paths_from_u_to_t = traverse_graph(F, f, s, t, next_hop, split_ratio) \n paths_to_t.extend(paths_from_u_to_t)\n\n paths_to_t_with_u = []\n for path in paths_to_t:\n nodes, sr = path\n new_path = [u] + nodes\n paths_to_t_with_u.append((new_path, sr))\n\n return paths_to_t_with_u",
"def step(nodes, outputs, edges):\n flowed = []\n for node_name in nodes.copy():\n if node_name in flowed:\n continue\n if len(nodes[node_name]) == 2:\n if node_name in flowed:\n continue\n node = [int(value) for value in nodes[node_name]]\n low_value, high_value = min(node), max(node)\n low_flow, high_flow = edges[node_name] \n low_dictionary, low_node_name = low_flow\n high_dictionary, high_node_name = high_flow\n low_node = low_dictionary.get(low_node_name, tuple())\n high_node = high_dictionary.get(high_node_name, tuple())\n low_dictionary[low_node_name] = low_node + (str(low_value),)\n high_dictionary[high_node_name] = high_node + (str(high_value),)\n nodes[node_name] = tuple()\n if low_dictionary is nodes:\n flowed.append(low_node_name)\n if high_dictionary is nodes:\n flowed.append(high_node_name)\n return nodes, outputs, edges",
"def split_at_nodes(shp):\n nodes = find_nodes(shp)\n nodeIds = list(nodes)\n nodeIds.sort()\n nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])\n \n for road in shp:\n vrts = road.vertices\n midVrts = set(road.vertices[1:-1]) #we know end points are nodes\n midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.\n midIdx = [vrts.index(node) for node in midNodes] # Get their indices\n midIdx.sort()\n if midIdx:\n #print vrts\n starts = [0]+midIdx\n stops = [x+1 for x in midIdx]+[None]\n for start,stop in zip(starts,stops):\n feat = pysal.cg.Chain(vrts[start:stop])\n rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)\n yield feat,rec\n else:\n rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)\n yield road,rec",
"def _split(self):\r\n \r\n temp = [self.upstream.demand]\r\n for item, p in zip(self.downstream, self.priority):\r\n temp.append(item.supply/p)\r\n \r\n flow = min(temp) # total flow\r\n \r\n self.upstream.outflow = flow\r\n \r\n for item, p in zip(self.downstream, self.priority):\r\n item.inflow = p * flow",
"def ford_fulkerson_algorithm(graph: np.ndarray, source: int, sink: int) -> np.ndarray:\r\n\r\n residual_graph = copy.deepcopy(graph)\r\n row = len(residual_graph)\r\n parent = [-1] * row\r\n max_flow = 0\r\n\r\n if source == sink or sink < 0 or source < 0 or source >= row or sink >= row:\r\n raise WrongInputException('Wrong input source/sink vertice(s)')\r\n\r\n while bfs(residual_graph, row, source, sink, parent):\r\n\r\n path_flow = float(\"Inf\")\r\n s = sink\r\n while s != source:\r\n path_flow = min(path_flow, residual_graph[parent[s]][s])\r\n s = parent[s]\r\n\r\n max_flow += path_flow\r\n\r\n v = sink\r\n while v != source:\r\n u = parent[v]\r\n residual_graph[u][v] -= path_flow\r\n residual_graph[v][u] += path_flow\r\n v = parent[v]\r\n print(\"Max flow: %d\" % max_flow)\r\n\r\n return residual_graph",
"def split_edges(self, maximum_distance):\n \"\"\" Iterate through the vertices of each section. For each vertex v, evaluate edges for which v is a source.\n If an edge of weight greater than maximum_distance, then split it. \"\"\"\n for section_id in self.sections:\n utils.print_progress(len(self.sections), prefix='splitting edges')\n current_section = [] # Need to update the section data after splitting the edges.\n for source in self.sections[section_id]:\n current_section.append(source)\n edges_to_remove = [] # If an edge is split, it will need to be removed.\n for edge in self.graph.get_out_edges(source):\n if self.edge_weights[edge] > maximum_distance:\n target = edge[1] # edge is a numpy array of [source, target, edge]. Select target.\n edges_to_remove.append(self.graph.edge(edge[0], edge[\n 1])) # If an edge is split, the original edge should be removed.\n\n new_edge_count = int(math.ceil(self.edge_weights[edge] / maximum_distance))\n new_edge_distance = self.edge_weights[edge] / new_edge_count\n current_point = shapes.Point.from_list(\n list(self.node_locations[source]) + [self.node_heading[target]])\n previous_vertex = source\n for _ in range(new_edge_count):\n current_point = utils.offset_point(current_point, new_edge_distance, current_point.bearing)\n current_vertex = self.graph.add_vertex()\n current_section.append(current_vertex) # The new vertex becomes a part of the section.\n \"\"\" Populate the property map for the new vertex. Inherit values from the target node,\n unless the target node is a junction node. Then inherit values from the source. \"\"\"\n self.node_locations[current_vertex] = current_point.as_list()\n self.node_heading[current_vertex] = current_point.bearing\n property_vertex = source if not self.junctions[target] else target\n self.node_speed_limit[current_vertex] = self.node_speed_limit[property_vertex]\n self.node_width[current_vertex] = self.node_width[property_vertex]\n self.node_id[current_vertex] = self.node_id[property_vertex]\n\n \"\"\" Create an edge between the previous vertex and the newly created vertex, \n and update the edge weight property map. \"\"\"\n current_edge = self.graph.add_edge(previous_vertex, current_vertex)\n self.edge_weights[current_edge] = new_edge_distance\n\n # The current vertex becomes the previous vertex in the next step.\n previous_vertex = current_vertex\n\n \"\"\" Create an edge between the last new vertex that was created and the target of the\n original edge which is being split, and update the property map. \"\"\"\n self.edge_weights[self.graph.add_edge(previous_vertex, target)] = new_edge_distance\n list(map(self.graph.remove_edge, edges_to_remove)) # Remove all relevant edges\n self.sections[section_id] = current_section # Update the section with the new vertices",
"def dfs(x, p, step):\n disc[x] = low[x] = step\n for xx in graph.get(x, []): \n if disc[xx] == inf: \n step += 1\n dfs(xx, x, step)\n low[x] = min(low[x], low[xx])\n if low[xx] > disc[x]: ans.append([x, xx]) # bridge\n elif xx != p: low[x] = min(low[x], disc[xx])",
"def get_flow_2frames(self, x):\n b, n, c, h, w = x.size()\n x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)\n x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)\n flows_backward = self.spynet(x_1, x_2)\n flows_backward = [flow.view(b, n - 1, 2, h // 2 ** i, w // 2 ** i) for flow, i in zip(flows_backward, range(4))]\n flows_forward = self.spynet(x_2, x_1)\n flows_forward = [flow.view(b, n - 1, 2, h // 2 ** i, w // 2 ** i) for flow, i in zip(flows_forward, range(4))]\n return flows_backward, flows_forward",
"def sg_hops (self):\n return (link for s, d, link in self.network.edges_iter(data=True) if\n link.type == Link.SG)",
"def compute_greedy_flow_allocations( target_graph\n , flow_selection_fn\n , seed_number=DEFAULT_SEED_NUMBER):\n\n flow_allocation_seed_number = seed_number\n np.random.seed(flow_allocation_seed_number)\n\n link_utilization = {tuple(sorted(link_tuple)): 0.0 for link_tuple in target_graph.edges}\n flows = []\n\n while True:\n capacity_was_exceeded = False\n\n source_node, destination_node = flow_selection_fn(target_graph.nodes)\n flow_tx_rate = np.random.uniform(FLOW_TX_RATE_LOWER_BOUND, FLOW_TX_RATE_UPPER_BOUND)\n\n connecting_paths = list(node_disjoint_paths(target_graph, source_node, destination_node))\n disjoint_path_count = len(connecting_paths)\n flow_rate_per_subpath = flow_tx_rate / disjoint_path_count\n for path in [nx.utils.pairwise(p_i) for p_i in connecting_paths]:\n for u, v in [tuple(sorted(t_i)) for t_i in path]:\n if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:\n capacity_was_exceeded = True\n break\n link_utilization[u, v] += flow_rate_per_subpath\n if capacity_was_exceeded:\n break\n if capacity_was_exceeded:\n break\n\n the_flow = Flow( source_node = source_node\n , destination_node = destination_node\n , flow_tx_rate = flow_tx_rate\n , paths = connecting_paths\n , splitting_ratio = [1.0/disjoint_path_count]*disjoint_path_count\n )\n flows.append(the_flow)\n return flows, link_utilization",
"def compute_flow(self, lqs):\n n, t, c, h, w = lqs.size()\n lqs_1 = lqs[:, :-1, :, :, :].reshape(-1, c, h, w)\n lqs_2 = lqs[:, 1:, :, :, :].reshape(-1, c, h, w)\n flows_backward = self.spynet(lqs_1, lqs_2).view(n, t - 1, 2, h, w)\n if self.is_mirror_extended:\n flows_forward = None\n else:\n flows_forward = self.spynet(lqs_2, lqs_1).view(n, t - 1, 2, h, w)\n return flows_forward, flows_backward",
"def calc_per_flow_link_utilisation(self, flow: Tuple[int, int],\n demand: float,\n routing: np.ndarray) -> np.ndarray:\n edge_mapping = {edge: i for i, edge in\n enumerate(sorted(self.graph.edges))}\n\n link_utilisation = np.zeros(self.num_edges)\n node_flow = np.zeros(self.num_nodes) # the flow stored at a node\n node_flow[flow[0]] = demand\n\n to_explore = [flow[0]]\n while to_explore:\n current_node = to_explore.pop(0)\n current_flow = node_flow[current_node]\n\n # this is the flow destination node so we absorb all flow\n if current_node == flow[1]:\n node_flow[current_node] = 0.0\n continue\n\n # push the flow at this node over all edges\n for edge in self.graph.out_edges(current_node):\n edge_index = edge_mapping[edge]\n ratio = routing[edge_index]\n flow_to_send = ratio * current_flow\n # only send flow if greater than epsilon (so no 'infinite' loops)\n if flow_to_send > 1.e-8:\n node_flow[edge[1]] += ratio * current_flow\n # all important step, update our output\n link_utilisation[edge_index] += ratio * current_flow\n # have updated the dst so add it to the list of things to do\n to_explore.append(edge[1])\n # we've moved all the flow from this node now, so reset back to zero\n node_flow[current_node] = 0.0\n\n return link_utilisation",
"def DRBFS(self, flow, edge_mark):\n\t\t# Distance flag for each node\n\t\td = {v:float('inf') for v in self.topo.nodes}\n\t\t# Parent node for each node\n\t\tpa = {v:-1 for v in self.topo.nodes}\n\t\t# Request info\n\t\ts = flow[0]\n\t\tt = flow[1]\n\n\t\t# BFS to find a min-hop path\n\t\tqueue = [s]; hdr = 0; d[s] = 0\n\t\twhile hdr < len(queue):\n\t\t\tu = queue[hdr]\n\t\t\thdr += 1\n\n\t\t\tfor v in self.topo.topo.neighbors(u):\n\t\t\t\tif edge_mark[(u, v)] or d[v] <= d[u] + 1:\n\t\t\t\t\tcontinue\n\t\t\t\tqueue.append(v)\n\t\t\t\td[v] = d[u] + 1\n\t\t\t\tpa[v] = u\n\t\t\t\tif v == t:\n\t\t\t\t\t# This is because when BFS on edges, the first time reaching t meaning the smallest hop it can be reached\n\t\t\t\t\thdr = len(queue)\n\t\t\t\t\tbreak\n\n\t\tif d[t] == float('inf'):\n\t\t\treturn False\n\n\t\tp = [t]; v = t\n\t\twhile v != s and v != -1:\n\t\t\tv = pa[v]\n\t\t\tp.append(v)\n\t\tp.reverse()\n\n\t\treturn p",
"def get_all_sghop_info (nffg, return_paths=False):\n sg_map = {}\n for i in nffg.infras:\n for p in i.ports:\n for fr in p.flowrules:\n # if fr.external:\n # continue\n if fr.id not in sg_map:\n # The path is unordered!!\n path_of_shop = []\n flowclass = NFFGToolBox._extract_flowclass(fr.match.split(\";\"))\n sg_map[fr.id] = [None, None, flowclass, fr.bandwidth, fr.delay]\n # We have to find the BEGINNING of this flowrule sequence.\n inbound_link = NFFGToolBox._find_infra_link(nffg, p, outbound=False,\n accept_dyn=True)\n while inbound_link.type != 'DYNAMIC':\n path_of_shop.append(inbound_link)\n if inbound_link.src.node.type == 'SAP':\n break\n # The link is STATIC, and its src is not SAP so it is an Infra.\n prev_fr, prev_p = \\\n NFFGToolBox._get_flowrule_and_its_starting_port(\n inbound_link.src.node, fr.id)\n NFFGToolBox._check_flow_consistencity(sg_map, prev_fr)\n inbound_link = NFFGToolBox._find_infra_link(nffg, prev_p,\n outbound=False,\n accept_dyn=True)\n # 'inbound_link' is DYNAMIC here or it is STATIC and starts from\n # a SAP,\n # so the sequence starts here\n sg_map[fr.id][0] = inbound_link.src\n\n # We have to find the ENDING of this flowrule sequence.\n output_port = NFFGToolBox._get_output_port_of_flowrule(i, fr)\n if output_port is None:\n continue\n outbound_link = NFFGToolBox._find_infra_link(nffg, output_port,\n outbound=True,\n accept_dyn=True)\n while outbound_link.type != 'DYNAMIC':\n path_of_shop.append(outbound_link)\n if outbound_link.dst.node.type == 'SAP':\n break\n # The link is STATIC and its dst is not a SAP so it is an Infra.\n next_fr, _ = NFFGToolBox._get_flowrule_and_its_starting_port(\n outbound_link.dst.node, fr.id)\n # '_' is 'outbound_link.dst'\n next_output_port = NFFGToolBox._get_output_port_of_flowrule(\n outbound_link.dst.node, next_fr)\n NFFGToolBox._check_flow_consistencity(sg_map, next_fr)\n outbound_link = NFFGToolBox._find_infra_link(nffg,\n next_output_port,\n outbound=True,\n accept_dyn=True)\n # the 'outbound_link' is DYNAMIC here or finishes in a SAP, so the\n # flowrule sequence finished here.\n sg_map[fr.id][1] = outbound_link.dst\n\n if return_paths:\n sg_map[fr.id].append(path_of_shop)\n\n return sg_map",
"def routes_for_od(self, r, s):\n\n for path in self.paths_for_od(r, s):\n route = self._road_network.network.copy()\n\n for edge in route.edges_iter():\n u,v = edge\n if BaseRouter._edge_in_path(path, edge):\n route.edge[u][v]['pathweight'] = 0.1\n else:\n route.edge[u][v]['pathweight'] = 1.0\n\n # Set node weights\n for node in route:\n try:\n dist_from_path = nx.shortest_path_length(route,\n node,\n path[-1],\n weight='pathweight')\n except nx.NetworkXNoPath:\n dist_from_path = float('inf')\n\n route.node[node]['weight'] = math.exp(-self._beta*dist_from_path)\n\n \n # Create policy from weights\n def edge_weight(u,v):\n return route.node[v]['weight']*math.exp(\n -self._beta*route.edge[u][v]['pathweight'])\n\n for node in route:\n edges = route.edges(node)\n total_edge_weight = reduce(\n lambda x,y: x+y,\n [edge_weight(u,v) for u,v in edges],\n 0.0)\n\n for u,v in edges:\n route[u][v]['weight'] = \\\n edge_weight(u, v) / float(total_edge_weight)\n\n yield Route(route, path)",
"def dfs_r(self, s):\n g = Graph(attr={DIRECTED: True})\n return self.dfs_rec(g, ('#', s))",
"def distribute(node, rank_links):\n r = rank_links[0]\n links = rank_links[1]\n\n ol = str(links).split('|')\n Ni = len(ol)\n\n # if the node is for dangling (i.e. no outgoing link),\n # emit the loss to redistribute to all the incoming\n # links to the dangling node\n if (Ni == 1 and ol[0] == '') or Ni == 0:\n yield 'DANGLING', r\n else:\n r_new = float(r)/float(Ni)\n for l in ol:\n yield l, r_new\n\n # recover graph structure\n if links <> '':\n yield node, links",
"def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False",
"def findPathsToBase(A,bSize):\n M,N = A.shape\n pressedPaths = []\n\n #For every two nodes in the base find all paths between them\n for b1 in range(bSize):\n for b2 in range(bSize):\n #Remove all other base nodes from the graph so that\n #we only find paths that go through the specialization set\n if b1 == b2:\n #In this case we are looking for a cycle.\n mask = [b1]+list(range(bSize,N))\n newSize = len(mask) + 1\n reduA = np.zeros((newSize,newSize))\n #Because the networkx cycle finders don't do what we need\n #them to do, we create a new graph and find paths instead\n reduA[:-1,:-1] = A[mask,:][:,mask]\n #Remove ingoing edges from the base node and add to new node\n reduA[-1,:] = reduA[0,:]\n reduA[0,:] = np.zeros(newSize)\n G = nx.DiGraph(reduA.T)\n #Find paths from the base node to the new node\n #same as finding all the cycles\n paths = list(nx.all_simple_paths(G,0,newSize-1))\n\n else:\n mask = [b1,b2]+list(range(bSize,N))\n reduA = A[mask,:][:,mask]\n #Remove base node interactions\n reduA[:2,:2] = np.zeros((2,2))\n G = nx.DiGraph(reduA.T)\n paths = list(nx.all_simple_paths(G,0,1))\n\n #Process Paths so that they make sense when the rest of the base\n #set is added to the graph\n for p in paths:\n if p != []:\n if b1 == b2:\n p = np.array(p) + bSize-1\n else:\n p = np.array(p) + bSize-2\n p[[0,-1]] = [b1, b2]\n pressedPaths.append(p)\n\n return pressedPaths",
"def compute_half_life_vested_shares(params, step, sL, s, inputs):\n key = 'delegators'\n \n delegators = s['delegators']\n\n half_life_vesting_rate = params['half_life_vesting_rate']\n \n for delegator in delegators.values():\n # for future computation speed, vest them in chunks, it doesn't matter which chunk\n shares_vesting_this_period = delegator.unvested_shares * half_life_vesting_rate\n for timestep in delegator._unvested_shares:\n remaining_shares_to_vest = shares_vesting_this_period\n if delegator._unvested_shares[timestep] > remaining_shares_to_vest:\n delegator._unvested_shares[timestep] -= remaining_shares_to_vest\n break\n else:\n # 0 out and go onto the next one\n remaining_shares_to_vest -= delegator._unvested_shares[timestep]\n delegator._unvested_shares[timestep] = 0\n \n delegator.vested_shares += shares_vesting_this_period\n # print(f'{delegator.vested_shares=}, {delegator.unvested_shares=}, {delegator.shares=}')\n value = delegators\n\n return key, value",
"def dfs_traversal(graph, s, goals=[]):\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop()\n visited += [v]\n if v in goals:\n return visited\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n return visited\n\n \"\"\"\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop()\n visited += [v]\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n if w in goals:\n v = boundary.pop()\n visited += [v]\n return visited\n\"\"\"",
"def get_dfs(self, s):\n results = []\n # mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n self._dfs_recursive(s, visited, results)\n return results",
"def bfs(g: nx.Graph, start_node: Hashable) -> List[Hashable]:\n list_ = list(g.neighbors(start_node))\n len_graph = g.number_of_nodes()\n list2 = [start_node]\n while len(list2) < len_graph:\n for i in range(len(list_) - 1):\n if list_[0] not in list2:\n list2.append(list_[0])\n list_ += list(g.neighbors(list_[0]))\n list_.remove(list_[0])\n # nx.draw(g, with_labels=True)\n # plt.show()\n return list2",
"def flowingFrom(self, fount):",
"def hmn2(a, s, m0):\n n = 2*m0**s\n links = np.zeros((int(a/m0*n*sum([1/2**x for x in range(1, s+1)])), 2), dtype=np.int32)\n links_i = 0\n p = 0\n \n # At each hierarchy level a number of a links are established,\n # repeating the process if links are repeated.\n for si in range(1, s+1):\n m0_si = m0**si\n for n in range(0, n+1-2*m0_si, 2*m0_si):\n \n if a == 1:\n i = np.random.randint(0 + n, m0_si + n)\n j = np.random.randint(m0_si + n, 2*m0_si + n)\n links[p] = np.array([i, j])\n p += 1\n \n else:\n while len(np.unique(links[links_i:a + links_i], axis=0)) != a:\n for m in range(a):\n i = np.random.randint(0 + n, m0_si + n)\n j = np.random.randint(m0_si + n, 2*m0_si + n)\n links[links_i:a + links_i][m] = np.array([i, j])\n links_i += a\n \n blocks = np.arange(n).reshape((int(n/m0), m0))\n return np.concatenate((blocks, links))",
"def destination_floors(self):\r\n return self.destinations",
"def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: 
{pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")",
"def enumerate_links_around_node(self, node):\n\n l0 = self.node_link[node]\n l = l0\n edges = []\n traversing = True\n while traversing:\n edges.append(l)\n v = l[0]\n if v == node:\n l = self.pred_right[l]\n else:\n l = self.pred_left[l]\n if l0 == l:\n traversing = False\n if l0[1] == l[0] and l0[0] == l[1]:\n traversing = False\n #print v, l\n #raw_input('here')\n return edges",
"def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):\n\n\n path = [[],0 , 0]\n best_path = get_best_path(digraph, start, end, path, max_dist_outdoors, max_total_dist, best_path = None)\n\n if best_path[0] is None:\n raise ValueError('No work')\n else :\n return best_path[0]",
"def conversion(data_path, nodes, simtime, resolution=1):\r\n\r\n receivers = set() # i.e. neurons; senders can be external inputs\r\n edges_set = set()\r\n\r\n # Ask number of neurons per node\r\n nodes_dict = dict()\r\n if input(\"Every node has same number of neurons? (y/n)\") == \"y\":\r\n n = int(input(\"Number of neurons per node: \"))\r\n for node in range(nodes):\r\n nodes_dict[node] = n\r\n receivers.update([str(node) + \"-\" + str(neuron) for neuron in range(nodes_dict[node])])\r\n else:\r\n for node in range(nodes):\r\n nodes_dict[node] = int(input(\"Number of neurons in node %s: \" % node))\r\n receivers.update([str(node) + \"-\" + str(neuron) for neuron in range(nodes_dict[node])])\r\n\r\n receivers = sorted(list(receivers))\r\n\r\n # Load a part of FNS data to gather information about edges\r\n df = pd.read_csv(data_path, header=None, usecols=[0, 1, 2, 3, 4, 5], chunksize=500000, nrows=2000000)\r\n for chunk in df:\r\n chunk.columns = [\"Burning Time\", \"Firing Node\", \"Firing Neuron\", \"Burning Node\", \"Burning Neuron\",\r\n \"External Source\"]\r\n print(chunk.index)\r\n for node in range(nodes):\r\n temp = chunk.loc[\r\n (chunk[\"Burning Node\"] == node) & (chunk[\"External Source\"] == False)] # Remove external inputs\r\n for i in range(len(temp)):\r\n print(\"Gathering edge information for node: %i/%i. Connection: %i/%i\" % (\r\n node + 1, nodes, i + 1, len(temp)), end=\"\\r\")\r\n sender = str(np.asarray(temp[\"Firing Node\"])[i]) + \"-\" + str(np.asarray(temp[\"Firing Neuron\"])[i])\r\n receiver = str(node) + \"-\" + str(np.asarray(temp[\"Burning Neuron\"])[i])\r\n edges_set.add((sender, receiver))\r\n print(\"Gathering edge information for node: %i/%i. Connection: %i/%i\" % (node + 1, nodes, i + 1, len(temp)))\r\n\r\n status_table = pd.DataFrame(np.zeros((len(receivers), len(range((simtime - 1) * resolution)))),\r\n columns=list(range((simtime - 1) * resolution)), index=receivers)\r\n\r\n df = pd.read_csv(data_path, header=None, usecols=[0, 1, 2, 3, 4, 5], chunksize=1000000)\r\n for chunk in df:\r\n chunk.columns = [\"Burning Time\", \"Firing Node\", \"Firing Neuron\", \"Burning Node\", \"Burning Neuron\",\r\n \"External Source\"]\r\n print(chunk.index)\r\n\r\n # Loop over times instead of receivers: slightly better efficiency for large datasets\r\n times_raw = sorted(set(chunk[\"Burning Time\"]))\r\n status_table_temp = pd.DataFrame(columns=receivers)\r\n status_table_aux = pd.DataFrame(np.zeros((len(receivers), len(range((simtime - 1) * resolution)))),\r\n columns=list(range((simtime - 1) * resolution)), index=receivers)\r\n\r\n times = list(np.arange(0, int(max(times_raw) + 1), 1 / resolution))\r\n min_t = int(np.trunc(min(chunk[\"Burning Time\"])))\r\n max_t = int(np.trunc(max(chunk[\"Burning Time\"])))\r\n\r\n for t in times[min_t:max_t]:\r\n print(\"Gathering nodes' activity dynamics - t: %i/%i\" % (t + 1, max_t), end=\"\\r\")\r\n temp = chunk.loc[np.trunc(chunk[\"Burning Time\"]) == t, (\"Burning Node\", \"Burning Neuron\")]\r\n # Count burning events for each neuron at time t\r\n count = Counter([str(node) + \"-\" + str(neuron) for node, neuron in np.asarray(temp)])\r\n # Every time step we add a Counter dict to fill status table\r\n status_table_temp = status_table_temp.append(count, ignore_index=True)\r\n print(\"Gathering nodes' activity dynamics - t: %i/%i\" % (t + 1, max_t))\r\n\r\n # status_table_temp contains just a limited space of time each chunk\r\n status_table_temp = status_table_temp.transpose()\r\n status_table_temp.columns = list(\r\n 
np.arange(start=int(np.trunc(min(chunk[\"Burning Time\"]))), stop=int(np.trunc(max(chunk[\"Burning Time\"])))))\r\n status_table_temp = status_table_temp.fillna(0)\r\n\r\n # status_table_aux contains all time steps with temp table values and 0s for the rest each chunk\r\n status_table_aux = status_table_aux.add(status_table_temp)\r\n status_table_aux = status_table_aux.fillna(0)\r\n\r\n # status_table merges all chunks' data\r\n status_table = status_table.add(status_table_aux)\r\n status_table = status_table.fillna(0)\r\n\r\n timestamp = datetime.now()\r\n new_dir = \"gephiFiles\" + timestamp.strftime(\"d%d_%m_%Y-t%H_%M_%S\")\r\n os.mkdir(new_dir)\r\n\r\n # Generate gephi compliant files\r\n t = \"<\" + str(times) + \">\"\r\n gephi_nodes = pd.DataFrame(columns=[\"id\", \"label\", \"timeset\", \"events\", \"node\"])\r\n for idx, node in enumerate(receivers):\r\n print(\"Writing nodes' file for Gephi: %i/%i\" % (idx, len(receivers)), end=\"\\r\")\r\n events_row = [[float(i), int(events)] for i, events in enumerate(status_table.loc[node])]\r\n events_row = str(events_row).replace(\"[[\", \"<[\").replace(\"]]\", \"]>\").replace(\"],\", \"];\")\r\n new_row = pd.Series([node, node, t, events_row, node.split(\"-\")[0]], index=gephi_nodes.columns)\r\n gephi_nodes = gephi_nodes.append(new_row, ignore_index=True)\r\n print(\"Writing nodes' file for Gephi: %i/%i\" % (idx, len(receivers)))\r\n gephi_nodes.to_csv(new_dir + \"/gephi_nodes.csv\", index=False)\r\n\r\n print(\"Compute gephi files with %i edges will last %0.2fm approx.\" % (len(edges_set), len(edges_set) / 12000))\r\n if input(\"Do you want to proceed? (y/n) \") == \"n\":\r\n exit()\r\n gephi_edges = pd.DataFrame(columns=[\"Source\", \"Target\", \"type\", \"id\", \"weight\"])\r\n for idx, edge in enumerate(edges_set):\r\n print(\"Writing edges' file for Gephi: %i/%i\" % (idx, len(edges_set)), end=\"\\r\")\r\n edge_row = pd.Series([edge[0], edge[1], \"Directed\", idx, 1], index=gephi_edges.columns)\r\n gephi_edges = gephi_edges.append(edge_row, ignore_index=True)\r\n print(\"Writing edges' file for Gephi: %i/%i\" % (idx, len(edges_set)))\r\n gephi_edges.to_csv(new_dir + \"/gephi_edges.csv\", index=False)\r\n\r\n return None"
]
| [
"0.69459766",
"0.5595106",
"0.5534561",
"0.5483686",
"0.54691195",
"0.5467508",
"0.53494895",
"0.53470665",
"0.53366846",
"0.53234804",
"0.5310389",
"0.52716064",
"0.52382374",
"0.5235263",
"0.52303815",
"0.5227401",
"0.52202076",
"0.5218126",
"0.5190948",
"0.51826143",
"0.51786155",
"0.51763994",
"0.5171623",
"0.5167743",
"0.51458013",
"0.5132611",
"0.5119417",
"0.5104868",
"0.5102928",
"0.50998455"
]
| 0.7221145 | 1 |
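The record above pairs the query "RETURNS A set of outgoing links and corresponding splitting ratios for flow f at node s" with the get_paths_for_flow snippet, whose flow table F is keyed by (flow_id, u, v) tuples that map to splitting ratios. A minimal, self-contained sketch of that behaviour on a toy flow table (the function is copied from the record's document field; the table F below is an illustrative assumption, not taken from any record):

    # get_paths_for_flow as it appears in the record's document field
    def get_paths_for_flow(F, s, f):
        links = [((u, v), split_ratio)
                 for (flow_id, u, v), split_ratio in F.items()
                 if flow_id == f and u == s and split_ratio > 0.001]
        return links

    # Toy flow table: (flow_id, u, v) -> splitting ratio (assumed values)
    F = {
        (0, 'a', 'b'): 0.7,   # flow 0 splits 70/30 at node 'a'
        (0, 'a', 'c'): 0.3,
        (0, 'b', 'd'): 1.0,
        (1, 'a', 'c'): 1.0,   # belongs to flow 1, so ignored when f=0
    }

    print(get_paths_for_flow(F, 'a', 0))
    # [(('a', 'b'), 0.7), (('a', 'c'), 0.3)]

The 0.001 cutoff in the record's list comprehension is what keeps numerically negligible splits out of the returned links; the traverse_graph negative in the same record uses exactly this per-node lookup to enumerate full source-to-destination paths recursively.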
RETURNS A set of outgoing links and corresponding splitting ratios for flow f at node s | def get_paths_for_flow(F, s, f):
links = [((u, v), split_ratio)
for (flow_id, u, v), split_ratio in F.items()
if flow_id == f and u == s and split_ratio > 0.001]
return links | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def traverse_graph(F, f, s, t, u, sr):\n def get_paths_for_flow(F, s, f):\n \"\"\"\n RETURNS\n A set of outgoing links and corresponding splitting ratios for flow f\n at node s\n \"\"\"\n links = [((u, v), split_ratio) \n for (flow_id, u, v), split_ratio in F.items() \n if flow_id == f and u == s and split_ratio > 0.001]\n return links\n \n if u == t:\n return [([t], sr)]\n\n outgoing_links = get_paths_for_flow(F, u, f)\n paths_to_t = []\n\n for ((current_node, next_hop), split_ratio) in outgoing_links:\n paths_from_u_to_t = traverse_graph(F, f, s, t, next_hop, split_ratio) \n paths_to_t.extend(paths_from_u_to_t)\n\n paths_to_t_with_u = []\n for path in paths_to_t:\n nodes, sr = path\n new_path = [u] + nodes\n paths_to_t_with_u.append((new_path, sr))\n\n return paths_to_t_with_u",
"def step(nodes, outputs, edges):\n flowed = []\n for node_name in nodes.copy():\n if node_name in flowed:\n continue\n if len(nodes[node_name]) == 2:\n if node_name in flowed:\n continue\n node = [int(value) for value in nodes[node_name]]\n low_value, high_value = min(node), max(node)\n low_flow, high_flow = edges[node_name] \n low_dictionary, low_node_name = low_flow\n high_dictionary, high_node_name = high_flow\n low_node = low_dictionary.get(low_node_name, tuple())\n high_node = high_dictionary.get(high_node_name, tuple())\n low_dictionary[low_node_name] = low_node + (str(low_value),)\n high_dictionary[high_node_name] = high_node + (str(high_value),)\n nodes[node_name] = tuple()\n if low_dictionary is nodes:\n flowed.append(low_node_name)\n if high_dictionary is nodes:\n flowed.append(high_node_name)\n return nodes, outputs, edges",
"def split_at_nodes(shp):\n nodes = find_nodes(shp)\n nodeIds = list(nodes)\n nodeIds.sort()\n nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])\n \n for road in shp:\n vrts = road.vertices\n midVrts = set(road.vertices[1:-1]) #we know end points are nodes\n midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.\n midIdx = [vrts.index(node) for node in midNodes] # Get their indices\n midIdx.sort()\n if midIdx:\n #print vrts\n starts = [0]+midIdx\n stops = [x+1 for x in midIdx]+[None]\n for start,stop in zip(starts,stops):\n feat = pysal.cg.Chain(vrts[start:stop])\n rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)\n yield feat,rec\n else:\n rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)\n yield road,rec",
"def _split(self):\r\n \r\n temp = [self.upstream.demand]\r\n for item, p in zip(self.downstream, self.priority):\r\n temp.append(item.supply/p)\r\n \r\n flow = min(temp) # total flow\r\n \r\n self.upstream.outflow = flow\r\n \r\n for item, p in zip(self.downstream, self.priority):\r\n item.inflow = p * flow",
"def ford_fulkerson_algorithm(graph: np.ndarray, source: int, sink: int) -> np.ndarray:\r\n\r\n residual_graph = copy.deepcopy(graph)\r\n row = len(residual_graph)\r\n parent = [-1] * row\r\n max_flow = 0\r\n\r\n if source == sink or sink < 0 or source < 0 or source >= row or sink >= row:\r\n raise WrongInputException('Wrong input source/sink vertice(s)')\r\n\r\n while bfs(residual_graph, row, source, sink, parent):\r\n\r\n path_flow = float(\"Inf\")\r\n s = sink\r\n while s != source:\r\n path_flow = min(path_flow, residual_graph[parent[s]][s])\r\n s = parent[s]\r\n\r\n max_flow += path_flow\r\n\r\n v = sink\r\n while v != source:\r\n u = parent[v]\r\n residual_graph[u][v] -= path_flow\r\n residual_graph[v][u] += path_flow\r\n v = parent[v]\r\n print(\"Max flow: %d\" % max_flow)\r\n\r\n return residual_graph",
"def split_edges(self, maximum_distance):\n \"\"\" Iterate through the vertices of each section. For each vertex v, evaluate edges for which v is a source.\n If an edge of weight greater than maximum_distance, then split it. \"\"\"\n for section_id in self.sections:\n utils.print_progress(len(self.sections), prefix='splitting edges')\n current_section = [] # Need to update the section data after splitting the edges.\n for source in self.sections[section_id]:\n current_section.append(source)\n edges_to_remove = [] # If an edge is split, it will need to be removed.\n for edge in self.graph.get_out_edges(source):\n if self.edge_weights[edge] > maximum_distance:\n target = edge[1] # edge is a numpy array of [source, target, edge]. Select target.\n edges_to_remove.append(self.graph.edge(edge[0], edge[\n 1])) # If an edge is split, the original edge should be removed.\n\n new_edge_count = int(math.ceil(self.edge_weights[edge] / maximum_distance))\n new_edge_distance = self.edge_weights[edge] / new_edge_count\n current_point = shapes.Point.from_list(\n list(self.node_locations[source]) + [self.node_heading[target]])\n previous_vertex = source\n for _ in range(new_edge_count):\n current_point = utils.offset_point(current_point, new_edge_distance, current_point.bearing)\n current_vertex = self.graph.add_vertex()\n current_section.append(current_vertex) # The new vertex becomes a part of the section.\n \"\"\" Populate the property map for the new vertex. Inherit values from the target node,\n unless the target node is a junction node. Then inherit values from the source. \"\"\"\n self.node_locations[current_vertex] = current_point.as_list()\n self.node_heading[current_vertex] = current_point.bearing\n property_vertex = source if not self.junctions[target] else target\n self.node_speed_limit[current_vertex] = self.node_speed_limit[property_vertex]\n self.node_width[current_vertex] = self.node_width[property_vertex]\n self.node_id[current_vertex] = self.node_id[property_vertex]\n\n \"\"\" Create an edge between the previous vertex and the newly created vertex, \n and update the edge weight property map. \"\"\"\n current_edge = self.graph.add_edge(previous_vertex, current_vertex)\n self.edge_weights[current_edge] = new_edge_distance\n\n # The current vertex becomes the previous vertex in the next step.\n previous_vertex = current_vertex\n\n \"\"\" Create an edge between the last new vertex that was created and the target of the\n original edge which is being split, and update the property map. \"\"\"\n self.edge_weights[self.graph.add_edge(previous_vertex, target)] = new_edge_distance\n list(map(self.graph.remove_edge, edges_to_remove)) # Remove all relevant edges\n self.sections[section_id] = current_section # Update the section with the new vertices",
"def dfs(x, p, step):\n disc[x] = low[x] = step\n for xx in graph.get(x, []): \n if disc[xx] == inf: \n step += 1\n dfs(xx, x, step)\n low[x] = min(low[x], low[xx])\n if low[xx] > disc[x]: ans.append([x, xx]) # bridge\n elif xx != p: low[x] = min(low[x], disc[xx])",
"def get_flow_2frames(self, x):\n b, n, c, h, w = x.size()\n x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)\n x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)\n flows_backward = self.spynet(x_1, x_2)\n flows_backward = [flow.view(b, n - 1, 2, h // 2 ** i, w // 2 ** i) for flow, i in zip(flows_backward, range(4))]\n flows_forward = self.spynet(x_2, x_1)\n flows_forward = [flow.view(b, n - 1, 2, h // 2 ** i, w // 2 ** i) for flow, i in zip(flows_forward, range(4))]\n return flows_backward, flows_forward",
"def sg_hops (self):\n return (link for s, d, link in self.network.edges_iter(data=True) if\n link.type == Link.SG)",
"def compute_greedy_flow_allocations( target_graph\n , flow_selection_fn\n , seed_number=DEFAULT_SEED_NUMBER):\n\n flow_allocation_seed_number = seed_number\n np.random.seed(flow_allocation_seed_number)\n\n link_utilization = {tuple(sorted(link_tuple)): 0.0 for link_tuple in target_graph.edges}\n flows = []\n\n while True:\n capacity_was_exceeded = False\n\n source_node, destination_node = flow_selection_fn(target_graph.nodes)\n flow_tx_rate = np.random.uniform(FLOW_TX_RATE_LOWER_BOUND, FLOW_TX_RATE_UPPER_BOUND)\n\n connecting_paths = list(node_disjoint_paths(target_graph, source_node, destination_node))\n disjoint_path_count = len(connecting_paths)\n flow_rate_per_subpath = flow_tx_rate / disjoint_path_count\n for path in [nx.utils.pairwise(p_i) for p_i in connecting_paths]:\n for u, v in [tuple(sorted(t_i)) for t_i in path]:\n if (link_utilization[u, v] + flow_rate_per_subpath) > LINK_CAPACITY:\n capacity_was_exceeded = True\n break\n link_utilization[u, v] += flow_rate_per_subpath\n if capacity_was_exceeded:\n break\n if capacity_was_exceeded:\n break\n\n the_flow = Flow( source_node = source_node\n , destination_node = destination_node\n , flow_tx_rate = flow_tx_rate\n , paths = connecting_paths\n , splitting_ratio = [1.0/disjoint_path_count]*disjoint_path_count\n )\n flows.append(the_flow)\n return flows, link_utilization",
"def compute_flow(self, lqs):\n n, t, c, h, w = lqs.size()\n lqs_1 = lqs[:, :-1, :, :, :].reshape(-1, c, h, w)\n lqs_2 = lqs[:, 1:, :, :, :].reshape(-1, c, h, w)\n flows_backward = self.spynet(lqs_1, lqs_2).view(n, t - 1, 2, h, w)\n if self.is_mirror_extended:\n flows_forward = None\n else:\n flows_forward = self.spynet(lqs_2, lqs_1).view(n, t - 1, 2, h, w)\n return flows_forward, flows_backward",
"def calc_per_flow_link_utilisation(self, flow: Tuple[int, int],\n demand: float,\n routing: np.ndarray) -> np.ndarray:\n edge_mapping = {edge: i for i, edge in\n enumerate(sorted(self.graph.edges))}\n\n link_utilisation = np.zeros(self.num_edges)\n node_flow = np.zeros(self.num_nodes) # the flow stored at a node\n node_flow[flow[0]] = demand\n\n to_explore = [flow[0]]\n while to_explore:\n current_node = to_explore.pop(0)\n current_flow = node_flow[current_node]\n\n # this is the flow destination node so we absorb all flow\n if current_node == flow[1]:\n node_flow[current_node] = 0.0\n continue\n\n # push the flow at this node over all edges\n for edge in self.graph.out_edges(current_node):\n edge_index = edge_mapping[edge]\n ratio = routing[edge_index]\n flow_to_send = ratio * current_flow\n # only send flow if greater than epsilon (so no 'infinite' loops)\n if flow_to_send > 1.e-8:\n node_flow[edge[1]] += ratio * current_flow\n # all important step, update our output\n link_utilisation[edge_index] += ratio * current_flow\n # have updated the dst so add it to the list of things to do\n to_explore.append(edge[1])\n # we've moved all the flow from this node now, so reset back to zero\n node_flow[current_node] = 0.0\n\n return link_utilisation",
"def DRBFS(self, flow, edge_mark):\n\t\t# Distance flag for each node\n\t\td = {v:float('inf') for v in self.topo.nodes}\n\t\t# Parent node for each node\n\t\tpa = {v:-1 for v in self.topo.nodes}\n\t\t# Request info\n\t\ts = flow[0]\n\t\tt = flow[1]\n\n\t\t# BFS to find a min-hop path\n\t\tqueue = [s]; hdr = 0; d[s] = 0\n\t\twhile hdr < len(queue):\n\t\t\tu = queue[hdr]\n\t\t\thdr += 1\n\n\t\t\tfor v in self.topo.topo.neighbors(u):\n\t\t\t\tif edge_mark[(u, v)] or d[v] <= d[u] + 1:\n\t\t\t\t\tcontinue\n\t\t\t\tqueue.append(v)\n\t\t\t\td[v] = d[u] + 1\n\t\t\t\tpa[v] = u\n\t\t\t\tif v == t:\n\t\t\t\t\t# This is because when BFS on edges, the first time reaching t meaning the smallest hop it can be reached\n\t\t\t\t\thdr = len(queue)\n\t\t\t\t\tbreak\n\n\t\tif d[t] == float('inf'):\n\t\t\treturn False\n\n\t\tp = [t]; v = t\n\t\twhile v != s and v != -1:\n\t\t\tv = pa[v]\n\t\t\tp.append(v)\n\t\tp.reverse()\n\n\t\treturn p",
"def get_all_sghop_info (nffg, return_paths=False):\n sg_map = {}\n for i in nffg.infras:\n for p in i.ports:\n for fr in p.flowrules:\n # if fr.external:\n # continue\n if fr.id not in sg_map:\n # The path is unordered!!\n path_of_shop = []\n flowclass = NFFGToolBox._extract_flowclass(fr.match.split(\";\"))\n sg_map[fr.id] = [None, None, flowclass, fr.bandwidth, fr.delay]\n # We have to find the BEGINNING of this flowrule sequence.\n inbound_link = NFFGToolBox._find_infra_link(nffg, p, outbound=False,\n accept_dyn=True)\n while inbound_link.type != 'DYNAMIC':\n path_of_shop.append(inbound_link)\n if inbound_link.src.node.type == 'SAP':\n break\n # The link is STATIC, and its src is not SAP so it is an Infra.\n prev_fr, prev_p = \\\n NFFGToolBox._get_flowrule_and_its_starting_port(\n inbound_link.src.node, fr.id)\n NFFGToolBox._check_flow_consistencity(sg_map, prev_fr)\n inbound_link = NFFGToolBox._find_infra_link(nffg, prev_p,\n outbound=False,\n accept_dyn=True)\n # 'inbound_link' is DYNAMIC here or it is STATIC and starts from\n # a SAP,\n # so the sequence starts here\n sg_map[fr.id][0] = inbound_link.src\n\n # We have to find the ENDING of this flowrule sequence.\n output_port = NFFGToolBox._get_output_port_of_flowrule(i, fr)\n if output_port is None:\n continue\n outbound_link = NFFGToolBox._find_infra_link(nffg, output_port,\n outbound=True,\n accept_dyn=True)\n while outbound_link.type != 'DYNAMIC':\n path_of_shop.append(outbound_link)\n if outbound_link.dst.node.type == 'SAP':\n break\n # The link is STATIC and its dst is not a SAP so it is an Infra.\n next_fr, _ = NFFGToolBox._get_flowrule_and_its_starting_port(\n outbound_link.dst.node, fr.id)\n # '_' is 'outbound_link.dst'\n next_output_port = NFFGToolBox._get_output_port_of_flowrule(\n outbound_link.dst.node, next_fr)\n NFFGToolBox._check_flow_consistencity(sg_map, next_fr)\n outbound_link = NFFGToolBox._find_infra_link(nffg,\n next_output_port,\n outbound=True,\n accept_dyn=True)\n # the 'outbound_link' is DYNAMIC here or finishes in a SAP, so the\n # flowrule sequence finished here.\n sg_map[fr.id][1] = outbound_link.dst\n\n if return_paths:\n sg_map[fr.id].append(path_of_shop)\n\n return sg_map",
"def routes_for_od(self, r, s):\n\n for path in self.paths_for_od(r, s):\n route = self._road_network.network.copy()\n\n for edge in route.edges_iter():\n u,v = edge\n if BaseRouter._edge_in_path(path, edge):\n route.edge[u][v]['pathweight'] = 0.1\n else:\n route.edge[u][v]['pathweight'] = 1.0\n\n # Set node weights\n for node in route:\n try:\n dist_from_path = nx.shortest_path_length(route,\n node,\n path[-1],\n weight='pathweight')\n except nx.NetworkXNoPath:\n dist_from_path = float('inf')\n\n route.node[node]['weight'] = math.exp(-self._beta*dist_from_path)\n\n \n # Create policy from weights\n def edge_weight(u,v):\n return route.node[v]['weight']*math.exp(\n -self._beta*route.edge[u][v]['pathweight'])\n\n for node in route:\n edges = route.edges(node)\n total_edge_weight = reduce(\n lambda x,y: x+y,\n [edge_weight(u,v) for u,v in edges],\n 0.0)\n\n for u,v in edges:\n route[u][v]['weight'] = \\\n edge_weight(u, v) / float(total_edge_weight)\n\n yield Route(route, path)",
"def dfs_r(self, s):\n g = Graph(attr={DIRECTED: True})\n return self.dfs_rec(g, ('#', s))",
"def distribute(node, rank_links):\n r = rank_links[0]\n links = rank_links[1]\n\n ol = str(links).split('|')\n Ni = len(ol)\n\n # if the node is for dangling (i.e. no outgoing link),\n # emit the loss to redistribute to all the incoming\n # links to the dangling node\n if (Ni == 1 and ol[0] == '') or Ni == 0:\n yield 'DANGLING', r\n else:\n r_new = float(r)/float(Ni)\n for l in ol:\n yield l, r_new\n\n # recover graph structure\n if links <> '':\n yield node, links",
"def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False",
"def findPathsToBase(A,bSize):\n M,N = A.shape\n pressedPaths = []\n\n #For every two nodes in the base find all paths between them\n for b1 in range(bSize):\n for b2 in range(bSize):\n #Remove all other base nodes from the graph so that\n #we only find paths that go through the specialization set\n if b1 == b2:\n #In this case we are looking for a cycle.\n mask = [b1]+list(range(bSize,N))\n newSize = len(mask) + 1\n reduA = np.zeros((newSize,newSize))\n #Because the networkx cycle finders don't do what we need\n #them to do, we create a new graph and find paths instead\n reduA[:-1,:-1] = A[mask,:][:,mask]\n #Remove ingoing edges from the base node and add to new node\n reduA[-1,:] = reduA[0,:]\n reduA[0,:] = np.zeros(newSize)\n G = nx.DiGraph(reduA.T)\n #Find paths from the base node to the new node\n #same as finding all the cycles\n paths = list(nx.all_simple_paths(G,0,newSize-1))\n\n else:\n mask = [b1,b2]+list(range(bSize,N))\n reduA = A[mask,:][:,mask]\n #Remove base node interactions\n reduA[:2,:2] = np.zeros((2,2))\n G = nx.DiGraph(reduA.T)\n paths = list(nx.all_simple_paths(G,0,1))\n\n #Process Paths so that they make sense when the rest of the base\n #set is added to the graph\n for p in paths:\n if p != []:\n if b1 == b2:\n p = np.array(p) + bSize-1\n else:\n p = np.array(p) + bSize-2\n p[[0,-1]] = [b1, b2]\n pressedPaths.append(p)\n\n return pressedPaths",
"def compute_half_life_vested_shares(params, step, sL, s, inputs):\n key = 'delegators'\n \n delegators = s['delegators']\n\n half_life_vesting_rate = params['half_life_vesting_rate']\n \n for delegator in delegators.values():\n # for future computation speed, vest them in chunks, it doesn't matter which chunk\n shares_vesting_this_period = delegator.unvested_shares * half_life_vesting_rate\n for timestep in delegator._unvested_shares:\n remaining_shares_to_vest = shares_vesting_this_period\n if delegator._unvested_shares[timestep] > remaining_shares_to_vest:\n delegator._unvested_shares[timestep] -= remaining_shares_to_vest\n break\n else:\n # 0 out and go onto the next one\n remaining_shares_to_vest -= delegator._unvested_shares[timestep]\n delegator._unvested_shares[timestep] = 0\n \n delegator.vested_shares += shares_vesting_this_period\n # print(f'{delegator.vested_shares=}, {delegator.unvested_shares=}, {delegator.shares=}')\n value = delegators\n\n return key, value",
"def dfs_traversal(graph, s, goals=[]):\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop()\n visited += [v]\n if v in goals:\n return visited\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n return visited\n\n \"\"\"\n visited = []\n boundary = [s]\n while len(boundary) > 0:\n v = boundary.pop()\n visited += [v]\n for w in neighbours(v, graph):\n if w not in visited and w not in boundary:\n boundary.append(w)\n if w in goals:\n v = boundary.pop()\n visited += [v]\n return visited\n\"\"\"",
"def get_dfs(self, s):\n results = []\n # mark all the vertices as not visited\n visited = [False] * (len(self.graph))\n self._dfs_recursive(s, visited, results)\n return results",
"def bfs(g: nx.Graph, start_node: Hashable) -> List[Hashable]:\n list_ = list(g.neighbors(start_node))\n len_graph = g.number_of_nodes()\n list2 = [start_node]\n while len(list2) < len_graph:\n for i in range(len(list_) - 1):\n if list_[0] not in list2:\n list2.append(list_[0])\n list_ += list(g.neighbors(list_[0]))\n list_.remove(list_[0])\n # nx.draw(g, with_labels=True)\n # plt.show()\n return list2",
"def flowingFrom(self, fount):",
"def hmn2(a, s, m0):\n n = 2*m0**s\n links = np.zeros((int(a/m0*n*sum([1/2**x for x in range(1, s+1)])), 2), dtype=np.int32)\n links_i = 0\n p = 0\n \n # At each hierarchy level a number of a links are established,\n # repeating the process if links are repeated.\n for si in range(1, s+1):\n m0_si = m0**si\n for n in range(0, n+1-2*m0_si, 2*m0_si):\n \n if a == 1:\n i = np.random.randint(0 + n, m0_si + n)\n j = np.random.randint(m0_si + n, 2*m0_si + n)\n links[p] = np.array([i, j])\n p += 1\n \n else:\n while len(np.unique(links[links_i:a + links_i], axis=0)) != a:\n for m in range(a):\n i = np.random.randint(0 + n, m0_si + n)\n j = np.random.randint(m0_si + n, 2*m0_si + n)\n links[links_i:a + links_i][m] = np.array([i, j])\n links_i += a\n \n blocks = np.arange(n).reshape((int(n/m0), m0))\n return np.concatenate((blocks, links))",
"def destination_floors(self):\r\n return self.destinations",
"def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: 
{pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")",
"def enumerate_links_around_node(self, node):\n\n l0 = self.node_link[node]\n l = l0\n edges = []\n traversing = True\n while traversing:\n edges.append(l)\n v = l[0]\n if v == node:\n l = self.pred_right[l]\n else:\n l = self.pred_left[l]\n if l0 == l:\n traversing = False\n if l0[1] == l[0] and l0[0] == l[1]:\n traversing = False\n #print v, l\n #raw_input('here')\n return edges",
"def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):\n\n\n path = [[],0 , 0]\n best_path = get_best_path(digraph, start, end, path, max_dist_outdoors, max_total_dist, best_path = None)\n\n if best_path[0] is None:\n raise ValueError('No work')\n else :\n return best_path[0]",
"def conversion(data_path, nodes, simtime, resolution=1):\r\n\r\n receivers = set() # i.e. neurons; senders can be external inputs\r\n edges_set = set()\r\n\r\n # Ask number of neurons per node\r\n nodes_dict = dict()\r\n if input(\"Every node has same number of neurons? (y/n)\") == \"y\":\r\n n = int(input(\"Number of neurons per node: \"))\r\n for node in range(nodes):\r\n nodes_dict[node] = n\r\n receivers.update([str(node) + \"-\" + str(neuron) for neuron in range(nodes_dict[node])])\r\n else:\r\n for node in range(nodes):\r\n nodes_dict[node] = int(input(\"Number of neurons in node %s: \" % node))\r\n receivers.update([str(node) + \"-\" + str(neuron) for neuron in range(nodes_dict[node])])\r\n\r\n receivers = sorted(list(receivers))\r\n\r\n # Load a part of FNS data to gather information about edges\r\n df = pd.read_csv(data_path, header=None, usecols=[0, 1, 2, 3, 4, 5], chunksize=500000, nrows=2000000)\r\n for chunk in df:\r\n chunk.columns = [\"Burning Time\", \"Firing Node\", \"Firing Neuron\", \"Burning Node\", \"Burning Neuron\",\r\n \"External Source\"]\r\n print(chunk.index)\r\n for node in range(nodes):\r\n temp = chunk.loc[\r\n (chunk[\"Burning Node\"] == node) & (chunk[\"External Source\"] == False)] # Remove external inputs\r\n for i in range(len(temp)):\r\n print(\"Gathering edge information for node: %i/%i. Connection: %i/%i\" % (\r\n node + 1, nodes, i + 1, len(temp)), end=\"\\r\")\r\n sender = str(np.asarray(temp[\"Firing Node\"])[i]) + \"-\" + str(np.asarray(temp[\"Firing Neuron\"])[i])\r\n receiver = str(node) + \"-\" + str(np.asarray(temp[\"Burning Neuron\"])[i])\r\n edges_set.add((sender, receiver))\r\n print(\"Gathering edge information for node: %i/%i. Connection: %i/%i\" % (node + 1, nodes, i + 1, len(temp)))\r\n\r\n status_table = pd.DataFrame(np.zeros((len(receivers), len(range((simtime - 1) * resolution)))),\r\n columns=list(range((simtime - 1) * resolution)), index=receivers)\r\n\r\n df = pd.read_csv(data_path, header=None, usecols=[0, 1, 2, 3, 4, 5], chunksize=1000000)\r\n for chunk in df:\r\n chunk.columns = [\"Burning Time\", \"Firing Node\", \"Firing Neuron\", \"Burning Node\", \"Burning Neuron\",\r\n \"External Source\"]\r\n print(chunk.index)\r\n\r\n # Loop over times instead of receivers: slightly better efficiency for large datasets\r\n times_raw = sorted(set(chunk[\"Burning Time\"]))\r\n status_table_temp = pd.DataFrame(columns=receivers)\r\n status_table_aux = pd.DataFrame(np.zeros((len(receivers), len(range((simtime - 1) * resolution)))),\r\n columns=list(range((simtime - 1) * resolution)), index=receivers)\r\n\r\n times = list(np.arange(0, int(max(times_raw) + 1), 1 / resolution))\r\n min_t = int(np.trunc(min(chunk[\"Burning Time\"])))\r\n max_t = int(np.trunc(max(chunk[\"Burning Time\"])))\r\n\r\n for t in times[min_t:max_t]:\r\n print(\"Gathering nodes' activity dynamics - t: %i/%i\" % (t + 1, max_t), end=\"\\r\")\r\n temp = chunk.loc[np.trunc(chunk[\"Burning Time\"]) == t, (\"Burning Node\", \"Burning Neuron\")]\r\n # Count burning events for each neuron at time t\r\n count = Counter([str(node) + \"-\" + str(neuron) for node, neuron in np.asarray(temp)])\r\n # Every time step we add a Counter dict to fill status table\r\n status_table_temp = status_table_temp.append(count, ignore_index=True)\r\n print(\"Gathering nodes' activity dynamics - t: %i/%i\" % (t + 1, max_t))\r\n\r\n # status_table_temp contains just a limited space of time each chunk\r\n status_table_temp = status_table_temp.transpose()\r\n status_table_temp.columns = list(\r\n 
np.arange(start=int(np.trunc(min(chunk[\"Burning Time\"]))), stop=int(np.trunc(max(chunk[\"Burning Time\"])))))\r\n status_table_temp = status_table_temp.fillna(0)\r\n\r\n # status_table_aux contains all time steps with temp table values and 0s for the rest each chunk\r\n status_table_aux = status_table_aux.add(status_table_temp)\r\n status_table_aux = status_table_aux.fillna(0)\r\n\r\n # status_table merges all chunks' data\r\n status_table = status_table.add(status_table_aux)\r\n status_table = status_table.fillna(0)\r\n\r\n timestamp = datetime.now()\r\n new_dir = \"gephiFiles\" + timestamp.strftime(\"d%d_%m_%Y-t%H_%M_%S\")\r\n os.mkdir(new_dir)\r\n\r\n # Generate gephi compliant files\r\n t = \"<\" + str(times) + \">\"\r\n gephi_nodes = pd.DataFrame(columns=[\"id\", \"label\", \"timeset\", \"events\", \"node\"])\r\n for idx, node in enumerate(receivers):\r\n print(\"Writing nodes' file for Gephi: %i/%i\" % (idx, len(receivers)), end=\"\\r\")\r\n events_row = [[float(i), int(events)] for i, events in enumerate(status_table.loc[node])]\r\n events_row = str(events_row).replace(\"[[\", \"<[\").replace(\"]]\", \"]>\").replace(\"],\", \"];\")\r\n new_row = pd.Series([node, node, t, events_row, node.split(\"-\")[0]], index=gephi_nodes.columns)\r\n gephi_nodes = gephi_nodes.append(new_row, ignore_index=True)\r\n print(\"Writing nodes' file for Gephi: %i/%i\" % (idx, len(receivers)))\r\n gephi_nodes.to_csv(new_dir + \"/gephi_nodes.csv\", index=False)\r\n\r\n print(\"Compute gephi files with %i edges will last %0.2fm approx.\" % (len(edges_set), len(edges_set) / 12000))\r\n if input(\"Do you want to proceed? (y/n) \") == \"n\":\r\n exit()\r\n gephi_edges = pd.DataFrame(columns=[\"Source\", \"Target\", \"type\", \"id\", \"weight\"])\r\n for idx, edge in enumerate(edges_set):\r\n print(\"Writing edges' file for Gephi: %i/%i\" % (idx, len(edges_set)), end=\"\\r\")\r\n edge_row = pd.Series([edge[0], edge[1], \"Directed\", idx, 1], index=gephi_edges.columns)\r\n gephi_edges = gephi_edges.append(edge_row, ignore_index=True)\r\n print(\"Writing edges' file for Gephi: %i/%i\" % (idx, len(edges_set)))\r\n gephi_edges.to_csv(new_dir + \"/gephi_edges.csv\", index=False)\r\n\r\n return None"
]
| [
"0.6946733",
"0.5595293",
"0.55341357",
"0.5483804",
"0.54689676",
"0.546619",
"0.53488106",
"0.5347624",
"0.53359586",
"0.5324602",
"0.531136",
"0.527324",
"0.52406126",
"0.52348506",
"0.52295214",
"0.5226473",
"0.5220885",
"0.52176976",
"0.5189963",
"0.5180724",
"0.5178925",
"0.5175554",
"0.5173613",
"0.51691705",
"0.5143392",
"0.51332587",
"0.5121009",
"0.5105846",
"0.51043695",
"0.5100887"
]
| 0.7221145 | 0 |
To check if a route is feasible using given vehicle type, and return check result and route cost. | def check_violation(route, vehicle_type):
if len(route) == 2: # [0, 0] route
return True, 0, 0, 0
else:
accu_res = [0, 0, 0] # 0-leaving time, 1-accumulated distance, 2-volume
if vehicle_type == 2:
veh_cap = small_veh
elif vehicle_type == 3:
veh_cap = medium_veh
elif vehicle_type == 5:
veh_cap = large_veh
        else:
            # unknown vehicle type: warn and fall back to the large-vehicle parameters
            print('Input wrong vehicle type!', vehicle_type)
            veh_cap = large_veh
# small_veh = [1, 12, 10, 400000, 0.012, 200]
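        # veh_cap layout, inferred from how it is indexed below:
        # index 2 = volume capacity, 3 = route-duration limit, 4 = travel cost per unit distance, 5 = fixed cost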
fixed_cost = veh_cap[5]
trans_cost = 0
# wait_cost = 0
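        # if driving straight from the depot would arrive before the first customer's window opens,
        # delay the departure so the vehicle arrives exactly at the opening time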
if time_mat[0, route[1]] < num_timez[route[1]][0]:
accu_res[0] = num_timez[route[1]][0] - time_mat[0, route[1]] # vehicle leaving depot time
depart_time = accu_res[0] # departing from depot time
else:
depart_time = 0
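        # walk the route leg by leg, tracking leaving time, cumulative distance and loaded volume,
        # and reject the route as soon as a time-window, duration or capacity limit is violated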
for i in range(len(route) - 1):
last_cust = route[i]
curr_cust = route[i+1]
# checking leaving time
arr_time = accu_res[0] + time_mat[last_cust, curr_cust]
if arr_time < num_timez[curr_cust][0]:
accu_res[0] = num_timez[curr_cust][0] + oprt_t
wait_time = num_timez[curr_cust][0] - arr_time
# wait_cost += (wait_time / 60. * wait_cost0)
elif arr_time <= num_timez[curr_cust][1]:
accu_res[0] = arr_time + oprt_t
else:
# print('Infeasible route!(Service Time Error.)')
return False, 1000000, 0, 0
            # accumulate travel cost/distance; the limit below is checked against route duration
trans_cost += (dist_mat[last_cust, curr_cust] * veh_cap[4])
accu_res[1] += dist_mat[last_cust, curr_cust]
if accu_res[0] - oprt_t - depart_time > veh_cap[3]:
# print('Infeasible route!(Max Time Error.)')
return False, 1000000, 0, 0
# checking vehicle max volume
accu_res[2] += (num_demd[curr_cust][0] * bskt_vol + num_demd[curr_cust][1] * trsf_vol + (num_demd[curr_cust][2]
+ num_demd[curr_cust][3]) * milk_vol + num_demd[curr_cust][4] * paper_bskt)
if accu_res[2] > veh_cap[2]:
# print('Infeasible route!(Max Weight/Volume Error.)', accu_res[2])
return False, 1000000, 0, 0
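        # all customers served within the limits: aggregate total cost, distance and duration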
route_cost = fixed_cost + accu_res[1] * veh_cap[4]
route_dist = accu_res[1]
route_time = accu_res[0] - oprt_t - depart_time
        # print(fixed_cost, trans_cost, route_dist)
return True, route_cost, route_time, depart_time + 600 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def route_type(route):\r\n typ = 2\r\n vol_accu = 0 # accumulated volume\r\n\r\n if len(route) <= 2:\r\n return typ\r\n else:\r\n for i in range(1, len(route) - 1):\r\n cust0 = route[i]\r\n vol_accu += (num_demd[cust0][0] * bskt_vol + num_demd[cust0][1] * trsf_vol + (num_demd[cust0][2] +\r\n num_demd[cust0][3]) * milk_vol + num_demd[cust0][4] * paper_bskt)\r\n\r\n if vol_accu <= small_veh[2]:\r\n return 2\r\n elif vol_accu <= medium_veh[2]:\r\n return 3\r\n elif vol_accu <= large_veh[2]:\r\n return 5\r\n else:\r\n print('!!!Route is invalid: out of max volume!', route)",
"def road_travel(self, path, vehicle_type):\n # last node in path\n # is destination\n if len(path) < 2:\n return\n\n leg = path[0]\n if vehicle_type is VehicleType.Public:\n edge = self.transit_roads.network[leg.frm][leg.to][leg.edge_no]\n else:\n edge = self.roads.network[leg.frm][leg.to][leg.edge_no]\n\n # where leg.p is the proportion of the edge we travel\n time = self.roads.router.edge_travel_time(edge) * leg.p\n\n return leg, edge, time",
"def route(self, is_check_lanes=True):\n print 'route'\n # TODO: if too mant vtypes, better go through id_modes\n exectime_start = time.clock()\n\n net = self.get_scenario().net\n edges = net.edges\n vtypes = self.parent.vtypes\n\n ids_edges = []\n ids_trip = []\n costs = []\n for id_vtype in self.get_vtypes():\n id_mode = vtypes.ids_mode[id_vtype]\n\n # no routing for pedestrians\n if id_mode != net.modes.get_id_mode('pedestrian'):\n weights = edges.get_times(id_mode=id_mode,\n speed_max=vtypes.speeds_max[id_vtype],\n is_check_lanes=is_check_lanes)\n\n ids_trip_vtype = self.get_trips_for_vtype(id_vtype)\n # print ' id_vtype,id_mode',id_vtype,id_mode#,ids_trip_vtype\n # print ' weights',weights\n ids_edge_depart = self.ids_edge_depart[ids_trip_vtype]\n ids_edge_arrival = self.ids_edge_arrival[ids_trip_vtype]\n\n for id_trip, id_edge_depart, id_edge_arrival in zip(ids_trip_vtype, ids_edge_depart, ids_edge_arrival):\n cost, route = routing.get_mincostroute_edge2edge(id_edge_depart,\n id_edge_arrival,\n edges=edges,\n weights=weights)\n if len(route) > 0:\n ids_edges.append(route)\n ids_trip.append(id_trip)\n costs.append(cost)\n\n ids_route = self.routes.get_value().add_rows(ids_trip=ids_trip,\n ids_edges=ids_edges,\n costs=costs,\n )\n self.add_routes(ids_trip, ids_route)\n print ' exectime', time.clock()-exectime_start\n return ids_trip, ids_route",
"def cost(route):\n cost = 0\n for li in route:\n if cost<li.get_cost():\n cost=li.get_cost()\n return cost",
"def route_info(g, journey):\n distance = 0\n cost = 0.00\n time = 0\n check = 0\n \n for i in range(0, len(journey) - 1):\n city_name = journey[i]\n city_next = journey[i + 1]\n code_city = g.convert[city_name] \n code_next = g.convert[city_next]\n \n for flight in g.city_dict[code_city].get_flights_out():\n if(flight[0] == code_next):\n distance = distance + flight[1]\n time = time + route_info_helper(g, code_city, code_next, flight[1])\n if(i < 7):\n cost = cost + (distance * (0.35 - (i * 0.05)))\n \n check = check + 1\n if((check + 1) == len(journey)):\n return distance, cost, time\n else:\n print(\"Invalid Route\")\n return 0, 0, 0",
"def cost(self, route: List[int]) -> float:\n raise NotImplementedError",
"def route_info(self, route):\r\n total_distance = 0\r\n cost_mult = 0.35\r\n cost = 0\r\n time = 0\r\n if route[0] in self.edges:\r\n for i in range(len(route) - 1):\r\n for edge in self.edges[route[i]]:\r\n if edge.destination == route[i + 1]:\r\n total_distance += edge.distance\r\n cost += cost_mult * edge.distance\r\n time += self.calc_time(edge.distance)\r\n outgoing = len(self.edges[edge.destination])\r\n # if this airport is not the last one since we don't need to calculate layover for last\r\n if i is not len(route) - 2:\r\n time += 2 - ((1 / 6) * (outgoing - 1))\r\n if cost_mult > 0:\r\n cost_mult -= 0.05\r\n break;\r\n else:\r\n if edge == self.edges[route[i]][-1]:\r\n return\r\n return total_distance, round(cost, 2), round(time, 2)",
"def _cost_route_fine(self):\n return self.fine",
"def print_result(self, solution, vehicle_type, if_write):\r\n\r\n result = [['Vehicle_ID', 'Vehicle_type', 'Route', 'Leave_Time', 'Back_Time', 'Work_Time', 'Distance',\r\n 'Load_Volume', 'Wait_Time', 'Fixed_Cost', 'Travel_Cost', 'Total_Cost']]\r\n total_dist = 0\r\n total_cost = 0\r\n for k, veh in enumerate(solution):\r\n if len(veh) == 2:\r\n continue\r\n\r\n if vehicle_type[k] == 2:\r\n trans0 = small_veh[4]\r\n fix0 = small_veh[5]\r\n elif vehicle_type[k] == 3:\r\n trans0 = medium_veh[4]\r\n fix0 = medium_veh[5]\r\n else:\r\n trans0 = large_veh[4]\r\n fix0 = large_veh[5]\r\n\r\n total_cost += fix0\r\n departt = check_violation(veh, vehicle_type[k])[3]\r\n\r\n trvl_dist = 0\r\n veh_load_vol = 0\r\n wait_time = 0\r\n\r\n # get the output format\r\n route = [0] * len(result[0])\r\n route[0] = k + 1 # vehicle name\r\n route[1] = vehicle_type_dict[vehicle_type[k]] # vehicle type\r\n route_ele = []\r\n for ele in veh:\r\n if ele == 0:\r\n route_ele.append(str(ele))\r\n else:\r\n route_ele.append(num_id[ele][0])\r\n route[2] = '-'.join(route_ele) # route\r\n\r\n trvl_dist += (dist_mat[0, veh[1]] + dist_mat[veh[-2], 0])\r\n veh_load_vol += (num_demd[veh[1]][0] * bskt_vol + num_demd[veh[1]][1] * trsf_vol + (num_demd[veh[1]][2] +\r\n num_demd[veh[1]][3]) * milk_vol + num_demd[veh[1]][4] * paper_bskt)\r\n if departt / 60. < 24.:\r\n out_time = int(departt)\r\n else:\r\n out_time = int(departt - 24 * 60)\r\n route[3] = str(out_time // 60) + ':' + str(out_time % 60).zfill(2)\r\n t = departt + time_mat[0, veh[1]] + oprt_t\r\n for i in range(2, len(veh) - 1): # can not wait at the first 2 points\r\n trvl_dist += dist_mat[veh[i - 1], veh[i]]\r\n veh_load_vol += (num_demd[veh[i]][0] * bskt_vol + num_demd[veh[i]][1] * trsf_vol + (num_demd[veh[i]][2] +\r\n num_demd[veh[i]][3]) * milk_vol + num_demd[veh[i]][4] * paper_bskt)\r\n wait_t = num_timez[veh[i]][0] - (t + time_mat[veh[i - 1], veh[i]])\r\n if wait_t > 0 + 1e-5:\r\n # print veh[i-1], veh[i], wait_t\r\n wait_time += wait_t\r\n t = num_timez[veh[i]][0] + oprt_t\r\n else:\r\n t += (time_mat[veh[i - 1], veh[i]] + oprt_t)\r\n if t + time_mat[veh[-2], 0] < 24. * 60:\r\n in_time = int(t + time_mat[veh[-2], 0])\r\n else:\r\n in_time = int(t + time_mat[veh[-2], 0] - 24 * 60)\r\n\r\n route[4] = str(in_time // 60) + ':' + str(in_time % 60).zfill(2) # vehicle back time\r\n route[5] = round((t + time_mat[veh[-2], 0] - departt) / 60., 1)\r\n route[6] = round(trvl_dist / 1000., 2) # total distance\r\n route[7] = veh_load_vol # vehicle load volume\r\n route[8] = wait_time # vehicle wait time\r\n route[9] = fix0 # vehicle fixed cost\r\n route[10] = round(trvl_dist * trans0, 2) # vehicle travel cost\r\n route[11] = route[9] + route[10] # total cost\r\n\r\n total_cost += trvl_dist * trans0\r\n result.append(route)\r\n # print route\r\n total_dist += route[6]\r\n # print 'Last leave time: ', int(t) / 60, ':', int(t) % 60\r\n # print 'total distances: ', route[5]\r\n\r\n if if_write:\r\n run_time = time.strftime(\"%m%d_%H%M\", time.localtime())\r\n with open(r'C:\\Bee\\0Huaat\\Starbucks\\results\\Route_Plan_%s.csv' % run_time, 'w', newline='') as fw:\r\n writer = csv.writer(fw)\r\n for v in result:\r\n writer.writerow(v)\r\n\r\n return round(total_cost, 2)",
"def work(params) -> Union[None, float]:\n try:\n # either HTTP or bindings\n if host:\n path = action if action == \"route\" else \"sources_to_targets\"\n params_str = delimit_tuple(\n tuple((delimit_tuple(x) for x in params)), delimiter=\";\"\n )\n route = requests.get(f\"{host}/{path}/v1/driving/{params_str}\")\n else:\n route = router.route(params) if action == \"route\" else None\n except (RuntimeError, requests.exceptions.BaseHTTPError):\n return None\n\n if (\n random() > 0.95\n ): # assume that large number of routes will be tested, only print sample in debug mode\n LOGGER.debug(f\"Calculated route between {params[0]} and {params[1]}\")\n\n if report:\n result = route.json()\n if action == \"route\":\n try:\n dist = sum([x[\"distance\"] for x in result[\"routes\"]])\n except KeyError:\n LOGGER.critical(\n f\"No route was found from {params[0]} to {params[1]}. \"\n f\"Try regenerating the locations or specify a more narrow bounding box.\"\n )\n return None\n else:\n dists = [\n inner[\"distance\"]\n for outer in route[\"sources_to_targets\"]\n for inner in outer\n ]\n dist: float = mean(filter(lambda x: x is not None, dists))\n\n return dist",
"def print_route_detail(self, solution, vehicle_type, if_write):\r\n\r\n result = [[\r\n '线路编号',\r\n '门店编码',\r\n '门店名称',\r\n '门店地址',\r\n '经度',\r\n '纬度',\r\n '车型',\r\n '额定体积/m3',\r\n '额定重量/t',\r\n '到达时间',\r\n '离开时间',\r\n '行驶距离/km',\r\n '累计行驶距离km',\r\n '行驶时间/min',\r\n '卸货时间/min',\r\n '累计工作时间/h',\r\n '鲜食篮总数',\r\n '周转箱个数',\r\n '新绿园鲜奶980ML(罐)',\r\n '新绿园脱脂牛奶980ML(罐)',\r\n '纸箱个数',\r\n '卸货体积',\r\n '卸货重量']]\r\n\r\n total_dist = 0\r\n for k, veh in enumerate(solution):\r\n if vehicle_type[k] == 2:\r\n trans0 = small_veh[4]\r\n veh_param = small_veh\r\n\r\n elif vehicle_type[k] == 3:\r\n trans0 = medium_veh[4]\r\n veh_param = medium_veh\r\n\r\n else:\r\n trans0 = large_veh[4]\r\n veh_param = large_veh\r\n\r\n\r\n departt = check_violation(veh, vehicle_type[k])[3]\r\n t = departt\r\n\r\n trvl_dist = 0\r\n veh_load_vol = 0\r\n wait_time = 0\r\n\r\n veh_load_vol += (num_demd[veh[1]][0] * bskt_vol + num_demd[veh[1]][1] * trsf_vol + (num_demd[veh[1]][2] +\r\n num_demd[veh[1]][3]) * milk_vol + num_demd[veh[1]][4] * paper_bskt)\r\n if departt / 60. < 24.:\r\n out_time = int(math.ceil(departt))\r\n else:\r\n out_time = int(math.ceil(departt - 24 * 60))\r\n\r\n # get the output format\r\n store = [0] * len(result[0])\r\n store[0] = k + 1 # 线路序号\r\n store[1] = num_id[0][0] # 门店编号\r\n store[2] = num_id[0][1] # 门店名称\r\n store[3] = num_id[0][2] # 门店地址\r\n store[4] = loc[0][0] # 经度\r\n store[5] = loc[0][1] # 纬度\r\n store[6] = vehicle_type_dict[vehicle_type[k]] # 车型\r\n store[7] = veh_param[2] # 额定体积\r\n store[8] = veh_param[1] # 额定重量\r\n store[9] = str(out_time // 60) + ':' + str(out_time % 60).zfill(2) # 到达时间\r\n store[10] = str(out_time // 60) + ':' + str(out_time % 60).zfill(2) # 离开时间\r\n store[11] = 0 # 行驶距离\r\n store[12] = 0 # 累计行驶距离\r\n store[13] = 0 # 行驶时间\r\n store[14] = 0 # 卸货时间\r\n store[15] = 0 # 累计工作时间\r\n store[16] = 0 # 鲜食篮件数\r\n store[17] = 0 # 周转箱个数\r\n store[18] = 0 # 新绿园鲜奶\r\n store[19] = 0 # 新绿园脱脂牛奶\r\n store[20] = 0 # 纸箱\r\n store[21] = 0 # 卸货体积\r\n store[22] = 0 # 卸货重量\r\n\r\n store0 = copy.deepcopy(store)\r\n result.append(store0)\r\n\r\n # t = departt + time_mat[0, veh[1]] + oprt_t # t is the leaving time\r\n for i in range(1, len(veh)-1): # can not wait at the first 2 points\r\n store[1] = num_id[veh[i]][0]\r\n store[2] = num_id[veh[i]][1]\r\n store[3] = num_id[veh[i]][2]\r\n store[4] = loc[veh[i]][0]\r\n store[5] = loc[veh[i]][1]\r\n arr_time = t + time_mat[veh[i-1], veh[i]]\r\n if arr_time / 60. < 24.:\r\n in_time = int(math.ceil(arr_time))\r\n else:\r\n in_time = int(math.ceil(arr_time - 24 * 60))\r\n\r\n trvl_dist += dist_mat[veh[i-1], veh[i]]\r\n veh_load_vol += (num_demd[veh[i]][0] * bskt_vol + num_demd[veh[i]][1] * trsf_vol + (num_demd[veh[i]][2] +\r\n num_demd[veh[i]][3]) * milk_vol + num_demd[veh[i]][4] * paper_bskt)\r\n wait_t = num_timez[veh[i]][0] - (t + time_mat[veh[i-1], veh[i]])\r\n if wait_t > 0 + 1e-5:\r\n # t is the leaving time\r\n wait_time += wait_t\r\n t = num_timez[veh[i]][0] + oprt_t\r\n else:\r\n t += (time_mat[veh[i - 1], veh[i]] + oprt_t)\r\n if t < 24. 
* 60:\r\n out_time = int(math.ceil(t))\r\n else:\r\n out_time = int(math.ceil(t - 24 * 60))\r\n\r\n store[9] = str(in_time // 60) + ':' + str(in_time % 60).zfill(2) # 到达时间\r\n store[10] = str(out_time // 60) + ':' + str(out_time % 60).zfill(2) # 离开时间\r\n store[11] = round(dist_mat[veh[i-1], veh[i]] / 1000., 2) # 行驶距离\r\n store[12] = round(trvl_dist / 1000., 2) # 累计行驶距离\r\n store[13] = round(time_mat[veh[i-1], veh[i]], 1) # 行驶时间\r\n store[14] = oprt_t\r\n store[15] = round((t - departt) / 60., 2) # 累计工作时间\r\n store[16] = num_demd[veh[i]][0] # 鲜食篮件数\r\n store[17] = num_demd[veh[i]][1] # 周转箱个数\r\n store[18] = num_demd[veh[i]][2] # 新绿园鲜奶\r\n store[19] = num_demd[veh[i]][3] # 新绿园脱脂牛奶\r\n store[20] = num_demd[veh[i]][4] # 纸箱\r\n store[21] = (num_demd[veh[i]][0] * bskt_vol + num_demd[veh[i]][1] * trsf_vol + (num_demd[veh[i]][2] +\r\n num_demd[veh[i]][3]) * milk_vol + num_demd[veh[i]][4] * paper_bskt) # 卸货体积\r\n store[22] = 0 # 卸货重量\r\n\r\n store0 = copy.deepcopy(store)\r\n result.append(store0)\r\n # print(result[-1])\r\n\r\n store[1] = num_id[0][0] # 门店编号\r\n store[2] = num_id[0][1] # 门店名称\r\n store[3] = num_id[0][2] # 门店地址\r\n store[4] = loc[0][0] # 经度\r\n store[5] = loc[0][1] # 纬度\r\n arr_time = t + time_mat[veh[-2], 0]\r\n if arr_time / 60. < 24.:\r\n in_time = int(math.ceil(arr_time))\r\n else:\r\n in_time = int(math.ceil(arr_time - 24 * 60))\r\n store[9] = str(in_time // 60) + ':' + str(in_time % 60).zfill(2) # 到达时间\r\n store[10] = str(in_time // 60) + ':' + str(in_time % 60).zfill(2) # 离开时间\r\n store[11] = round(dist_mat[veh[-2], 0] / 1000., 2) # 行驶距离\r\n store[12] = round((trvl_dist + dist_mat[veh[-2], 0]) / 1000., 2) # 累计行驶距离\r\n store[13] = round(time_mat[veh[-2], 0], 1) # 行驶时间\r\n store[14] = 0 # 卸货时间\r\n store[15] = round((t - departt + time_mat[veh[-2], 0]) / 60., 2) # 累计工作时间\r\n store[16] = 0 # 鲜食篮件数\r\n store[17] = 0 # 周转箱个数\r\n store[18] = 0 # 新绿园鲜奶\r\n store[19] = 0 # 新绿园脱脂牛奶\r\n store[20] = 0 # 纸箱\r\n store[21] = 0 # 卸货体积\r\n store[22] = 0 # 卸货重量\r\n\r\n store0 = copy.deepcopy(store)\r\n result.append(store0)\r\n # print(result[-1])\r\n\r\n if if_write:\r\n # run_time = time.strftime(\"%m%d_%H%M\", time.localtime())\r\n with open(r'C:\\Bee\\0Huaat\\Starbucks\\output\\Route_Details_%s_%s.csv' % (veh_spd_kmh, run_time), 'w', newline='') as fw:\r\n writer = csv.writer(fw)\r\n for v in result:\r\n # print(v)\r\n writer.writerow(v)",
"def find_best_route(all_cost, all_routes):\n cost_best_route = np.inf\n for i in range(len(all_cost)):\n if all_cost[i] < cost_best_route:\n cost_best_route = all_cost[i]\n best_route = all_routes[i]\n return cost_best_route, best_route",
"def test_route(self):\n\n params = get_params()\n estimator = LinearEstimator()\n problem_builder = ProblemBuilder(params=params, estimator=estimator)\n model_builder = OptimizationModelBuilder(\n constraints=[CapacityConstraint()]\n )\n router = Router(\n problem_builder=problem_builder,\n optimization_model_builder=model_builder\n )\n riders = parse_models(model_dicts=test_riders, cls=Rider)\n vehicles = parse_models(model_dicts=test_vehicles, cls=Vehicle)\n depots = parse_models(model_dicts=test_depots, cls=Depot)\n routes = router.route(riders, vehicles, depots)\n self.assertTrue(routes, msg='Routes could not be built.')\n\n for route in routes:\n self.assertTrue(route['vehicle_id'], msg='Route without vehicle.')\n self.assertTrue(\n len(route['stops']) > 1,\n msg='Route with single stop.'\n )",
"def time_nn(self, on_way_time, curr_cust, remain_list, used_resource, rout_len, vehicle_type):\r\n if vehicle_type == 2:\r\n veh_cap = small_veh\r\n elif vehicle_type == 3:\r\n veh_cap = medium_veh\r\n else:\r\n veh_cap = large_veh\r\n real_wait_time = 0 # the final wait time after testing all the possible stores\r\n real_vst_cust = -1 # the final visiting store after testing all the possible stores\r\n visit_cust = [-1, 100000, 600000, 10000] # [cust_id, next_start, distance, closeness]\r\n if rout_len - 1 < 50: # max number of stores a vehicle visits\r\n for cust in remain_list:\r\n # print('checking customer: ', cust)\r\n if (used_resource[0] + num_demd[cust][0] * bskt_vol + num_demd[cust][1] * trsf_vol + (num_demd[cust][2] +\r\n num_demd[cust][3]) * milk_vol + num_demd[cust][4] * paper_bskt) > veh_cap[2]:\r\n # print('run out of effective volume')\r\n continue # volume overload\r\n # elif dist_mat[curr_cust, cust] + dist_mat[cust, 0] > veh_cap[3] - used_resource[3]:\r\n # print('run out of distance')\r\n # continue\r\n elif used_resource[2] + time_mat[curr_cust, cust] > num_timez[cust][1]:\r\n # print('late than last receive time')\r\n continue # can not arrive before last receive time\r\n elif time_mat[curr_cust, cust] + oprt_t + time_mat[cust, 0] > veh_cap[3] - on_way_time:\r\n # print('run out of work time')\r\n continue\r\n elif (curr_cust > 0 and used_resource[2] + time_mat[curr_cust, cust] < num_timez[cust][0] and\r\n num_timez[cust][0] - used_resource[2] + oprt_t + time_mat[cust, 0] > veh_cap[3] - on_way_time):\r\n # print('run out of work time - with waiting time')\r\n continue\r\n else:\r\n wait_time = num_timez[cust][0] - (used_resource[2] + time_mat[curr_cust, cust])\r\n\r\n if wait_time < 0:\r\n next_start = used_resource[2] + time_mat[curr_cust, cust]\r\n h_ij = time_mat[curr_cust, cust]\r\n else: # arrive early\r\n next_start = num_timez[cust][0]\r\n if curr_cust == 0:\r\n h_ij = time_mat[curr_cust, cust]\r\n wait_time = 0 # special situation for depot depart\r\n else:\r\n h_ij = next_start - used_resource[2]\r\n v_ij = num_timez[cust][1] - (used_resource[2] + time_mat[curr_cust, cust])\r\n close_ij = alp * time_mat[curr_cust, cust] + bet * h_ij + gam * v_ij # closeness between i and j\r\n # print(curr_cust, cust, close_ij)\r\n if close_ij < visit_cust[3]:\r\n real_wait_time = wait_time\r\n real_vst_cust = cust\r\n visit_cust[0] = cust\r\n visit_cust[1] = next_start\r\n visit_cust[2] = dist_mat[curr_cust, cust]\r\n visit_cust[3] = close_ij\r\n else:\r\n continue\r\n\r\n\r\n if visit_cust[0] == -1: # no customer to visit\r\n visit_cust[0] = 0\r\n visit_cust[1] = used_resource[-1] + time_mat[curr_cust, 0]\r\n on_way_time += time_mat[curr_cust, 0]\r\n else:\r\n # print(curr_cust, real_vst_cust, real_wait_time)\r\n if real_wait_time <= 0:\r\n on_way_time += (oprt_t + time_mat[curr_cust, real_vst_cust])\r\n else:\r\n on_way_time += (oprt_t + real_wait_time + time_mat[curr_cust, real_vst_cust])\r\n\r\n return visit_cust, on_way_time",
"def heuristic_function(self, node_current: PriorityNode) -> float:\n ########################################################################\n # todo: Implement your own heuristic cost calculation here. #\n # Hint: #\n # Use the State of the current node and the information from the #\n # planning problem, as well as from the scenario. #\n # Some helper functions for your convenience can be found in #\n # ./search_algorithms/base_class.py #\n ########################################################################\n output_logs = False\n if output_logs:\n print(\"##################\")\n print(\"current time step: \", node_current.list_paths[-1][-1].time_step)\n print(\"current problem mode\", self.planningProblemType)\n print(\"depth tree: \", node_current.depth_tree)\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n\n # Test if reached goal:\n if self.reached_goal(node_current.list_paths[-1]):\n return 0.0\n # Test if route planner failed to find a path: \n if self.routeplannerresult is None:\n return np.inf\n\n ############ Detect cars in front:\n # calc cost based on distance to gool following the refrence path:\n # loop through all obstacles at time step x and find if any is close of current pos:\n if not self.disableObstAvoidance:\n for obst in self.list_obstacles:\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n disttoobst = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if disttoobst <= lookaheadVar:\n # calc orientation diff between car and obstacle:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n if not 'velocity' in obstPos.attributes:\n continue\n if node_current.list_paths[-1][-1].velocity > obstPos.velocity and obstPos.velocity != 0:\n return np.inf\n \n # get index of closest object to the ego vehicle:\n index_smallest_dist = self.get_index_nearest_obst_infront(node_current)\n \n # use the index to locate vehicle to calc cost: \n if index_smallest_dist != -1:\n # found the index of vehicle with smallest distance to ego car:\n obst = self.list_obstacles[index_smallest_dist]\n obstPos = obst.state_at_time(currenttimestep)\n if obstPos is not None and 'velocity' in obstPos.attributes:\n if obstPos.velocity == 0:\n cost = node_current.list_paths[-1][-1].velocity\n return cost\n if node_current.list_paths[-1][-1].velocity > obstPos.velocity:\n return np.inf\n cost = abs(node_current.list_paths[-1][-1].velocity - obstPos.velocity)\n return cost\n #########################################################\n\n # Decide based on planning problem type how to calculate cost\n if self.planningProblemType == 'ModeA':\n # Call function for planning problem with desired time, position, speed and orientation\n cost = self.cost_for_modeA_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeA cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeB':\n # Call function for planning problem with desired time, position and velocity:\n cost = self.cost_for_modeB_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB 
cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeC':\n # Call function for planning problem with desired time, position and orientation:\n cost = self.cost_for_modeC_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeD':\n # Call function for planning problem with desired time and position:\n cost = self.cost_for_modeD_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'Survival':\n # Call function for planning problem with desired time:\n cost = self.cost_for_Survival_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost",
"def get_context_route_condition(self, pathology_choose,\n way_choose, pathologies, ways):\n essentials_oils = 1\n if way_choose.name == \"orale\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)[0:2]\n\n vegetable_oil = NeutralProduct.objects.get(name=\"miel\")\n protocole = MethodOfUse.objects.get(name=\"orale\")\n\n elif way_choose.name == \"bain\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)[0:1]\n\n vegetable_oil = NeutralProduct.objects.get(name=\"gel douche\")\n protocole = MethodOfUse.objects.get(name=\"bain\")\n\n elif way_choose.name == \"diffusion\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)\n\n vegetable_oil = NeutralProduct.objects.get(name=\"alcool\")\n protocole = MethodOfUse.objects.get(name=\"diffusion\")\n\n elif way_choose.name == \"Inhalation\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)\n\n vegetable_oil = NeutralProduct.objects.get(name=\"bol d'eau\")\n protocole = MethodOfUse.objects.get(name=\"inhalation\")\n\n elif way_choose.name == \"cutanée\":\n essentials_oils = EssentialOil.objects.filter(\n pathology__name=pathology_choose.name).filter(\n way__name=way_choose.name)\n\n vegetable_oil = pathology_choose.vegetable_oil\n\n if pathology_choose.zone == \"general\":\n protocole = MethodOfUse.objects.get(\n name=\"cutanée générale\")\n else:\n protocole = MethodOfUse.objects.get(name=\"cutanée\")\n\n number_he = essentials_oils.count()\n amount = Recipe.objects.filter(\n way__name=way_choose.name).get(number_he=number_he)\n sides_effects = SideEffect.objects.filter(\n essential_oil__in=essentials_oils).distinct()\n contraindication = Contraindication.objects.filter(\n essential_oil__in=essentials_oils).distinct()\n\n context = {\n \"pathologies\": pathologies,\n \"pathology_choose\": pathology_choose,\n \"essentials_oils\": essentials_oils,\n \"vegetable_oil\": vegetable_oil,\n \"way_choose\": way_choose,\n \"ways\": ways,\n \"amount\": amount,\n \"protocole\": protocole,\n \"sides_effects\": sides_effects,\n \"contraindications\": contraindication,\n }\n\n return context",
"def print_route_summary(self, solution, vehicle_type, if_write):\r\n result_summary = [[\r\n '计划编号',\r\n '门店数',\r\n '配送总体积/m3',\r\n '配送总重量/t',\r\n '设定车速/km/h',\r\n '总车数',\r\n '总额定体积/m3',\r\n '总额定重量/t',\r\n '体积装载率/%',\r\n '重量装载率/%',\r\n '总行驶里程/km',\r\n '有效里程/km',\r\n '返空里程/km',\r\n '工作时间/h',\r\n '行驶时间/min',\r\n '卸货时间/min',\r\n '总成本/元',\r\n '固定成本/元',\r\n '运输成本/元',\r\n '2T车数量',\r\n '3T车数量',\r\n '5T车数量',\r\n '鲜食篮总数',\r\n '周转箱个数',\r\n '新绿园鲜奶980ML(罐)',\r\n '新绿园脱脂牛奶980ML(罐)',\r\n '纸箱个数']]\r\n summ_value = [0] * len(result_summary[0])\r\n\r\n result = [[\r\n '线路编号',\r\n '出发时间',\r\n '返回时间',\r\n '工作时间/h',\r\n '行驶总时间/min',\r\n '卸货总时间/min',\r\n '等待时间/min',\r\n '总行驶里程/km',\r\n '有效里程/km',\r\n '返空里程/km',\r\n '车型',\r\n '额定装载体积/m3',\r\n '额定装载重量/t',\r\n '实际装载体积/m3',\r\n '实际装载重量/t',\r\n '体积装载率/%',\r\n '重量装载率/%',\r\n '总成本/元',\r\n '固定成本/元',\r\n '运输成本/元',\r\n '配送门店总数',\r\n '门店1编号',\r\n '门店1名称',\r\n '门店2编号',\r\n '门店2名称',\r\n '门店3编号',\r\n '门店3名称',\r\n '门店4编号',\r\n '门店4名称',\r\n '门店5编号',\r\n '门店5名称',\r\n '门店6编号',\r\n '门店6名称',\r\n '门店7编号',\r\n '门店7名称',\r\n '门店8编号',\r\n '门店8名称',\r\n '门店9编号',\r\n '门店9名称',\r\n '门店10编号',\r\n '门店10名称',\r\n '门店11编号',\r\n '门店11名称',\r\n '门店12编号',\r\n '门店12名称',\r\n '门店13编号',\r\n '门店13名称',\r\n '门店14编号',\r\n '门店14名称',\r\n '门店15编号',\r\n '门店15名称',\r\n '门店16编号',\r\n '门店16名称',\r\n '门店17编号',\r\n '门店17名称',\r\n '门店18编号',\r\n '门店18名称',\r\n '门店19编号',\r\n '门店19名称',\r\n '门店20编号',\r\n '门店20名称']]\r\n\r\n total_dist = 0\r\n for k, veh in enumerate(solution):\r\n if vehicle_type[k] == 2:\r\n trans0 = small_veh[4]\r\n veh_param = small_veh\r\n summ_value[19] += 1\r\n elif vehicle_type[k] == 3:\r\n trans0 = medium_veh[4]\r\n veh_param = medium_veh\r\n summ_value[20] += 1\r\n else:\r\n trans0 = large_veh[4]\r\n veh_param = large_veh\r\n summ_value[21] += 1\r\n\r\n departt = check_violation(veh, vehicle_type[k])[3]\r\n\r\n trvl_dist = 0\r\n veh_load_vol = 0\r\n wait_time = 0\r\n trvl_time = 0\r\n\r\n # get the output format\r\n route = [0] * 21\r\n route[0] = k + 1 # vehicle name\r\n route[10] = vehicle_type_dict[vehicle_type[k]] # 车型\r\n\r\n\r\n trvl_dist += (dist_mat[0, veh[1]] + dist_mat[veh[-2], 0])\r\n trvl_time += (time_mat[0, veh[1]] + time_mat[veh[-2], 0])\r\n veh_load_vol += (num_demd[veh[1]][0] * bskt_vol + num_demd[veh[1]][1] * trsf_vol + (num_demd[veh[1]][2] +\r\n num_demd[veh[1]][3]) * milk_vol + num_demd[veh[1]][4] * paper_bskt)\r\n\r\n summ_value[22] += num_demd[veh[1]][0]\r\n summ_value[23] += num_demd[veh[1]][1]\r\n summ_value[24] += num_demd[veh[1]][2]\r\n summ_value[25] += num_demd[veh[1]][3]\r\n summ_value[26] += num_demd[veh[1]][4]\r\n\r\n if departt / 60. 
< 24.:\r\n out_time = int(departt)\r\n else:\r\n out_time = int(departt - 24 * 60)\r\n route[1] = str(out_time // 60) + ':' + str(out_time % 60).zfill(2)\r\n t = departt + time_mat[0, veh[1]] + oprt_t\r\n for i in range(2, len(veh)-1): # can not wait at the first 2 points\r\n trvl_dist += dist_mat[veh[i-1], veh[i]]\r\n trvl_time += time_mat[veh[i-1], veh[i]]\r\n veh_load_vol += (num_demd[veh[i]][0] * bskt_vol + num_demd[veh[i]][1] * trsf_vol + (num_demd[veh[i]][2] +\r\n num_demd[veh[i]][3]) * milk_vol + num_demd[veh[i]][4] * paper_bskt)\r\n\r\n summ_value[22] += num_demd[veh[i]][0]\r\n summ_value[23] += num_demd[veh[i]][1]\r\n summ_value[24] += num_demd[veh[i]][2]\r\n summ_value[25] += num_demd[veh[i]][3]\r\n summ_value[26] += num_demd[veh[i]][4]\r\n\r\n wait_t = num_timez[veh[i]][0] - (t + time_mat[veh[i-1], veh[i]])\r\n if wait_t > 0 + 1e-5:\r\n # print veh[i-1], veh[i], wait_t\r\n wait_time += wait_t\r\n t = num_timez[veh[i]][0] + oprt_t\r\n else:\r\n t += (time_mat[veh[i - 1], veh[i]] + oprt_t)\r\n if t + time_mat[veh[-2], 0] < 24. * 60:\r\n in_time = int(t + time_mat[veh[-2], 0])\r\n else:\r\n in_time = int(t + time_mat[veh[-2], 0] - 24 * 60)\r\n\r\n route[2] = str(in_time // 60) + ':' + str(in_time % 60).zfill(2) # 返回时间\r\n route[3] = round((t + time_mat[veh[-2], 0] - departt) / 60., 1) # 工作时间\r\n route[4] = round(trvl_time, 1) # 行驶时间\r\n route[5] = round(oprt_t * (len(veh) - 2), 1) # 操作时间\r\n route[6] = wait_time\r\n route[7] = round(trvl_dist / 1000., 2) # 行驶里程\r\n route[8] = round((trvl_dist - dist_mat[veh[-2], 0]) / 1000., 2) # 有效里程\r\n route[9] = round(dist_mat[veh[-2], 0] / 1000., 2) # 返空里程\r\n route[11] = veh_param[2] # 额定体积\r\n route[12] = veh_param[1] # 额定重量\r\n route[13] = veh_load_vol # 实际装载体积\r\n route[14] = 0. # 实际装载重量\r\n route[15] = round(veh_load_vol / veh_param[2] * 100, 2) # 体积装载率\r\n route[16] = round(route[14] / veh_param[1] * 100, 2) # 重量装载率\r\n route[18] = veh_param[-1] # 固定成本\r\n route[19] = round(trvl_dist * trans0, 2) # 运输成本\r\n route[17] = route[18] + route[19] # 总成本\r\n route[20] = len(veh) - 2 # 配送门店总数\r\n\r\n for ele in veh:\r\n if ele != 0:\r\n route.append(num_id[ele][0])\r\n route.append(num_id[ele][1])\r\n\r\n\r\n result.append(route)\r\n # print route\r\n total_dist += route[7]\r\n # print 'Last leave time: ', int(t) / 60, ':', int(t) % 60\r\n # print 'total distances: ', route[5]\r\n\r\n summ_value[2] += veh_load_vol\r\n summ_value[3] += 0\r\n summ_value[4] = veh_spd_kmh\r\n summ_value[5] += 1\r\n summ_value[6] += veh_param[2]\r\n summ_value[7] += veh_param[1]\r\n summ_value[10] += round(trvl_dist / 1000., 2)\r\n summ_value[11] += route[8]\r\n summ_value[12] += route[9]\r\n summ_value[13] += route[3]\r\n summ_value[14] += route[4]\r\n summ_value[15] += route[5]\r\n summ_value[16] += route[17]\r\n summ_value[17] += route[18]\r\n summ_value[18] += route[19]\r\n\r\n\r\n if if_write:\r\n # run_time = time.strftime(\"%m%d_%H%M\", time.localtime())\r\n with open(r'C:\\Bee\\0Huaat\\Starbucks\\output\\Route_Summary_%s_%s.csv' % (veh_spd_kmh, run_time), 'w', newline='') as fw:\r\n writer = csv.writer(fw)\r\n for v in result:\r\n writer.writerow(v)\r\n\r\n\r\n summ_value[0] = run_time\r\n summ_value[1] = store_num - 1\r\n summ_value[8] = round(summ_value[2] / summ_value[6] * 100, 2)\r\n summ_value[9] = round(summ_value[3] / summ_value[7] * 100, 2)\r\n result_summary.append(summ_value)\r\n with open(r'C:\\Bee\\0Huaat\\Starbucks\\output\\Plan_Summary_%s_%s.csv' % (veh_spd_kmh, run_time), 'w', newline='') as fww:\r\n writer = csv.writer(fww)\r\n for vv in 
result_summary:\r\n writer.writerow(vv)\r\n\r\n\r\n return total_dist",
"def solve(\n self,\n initial_routes=None,\n solver=\"cbc\",\n cspy=False,\n exact=True,\n pricing_strategy=\"PrunePaths\",\n ):\n if cspy:\n self.G.graph[\"subproblem\"] = \"cspy\"\n else:\n self.G.graph[\"subproblem\"] = \"lp\"\n print(self.G.graph[\"name\"], self.G.graph[\"subproblem\"])\n print(\"===========\")\n prob = VehicleRoutingProblem(\n self.G,\n duration=self.max_duration,\n load_capacity=self.max_load,\n drop_penalty=self.penalty,\n pickup_delivery=self.activate_pickup_delivery,\n distribution_collection=self.activate_distribution_collection,\n time_windows=self.activate_time_windows,\n )\n prob.solve(\n initial_routes=initial_routes,\n cspy=cspy,\n exact=exact,\n pricing_strategy=pricing_strategy,\n solver=solver,\n )\n self.best_value, self.best_routes = prob.best_value, prob._best_routes_as_graphs\n self.best_routes_nodes = prob.best_routes",
"def get_objective(self):\n self.objective = 0\n for r in self.routes:\n r.update_route(self.vrpdata)\n self.objective += r.distance\n # all() returns True if all elements of the iterable are true\n self.solutionValid = (all([r.tourValid for r in self.routes]) and len(self.routes) <= self.vrpdata.MaxNumVeh)\n if self.solutionValid:\n return self.objective\n return -1",
"def optimise(self):\n route = str(sorted(self.heuristic_path))\n\n if route in self.routes:\n saved = TSP.routes[route]\n self.heuristic_path = saved[\"path\"]\n self.heuristic_cost = saved[\"cost\"]\n else:\n self._optimise()\n\n return self.heuristic_path, self.heuristic_cost",
"def test_cost(self):\n self.assertTrue(\n int(self.ospf.parse_state(\n pattern='cost',\n cmd_key='sh_ospf_ints')) == 10, 'OSPF Interface: cost not found')",
"def routeOptions(ORBITS, SET_OF_VEHICLES, CURRENT_WEATHER):\n OPTION = []\n for eachOrbit in ORBITS:\n if eachOrbit.traffic_speed == 0:\n print('Route {} Blocked'.format(eachOrbit.route_name))\n OPTION = None\n break\n else:\n for eachVehicle in SET_OF_VEHICLES:\n if eachVehicle.weather_suitability(CURRENT_WEATHER):\n eff_speed = min(eachVehicle.max_speed, eachOrbit.traffic_speed)\n time_taken = (eachOrbit.distance/eff_speed)+(eachVehicle.cross_crater*eachOrbit.craters*crater_factor(CURRENT_WEATHER)/60)\n time_taken = round(time_taken, 2)\n OPTION.append((eachVehicle.veh_type, eachOrbit.route_name, time_taken))\n else:\n continue\n return OPTION",
"def route_cost(self, route):\n total_weight = 0\n c = 0\n start = route[0]\n for end in route[1:]:\n y = float(self.stars[start][end]['weight']) - c\n t = total_weight + y\n c = (t - total_weight) - y\n\n total_weight = t\n\n start = end\n return total_weight",
"def _calculate_costs(self):\n cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost",
"def find_route_optimal_route_length(tsp):\n best_route = []\n for city_id in tsp.best_route:\n for city in tsp.cities:\n if city_id == city.id:\n best_route.append(city)\n return Fitness(route=best_route).route_distance()",
"def print_solution(self):\n print(f'Objective: {self.solution.ObjectiveValue()}')\n total_distance = 0\n total_load = 0\n max_route_distance = 0\n for vehicle_id in range(self.data['num_vehicles']):\n index = self.routingManager.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not self.routingManager.IsEnd(index):\n node_index = self.manager.IndexToNode(index)\n route_load += self.data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(self.data['names'][node_index], route_load)\n\n previous_index = index\n index = self.solution.Value(self.routingManager.NextVar(index))\n route_distance += self.routingManager.GetArcCostForVehicle(\n previous_index, index, vehicle_id\n )\n print(route_distance)\n\n plan_output += '{0}, Load({1}) \\n '.format(self.data['names'][self.manager.IndexToNode(index)], route_load)\n\n plan_output += 'Distance of the route: {}\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n\n print(plan_output)\n total_distance += route_distance\n total_load += route_load\n\n print('Total distance of all routes: {}km'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))",
"def cost(distance, highway, bicycle, incline, preferences):\n\n #unpack preferences\n (flatness_pref, bicycle_pref, distance_pref,\n motorway_pref, highway_pref, residential_pref) = preferences\n multiplier = 1 + bike_multiplier(bicycle, bicycle_pref) + road_multiplier(highway, bicycle_pref, motorway_pref, highway_pref, residential_pref)\n if multiplier <= 0:\n multiplier = 0.01\n incl = incline_multiplier(float(incline))*flatness_pref\n cost = float(distance) * multiplier + incl\n if cost <= 0:\n cost = 0.01\n return cost",
"def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate",
"def run_travel_optimisation(trip_start_date, is_min_co2_search = False, is_force_compute = False):\n \n waypoint_co2 = {}\n waypoint_durations = {}\n\n # get all prefectures referential\n db_connector = Connector()\n with db_connector:\n results = db_connector.execute_query(sql.SQL_GET_ALL_PREFECTURE)\n all_waypoints = pd.DataFrame(results.fetchall())\n\n # Vérification si les trajets péfecture à préfecture ont été déjà calculés\n db_connector = Connector()\n with db_connector:\n saved_waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n # Dans le précalcul des trajets optimaux, utilisation de la date courante\n travel_date = datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n bad_waypoints = []\n\n if saved_waypoints.rowcount > 0 and not is_force_compute:\n print(\"le référentiel des voyage existe déjà\")\n else:\n try:\n bdd_management.truncate_journey()\n\n for (from_city, to_city) in combinations(all_waypoints[0].values, 2):\n try:\n if int(from_city) in bad_waypoints or int(to_city) in bad_waypoints:\n continue\n\n route = requests.get(API_NAVITIA.format(\n int(from_city), int(to_city), travel_date, API_KEY))\n response = json.loads(route.text)\n\n mid_duration = 0\n mid_co2 = 0\n for journey in response[\"journeys\"]:\n mid_duration += journey[\"duration\"]\n mid_co2 += journey[\"co2_emission\"][\"value\"]\n\n waypoint_co2[frozenset([from_city, to_city])\n ] = mid_co2/len(response[\"journeys\"])\n waypoint_durations[frozenset(\n [from_city, to_city])] = mid_duration/len(response[\"journeys\"])\n\n except Exception as e:\n print(\"Error with finding the route between %s and %s : %s\" %\n (from_city, to_city, response[\"error\"][\"message\"]))\n if 'no destination point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(to_city))\n\n if 'no origin point' == response[\"error\"][\"message\"]:\n bad_waypoints.append(int(from_city))\n\n for bad_insee_code in re.findall('The entry point: admin:fr:([0-9]+) is not valid', response[\"error\"][\"message\"]):\n if not int(bad_insee_code) in bad_waypoints:\n bad_waypoints.append(int(bad_insee_code))\n\n # Enregistrement des trajets point à point (préfecture à préfecture)\n db_connector = Connector()\n with db_connector:\n for (waypoint1, waypoint2) in waypoint_co2.keys():\n waypoint = [waypoint1,\n waypoint2,\n str(waypoint_co2[frozenset([waypoint1, waypoint2])]),\n str(int(waypoint_durations[frozenset([waypoint1, waypoint2])]))]\n \n db_connector.execute_nonquery(sql.SQL_INSERT_WAYPOINT, waypoint)\n # commit trajets unitaires dans la bdd\n db_connector.commit()\n\n # enregistrement des préfectures non trouvée (pas de gare)\n print(bad_waypoints)\n db_connector = Connector()\n with db_connector:\n for bad_city in bad_waypoints:\n db_connector.execute_nonquery(\n sql.SQL_INSERT_CITY_WITHOUT_STATION, str(bad_city))\n #db_connector.commit()\n except Exception as e:\n print('Erreur durant la génération des trajets de préfecture en préfecture. 
Rollback effectué')\n\n waypoint_co2 = {}\n waypoint_durations = {}\n processed_waypoints = set()\n\n db_connector = Connector()\n with db_connector:\n waypoints = db_connector.execute_query(sql.SQL_GET_WAYPOINTS)\n\n for row in waypoints:\n waypoint_co2[frozenset([int(row[0]), int(row[1])])] = row[2]\n waypoint_durations[frozenset([int(row[0]), int(row[1])])] = row[3]\n processed_waypoints.update([row[0], row[1]])\n\n travel_results = algorithms.run_genetic_algorithm(waypoints = list(processed_waypoints), is_min_co2_search = is_min_co2_search, generations=300, population_size=100 )\n\n # take most represented trip order\n journey_groups = Counter(chain(*travel_results))\n top_journeys = journey_groups.most_common(1)[0][0]\n\n print('Le voyage le plus représentatif est :')\n print(top_journeys)\n\n # calcul des horaires de voyage réels pour le trajet le plus optimisé\n\n print('Départ du calcul du voyage le %s' %\n (datetime_str_to_datetime_str(trip_start_date)))\n travel_date = trip_start_date\n\n db_connector = Connector()\n with db_connector:\n try:\n #vidage de la table contenant les informations du voyage\n bdd_management.truncate_roadtrip()\n\n for i in range(len(top_journeys)-1):\n try:\n from_city_insee = top_journeys[i]\n to_city_insee = top_journeys[i+1]\n route = requests.get(API_NAVITIA.format(\n int(from_city_insee), int(to_city_insee), travel_date, API_KEY))\n travels = json.loads(route.text)\n\n # Contrôle des voyage reçus pour identifier le plus adapté à recherche\n best_travel = travels[\"journeys\"][0]\n for travel in travels[\"journeys\"]:\n if is_min_co2_search and float(best_travel['co2_emission']['value']) > float(travel['co2_emission']['value']):\n best_travel = travel\n if best_travel['arrival_date_time'] > travel['arrival_date_time']:\n best_travel = travel\n\n # sauvegarde du trajet 'i' en base\n save_trip_section(db_connector, all_waypoints, from_city_insee, to_city_insee, best_travel)\n\n # le prochain trajet devra avoir une date de départ > à la date de ce trajet\n travel_date = best_travel['arrival_date_time']\n\n except Exception as e:\n print(\"!! Erreur durant le calcul du trajet entre '%s' et '%s'\" %\n (from_city_insee, to_city_insee))\n\n #Ecriture du résumé du voyage\n resume = db_connector.execute_query(sql.SQL_GET_C02_CONSUMPTION_RESUME)\n resume = resume.fetchone()\n\n resume_description = \"\"\"Début du voyage le {} . Arrivée le {}. \n Le voyage à durée {} pour un total de {:d} kgeC\"\"\".format(\n datetime_str_to_datetime_str(trip_start_date),\n datetime_str_to_datetime_str(travel_date),\n str(timedelta(seconds=resume[0])) ,\n trunc( resume[1]/1000))\n\n store_section(db_connector, resume_description, None, None, 'INFO', resume[0], resume[1])\n\n db_connector.commit()\n\n except Exception as e:\n db_connector.rollback()\n print('Erreur durant la création du voyage. rollback effectué!!!')\n\n print('print map with road-trip data')\n visualization.generate_visualization()\n\n print('Travel complete. Have nive trip!!!')",
"def calculate_cost(route, adjacency_matrix):\n route_shifted = np.roll(route,1)\n cost = np.sum(adjacency_matrix[route, route_shifted])\n st_dev = np.std(adjacency_matrix[route, route_shifted])\n return st_dev, cost"
]
| [
"0.6641761",
"0.6013915",
"0.5916763",
"0.5848608",
"0.5837383",
"0.5789502",
"0.56984466",
"0.56722516",
"0.56595474",
"0.5631287",
"0.5621344",
"0.5614399",
"0.5525109",
"0.55222577",
"0.55129397",
"0.5479792",
"0.5474687",
"0.5432638",
"0.5411634",
"0.54076946",
"0.53974736",
"0.5392777",
"0.52961904",
"0.5280245",
"0.52538913",
"0.52064294",
"0.52054536",
"0.5186037",
"0.51762354",
"0.5162019"
]
| 0.77047276 | 0 |
Given a route, return the vehicle type of the route. Small vehicle first, medium second, large last. | def route_type(route):
typ = 2
vol_accu = 0 # accumulated volume
if len(route) <= 2:
return typ
else:
for i in range(1, len(route) - 1):
cust0 = route[i]
vol_accu += (num_demd[cust0][0] * bskt_vol + num_demd[cust0][1] * trsf_vol + (num_demd[cust0][2] +
num_demd[cust0][3]) * milk_vol + num_demd[cust0][4] * paper_bskt)
if vol_accu <= small_veh[2]:
return 2
elif vol_accu <= medium_veh[2]:
return 3
elif vol_accu <= large_veh[2]:
return 5
else:
print('!!!Route is invalid: out of max volume!', route) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vehicle_type():\n pass",
"def vehicle_type(self):\n return 'truck'",
"def vehicle_type(self):\n return 'truck'",
"def vehicle_type(self):\n pass",
"def vehicle_type(self):\n return 'car'",
"def vehicle_type(self):\n return 'car'",
"def vehicle_type(self):\n return 'car'",
"def road_travel(self, path, vehicle_type):\n # last node in path\n # is destination\n if len(path) < 2:\n return\n\n leg = path[0]\n if vehicle_type is VehicleType.Public:\n edge = self.transit_roads.network[leg.frm][leg.to][leg.edge_no]\n else:\n edge = self.roads.network[leg.frm][leg.to][leg.edge_no]\n\n # where leg.p is the proportion of the edge we travel\n time = self.roads.router.edge_travel_time(edge) * leg.p\n\n return leg, edge, time",
"def index_to_vehicle_type(index):\n if index == 0:\n return \"wheel\"\n if index == 1:\n return \"tank\"\n if index == 2:\n return \"hover\"",
"def vehicle_type(self):\n return 'motorcycle'",
"def vehicle_type(self):\n return 'motorcycle'",
"def vehicle_type(self):\n return 'motorcycle'",
"def RouteTargetType(self):\n return self._get_attribute('routeTargetType')",
"def get_for_type(class_, vehicle):\n Fuel = class_\n found = session.query(Fuel).filter_by(name=vehicle.fuel_type).first()\n return found",
"def route_info(self, route):\r\n total_distance = 0\r\n cost_mult = 0.35\r\n cost = 0\r\n time = 0\r\n if route[0] in self.edges:\r\n for i in range(len(route) - 1):\r\n for edge in self.edges[route[i]]:\r\n if edge.destination == route[i + 1]:\r\n total_distance += edge.distance\r\n cost += cost_mult * edge.distance\r\n time += self.calc_time(edge.distance)\r\n outgoing = len(self.edges[edge.destination])\r\n # if this airport is not the last one since we don't need to calculate layover for last\r\n if i is not len(route) - 2:\r\n time += 2 - ((1 / 6) * (outgoing - 1))\r\n if cost_mult > 0:\r\n cost_mult -= 0.05\r\n break;\r\n else:\r\n if edge == self.edges[route[i]][-1]:\r\n return\r\n return total_distance, round(cost, 2), round(time, 2)",
"def _get_model_type() -> ModelType:\n type_str = util.get_optional_param(\"model-type\", ModelType.SVM.value)\n try:\n return ModelType[type_str]\n except KeyError:\n raise errors.BadRequestError(f\"Unkown model type: {type_str}\")",
"def route(self, route):\n return self._routes.get(route, None)",
"def check_violation(route, vehicle_type):\r\n if len(route) == 2: # [0, 0] route\r\n return True, 0, 0, 0\r\n else:\r\n accu_res = [0, 0, 0] # 0-leaving time, 1-accumulated distance, 2-volume\r\n if vehicle_type == 2:\r\n veh_cap = small_veh\r\n elif vehicle_type == 3:\r\n veh_cap = medium_veh\r\n elif vehicle_type == 5:\r\n veh_cap = large_veh\r\n else:\r\n veh_cap = large_veh\r\n print('Input wrong vehicle type!', vehicle_type)\r\n # small_veh = [1, 12, 10, 400000, 0.012, 200]\r\n fixed_cost = veh_cap[5]\r\n trans_cost = 0\r\n # wait_cost = 0\r\n if time_mat[0, route[1]] < num_timez[route[1]][0]:\r\n accu_res[0] = num_timez[route[1]][0] - time_mat[0, route[1]] # vehicle leaving depot time\r\n depart_time = accu_res[0] # departing from depot time\r\n else:\r\n depart_time = 0\r\n for i in range(len(route) - 1):\r\n last_cust = route[i]\r\n curr_cust = route[i+1]\r\n # checking leaving time\r\n arr_time = accu_res[0] + time_mat[last_cust, curr_cust]\r\n if arr_time < num_timez[curr_cust][0]:\r\n accu_res[0] = num_timez[curr_cust][0] + oprt_t\r\n wait_time = num_timez[curr_cust][0] - arr_time\r\n # wait_cost += (wait_time / 60. * wait_cost0)\r\n elif arr_time <= num_timez[curr_cust][1]:\r\n accu_res[0] = arr_time + oprt_t\r\n else:\r\n # print('Infeasible route!(Service Time Error.)')\r\n return False, 1000000, 0, 0\r\n\r\n # checking vehicle max distance\r\n trans_cost += (dist_mat[last_cust, curr_cust] * veh_cap[4])\r\n\r\n accu_res[1] += dist_mat[last_cust, curr_cust]\r\n\r\n if accu_res[0] - oprt_t - depart_time > veh_cap[3]:\r\n # print('Infeasible route!(Max Time Error.)')\r\n return False, 1000000, 0, 0\r\n\r\n # checking vehicle max volume\r\n accu_res[2] += (num_demd[curr_cust][0] * bskt_vol + num_demd[curr_cust][1] * trsf_vol + (num_demd[curr_cust][2]\r\n + num_demd[curr_cust][3]) * milk_vol + num_demd[curr_cust][4] * paper_bskt)\r\n\r\n if accu_res[2] > veh_cap[2]:\r\n # print('Infeasible route!(Max Weight/Volume Error.)', accu_res[2])\r\n return False, 1000000, 0, 0\r\n route_cost = fixed_cost + accu_res[1] * veh_cap[4]\r\n route_dist = accu_res[1]\r\n route_time = accu_res[0] - oprt_t - depart_time\r\n # print fixed_cost, trvl_cost, trvl_dist\r\n return True, route_cost, route_time, depart_time + 600",
"def get_length_of_route(network, route):\n result = 0\n for road_id in route:\n result += get_length(network,road_id)\n return result",
"def street_type():\r\n\r\n cursor.execute('SELECT * FROM street_types \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]",
"def get_time_of_route(network, route):\n result = 0\n for road_id in route:\n result += get_time(network,road_id)\n return result",
"def print_route_detail(self, solution, vehicle_type, if_write):\r\n\r\n result = [[\r\n '线路编号',\r\n '门店编码',\r\n '门店名称',\r\n '门店地址',\r\n '经度',\r\n '纬度',\r\n '车型',\r\n '额定体积/m3',\r\n '额定重量/t',\r\n '到达时间',\r\n '离开时间',\r\n '行驶距离/km',\r\n '累计行驶距离km',\r\n '行驶时间/min',\r\n '卸货时间/min',\r\n '累计工作时间/h',\r\n '鲜食篮总数',\r\n '周转箱个数',\r\n '新绿园鲜奶980ML(罐)',\r\n '新绿园脱脂牛奶980ML(罐)',\r\n '纸箱个数',\r\n '卸货体积',\r\n '卸货重量']]\r\n\r\n total_dist = 0\r\n for k, veh in enumerate(solution):\r\n if vehicle_type[k] == 2:\r\n trans0 = small_veh[4]\r\n veh_param = small_veh\r\n\r\n elif vehicle_type[k] == 3:\r\n trans0 = medium_veh[4]\r\n veh_param = medium_veh\r\n\r\n else:\r\n trans0 = large_veh[4]\r\n veh_param = large_veh\r\n\r\n\r\n departt = check_violation(veh, vehicle_type[k])[3]\r\n t = departt\r\n\r\n trvl_dist = 0\r\n veh_load_vol = 0\r\n wait_time = 0\r\n\r\n veh_load_vol += (num_demd[veh[1]][0] * bskt_vol + num_demd[veh[1]][1] * trsf_vol + (num_demd[veh[1]][2] +\r\n num_demd[veh[1]][3]) * milk_vol + num_demd[veh[1]][4] * paper_bskt)\r\n if departt / 60. < 24.:\r\n out_time = int(math.ceil(departt))\r\n else:\r\n out_time = int(math.ceil(departt - 24 * 60))\r\n\r\n # get the output format\r\n store = [0] * len(result[0])\r\n store[0] = k + 1 # 线路序号\r\n store[1] = num_id[0][0] # 门店编号\r\n store[2] = num_id[0][1] # 门店名称\r\n store[3] = num_id[0][2] # 门店地址\r\n store[4] = loc[0][0] # 经度\r\n store[5] = loc[0][1] # 纬度\r\n store[6] = vehicle_type_dict[vehicle_type[k]] # 车型\r\n store[7] = veh_param[2] # 额定体积\r\n store[8] = veh_param[1] # 额定重量\r\n store[9] = str(out_time // 60) + ':' + str(out_time % 60).zfill(2) # 到达时间\r\n store[10] = str(out_time // 60) + ':' + str(out_time % 60).zfill(2) # 离开时间\r\n store[11] = 0 # 行驶距离\r\n store[12] = 0 # 累计行驶距离\r\n store[13] = 0 # 行驶时间\r\n store[14] = 0 # 卸货时间\r\n store[15] = 0 # 累计工作时间\r\n store[16] = 0 # 鲜食篮件数\r\n store[17] = 0 # 周转箱个数\r\n store[18] = 0 # 新绿园鲜奶\r\n store[19] = 0 # 新绿园脱脂牛奶\r\n store[20] = 0 # 纸箱\r\n store[21] = 0 # 卸货体积\r\n store[22] = 0 # 卸货重量\r\n\r\n store0 = copy.deepcopy(store)\r\n result.append(store0)\r\n\r\n # t = departt + time_mat[0, veh[1]] + oprt_t # t is the leaving time\r\n for i in range(1, len(veh)-1): # can not wait at the first 2 points\r\n store[1] = num_id[veh[i]][0]\r\n store[2] = num_id[veh[i]][1]\r\n store[3] = num_id[veh[i]][2]\r\n store[4] = loc[veh[i]][0]\r\n store[5] = loc[veh[i]][1]\r\n arr_time = t + time_mat[veh[i-1], veh[i]]\r\n if arr_time / 60. < 24.:\r\n in_time = int(math.ceil(arr_time))\r\n else:\r\n in_time = int(math.ceil(arr_time - 24 * 60))\r\n\r\n trvl_dist += dist_mat[veh[i-1], veh[i]]\r\n veh_load_vol += (num_demd[veh[i]][0] * bskt_vol + num_demd[veh[i]][1] * trsf_vol + (num_demd[veh[i]][2] +\r\n num_demd[veh[i]][3]) * milk_vol + num_demd[veh[i]][4] * paper_bskt)\r\n wait_t = num_timez[veh[i]][0] - (t + time_mat[veh[i-1], veh[i]])\r\n if wait_t > 0 + 1e-5:\r\n # t is the leaving time\r\n wait_time += wait_t\r\n t = num_timez[veh[i]][0] + oprt_t\r\n else:\r\n t += (time_mat[veh[i - 1], veh[i]] + oprt_t)\r\n if t < 24. 
* 60:\r\n out_time = int(math.ceil(t))\r\n else:\r\n out_time = int(math.ceil(t - 24 * 60))\r\n\r\n store[9] = str(in_time // 60) + ':' + str(in_time % 60).zfill(2) # 到达时间\r\n store[10] = str(out_time // 60) + ':' + str(out_time % 60).zfill(2) # 离开时间\r\n store[11] = round(dist_mat[veh[i-1], veh[i]] / 1000., 2) # 行驶距离\r\n store[12] = round(trvl_dist / 1000., 2) # 累计行驶距离\r\n store[13] = round(time_mat[veh[i-1], veh[i]], 1) # 行驶时间\r\n store[14] = oprt_t\r\n store[15] = round((t - departt) / 60., 2) # 累计工作时间\r\n store[16] = num_demd[veh[i]][0] # 鲜食篮件数\r\n store[17] = num_demd[veh[i]][1] # 周转箱个数\r\n store[18] = num_demd[veh[i]][2] # 新绿园鲜奶\r\n store[19] = num_demd[veh[i]][3] # 新绿园脱脂牛奶\r\n store[20] = num_demd[veh[i]][4] # 纸箱\r\n store[21] = (num_demd[veh[i]][0] * bskt_vol + num_demd[veh[i]][1] * trsf_vol + (num_demd[veh[i]][2] +\r\n num_demd[veh[i]][3]) * milk_vol + num_demd[veh[i]][4] * paper_bskt) # 卸货体积\r\n store[22] = 0 # 卸货重量\r\n\r\n store0 = copy.deepcopy(store)\r\n result.append(store0)\r\n # print(result[-1])\r\n\r\n store[1] = num_id[0][0] # 门店编号\r\n store[2] = num_id[0][1] # 门店名称\r\n store[3] = num_id[0][2] # 门店地址\r\n store[4] = loc[0][0] # 经度\r\n store[5] = loc[0][1] # 纬度\r\n arr_time = t + time_mat[veh[-2], 0]\r\n if arr_time / 60. < 24.:\r\n in_time = int(math.ceil(arr_time))\r\n else:\r\n in_time = int(math.ceil(arr_time - 24 * 60))\r\n store[9] = str(in_time // 60) + ':' + str(in_time % 60).zfill(2) # 到达时间\r\n store[10] = str(in_time // 60) + ':' + str(in_time % 60).zfill(2) # 离开时间\r\n store[11] = round(dist_mat[veh[-2], 0] / 1000., 2) # 行驶距离\r\n store[12] = round((trvl_dist + dist_mat[veh[-2], 0]) / 1000., 2) # 累计行驶距离\r\n store[13] = round(time_mat[veh[-2], 0], 1) # 行驶时间\r\n store[14] = 0 # 卸货时间\r\n store[15] = round((t - departt + time_mat[veh[-2], 0]) / 60., 2) # 累计工作时间\r\n store[16] = 0 # 鲜食篮件数\r\n store[17] = 0 # 周转箱个数\r\n store[18] = 0 # 新绿园鲜奶\r\n store[19] = 0 # 新绿园脱脂牛奶\r\n store[20] = 0 # 纸箱\r\n store[21] = 0 # 卸货体积\r\n store[22] = 0 # 卸货重量\r\n\r\n store0 = copy.deepcopy(store)\r\n result.append(store0)\r\n # print(result[-1])\r\n\r\n if if_write:\r\n # run_time = time.strftime(\"%m%d_%H%M\", time.localtime())\r\n with open(r'C:\\Bee\\0Huaat\\Starbucks\\output\\Route_Details_%s_%s.csv' % (veh_spd_kmh, run_time), 'w', newline='') as fw:\r\n writer = csv.writer(fw)\r\n for v in result:\r\n # print(v)\r\n writer.writerow(v)",
"def get_valid_emission_vehicle_type(self, vehicle_type):\n if vehicle_type:\n try:\n vehicle_type = str(vehicle_type).lower()\n if vehicle_type in s.VALID_VEHICLE_TYPES:\n return vehicle_type\n except:\n pass\n \n raise InvalidUsage('This vehicleType is not valid in Snowdonia. Try ' + ', '.join(str(vt) for vt in s.VALID_VEHICLE_TYPES) + '.')",
"def getNodeRouteVia(self, route_type, node_id=None, state=None):\n node_routes = self.getNodeRoutes(node_id, state)\n for node_route in node_routes.keys():\n if route_type in node_route:\n return node_routes[node_route][\"via\"]\n else:\n return None",
"def RouteDistinguisherType(self):\n return self._get_attribute('routeDistinguisherType')",
"def get_variant_type(variant):\n _validate_str(variant)\n v = variant.split(\", \")[0] # test first token of multi-mutant\n if re_protein.match(v) is not None:\n return \"protein\"\n elif re_coding.match(v) is not None:\n return \"coding\"\n elif re_noncoding.match(v) is not None:\n return \"noncoding\"\n else:\n return None",
"def street_type():\r\n return _random.choice(\r\n [\r\n \"Abbey\", \"Acres\", \"Allée\", \"Alley\", \"Autoroute\", \"Avenue\",\r\n \"Bay\", \"Beach\", \"Bend\", \"Boulevard\", \"By-pass\", \"Byway\",\r\n \"Campus\", \"Cape\", \"Carré\", \"Carrefour\", \"Centre\", \"Cercle\",\r\n \"Chase\", \"Chemin\", \"Circle\", \"Circuit\", \"Close\", \"Common\",\r\n \"Concession\", \"Corners\", \"Côte\", \"Cour\", \"Cours\", \"Court\",\r\n \"Cove\", \"Crescent\", \"Croissant\", \"Crossing\", \"Cul-de-sac\"\r\n \"Dale\", \"Dell\", \"Diversion\", \"Downs\", \"Drive\", \"Échangeur\",\r\n \"End\", \"Esplanade\", \"Estates\", \"Expressway\", \"Extension\",\r\n \"Farm\", \"Field\", \"Forest\", \"Freeway\", \"Front\", \"Gardens\",\r\n \"Gate\", \"Glade\", \"Glen\", \"Green\", \"Grounds\", \"Grove\",\r\n \"Harbour\", \"Heath\", \"Heights\", \"Highlands\", \"Highway\",\r\n \"Hill\", \"Hollow\", \"Île\", \"Impasse\", \"Inlet\", \"Island\",\r\n \"Key\", \"Knoll\", \"Landing\", \"Lane\", \"Limits\", \"Line\",\r\n \"Link\", \"Lookout\", \"Loop\", \"Mall\", \"Manor\", \"Maze\",\r\n \"Meadow\", \"Mews\", \"Montée\", \"Moor\", \"Mount\", \"Mountain\",\r\n \"Orchard\", \"Parade\", \"Parc\", \"Park\", \"Parkway\",\r\n \"Passage\", \"Path\", \"Pathway\", \"Pines\", \"Place\",\r\n \"Plateau\", \"Plaza\", \"Point\", \"Pointe\", \"Port\",\r\n \"Private\", \"Promenade\", \"Quai\", \"Quay\", \"Ramp\",\r\n \"Rang\", \"Range\", \"Ridge\", \"Rise\", \"Road\",\r\n \"Rond-point\" \"Route\", \"Row\", \"Rue\", \"Ruelle\",\r\n \"Run\", \"Sentier\", \"Square\", \"Street\", \"Subdivision\",\r\n \"Terrace\", \"Terrasse\", \"Thicket\", \"Towers\",\r\n \"Townline\", \"Trail\", \"Turnabout\", \"Vale\", \"Via\",\r\n \"View\", \"Village\", \"Villas\", \"Vista\", \"Voie\", \"Walk\",\r\n \"Way\", \"Wharf\", \"Wood\", \"Wynd\"\r\n ]\r\n )",
"def generate_route(vehicle, turn_flag=0, hop_resolution=1.0):\n world = vehicle.get_world()\n map = world.get_map()\n\n # get initial location of ego_vehicle\n start_waypoint = map.get_waypoint(vehicle.get_location())\n\n # Using generate_target_waypoint to generate target waypoint\n # ref on scenario_helper.py module\n turn_flag = 0 # turn_flag by current scenario\n end_waypoint = generate_target_waypoint(start_waypoint, turn_flag)\n\n # generate a dense route according to current scenario\n # Setting up global router\n waypoints = [start_waypoint.transform.location, end_waypoint.transform.location]\n # from srunner.challenge.utils.route_manipulation import interpolate_trajectory\n gps_route, trajectory = interpolate_trajectory(world, waypoints, hop_resolution)\n return gps_route, trajectory",
"def validate_routes(route):\n if ROUTE_PATTERN.match(route):\n if route[0] == route[1]:\n raise argparse.ArgumentTypeError('Invalid route format, cannot have same city: %s' % route)\n return route\n else:\n raise argparse.ArgumentTypeError('Invalid route format for: %s. Should be {A-Z}{A-Z}{0-9}+' % route)",
"def get_line_type(line):\n\n line_type = None\n if line.find('Train') != -1:\n line_type = 'train'\n elif line.find('Test') != -1:\n line_type = 'test'\n return line_type"
]
| [
"0.6574357",
"0.623225",
"0.623225",
"0.6055304",
"0.59921247",
"0.59921247",
"0.59921247",
"0.57707924",
"0.57326573",
"0.5594481",
"0.5594481",
"0.5594481",
"0.55097127",
"0.53885955",
"0.5273151",
"0.52576345",
"0.52177763",
"0.5201615",
"0.51950127",
"0.5176466",
"0.51732844",
"0.5168296",
"0.51589656",
"0.51403934",
"0.51243603",
"0.511697",
"0.50570655",
"0.50551575",
"0.50528026",
"0.5034114"
]
| 0.77867645 | 0 |